mxlpy 0.18.0__py3-none-any.whl → 0.20.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
Files changed (51)
  1. mxlpy/__init__.py +13 -9
  2. mxlpy/compare.py +240 -0
  3. mxlpy/experimental/diff.py +16 -4
  4. mxlpy/fit.py +6 -11
  5. mxlpy/fns.py +37 -42
  6. mxlpy/identify.py +10 -3
  7. mxlpy/integrators/__init__.py +4 -3
  8. mxlpy/integrators/int_assimulo.py +16 -9
  9. mxlpy/integrators/int_scipy.py +13 -9
  10. mxlpy/label_map.py +7 -3
  11. mxlpy/linear_label_map.py +4 -2
  12. mxlpy/mc.py +5 -14
  13. mxlpy/mca.py +4 -4
  14. mxlpy/meta/__init__.py +6 -4
  15. mxlpy/meta/codegen_latex.py +180 -87
  16. mxlpy/meta/codegen_modebase.py +3 -1
  17. mxlpy/meta/codegen_py.py +11 -3
  18. mxlpy/meta/source_tools.py +9 -5
  19. mxlpy/model.py +187 -100
  20. mxlpy/nn/__init__.py +24 -5
  21. mxlpy/nn/_keras.py +92 -0
  22. mxlpy/nn/_torch.py +25 -18
  23. mxlpy/npe/__init__.py +21 -16
  24. mxlpy/npe/_keras.py +326 -0
  25. mxlpy/npe/_torch.py +56 -60
  26. mxlpy/parallel.py +5 -2
  27. mxlpy/parameterise.py +11 -3
  28. mxlpy/plot.py +205 -52
  29. mxlpy/report.py +33 -8
  30. mxlpy/sbml/__init__.py +3 -3
  31. mxlpy/sbml/_data.py +7 -6
  32. mxlpy/sbml/_export.py +8 -1
  33. mxlpy/sbml/_mathml.py +8 -7
  34. mxlpy/sbml/_name_conversion.py +5 -1
  35. mxlpy/scan.py +14 -19
  36. mxlpy/simulator.py +34 -31
  37. mxlpy/surrogates/__init__.py +25 -17
  38. mxlpy/surrogates/_keras.py +139 -0
  39. mxlpy/surrogates/_poly.py +25 -10
  40. mxlpy/surrogates/_qss.py +34 -0
  41. mxlpy/surrogates/_torch.py +50 -32
  42. mxlpy/symbolic/__init__.py +5 -3
  43. mxlpy/symbolic/strikepy.py +5 -2
  44. mxlpy/symbolic/symbolic_model.py +14 -5
  45. mxlpy/types.py +61 -120
  46. {mxlpy-0.18.0.dist-info → mxlpy-0.20.0.dist-info}/METADATA +25 -24
  47. mxlpy-0.20.0.dist-info/RECORD +55 -0
  48. mxlpy/nn/_tensorflow.py +0 -0
  49. mxlpy-0.18.0.dist-info/RECORD +0 -51
  50. {mxlpy-0.18.0.dist-info → mxlpy-0.20.0.dist-info}/WHEEL +0 -0
  51. {mxlpy-0.18.0.dist-info → mxlpy-0.20.0.dist-info}/licenses/LICENSE +0 -0
mxlpy/npe/_torch.py CHANGED
@@ -12,7 +12,6 @@ Functions:
 from __future__ import annotations
 
 from dataclasses import dataclass
-from pathlib import Path
 from typing import TYPE_CHECKING, Self, cast
 
 import numpy as np
@@ -22,7 +21,6 @@ from torch import nn
 from torch.optim.adam import Adam
 
 from mxlpy.nn._torch import LSTM, MLP, DefaultDevice, train
-from mxlpy.parallel import Cache
 from mxlpy.types import AbstractEstimator
 
 if TYPE_CHECKING:
@@ -30,19 +28,17 @@ if TYPE_CHECKING:
 
     from torch.optim.optimizer import ParamsT
 
-DefaultCache = Cache(Path(".cache"))
 
 type LossFn = Callable[[torch.Tensor, torch.Tensor], torch.Tensor]
 
 __all__ = [
-    "DefaultCache",
     "LossFn",
-    "TorchSteadyState",
-    "TorchSteadyStateTrainer",
-    "TorchTimeCourse",
-    "TorchTimeCourseTrainer",
-    "train_torch_steady_state",
-    "train_torch_time_course",
+    "SteadyState",
+    "SteadyStateTrainer",
+    "TimeCourse",
+    "TimeCourseTrainer",
+    "train_steady_state",
+    "train_time_course",
 ]
 
 
@@ -61,7 +57,7 @@ def _mean_abs(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
 
 
 @dataclass(kw_only=True)
-class TorchSteadyState(AbstractEstimator):
+class SteadyState(AbstractEstimator):
     """Estimator for steady state data using PyTorch models."""
 
     model: torch.nn.Module
@@ -74,7 +70,7 @@ class TorchSteadyState(AbstractEstimator):
 
 
 @dataclass(kw_only=True)
-class TorchTimeCourse(AbstractEstimator):
+class TimeCourse(AbstractEstimator):
     """Estimator for time course data using PyTorch models."""
 
     model: torch.nn.Module
@@ -101,13 +97,13 @@ class TorchTimeCourse(AbstractEstimator):
 
 
 @dataclass
-class TorchSteadyStateTrainer:
+class SteadyStateTrainer:
     """Trainer for steady state data using PyTorch models."""
 
     features: pd.DataFrame
     targets: pd.DataFrame
-    approximator: nn.Module
-    optimimzer: Adam
+    model: nn.Module
+    optimizer: Adam
     device: torch.device
     losses: list[pd.Series]
     loss_fn: LossFn
@@ -116,8 +112,8 @@ class TorchSteadyStateTrainer:
         self,
         features: pd.DataFrame,
         targets: pd.DataFrame,
-        approximator: nn.Module | None = None,
-        optimimzer_cls: Callable[[ParamsT], Adam] = Adam,
+        model: nn.Module | None = None,
+        optimizer_cls: Callable[[ParamsT], Adam] = Adam,
         device: torch.device = DefaultDevice,
         loss_fn: LossFn = _mean_abs,
     ) -> None:
@@ -126,8 +122,8 @@ class TorchSteadyStateTrainer:
         Args:
             features: DataFrame containing the input features for training
             targets: DataFrame containing the target values for training
-            approximator: Predefined neural network model (None to use default MLP)
-            optimimzer_cls: Optimizer class to use for training (default: Adam)
+            model: Predefined neural network model (None to use default MLP)
+            optimizer_cls: Optimizer class to use for training (default: Adam)
             device: Device to run the training on (default: DefaultDevice)
             loss_fn: Loss function
 
@@ -135,15 +131,15 @@ class TorchSteadyStateTrainer:
         self.features = features
         self.targets = targets
 
-        if approximator is None:
+        if model is None:
             n_hidden = max(2 * len(features.columns) * len(targets.columns), 10)
             n_outputs = len(targets.columns)
-            approximator = MLP(
+            model = MLP(
                 n_inputs=len(features.columns),
                 neurons_per_layer=[n_hidden, n_hidden, n_outputs],
             )
-        self.approximator = approximator.to(device)
-        self.optimizer = optimimzer_cls(approximator.parameters())
+        self.model = model.to(device)
+        self.optimizer = optimizer_cls(model.parameters())
         self.device = device
         self.loss_fn = loss_fn
         self.losses = []
@@ -161,7 +157,7 @@ class TorchSteadyStateTrainer:
 
         """
        losses = train(
-            aprox=self.approximator,
+            model=self.model,
            features=self.features.to_numpy(),
            targets=self.targets.to_numpy(),
            epochs=epochs,
@@ -180,22 +176,22 @@ class TorchSteadyStateTrainer:
         """Get the loss history of the training process."""
         return pd.concat(self.losses)
 
-    def get_estimator(self) -> TorchSteadyState:
+    def get_estimator(self) -> SteadyState:
         """Get the trained estimator."""
-        return TorchSteadyState(
-            model=self.approximator,
+        return SteadyState(
+            model=self.model,
             parameter_names=list(self.targets.columns),
         )
 
 
 @dataclass
-class TorchTimeCourseTrainer:
+class TimeCourseTrainer:
     """Trainer for time course data using PyTorch models."""
 
     features: pd.DataFrame
     targets: pd.DataFrame
-    approximator: nn.Module
-    optimimzer: Adam
+    model: nn.Module
+    optimizer: Adam
     device: torch.device
     losses: list[pd.Series]
     loss_fn: LossFn
@@ -204,8 +200,8 @@ class TorchTimeCourseTrainer:
         self,
         features: pd.DataFrame,
         targets: pd.DataFrame,
-        approximator: nn.Module | None = None,
-        optimimzer_cls: Callable[[ParamsT], Adam] = Adam,
+        model: nn.Module | None = None,
+        optimizer_cls: Callable[[ParamsT], Adam] = Adam,
         device: torch.device = DefaultDevice,
         loss_fn: LossFn = _mean_abs,
     ) -> None:
@@ -214,8 +210,8 @@ class TorchTimeCourseTrainer:
         Args:
             features: DataFrame containing the input features for training
             targets: DataFrame containing the target values for training
-            approximator: Predefined neural network model (None to use default LSTM)
-            optimimzer_cls: Optimizer class to use for training (default: Adam)
+            model: Predefined neural network model (None to use default LSTM)
+            optimizer_cls: Optimizer class to use for training (default: Adam)
             device: Device to run the training on (default: DefaultDevice)
             loss_fn: Loss function
 
@@ -223,14 +219,14 @@ class TorchTimeCourseTrainer:
         self.features = features
         self.targets = targets
 
-        if approximator is None:
-            approximator = LSTM(
+        if model is None:
+            model = LSTM(
                 n_inputs=len(features.columns),
                 n_outputs=len(targets.columns),
                 n_hidden=1,
             ).to(device)
-        self.approximator = approximator.to(device)
-        self.optimizer = optimimzer_cls(approximator.parameters())
+        self.model = model.to(device)
+        self.optimizer = optimizer_cls(model.parameters())
         self.device = device
         self.loss_fn = loss_fn
         self.losses = []
@@ -248,7 +244,7 @@ class TorchTimeCourseTrainer:
 
         """
         losses = train(
-            aprox=self.approximator,
+            model=self.model,
             features=np.swapaxes(
                 self.features.to_numpy().reshape(
                     (len(self.targets), -1, len(self.features.columns))
@@ -273,23 +269,23 @@ class TorchTimeCourseTrainer:
         """Get the loss history of the training process."""
         return pd.concat(self.losses)
 
-    def get_estimator(self) -> TorchTimeCourse:
+    def get_estimator(self) -> TimeCourse:
         """Get the trained estimator."""
-        return TorchTimeCourse(
-            model=self.approximator,
+        return TimeCourse(
+            model=self.model,
             parameter_names=list(self.targets.columns),
         )
 
 
-def train_torch_steady_state(
+def train_steady_state(
     features: pd.DataFrame,
     targets: pd.DataFrame,
     epochs: int,
     batch_size: int | None = None,
-    approximator: nn.Module | None = None,
-    optimimzer_cls: Callable[[ParamsT], Adam] = Adam,
+    model: nn.Module | None = None,
+    optimizer_cls: Callable[[ParamsT], Adam] = Adam,
     device: torch.device = DefaultDevice,
-) -> tuple[TorchSteadyState, pd.Series]:
+) -> tuple[SteadyState, pd.Series]:
     """Train a PyTorch steady state estimator.
 
     This function trains a neural network model to estimate steady state data
@@ -304,34 +300,34 @@ def train_torch_steady_state(
         targets: DataFrame containing the target values for training
         epochs: Number of training epochs
         batch_size: Size of mini-batches for training (None for full-batch)
-        approximator: Predefined neural network model (None to use default MLP)
-        optimimzer_cls: Optimizer class to use for training (default: Adam)
+        model: Predefined neural network model (None to use default MLP)
+        optimizer_cls: Optimizer class to use for training (default: Adam)
         device: Device to run the training on (default: DefaultDevice)
 
     Returns:
         tuple[TorchTimeSeriesEstimator, pd.Series]: Trained estimator and loss history
 
     """
-    trainer = TorchSteadyStateTrainer(
+    trainer = SteadyStateTrainer(
         features=features,
         targets=targets,
-        approximator=approximator,
-        optimimzer_cls=optimimzer_cls,
+        model=model,
+        optimizer_cls=optimizer_cls,
         device=device,
     ).train(epochs=epochs, batch_size=batch_size)
 
     return trainer.get_estimator(), trainer.get_loss()
 
 
-def train_torch_time_course(
+def train_time_course(
     features: pd.DataFrame,
     targets: pd.DataFrame,
     epochs: int,
     batch_size: int | None = None,
-    approximator: nn.Module | None = None,
-    optimimzer_cls: Callable[[ParamsT], Adam] = Adam,
+    model: nn.Module | None = None,
+    optimizer_cls: Callable[[ParamsT], Adam] = Adam,
     device: torch.device = DefaultDevice,
-) -> tuple[TorchTimeCourse, pd.Series]:
+) -> tuple[TimeCourse, pd.Series]:
     """Train a PyTorch time course estimator.
 
     This function trains a neural network model to estimate time course data
@@ -346,19 +342,19 @@ def train_torch_time_course(
         targets: DataFrame containing the target values for training
         epochs: Number of training epochs
         batch_size: Size of mini-batches for training (None for full-batch)
-        approximator: Predefined neural network model (None to use default LSTM)
-        optimimzer_cls: Optimizer class to use for training (default: Adam)
+        model: Predefined neural network model (None to use default LSTM)
+        optimizer_cls: Optimizer class to use for training (default: Adam)
         device: Device to run the training on (default: DefaultDevice)
 
     Returns:
         tuple[TorchTimeSeriesEstimator, pd.Series]: Trained estimator and loss history
 
     """
-    trainer = TorchTimeCourseTrainer(
+    trainer = TimeCourseTrainer(
         features=features,
         targets=targets,
-        approximator=approximator,
-        optimimzer_cls=optimimzer_cls,
+        model=model,
+        optimizer_cls=optimizer_cls,
         device=device,
     ).train(epochs=epochs, batch_size=batch_size)
 
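Taken together, these hunks are a breaking rename of the public NPE API: the Torch prefix is dropped from the estimator and trainer classes (TorchSteadyState → SteadyState, and so on), the train_torch_* helpers lose their torch_ prefix, the approximator argument becomes model, the misspelled optimimzer_cls keyword becomes optimizer_cls, and the module-level DefaultCache constant is removed. A minimal before/after sketch of calling code, assuming the renamed symbols stay importable from mxlpy.npe._torch and using placeholder DataFrames:

    import pandas as pd
    from torch.optim.adam import Adam

    from mxlpy.npe import _torch as npe

    features = pd.DataFrame({"p1": [0.1, 0.2], "p2": [0.3, 0.4]})  # placeholder inputs
    targets = pd.DataFrame({"x": [1.0, 2.0]})                      # placeholder targets

    # 0.18.0:
    # estimator, losses = npe.train_torch_steady_state(
    #     features=features, targets=targets, epochs=100,
    #     approximator=None, optimimzer_cls=Adam,
    # )

    # 0.20.0:
    estimator, losses = npe.train_steady_state(
        features=features,
        targets=targets,
        epochs=100,
        model=None,          # was `approximator`; None still selects the default MLP
        optimizer_cls=Adam,  # was the misspelled `optimimzer_cls`
    )

train_time_course follows the same pattern. One leftover: the Returns sections of both docstrings still mention TorchTimeSeriesEstimator, a stale name the rename did not touch.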
mxlpy/parallel.py CHANGED
@@ -24,11 +24,14 @@ from typing import TYPE_CHECKING, Any, cast
 import pebble
 from tqdm import tqdm
 
-__all__ = ["Cache", "parallelise"]
-
 if TYPE_CHECKING:
     from collections.abc import Callable, Collection, Hashable
 
+__all__ = [
+    "Cache",
+    "parallelise",
+]
+
 
 def _pickle_name(k: Hashable) -> str:
     return f"{k}.p"
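For parallel.py itself this is housekeeping: __all__ moves below the TYPE_CHECKING block and gains one-name-per-line formatting. The behavioral change sits upstream, where npe/_torch.py dropped its import of Cache along with the DefaultCache constant. A sketch of how calling code can recreate the old default, assuming Cache still accepts a path as its first argument as the removed line suggests:

    from pathlib import Path

    from mxlpy.parallel import Cache

    # 0.18.0 shipped `DefaultCache = Cache(Path(".cache"))` in mxlpy.npe._torch;
    # with the constant removed in 0.20.0, construct the cache where you need it.
    cache = Cache(Path(".cache"))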
mxlpy/parameterise.py CHANGED
@@ -1,11 +1,19 @@
 """Module to parameterise models."""
 
-from pathlib import Path
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
 
-import pandas as pd
 from parameteriser.brenda.v0 import Brenda
 
-__all__ = ["get_km_and_kcat_from_brenda"]
+if TYPE_CHECKING:
+    from pathlib import Path
+
+    import pandas as pd
+
+__all__ = [
+    "get_km_and_kcat_from_brenda",
+]
 
 
 def get_km_and_kcat_from_brenda(
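The parameterise.py change is the standard deferred-import pattern: with from __future__ import annotations, annotations are stored as strings rather than evaluated, so pathlib and pandas can move into a TYPE_CHECKING block and stop costing anything at import time while the annotations of get_km_and_kcat_from_brenda still type-check. A self-contained sketch of the pattern (load_table is a hypothetical function, not part of mxlpy):

    from __future__ import annotations  # annotations become lazily-evaluated strings

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:
        # Visible to static type checkers only; never imported at runtime.
        from pathlib import Path

        import pandas as pd


    def load_table(path: Path) -> pd.DataFrame:  # annotations never evaluated at runtime
        import pandas  # real import deferred until first call

        return pandas.read_csv(path)

The trade-off: anything used at runtime (here, Brenda) must stay a real import; only names that appear purely in annotations can be deferred this way.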