mxlpy 0.18.0__py3-none-any.whl → 0.20.0__py3-none-any.whl
This diff compares publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
- mxlpy/__init__.py +13 -9
- mxlpy/compare.py +240 -0
- mxlpy/experimental/diff.py +16 -4
- mxlpy/fit.py +6 -11
- mxlpy/fns.py +37 -42
- mxlpy/identify.py +10 -3
- mxlpy/integrators/__init__.py +4 -3
- mxlpy/integrators/int_assimulo.py +16 -9
- mxlpy/integrators/int_scipy.py +13 -9
- mxlpy/label_map.py +7 -3
- mxlpy/linear_label_map.py +4 -2
- mxlpy/mc.py +5 -14
- mxlpy/mca.py +4 -4
- mxlpy/meta/__init__.py +6 -4
- mxlpy/meta/codegen_latex.py +180 -87
- mxlpy/meta/codegen_modebase.py +3 -1
- mxlpy/meta/codegen_py.py +11 -3
- mxlpy/meta/source_tools.py +9 -5
- mxlpy/model.py +187 -100
- mxlpy/nn/__init__.py +24 -5
- mxlpy/nn/_keras.py +92 -0
- mxlpy/nn/_torch.py +25 -18
- mxlpy/npe/__init__.py +21 -16
- mxlpy/npe/_keras.py +326 -0
- mxlpy/npe/_torch.py +56 -60
- mxlpy/parallel.py +5 -2
- mxlpy/parameterise.py +11 -3
- mxlpy/plot.py +205 -52
- mxlpy/report.py +33 -8
- mxlpy/sbml/__init__.py +3 -3
- mxlpy/sbml/_data.py +7 -6
- mxlpy/sbml/_export.py +8 -1
- mxlpy/sbml/_mathml.py +8 -7
- mxlpy/sbml/_name_conversion.py +5 -1
- mxlpy/scan.py +14 -19
- mxlpy/simulator.py +34 -31
- mxlpy/surrogates/__init__.py +25 -17
- mxlpy/surrogates/_keras.py +139 -0
- mxlpy/surrogates/_poly.py +25 -10
- mxlpy/surrogates/_qss.py +34 -0
- mxlpy/surrogates/_torch.py +50 -32
- mxlpy/symbolic/__init__.py +5 -3
- mxlpy/symbolic/strikepy.py +5 -2
- mxlpy/symbolic/symbolic_model.py +14 -5
- mxlpy/types.py +61 -120
- {mxlpy-0.18.0.dist-info → mxlpy-0.20.0.dist-info}/METADATA +25 -24
- mxlpy-0.20.0.dist-info/RECORD +55 -0
- mxlpy/nn/_tensorflow.py +0 -0
- mxlpy-0.18.0.dist-info/RECORD +0 -51
- {mxlpy-0.18.0.dist-info → mxlpy-0.20.0.dist-info}/WHEEL +0 -0
- {mxlpy-0.18.0.dist-info → mxlpy-0.20.0.dist-info}/licenses/LICENSE +0 -0
mxlpy/nn/_torch.py
CHANGED
@@ -24,13 +24,19 @@ if TYPE_CHECKING:
 
     from mxlpy.types import Array
 
-__all__ = [
+__all__ = [
+    "DefaultDevice",
+    "LSTM",
+    "LossFn",
+    "MLP",
+    "train",
+]
 
 DefaultDevice = torch.device("cpu")
 
 
 def train(
-
+    model: nn.Module,
     features: Array,
     targets: Array,
     epochs: int,
@@ -42,7 +48,7 @@ def train(
     """Train the neural network using mini-batch gradient descent.
 
     Args:
-
+        model: Neural network model to train.
         features: Input features as a tensor.
         targets: Target values as a tensor.
         epochs: Number of training epochs.
@@ -71,7 +77,7 @@ def train(
         epoch_loss = 0
         for xb, yb in data_loader:
            optimizer.zero_grad()
-            loss = loss_fn(
+            loss = loss_fn(model(xb), yb)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item() * xb.size(0)
@@ -125,21 +131,17 @@ class MLP(nn.Module):
         levels = []
         previous_neurons = n_inputs
 
-        for
-
-
-
-            if self.output_activation:
-                levels.append(self.output_activation)
-
-            else:
-                levels.append(nn.Linear(previous_neurons, neurons))
-
-                if self.activation:
-                    levels.append(self.activation)
-
+        for neurons in self.layers[:-1]:
+            levels.append(nn.Linear(previous_neurons, neurons))
+            if self.activation:
+                levels.append(self.activation)
             previous_neurons = neurons
 
+        # Output layer
+        levels.append(nn.Linear(previous_neurons, self.layers[-1]))
+        if self.output_activation:
+            levels.append(self.output_activation)
+
         self.net = nn.Sequential(*levels)
 
         for m in self.net.modules():
@@ -163,7 +165,12 @@ class MLP(nn.Module):
 class LSTM(nn.Module):
     """Default LSTM neural network model for time-series approximation."""
 
-    def __init__(
+    def __init__(
+        self,
+        n_inputs: int,
+        n_outputs: int,
+        n_hidden: int,
+    ) -> None:
         """Initializes the neural network model.
 
         Args:
mxlpy/npe/__init__.py
CHANGED
@@ -16,23 +16,28 @@ Functions:
 
 from __future__ import annotations
 
-import
-
-
-
-
-
-
-
-
-
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    import contextlib
+
+    with contextlib.suppress(ImportError):
+        from . import _keras as keras
+        from . import _torch as torch
+else:
+    from lazy_import import lazy_module
+
+    keras = lazy_module(
+        "mxlpy.npe._keras",
+        error_strings={"module": "keras", "install_name": "mxlpy[tf]"},
     )
+    torch = lazy_module(
+        "mxlpy.npe._torch",
+        error_strings={"module": "torch", "install_name": "mxlpy[torch]"},
+    )
+
 
 __all__ = [
-    "
-    "
-    "TorchTimeCourse",
-    "TorchTimeCourseTrainer",
-    "train_torch_steady_state",
-    "train_torch_time_course",
+    "keras",
+    "torch",
 ]
mxlpy/npe/_keras.py
ADDED
@@ -0,0 +1,326 @@
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Self, cast
+
+import keras
+import numpy as np
+import pandas as pd
+
+from mxlpy.nn._keras import LSTM, MLP, train
+from mxlpy.types import AbstractEstimator
+
+__all__ = [
+    "DefaultLoss",
+    "DefaultOptimizer",
+    "LossFn",
+    "Optimizer",
+    "SteadyState",
+    "SteadyStateTrainer",
+    "TimeCourse",
+    "TimeCourseTrainer",
+    "train_steady_state",
+    "train_time_course",
+]
+
+type Optimizer = keras.optimizers.Optimizer | str
+type LossFn = keras.losses.Loss | str
+
+DefaultOptimizer = keras.optimizers.Adam()
+DefaultLoss = keras.losses.MeanAbsoluteError()
+
+
+@dataclass(kw_only=True)
+class SteadyState(AbstractEstimator):
+    """Estimator for steady state data using Keras models."""
+
+    model: keras.Model
+
+    def predict(self, features: pd.Series | pd.DataFrame) -> pd.DataFrame:
+        """Predict the target values for the given features."""
+        return pd.DataFrame(
+            self.model.predict(features),
+            columns=self.parameter_names,
+            dtype=float,
+        )
+
+
+@dataclass(kw_only=True)
+class TimeCourse(AbstractEstimator):
+    """Estimator for time course data using Keras models."""
+
+    model: keras.Model
+
+    def predict(self, features: pd.Series | pd.DataFrame) -> pd.DataFrame:
+        """Predict the target values for the given features."""
+        idx = cast(pd.MultiIndex, features.index)
+        features_ = (
+            np.swapaxes(
+                features.to_numpy().reshape(
+                    (
+                        len(idx.levels[0]),
+                        len(idx.levels[1]),
+                        len(features.columns),
+                    )
+                ),
+                axis1=0,
+                axis2=1,
+            ),
+        )
+        return pd.DataFrame(
+            self.model.predict(features_),
+            columns=self.parameter_names,
+            dtype=float,
+        )
+
+
+@dataclass
+class SteadyStateTrainer:
+    """Trainer for steady state data using Keras models."""
+
+    features: pd.DataFrame
+    targets: pd.DataFrame
+    model: keras.Model
+    optimizer: Optimizer
+    losses: list[pd.Series]
+    loss_fn: LossFn
+
+    def __init__(
+        self,
+        features: pd.DataFrame,
+        targets: pd.DataFrame,
+        model: keras.Model | None = None,
+        optimizer: Optimizer = DefaultOptimizer,
+        loss: LossFn = DefaultLoss,
+    ) -> None:
+        """Initialize the trainer with features, targets, and model.
+
+        Args:
+            features: DataFrame containing the input features for training
+            targets: DataFrame containing the target values for training
+            model: Predefined neural network model (None to use default MLP)
+            optimizer: Optimizer class to use for training (default: Adam)
+            loss: Loss function
+
+        """
+        self.features = features
+        self.targets = targets
+
+        if model is None:
+            n_hidden = max(2 * len(features.columns) * len(targets.columns), 10)
+            n_outputs = len(targets.columns)
+            model = MLP(
+                n_inputs=len(features.columns),
+                neurons_per_layer=[n_hidden, n_hidden, n_outputs],
+            )
+        self.model = model
+        model.compile(optimizer=cast(str, optimizer), loss=loss)
+
+        self.loss_fn = loss
+        self.losses = []
+
+    def train(
+        self,
+        epochs: int,
+        batch_size: int | None = None,
+    ) -> Self:
+        """Train the model using the provided features and targets.
+
+        Args:
+            epochs: Number of training epochs
+            batch_size: Size of mini-batches for training (None for full-batch)
+
+        """
+        losses = train(
+            model=self.model,
+            features=self.features,
+            targets=self.targets,
+            epochs=epochs,
+            batch_size=batch_size,
+        )
+
+        if len(self.losses) > 0:
+            losses.index += self.losses[-1].index[-1]
+        self.losses.append(losses)
+        return self
+
+    def get_loss(self) -> pd.Series:
+        """Get the loss history of the training process."""
+        return pd.concat(self.losses)
+
+    def get_estimator(self) -> SteadyState:
+        """Get the trained estimator."""
+        return SteadyState(
+            model=self.model,
+            parameter_names=list(self.targets.columns),
+        )
+
+
+@dataclass
+class TimeCourseTrainer:
+    """Trainer for time course data using Keras models."""
+
+    features: pd.DataFrame
+    targets: pd.DataFrame
+    model: keras.Model
+    optimizer: Optimizer
+
+    losses: list[pd.Series]
+    loss_fn: LossFn
+
+    def __init__(
+        self,
+        features: pd.DataFrame,
+        targets: pd.DataFrame,
+        model: keras.Model | None = None,
+        optimizer: Optimizer = DefaultOptimizer,
+        loss: LossFn = DefaultLoss,
+    ) -> None:
+        """Initialize the trainer with features, targets, and model.
+
+        Args:
+            features: DataFrame containing the input features for training
+            targets: DataFrame containing the target values for training
+            model: Predefined neural network model (None to use default LSTM)
+            optimizer: Optimizer class to use for training
+            loss: Loss function
+
+        """
+        self.features = features
+        self.targets = targets
+
+        if model is None:
+            model = LSTM(
+                n_inputs=len(features.columns),
+                n_outputs=len(targets.columns),
+                n_hidden=1,
+            )
+        self.model = model
+        model.compile(optimizer=cast(str, optimizer), loss=loss)
+        self.losses = []
+
+    def train(
+        self,
+        epochs: int,
+        batch_size: int | None = None,
+    ) -> Self:
+        """Train the model using the provided features and targets.
+
+        Args:
+            epochs: Number of training epochs
+            batch_size: Size of mini-batches for training (None for full-batch)
+
+        """
+        losses = train(
+            model=self.model,
+            features=np.swapaxes(
+                self.features.to_numpy().reshape(
+                    (len(self.targets), -1, len(self.features.columns))
+                ),
+                axis1=0,
+                axis2=1,
+            ),
+            targets=self.targets,
+            epochs=epochs,
+            batch_size=batch_size,
+        )
+
+        if len(self.losses) > 0:
+            losses.index += self.losses[-1].index[-1]
+        self.losses.append(losses)
+        return self
+
+    def get_loss(self) -> pd.Series:
+        """Get the loss history of the training process."""
+        return pd.concat(self.losses)
+
+    def get_estimator(self) -> TimeCourse:
+        """Get the trained estimator."""
+        return TimeCourse(
+            model=self.model,
+            parameter_names=list(self.targets.columns),
+        )
+
+
+def train_steady_state(
+    features: pd.DataFrame,
+    targets: pd.DataFrame,
+    epochs: int,
+    batch_size: int | None = None,
+    model: keras.Model | None = None,
+    optimizer: Optimizer = DefaultOptimizer,
+    loss: LossFn = DefaultLoss,
+) -> tuple[SteadyState, pd.Series]:
+    """Train a Keras steady state estimator.
+
+    This function trains a neural network model to estimate steady state data
+    using the provided features and targets. It supports both full-batch and
+    mini-batch training.
+
+    Examples:
+        >>> train_keras_ss_estimator(features, targets, epochs=100)
+
+    Args:
+        features: DataFrame containing the input features for training
+        targets: DataFrame containing the target values for training
+        epochs: Number of training epochs
+        batch_size: Size of mini-batches for training (None for full-batch)
+        model: Predefined neural network model (None to use default MLP)
+        optimizer: Optimizer class to use for training (default: Adam)
+        loss: Loss function for the training
+
+    Returns:
+        tuple[KerasTimeSeriesEstimator, pd.Series]: Trained estimator and loss history
+
+    """
+    trainer = SteadyStateTrainer(
+        features=features,
+        targets=targets,
+        model=model,
+        optimizer=optimizer,
+        loss=loss,
+    ).train(epochs=epochs, batch_size=batch_size)
+
+    return trainer.get_estimator(), trainer.get_loss()
+
+
+def train_time_course(
+    features: pd.DataFrame,
+    targets: pd.DataFrame,
+    epochs: int,
+    batch_size: int | None = None,
+    model: keras.Model | None = None,
+    optimizer: Optimizer = DefaultOptimizer,
+    loss: LossFn = DefaultLoss,
+) -> tuple[TimeCourse, pd.Series]:
+    """Train a Keras time course estimator.
+
+    This function trains a neural network model to estimate time course data
+    using the provided features and targets. It supports both full-batch and
+    mini-batch training.
+
+    Examples:
+        >>> train_keras_time_course_estimator(features, targets, epochs=100)
+
+    Args:
+        features: DataFrame containing the input features for training
+        targets: DataFrame containing the target values for training
+        epochs: Number of training epochs
+        batch_size: Size of mini-batches for training (None for full-batch)
+        model: Predefined neural network model (None to use default LSTM)
+        optimizer: Optimizer class to use for training (default: Adam)
+        loss: Loss function for the training
+
+    Returns:
+        tuple[KerasTimeSeriesEstimator, pd.Series]: Trained estimator and loss history
+
+    """
+    trainer = TimeCourseTrainer(
+        features=features,
+        targets=targets,
+        model=model,
+        optimizer=optimizer,
+        loss=loss,
+    ).train(epochs=epochs, batch_size=batch_size)
+
+    return trainer.get_estimator(), trainer.get_loss()