mxlpy 0.17.0__py3-none-any.whl → 0.19.0__py3-none-any.whl
This diff compares publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
- mxlpy/__init__.py +14 -4
- mxlpy/experimental/diff.py +1 -1
- mxlpy/fit.py +173 -7
- mxlpy/identify.py +7 -1
- mxlpy/integrators/int_assimulo.py +10 -3
- mxlpy/integrators/int_scipy.py +7 -3
- mxlpy/label_map.py +3 -1
- mxlpy/meta/codegen_latex.py +1 -1
- mxlpy/meta/source_tools.py +1 -1
- mxlpy/model.py +146 -87
- mxlpy/nn/__init__.py +24 -5
- mxlpy/nn/_keras.py +85 -0
- mxlpy/nn/_torch.py +76 -15
- mxlpy/npe/__init__.py +21 -16
- mxlpy/npe/_keras.py +326 -0
- mxlpy/npe/_torch.py +73 -148
- mxlpy/plot.py +196 -52
- mxlpy/sbml/_export.py +8 -1
- mxlpy/surrogates/__init__.py +25 -17
- mxlpy/surrogates/_keras.py +137 -0
- mxlpy/surrogates/_poly.py +19 -8
- mxlpy/surrogates/_qss.py +31 -0
- mxlpy/surrogates/_torch.py +51 -127
- mxlpy/symbolic/symbolic_model.py +2 -2
- mxlpy/types.py +57 -114
- {mxlpy-0.17.0.dist-info → mxlpy-0.19.0.dist-info}/METADATA +27 -28
- mxlpy-0.19.0.dist-info/RECORD +54 -0
- mxlpy-0.19.0.dist-info/licenses/LICENSE +21 -0
- mxlpy/nn/_tensorflow.py +0 -0
- mxlpy-0.17.0.dist-info/RECORD +0 -51
- mxlpy-0.17.0.dist-info/licenses/LICENSE +0 -674
- {mxlpy-0.17.0.dist-info → mxlpy-0.19.0.dist-info}/WHEEL +0 -0
mxlpy/npe/__init__.py
CHANGED
@@ -16,23 +16,28 @@ Functions:
 
 from __future__ import annotations
 
-import
[removed lines 20-28: contents not captured in this view]
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    import contextlib
+
+    with contextlib.suppress(ImportError):
+        from . import _keras as keras
+        from . import _torch as torch
+else:
+    from lazy_import import lazy_module
+
+    keras = lazy_module(
+        "mxlpy.npe._keras",
+        error_strings={"module": "keras", "install_name": "mxlpy[tf]"},
     )
+    torch = lazy_module(
+        "mxlpy.npe._torch",
+        error_strings={"module": "torch", "install_name": "mxlpy[torch]"},
+    )
+
 
 __all__ = [
-    "
-    "
-    "TorchTimeCourse",
-    "TorchTimeCourseTrainer",
-    "train_torch_steady_state",
-    "train_torch_time_course",
+    "keras",
+    "torch",
 ]
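With this change, the keras and torch NPE backends are exposed as lazily loaded submodules of mxlpy.npe: the names are importable even when the optional dependency is missing, and the lazy_module error strings point users at the mxlpy[tf] / mxlpy[torch] extras. A minimal usage sketch under this layout (the data frames and column names are invented for illustration, and running the keras path assumes the mxlpy[tf] extra is installed):

import pandas as pd

from mxlpy import npe  # importable even without keras/torch installed

# Invented toy data: input features and the parameters to estimate
features = pd.DataFrame({"s1": [0.1, 0.2, 0.3], "s2": [1.0, 1.1, 1.2]})
targets = pd.DataFrame({"k1": [0.5, 0.6, 0.7]})

# npe.keras is a lazy proxy for mxlpy.npe._keras; the actual import (and any
# ImportError mentioning "mxlpy[tf]") only happens on first attribute access.
estimator, losses = npe.keras.train_steady_state(features, targets, epochs=10)
print(estimator.predict(features))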
mxlpy/npe/_keras.py
ADDED
@@ -0,0 +1,326 @@
from __future__ import annotations

from dataclasses import dataclass
from typing import Self, cast

import keras
import numpy as np
import pandas as pd

from mxlpy.nn._keras import LSTM, MLP, train
from mxlpy.types import AbstractEstimator

__all__ = [
    "DefaultLoss",
    "DefaultOptimizer",
    "LossFn",
    "Optimizer",
    "SteadyState",
    "SteadyStateTrainer",
    "TimeCourse",
    "TimeCourseTrainer",
    "train_steady_state",
    "train_time_course",
]

type Optimizer = keras.optimizers.Optimizer | str
type LossFn = keras.losses.Loss | str

DefaultOptimizer = keras.optimizers.Adam()
DefaultLoss = keras.losses.MeanAbsoluteError()


@dataclass(kw_only=True)
class SteadyState(AbstractEstimator):
    """Estimator for steady state data using Keras models."""

    model: keras.Model

    def predict(self, features: pd.Series | pd.DataFrame) -> pd.DataFrame:
        """Predict the target values for the given features."""
        return pd.DataFrame(
            self.model.predict(features),
            columns=self.parameter_names,
            dtype=float,
        )


@dataclass(kw_only=True)
class TimeCourse(AbstractEstimator):
    """Estimator for time course data using Keras models."""

    model: keras.Model

    def predict(self, features: pd.Series | pd.DataFrame) -> pd.DataFrame:
        """Predict the target values for the given features."""
        idx = cast(pd.MultiIndex, features.index)
        features_ = (
            np.swapaxes(
                features.to_numpy().reshape(
                    (
                        len(idx.levels[0]),
                        len(idx.levels[1]),
                        len(features.columns),
                    )
                ),
                axis1=0,
                axis2=1,
            ),
        )
        return pd.DataFrame(
            self.model.predict(features_),
            columns=self.parameter_names,
            dtype=float,
        )


@dataclass
class SteadyStateTrainer:
    """Trainer for steady state data using Keras models."""

    features: pd.DataFrame
    targets: pd.DataFrame
    model: keras.Model
    optimizer: Optimizer
    losses: list[pd.Series]
    loss_fn: LossFn

    def __init__(
        self,
        features: pd.DataFrame,
        targets: pd.DataFrame,
        model: keras.Model | None = None,
        optimizer: Optimizer = DefaultOptimizer,
        loss: LossFn = DefaultLoss,
    ) -> None:
        """Initialize the trainer with features, targets, and model.

        Args:
            features: DataFrame containing the input features for training
            targets: DataFrame containing the target values for training
            model: Predefined neural network model (None to use default MLP)
            optimizer: Optimizer class to use for training (default: Adam)
            loss: Loss function

        """
        self.features = features
        self.targets = targets

        if model is None:
            n_hidden = max(2 * len(features.columns) * len(targets.columns), 10)
            n_outputs = len(targets.columns)
            model = MLP(
                n_inputs=len(features.columns),
                neurons_per_layer=[n_hidden, n_hidden, n_outputs],
            )
        self.model = model
        model.compile(optimizer=cast(str, optimizer), loss=loss)

        self.loss_fn = loss
        self.losses = []

    def train(
        self,
        epochs: int,
        batch_size: int | None = None,
    ) -> Self:
        """Train the model using the provided features and targets.

        Args:
            epochs: Number of training epochs
            batch_size: Size of mini-batches for training (None for full-batch)

        """
        losses = train(
            model=self.model,
            features=self.features,
            targets=self.targets,
            epochs=epochs,
            batch_size=batch_size,
        )

        if len(self.losses) > 0:
            losses.index += self.losses[-1].index[-1]
        self.losses.append(losses)
        return self

    def get_loss(self) -> pd.Series:
        """Get the loss history of the training process."""
        return pd.concat(self.losses)

    def get_estimator(self) -> SteadyState:
        """Get the trained estimator."""
        return SteadyState(
            model=self.model,
            parameter_names=list(self.targets.columns),
        )


@dataclass
class TimeCourseTrainer:
    """Trainer for time course data using Keras models."""

    features: pd.DataFrame
    targets: pd.DataFrame
    model: keras.Model
    optimizer: Optimizer

    losses: list[pd.Series]
    loss_fn: LossFn

    def __init__(
        self,
        features: pd.DataFrame,
        targets: pd.DataFrame,
        model: keras.Model | None = None,
        optimizer: Optimizer = DefaultOptimizer,
        loss: LossFn = DefaultLoss,
    ) -> None:
        """Initialize the trainer with features, targets, and model.

        Args:
            features: DataFrame containing the input features for training
            targets: DataFrame containing the target values for training
            model: Predefined neural network model (None to use default LSTM)
            optimizer: Optimizer class to use for training
            loss: Loss function

        """
        self.features = features
        self.targets = targets

        if model is None:
            model = LSTM(
                n_inputs=len(features.columns),
                n_outputs=len(targets.columns),
                n_hidden=1,
            )
        self.model = model
        model.compile(optimizer=cast(str, optimizer), loss=loss)
        self.losses = []

    def train(
        self,
        epochs: int,
        batch_size: int | None = None,
    ) -> Self:
        """Train the model using the provided features and targets.

        Args:
            epochs: Number of training epochs
            batch_size: Size of mini-batches for training (None for full-batch)

        """
        losses = train(
            model=self.model,
            features=np.swapaxes(
                self.features.to_numpy().reshape(
                    (len(self.targets), -1, len(self.features.columns))
                ),
                axis1=0,
                axis2=1,
            ),
            targets=self.targets,
            epochs=epochs,
            batch_size=batch_size,
        )

        if len(self.losses) > 0:
            losses.index += self.losses[-1].index[-1]
        self.losses.append(losses)
        return self

    def get_loss(self) -> pd.Series:
        """Get the loss history of the training process."""
        return pd.concat(self.losses)

    def get_estimator(self) -> TimeCourse:
        """Get the trained estimator."""
        return TimeCourse(
            model=self.model,
            parameter_names=list(self.targets.columns),
        )


def train_steady_state(
    features: pd.DataFrame,
    targets: pd.DataFrame,
    epochs: int,
    batch_size: int | None = None,
    model: keras.Model | None = None,
    optimizer: Optimizer = DefaultOptimizer,
    loss: LossFn = DefaultLoss,
) -> tuple[SteadyState, pd.Series]:
    """Train a Keras steady state estimator.

    This function trains a neural network model to estimate steady state data
    using the provided features and targets. It supports both full-batch and
    mini-batch training.

    Examples:
        >>> train_keras_ss_estimator(features, targets, epochs=100)

    Args:
        features: DataFrame containing the input features for training
        targets: DataFrame containing the target values for training
        epochs: Number of training epochs
        batch_size: Size of mini-batches for training (None for full-batch)
        model: Predefined neural network model (None to use default MLP)
        optimizer: Optimizer class to use for training (default: Adam)
        loss: Loss function for the training

    Returns:
        tuple[KerasTimeSeriesEstimator, pd.Series]: Trained estimator and loss history

    """
    trainer = SteadyStateTrainer(
        features=features,
        targets=targets,
        model=model,
        optimizer=optimizer,
        loss=loss,
    ).train(epochs=epochs, batch_size=batch_size)

    return trainer.get_estimator(), trainer.get_loss()


def train_time_course(
    features: pd.DataFrame,
    targets: pd.DataFrame,
    epochs: int,
    batch_size: int | None = None,
    model: keras.Model | None = None,
    optimizer: Optimizer = DefaultOptimizer,
    loss: LossFn = DefaultLoss,
) -> tuple[TimeCourse, pd.Series]:
    """Train a Keras time course estimator.

    This function trains a neural network model to estimate time course data
    using the provided features and targets. It supports both full-batch and
    mini-batch training.

    Examples:
        >>> train_keras_time_course_estimator(features, targets, epochs=100)

    Args:
        features: DataFrame containing the input features for training
        targets: DataFrame containing the target values for training
        epochs: Number of training epochs
        batch_size: Size of mini-batches for training (None for full-batch)
        model: Predefined neural network model (None to use default LSTM)
        optimizer: Optimizer class to use for training (default: Adam)
        loss: Loss function for the training

    Returns:
        tuple[KerasTimeSeriesEstimator, pd.Series]: Trained estimator and loss history

    """
    trainer = TimeCourseTrainer(
        features=features,
        targets=targets,
        model=model,
        optimizer=optimizer,
        loss=loss,
    ).train(epochs=epochs, batch_size=batch_size)

    return trainer.get_estimator(), trainer.get_loss()
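For reference, a sketch of how the trainer classes in this new module compose. The data here is fabricated, it assumes keras is available via the mxlpy[tf] extra, and it assumes mxlpy.nn._keras.train returns the per-epoch loss series that the code above indexes and concatenates.

import numpy as np
import pandas as pd

from mxlpy.npe import _keras as npe_keras

rng = np.random.default_rng(seed=0)
features = pd.DataFrame(rng.random((100, 3)), columns=["s1", "s2", "s3"])
targets = pd.DataFrame(rng.random((100, 2)), columns=["k1", "k2"])

# Incremental training: each .train() call appends its loss history, and
# get_loss() concatenates the pieces with a continuing epoch index.
trainer = npe_keras.SteadyStateTrainer(features=features, targets=targets)
trainer.train(epochs=20).train(epochs=20, batch_size=32)
print(trainer.get_loss().tail())

# The estimator wraps the fitted keras model plus the target column names.
estimator = trainer.get_estimator()
predictions = estimator.predict(features)  # DataFrame with columns ["k1", "k2"]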