mxlpy 0.18.0__py3-none-any.whl → 0.19.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mxlpy/__init__.py +14 -4
- mxlpy/experimental/diff.py +1 -1
- mxlpy/integrators/int_assimulo.py +10 -3
- mxlpy/integrators/int_scipy.py +7 -3
- mxlpy/label_map.py +3 -1
- mxlpy/meta/codegen_latex.py +1 -1
- mxlpy/meta/source_tools.py +1 -1
- mxlpy/model.py +146 -87
- mxlpy/nn/__init__.py +24 -5
- mxlpy/nn/_keras.py +85 -0
- mxlpy/nn/_torch.py +18 -17
- mxlpy/npe/__init__.py +21 -16
- mxlpy/npe/_keras.py +326 -0
- mxlpy/npe/_torch.py +56 -60
- mxlpy/plot.py +2 -2
- mxlpy/sbml/_export.py +8 -1
- mxlpy/surrogates/__init__.py +25 -17
- mxlpy/surrogates/_keras.py +137 -0
- mxlpy/surrogates/_poly.py +19 -8
- mxlpy/surrogates/_qss.py +31 -0
- mxlpy/surrogates/_torch.py +44 -30
- mxlpy/symbolic/symbolic_model.py +2 -2
- mxlpy/types.py +57 -111
- {mxlpy-0.18.0.dist-info → mxlpy-0.19.0.dist-info}/METADATA +21 -22
- {mxlpy-0.18.0.dist-info → mxlpy-0.19.0.dist-info}/RECORD +27 -24
- mxlpy/nn/_tensorflow.py +0 -0
- {mxlpy-0.18.0.dist-info → mxlpy-0.19.0.dist-info}/WHEEL +0 -0
- {mxlpy-0.18.0.dist-info → mxlpy-0.19.0.dist-info}/licenses/LICENSE +0 -0
mxlpy/npe/_torch.py
CHANGED
@@ -12,7 +12,6 @@ Functions:
 from __future__ import annotations
 
 from dataclasses import dataclass
-from pathlib import Path
 from typing import TYPE_CHECKING, Self, cast
 
 import numpy as np
@@ -22,7 +21,6 @@ from torch import nn
 from torch.optim.adam import Adam
 
 from mxlpy.nn._torch import LSTM, MLP, DefaultDevice, train
-from mxlpy.parallel import Cache
 from mxlpy.types import AbstractEstimator
 
 if TYPE_CHECKING:
@@ -30,19 +28,17 @@ if TYPE_CHECKING:
 
     from torch.optim.optimizer import ParamsT
 
-DefaultCache = Cache(Path(".cache"))
 
 type LossFn = Callable[[torch.Tensor, torch.Tensor], torch.Tensor]
 
 __all__ = [
-    "DefaultCache",
     "LossFn",
-    "TorchSteadyState",
-    "TorchSteadyStateTrainer",
-    "TorchTimeCourse",
-    "TorchTimeCourseTrainer",
-    "train_torch_steady_state",
-    "train_torch_time_course",
+    "SteadyState",
+    "SteadyStateTrainer",
+    "TimeCourse",
+    "TimeCourseTrainer",
+    "train_steady_state",
+    "train_time_course",
 ]
 
 
@@ -61,7 +57,7 @@ def _mean_abs(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
 
 
 @dataclass(kw_only=True)
-class TorchSteadyState(AbstractEstimator):
+class SteadyState(AbstractEstimator):
     """Estimator for steady state data using PyTorch models."""
 
     model: torch.nn.Module
@@ -74,7 +70,7 @@ class TorchSteadyState(AbstractEstimator):
 
 
 @dataclass(kw_only=True)
-class TorchTimeCourse(AbstractEstimator):
+class TimeCourse(AbstractEstimator):
     """Estimator for time course data using PyTorch models."""
 
     model: torch.nn.Module
@@ -101,13 +97,13 @@ class TorchTimeCourse(AbstractEstimator):
 
 
 @dataclass
-class TorchSteadyStateTrainer:
+class SteadyStateTrainer:
     """Trainer for steady state data using PyTorch models."""
 
     features: pd.DataFrame
     targets: pd.DataFrame
-    …
-    …
+    model: nn.Module
+    optimizer: Adam
     device: torch.device
     losses: list[pd.Series]
     loss_fn: LossFn
@@ -116,8 +112,8 @@ class TorchSteadyStateTrainer:
         self,
         features: pd.DataFrame,
         targets: pd.DataFrame,
-        …
-        …
+        model: nn.Module | None = None,
+        optimizer_cls: Callable[[ParamsT], Adam] = Adam,
         device: torch.device = DefaultDevice,
         loss_fn: LossFn = _mean_abs,
     ) -> None:
@@ -126,8 +122,8 @@ class TorchSteadyStateTrainer:
         Args:
             features: DataFrame containing the input features for training
             targets: DataFrame containing the target values for training
-            …
-            …
+            model: Predefined neural network model (None to use default MLP)
+            optimizer_cls: Optimizer class to use for training (default: Adam)
            device: Device to run the training on (default: DefaultDevice)
            loss_fn: Loss function
 
@@ -135,15 +131,15 @@ class TorchSteadyStateTrainer:
         self.features = features
         self.targets = targets
 
-        if …
+        if model is None:
             n_hidden = max(2 * len(features.columns) * len(targets.columns), 10)
             n_outputs = len(targets.columns)
-            …
+            model = MLP(
                 n_inputs=len(features.columns),
                 neurons_per_layer=[n_hidden, n_hidden, n_outputs],
             )
-        self.…
-        self.optimizer = …
+        self.model = model.to(device)
+        self.optimizer = optimizer_cls(model.parameters())
         self.device = device
         self.loss_fn = loss_fn
         self.losses = []
@@ -161,7 +157,7 @@ class TorchSteadyStateTrainer:
 
         """
         losses = train(
-            …
+            model=self.model,
             features=self.features.to_numpy(),
             targets=self.targets.to_numpy(),
             epochs=epochs,
@@ -180,22 +176,22 @@ class TorchSteadyStateTrainer:
         """Get the loss history of the training process."""
         return pd.concat(self.losses)
 
-    def get_estimator(self) -> TorchSteadyState:
+    def get_estimator(self) -> SteadyState:
         """Get the trained estimator."""
-        return TorchSteadyState(
-            model=self.…
+        return SteadyState(
+            model=self.model,
             parameter_names=list(self.targets.columns),
         )
 
 
 @dataclass
-class TorchTimeCourseTrainer:
+class TimeCourseTrainer:
     """Trainer for time course data using PyTorch models."""
 
     features: pd.DataFrame
     targets: pd.DataFrame
-    …
-    …
+    model: nn.Module
+    optimizer: Adam
     device: torch.device
     losses: list[pd.Series]
     loss_fn: LossFn
@@ -204,8 +200,8 @@ class TorchTimeCourseTrainer:
         self,
         features: pd.DataFrame,
         targets: pd.DataFrame,
-        …
-        …
+        model: nn.Module | None = None,
+        optimizer_cls: Callable[[ParamsT], Adam] = Adam,
         device: torch.device = DefaultDevice,
         loss_fn: LossFn = _mean_abs,
     ) -> None:
@@ -214,8 +210,8 @@ class TorchTimeCourseTrainer:
         Args:
             features: DataFrame containing the input features for training
             targets: DataFrame containing the target values for training
-            …
-            …
+            model: Predefined neural network model (None to use default LSTM)
+            optimizer_cls: Optimizer class to use for training (default: Adam)
             device: Device to run the training on (default: DefaultDevice)
             loss_fn: Loss function
 
@@ -223,14 +219,14 @@ class TorchTimeCourseTrainer:
         self.features = features
         self.targets = targets
 
-        if …
-            …
+        if model is None:
+            model = LSTM(
                 n_inputs=len(features.columns),
                 n_outputs=len(targets.columns),
                 n_hidden=1,
             ).to(device)
-        self.…
-        self.optimizer = …
+        self.model = model.to(device)
+        self.optimizer = optimizer_cls(model.parameters())
         self.device = device
         self.loss_fn = loss_fn
         self.losses = []
@@ -248,7 +244,7 @@ class TorchTimeCourseTrainer:
 
         """
         losses = train(
-            …
+            model=self.model,
             features=np.swapaxes(
                 self.features.to_numpy().reshape(
                     (len(self.targets), -1, len(self.features.columns))
@@ -273,23 +269,23 @@ class TorchTimeCourseTrainer:
         """Get the loss history of the training process."""
         return pd.concat(self.losses)
 
-    def get_estimator(self) -> TorchTimeCourse:
+    def get_estimator(self) -> TimeCourse:
         """Get the trained estimator."""
-        return TorchTimeCourse(
-            model=self.…
+        return TimeCourse(
+            model=self.model,
             parameter_names=list(self.targets.columns),
         )
 
 
-def train_torch_steady_state(
+def train_steady_state(
     features: pd.DataFrame,
     targets: pd.DataFrame,
     epochs: int,
     batch_size: int | None = None,
-    …
-    …
+    model: nn.Module | None = None,
+    optimizer_cls: Callable[[ParamsT], Adam] = Adam,
     device: torch.device = DefaultDevice,
-) -> tuple[TorchSteadyState, pd.Series]:
+) -> tuple[SteadyState, pd.Series]:
     """Train a PyTorch steady state estimator.
 
     This function trains a neural network model to estimate steady state data
@@ -304,34 +300,34 @@ def train_torch_steady_state(
         targets: DataFrame containing the target values for training
         epochs: Number of training epochs
         batch_size: Size of mini-batches for training (None for full-batch)
-        …
-        …
+        model: Predefined neural network model (None to use default MLP)
+        optimizer_cls: Optimizer class to use for training (default: Adam)
         device: Device to run the training on (default: DefaultDevice)
 
     Returns:
         tuple[TorchTimeSeriesEstimator, pd.Series]: Trained estimator and loss history
 
     """
-    trainer = TorchSteadyStateTrainer(
+    trainer = SteadyStateTrainer(
         features=features,
         targets=targets,
-        …
-        …
+        model=model,
+        optimizer_cls=optimizer_cls,
         device=device,
     ).train(epochs=epochs, batch_size=batch_size)
 
     return trainer.get_estimator(), trainer.get_loss()
 
 
-def train_torch_time_course(
+def train_time_course(
     features: pd.DataFrame,
     targets: pd.DataFrame,
     epochs: int,
     batch_size: int | None = None,
-    …
-    …
+    model: nn.Module | None = None,
+    optimizer_cls: Callable[[ParamsT], Adam] = Adam,
     device: torch.device = DefaultDevice,
-) -> tuple[TorchTimeCourse, pd.Series]:
+) -> tuple[TimeCourse, pd.Series]:
     """Train a PyTorch time course estimator.
 
     This function trains a neural network model to estimate time course data
@@ -346,19 +342,19 @@ def train_torch_time_course(
         targets: DataFrame containing the target values for training
         epochs: Number of training epochs
         batch_size: Size of mini-batches for training (None for full-batch)
-        …
-        …
+        model: Predefined neural network model (None to use default LSTM)
+        optimizer_cls: Optimizer class to use for training (default: Adam)
         device: Device to run the training on (default: DefaultDevice)
 
     Returns:
         tuple[TorchTimeSeriesEstimator, pd.Series]: Trained estimator and loss history
 
     """
-    trainer = TorchTimeCourseTrainer(
+    trainer = TimeCourseTrainer(
         features=features,
         targets=targets,
-        …
-        …
+        model=model,
+        optimizer_cls=optimizer_cls,
         device=device,
    ).train(epochs=epochs, batch_size=batch_size)
 
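The pattern across this release: the Torch prefix moves out of the symbol names and into the module namespace, and the cache machinery (Cache, DefaultCache) is dropped. A minimal migration sketch, assuming the mxlpy.npe._torch module path shown in this diff (the public alias may differ) and torch installed; data values are illustrative:

import pandas as pd

from mxlpy.npe import _torch as npe_torch  # module path taken from this diff

features = pd.DataFrame({"s1": [0.1, 0.2, 0.3], "s2": [1.0, 1.1, 1.2]})
targets = pd.DataFrame({"k1": [0.5, 0.6, 0.7]})

# 0.18.0: train_torch_steady_state(...) -> tuple[TorchSteadyState, pd.Series]
# 0.19.0: train_steady_state(...)       -> tuple[SteadyState, pd.Series]
estimator, losses = npe_torch.train_steady_state(
    features=features,
    targets=targets,
    epochs=10,
)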
mxlpy/plot.py
CHANGED
@@ -712,11 +712,11 @@ def _create_heatmap(
     if title is not None:
         ax.set_title(title)
     ax.set_xticks(
-        np.arange(0, len(df.columns), 1) + 0.5,
+        np.arange(0, len(df.columns), 1, dtype=float) + 0.5,
         labels=xticklabels,
     )
     ax.set_yticks(
-        np.arange(0, len(df.index), 1) + 0.5,
+        np.arange(0, len(df.index), 1, dtype=float) + 0.5,
         labels=yticklabels,
     )
 
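At runtime the tick positions were already floats (an integer arange plus 0.5 promotes to float64), so the explicit dtype is presumably a typing fix: it lets static checkers see a float array being passed to set_xticks/set_yticks. A quick check:

import numpy as np

# Both expressions yield float64 at runtime; only the static inference differs.
print((np.arange(0, 3, 1) + 0.5).dtype)               # float64
print((np.arange(0, 3, 1, dtype=float) + 0.5).dtype)  # float64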
mxlpy/sbml/_export.py
CHANGED
@@ -447,7 +447,14 @@ def _create_sbml_variables(
         cpd.setConstant(False)
         cpd.setBoundaryCondition(False)
         cpd.setHasOnlySubstanceUnits(False)
-        cpd.setInitialAmount(float(value))
+        if isinstance(value, Derived):
+            ar = sbml_model.createInitialAssignment()
+            ar.setId(_convert_id_to_sbml(id_=name, prefix="IA"))
+            ar.setName(_convert_id_to_sbml(id_=name, prefix="IA"))
+            ar.setVariable(_convert_id_to_sbml(id_=name, prefix="IA"))
+            ar.setMath(_sbmlify_fn(value.fn, value.args))
+        else:
+            cpd.setInitialAmount(float(value))
 
 
 def _create_sbml_derived_variables(*, model: Model, sbml_model: libsbml.Model) -> None:
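With this branch, a species whose initial value is a Derived expression is exported as an SBML initial assignment instead of being forced through float(value). A hypothetical sketch of the model side; the Model builder calls are assumed rather than taken from this diff, only Derived(fn=..., args=...) and the export behavior come from it:

from mxlpy import Model
from mxlpy.types import Derived

# Hypothetical model: x starts at twice the parameter a, so on export the
# new branch emits an InitialAssignment (id prefixed "IA") for x instead of
# failing to coerce the Derived value to a float.
model = (
    Model()
    .add_parameters({"a": 2.0})
    .add_variables({"x": Derived(fn=lambda a: 2.0 * a, args=["a"])})
)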
mxlpy/surrogates/__init__.py
CHANGED
@@ -4,29 +4,37 @@ This module provides classes and functions for creating and training surrogate m
 for metabolic simulations. It includes functionality for both steady-state and time-series
 data using neural networks.
 
-Classes:
-    AbstractSurrogate: Abstract base class for surrogate models.
-    TorchSurrogate: Surrogate model using PyTorch.
-    Approximator: Neural network approximator for surrogate modeling.
-
-Functions:
-    train_torch_surrogate: Train a PyTorch surrogate model.
-    train_torch_time_course_estimator: Train a PyTorch time course estimator.
 """
 
 from __future__ import annotations
 
-import …
+from typing import TYPE_CHECKING
 
-…
-…
+if TYPE_CHECKING:
+    import contextlib
 
-…
+    with contextlib.suppress(ImportError):
+        from . import _keras as keras
+        from . import _torch as torch
+else:
+    from lazy_import import lazy_module
+
+    keras = lazy_module(
+        "mxlpy.surrogates._keras",
+        error_strings={"module": "keras", "install_name": "mxlpy[tf]"},
+    )
+    torch = lazy_module(
+        "mxlpy.surrogates._torch",
+        error_strings={"module": "torch", "install_name": "mxlpy[torch]"},
+    )
+
+
+from . import _poly as poly
+from . import _qss as qss
 
 __all__ = [
-    "…",
-    "…",
-    "…",
-    "…",
-    "train_torch",
+    "keras",
+    "poly",
+    "qss",
+    "torch",
 ]
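Net effect: importing mxlpy.surrogates no longer drags in torch or keras. Backends resolve on first attribute access, and a missing backend raises an install hint (mxlpy[torch], mxlpy[tf]) rather than a bare ImportError. A sketch of the new access pattern, assuming the torch backend mirrors the keras train entry point shown below:

from mxlpy import surrogates

surrogates.poly  # regular submodule, always importable
surrogates.qss   # regular submodule, always importable

# torch and keras are lazy_import proxies: the real import happens on first
# attribute access and fails with the install hint if the backend is absent.
train_fn = surrogates.torch.train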
mxlpy/surrogates/_keras.py
ADDED
@@ -0,0 +1,137 @@
+from dataclasses import dataclass
+from typing import Self, cast
+
+import keras
+import numpy as np
+import pandas as pd
+
+from mxlpy.nn._keras import MLP
+from mxlpy.nn._keras import train as _train
+from mxlpy.types import AbstractSurrogate, Array, Derived
+
+__all__ = [
+    "DefaultLoss",
+    "DefaultOptimizer",
+    "LossFn",
+    "Optimizer",
+    "Surrogate",
+    "Trainer",
+    "train",
+]
+
+type Optimizer = keras.optimizers.Optimizer | str
+type LossFn = keras.losses.Loss | str
+
+DefaultOptimizer = keras.optimizers.Adam()
+DefaultLoss = keras.losses.MeanAbsoluteError()
+
+
+@dataclass(kw_only=True)
+class Surrogate(AbstractSurrogate):
+    model: keras.Model
+
+    def predict_raw(self, y: Array) -> Array:
+        return np.atleast_1d(np.squeeze(self.model.predict(y)))
+
+    def predict(
+        self, args: dict[str, float | pd.Series | pd.DataFrame]
+    ) -> dict[str, float]:
+        return dict(
+            zip(
+                self.outputs,
+                self.predict_raw(np.array([args[arg] for arg in self.args])),
+                strict=True,
+            )
+        )
+
+
+@dataclass(init=False)
+class Trainer:
+    features: pd.DataFrame
+    targets: pd.DataFrame
+    model: keras.Model
+    optimizer: Optimizer | str
+    losses: list[pd.Series]
+    loss_fn: LossFn
+
+    def __init__(
+        self,
+        features: pd.DataFrame,
+        targets: pd.DataFrame,
+        model: keras.Model | None = None,
+        optimizer: Optimizer = DefaultOptimizer,
+        loss: LossFn = DefaultLoss,
+    ) -> None:
+        self.features = features
+        self.targets = targets
+        if model is None:
+            model = MLP(
+                n_inputs=len(features.columns),
+                neurons_per_layer=[50, 50, len(targets.columns)],
+            )
+        self.model = model
+        model.compile(optimizer=cast(str, optimizer), loss=loss)
+
+        self.losses = []
+
+    def train(self, epochs: int, batch_size: int | None = None) -> Self:
+        losses = _train(
+            model=self.model,
+            features=self.features,
+            targets=self.targets,
+            epochs=epochs,
+            batch_size=batch_size,
+        )
+
+        if len(self.losses) > 0:
+            losses.index += self.losses[-1].index[-1]
+        self.losses.append(losses)
+
+        return self
+
+    def get_loss(self) -> pd.Series:
+        return pd.concat(self.losses)
+
+    def get_surrogate(
+        self,
+        surrogate_args: list[str] | None = None,
+        surrogate_outputs: list[str] | None = None,
+        surrogate_stoichiometries: dict[str, dict[str, float | Derived]] | None = None,
+    ) -> Surrogate:
+        return Surrogate(
+            model=self.model,
+            args=surrogate_args if surrogate_args is not None else [],
+            outputs=surrogate_outputs if surrogate_outputs is not None else [],
+            stoichiometries=surrogate_stoichiometries
+            if surrogate_stoichiometries is not None
+            else {},
+        )
+
+
+def train(
+    features: pd.DataFrame,
+    targets: pd.DataFrame,
+    epochs: int,
+    surrogate_args: list[str] | None = None,
+    surrogate_outputs: list[str] | None = None,
+    surrogate_stoichiometries: dict[str, dict[str, float | Derived]] | None = None,
+    batch_size: int | None = None,
+    model: keras.Model | None = None,
+    optimizer: Optimizer = DefaultOptimizer,
+    loss: LossFn = DefaultLoss,
+) -> tuple[Surrogate, pd.Series]:
+    trainer = Trainer(
+        features=features,
+        targets=targets,
+        model=model,
+        optimizer=optimizer,
+        loss=loss,
+    ).train(
+        epochs=epochs,
+        batch_size=batch_size,
+    )
+    return trainer.get_surrogate(
+        surrogate_args=surrogate_args,
+        surrogate_outputs=surrogate_outputs,
+        surrogate_stoichiometries=surrogate_stoichiometries,
+    ), trainer.get_loss()
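The new keras backend mirrors the torch surrogate API: a Trainer class, a module-level train function, and get_surrogate for the finished artifact. A usage sketch built from the signatures above (data values illustrative; requires keras installed):

import pandas as pd

from mxlpy.surrogates import _keras as keras_backend  # public alias: surrogates.keras

features = pd.DataFrame({"s1": [0.0, 0.5, 1.0]})
targets = pd.DataFrame({"v1": [0.0, 1.0, 2.0]})

surrogate, losses = keras_backend.train(
    features=features,
    targets=targets,
    epochs=5,
    surrogate_args=["s1"],
    surrogate_outputs=["v1"],
)
print(losses.tail())  # per-epoch loss history as a pd.Series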
mxlpy/surrogates/_poly.py
CHANGED
@@ -6,12 +6,12 @@ import numpy as np
 import pandas as pd
 from numpy import polynomial
 
-from mxlpy.types import AbstractSurrogate, ArrayLike
+from mxlpy.types import AbstractSurrogate, ArrayLike, Derived
 
 __all__ = [
-    "Polynomial",
     "PolynomialExpansion",
-    "train_polynomial",
+    "Surrogate",
+    "train",
 ]
 
 # define custom type
@@ -26,14 +26,25 @@ PolynomialExpansion = (
 
 
 @dataclass(kw_only=True)
-class Polynomial(AbstractSurrogate):
+class Surrogate(AbstractSurrogate):
     model: PolynomialExpansion
 
     def predict_raw(self, y: np.ndarray) -> np.ndarray:
         return self.model(y)
 
+    def predict(
+        self, args: dict[str, float | pd.Series | pd.DataFrame]
+    ) -> dict[str, float]:
+        return dict(
+            zip(
+                self.outputs,
+                self.model(np.array([args[arg] for arg in self.args])),
+                strict=True,
+            )
+        )
 
-def train_polynomial(
+
+def train(
     feature: ArrayLike | pd.Series,
     target: ArrayLike | pd.Series,
     series: Literal[
@@ -42,8 +53,8 @@ def train_polynomial(
     degrees: Iterable[int] = (1, 2, 3, 4, 5, 6, 7),
     surrogate_args: list[str] | None = None,
     surrogate_outputs: list[str] | None = None,
-    surrogate_stoichiometries: dict[str, dict[str, float]] | None = None,
-) -> tuple[Polynomial, pd.DataFrame]:
+    surrogate_stoichiometries: dict[str, dict[str, float | Derived]] | None = None,
+) -> tuple[Surrogate, pd.DataFrame]:
     """Train a surrogate model based on function series expansion.
 
     Args:
@@ -85,7 +96,7 @@ def train_polynomial(
     # Choose the model with the lowest AIC
     model = models[np.argmin(score)]
     return (
-        Polynomial(
+        Surrogate(
             model=model,
             args=surrogate_args if surrogate_args is not None else [],
             outputs=surrogate_outputs if surrogate_outputs is not None else [],
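The polynomial backend follows the same convention: train_polynomial becomes surrogates.poly.train, and the returned Surrogate gains the dict-based predict used by the other backends. A sketch; the series literal value is assumed, since the diff truncates the allowed names:

import numpy as np

from mxlpy.surrogates import poly

x = np.linspace(0.0, 1.0, 50)
y = 2.0 * x**2 + 1.0

surrogate, info = poly.train(
    feature=x,
    target=y,
    series="Power",  # assumed literal; numpy.polynomial also offers Chebyshev etc.
    degrees=(1, 2, 3),
    surrogate_args=["x"],
    surrogate_outputs=["y"],
)
print(surrogate.predict({"x": 0.5}))  # {'y': ...}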
mxlpy/surrogates/_qss.py
ADDED
@@ -0,0 +1,31 @@
+from __future__ import annotations
+
+from collections.abc import Callable, Iterable
+from dataclasses import dataclass
+from typing import TYPE_CHECKING
+
+from mxlpy.types import AbstractSurrogate, Array
+
+if TYPE_CHECKING:
+    import pandas as pd
+
+__all__ = ["QSSFn", "Surrogate"]
+
+type QSSFn = Callable[..., Iterable[float] | Array]
+
+
+@dataclass(kw_only=True)
+class Surrogate(AbstractSurrogate):
+    model: QSSFn
+
+    def predict(
+        self,
+        args: dict[str, float | pd.Series | pd.DataFrame],
+    ) -> dict[str, float]:
+        return dict(
+            zip(
+                self.outputs,
+                self.model(*(args[arg] for arg in self.args)),
+                strict=True,
+            )
+        )
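The new _qss module is the lightweight end of the spectrum: it wraps a plain Python callable, for example a hand-derived quasi-steady-state rate law, as a surrogate with no training step. A sketch; the args/outputs/stoichiometries fields are assumed to come from AbstractSurrogate, as the other backends' constructor calls suggest:

from mxlpy.surrogates import qss


def michaelis_menten(s: float) -> tuple[float]:
    # hand-derived QSS rate: v = Vmax * S / (Km + S)
    return (10.0 * s / (0.5 + s),)


surrogate = qss.Surrogate(
    model=michaelis_menten,
    args=["S"],
    outputs=["v"],
    stoichiometries={},
)
print(surrogate.predict({"S": 1.0}))  # {'v': 6.666...}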