mxlpy 0.17.0__py3-none-any.whl → 0.19.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
- mxlpy/__init__.py +14 -4
- mxlpy/experimental/diff.py +1 -1
- mxlpy/fit.py +173 -7
- mxlpy/identify.py +7 -1
- mxlpy/integrators/int_assimulo.py +10 -3
- mxlpy/integrators/int_scipy.py +7 -3
- mxlpy/label_map.py +3 -1
- mxlpy/meta/codegen_latex.py +1 -1
- mxlpy/meta/source_tools.py +1 -1
- mxlpy/model.py +146 -87
- mxlpy/nn/__init__.py +24 -5
- mxlpy/nn/_keras.py +85 -0
- mxlpy/nn/_torch.py +76 -15
- mxlpy/npe/__init__.py +21 -16
- mxlpy/npe/_keras.py +326 -0
- mxlpy/npe/_torch.py +73 -148
- mxlpy/plot.py +196 -52
- mxlpy/sbml/_export.py +8 -1
- mxlpy/surrogates/__init__.py +25 -17
- mxlpy/surrogates/_keras.py +137 -0
- mxlpy/surrogates/_poly.py +19 -8
- mxlpy/surrogates/_qss.py +31 -0
- mxlpy/surrogates/_torch.py +51 -127
- mxlpy/symbolic/symbolic_model.py +2 -2
- mxlpy/types.py +57 -114
- {mxlpy-0.17.0.dist-info → mxlpy-0.19.0.dist-info}/METADATA +27 -28
- mxlpy-0.19.0.dist-info/RECORD +54 -0
- mxlpy-0.19.0.dist-info/licenses/LICENSE +21 -0
- mxlpy/nn/_tensorflow.py +0 -0
- mxlpy-0.17.0.dist-info/RECORD +0 -51
- mxlpy-0.17.0.dist-info/licenses/LICENSE +0 -674
- {mxlpy-0.17.0.dist-info → mxlpy-0.19.0.dist-info}/WHEEL +0 -0
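The headline change across the surrogate backends is a naming unification: each backend module now exposes a Surrogate class, a Trainer where applicable, and a train entry point, replacing the per-backend names Torch, TorchTrainer, train_torch, Polynomial, and train_polynomial, and predict now consumes a dict of named arguments. A sketch of the resulting import-level renames, inferred from the per-file diffs below; the `as` aliases are illustrative only:

# Import-level renames between 0.17.0 and 0.19.0, inferred from the diffs
# below; the "as" aliases are illustrative, not part of the package.
from mxlpy.surrogates._torch import (
    Surrogate as TorchSurrogate,  # 0.17.0: Torch
    Trainer as TorchTrainer,      # 0.17.0: TorchTrainer
    train as train_torch,         # 0.17.0: train_torch
)
from mxlpy.surrogates._poly import (
    Surrogate as PolySurrogate,   # 0.17.0: Polynomial
    train as train_polynomial,    # 0.17.0: train_polynomial
)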
mxlpy/surrogates/_poly.py
CHANGED
@@ -6,12 +6,12 @@ import numpy as np
 import pandas as pd
 from numpy import polynomial
 
-from mxlpy.types import AbstractSurrogate, ArrayLike
+from mxlpy.types import AbstractSurrogate, ArrayLike, Derived
 
 __all__ = [
-    "Polynomial",
     "PolynomialExpansion",
-    "train_polynomial",
+    "Surrogate",
+    "train",
 ]
 
 # define custom type
@@ -26,14 +26,25 @@ PolynomialExpansion = (
 
 
 @dataclass(kw_only=True)
-class Polynomial(AbstractSurrogate):
+class Surrogate(AbstractSurrogate):
     model: PolynomialExpansion
 
     def predict_raw(self, y: np.ndarray) -> np.ndarray:
         return self.model(y)
 
+    def predict(
+        self, args: dict[str, float | pd.Series | pd.DataFrame]
+    ) -> dict[str, float]:
+        return dict(
+            zip(
+                self.outputs,
+                self.model(np.array([args[arg] for arg in self.args])),
+                strict=True,
+            )
+        )
 
-def train_polynomial(
+
+def train(
     feature: ArrayLike | pd.Series,
     target: ArrayLike | pd.Series,
     series: Literal[
@@ -42,8 +53,8 @@ def train_polynomial(
     degrees: Iterable[int] = (1, 2, 3, 4, 5, 6, 7),
     surrogate_args: list[str] | None = None,
     surrogate_outputs: list[str] | None = None,
-    surrogate_stoichiometries: dict[str, dict[str, float]] | None = None,
-) -> tuple[Polynomial, pd.DataFrame]:
+    surrogate_stoichiometries: dict[str, dict[str, float | Derived]] | None = None,
+) -> tuple[Surrogate, pd.DataFrame]:
     """Train a surrogate model based on function series expansion.
 
     Args:
@@ -85,7 +96,7 @@ def train_polynomial(
     # Choose the model with the lowest AIC
     model = models[np.argmin(score)]
     return (
-        Polynomial(
+        Surrogate(
            model=model,
            args=surrogate_args if surrogate_args is not None else [],
            outputs=surrogate_outputs if surrogate_outputs is not None else [],
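The renamed train entry point returns the new Surrogate together with the per-degree score table, and predict now takes named arguments instead of a raw array. A minimal usage sketch; the sample data, the column names, and the "Power" series literal are assumptions for illustration (the Literal's allowed values are truncated in this diff):

import numpy as np
import pandas as pd

from mxlpy.surrogates import _poly

# Hypothetical 1D training data: approximate v = 1/x on a grid.
x = np.linspace(0.1, 2.0, 50)
surrogate, scores = _poly.train(
    feature=pd.Series(x, name="x"),
    target=pd.Series(1.0 / x, name="v"),
    series="Power",      # assumed Literal value; the full signature is truncated above
    degrees=(1, 2, 3),
    surrogate_args=["x"],
    surrogate_outputs=["v"],
)
# 0.19.0: named inputs in, named outputs out
print(surrogate.predict({"x": 0.5}))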
mxlpy/surrogates/_qss.py
ADDED
@@ -0,0 +1,31 @@
+from __future__ import annotations
+
+from collections.abc import Callable, Iterable
+from dataclasses import dataclass
+from typing import TYPE_CHECKING
+
+from mxlpy.types import AbstractSurrogate, Array
+
+if TYPE_CHECKING:
+    import pandas as pd
+
+__all__ = ["QSSFn", "Surrogate"]
+
+type QSSFn = Callable[..., Iterable[float] | Array]
+
+
+@dataclass(kw_only=True)
+class Surrogate(AbstractSurrogate):
+    model: QSSFn
+
+    def predict(
+        self,
+        args: dict[str, float | pd.Series | pd.DataFrame],
+    ) -> dict[str, float]:
+        return dict(
+            zip(
+                self.outputs,
+                self.model(*(args[arg] for arg in self.args)),
+                strict=True,
+            )
+        )
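The new quasi-steady-state backend wraps a plain callable: predict looks up each entry of args by name, splats the values into the function, and zips the returned iterable against outputs. A minimal sketch with an invented rapid-equilibrium expression:

from mxlpy.surrogates._qss import Surrogate as QSSSurrogate

# Hypothetical quasi-steady-state expression: one output computed
# algebraically from two named inputs.
def _qss(s: float, k: float) -> tuple[float, ...]:
    return (s / (s + k),)

surrogate = QSSSurrogate(
    model=_qss,
    args=["s", "k"],    # looked up in the args dict, in this order
    outputs=["v_qss"],  # zipped against the model's return value
)
print(surrogate.predict({"s": 1.0, "k": 0.5}))  # {'v_qss': 0.666...}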
mxlpy/surrogates/_torch.py
CHANGED
@@ -5,21 +5,21 @@ from typing import Self
 import numpy as np
 import pandas as pd
 import torch
-import tqdm
 from torch import nn
 from torch.optim.adam import Adam
 from torch.optim.optimizer import ParamsT
 
 from mxlpy.nn._torch import MLP, DefaultDevice
-from mxlpy.types import AbstractSurrogate
+from mxlpy.nn._torch import train as _train
+from mxlpy.types import AbstractSurrogate, Derived
 
 type LossFn = Callable[[torch.Tensor, torch.Tensor], torch.Tensor]
 
 __all__ = [
     "LossFn",
-    "Torch",
-    "TorchTrainer",
-    "train_torch",
+    "Surrogate",
+    "Trainer",
+    "train",
 ]
 
 
@@ -38,7 +38,7 @@ def _mean_abs(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
 
 
 @dataclass(kw_only=True)
-class Torch(AbstractSurrogate):
+class Surrogate(AbstractSurrogate):
     """Surrogate model using PyTorch.
 
     Attributes:
@@ -66,12 +66,25 @@ class Torch(AbstractSurrogate):
             torch.tensor(y, dtype=torch.float32),
         ).numpy()
 
+    def predict(
+        self,
+        args: dict[str, float | pd.Series | pd.DataFrame],
+    ) -> dict[str, float]:
+        """Predict outputs based on input data."""
+        return dict(
+            zip(
+                self.outputs,
+                self.predict_raw(np.array([args[arg] for arg in self.args])),
+                strict=True,
+            )
+        )
+
 
 @dataclass(init=False)
-class TorchTrainer:
+class Trainer:
     features: pd.DataFrame
     targets: pd.DataFrame
-    approximator: nn.Module
+    model: nn.Module
     optimizer: Adam
     device: torch.device
     losses: list[pd.Series]
@@ -81,22 +94,22 @@ class TorchTrainer:
         self,
         features: pd.DataFrame,
         targets: pd.DataFrame,
-        approximator: nn.Module | None = None,
-        …
+        model: nn.Module | None = None,
+        optimizer_cls: Callable[[ParamsT], Adam] = Adam,
         device: torch.device = DefaultDevice,
         loss_fn: LossFn = _mean_abs,
     ) -> None:
         self.features = features
         self.targets = targets
 
-        if approximator is None:
-            approximator = MLP(
+        if model is None:
+            model = MLP(
                 n_inputs=len(features.columns),
                 neurons_per_layer=[50, 50, len(targets.columns)],
             )
-        self.approximator = approximator.to(device)
+        self.model = model.to(device)
 
-        self.optimizer = …
+        self.optimizer = optimizer_cls(model.parameters())
         self.device = device
         self.loss_fn = loss_fn
         self.losses = []
@@ -106,28 +119,16 @@ class TorchTrainer:
         epochs: int,
         batch_size: int | None = None,
     ) -> Self:
-        if batch_size is None:
-            losses = _train_full(
-                aprox=self.approximator,
-                features=self.features,
-                targets=self.targets,
-                epochs=epochs,
-                optimizer=self.optimizer,
-                device=self.device,
-                loss_fn=self.loss_fn,
-            )
-        else:
-            losses = _train_batched(
-                aprox=self.approximator,
-                features=self.features,
-                targets=self.targets,
-                epochs=epochs,
-                optimizer=self.optimizer,
-                device=self.device,
-                batch_size=batch_size,
-                loss_fn=self.loss_fn,
-            )
-
+        losses = _train(
+            model=self.model,
+            features=self.features.to_numpy(),
+            targets=self.targets.to_numpy(),
+            epochs=epochs,
+            optimizer=self.optimizer,
+            batch_size=batch_size,
+            device=self.device,
+            loss_fn=self.loss_fn,
+        )
         if len(self.losses) > 0:
             losses.index += self.losses[-1].index[-1]
         self.losses.append(losses)
@@ -140,10 +141,10 @@ class TorchTrainer:
         self,
         surrogate_args: list[str] | None = None,
         surrogate_outputs: list[str] | None = None,
-        surrogate_stoichiometries: dict[str, dict[str, float]] | None = None,
-    ) -> Torch:
-        return Torch(
-            model=self.approximator,
+        surrogate_stoichiometries: dict[str, dict[str, float | Derived]] | None = None,
+    ) -> Surrogate:
+        return Surrogate(
+            model=self.model,
             args=surrogate_args if surrogate_args is not None else [],
             outputs=surrogate_outputs if surrogate_outputs is not None else [],
             stoichiometries=surrogate_stoichiometries
@@ -152,96 +153,19 @@ class TorchTrainer:
         )
 
 
-def _train_batched(
-    aprox: nn.Module,
-    features: pd.DataFrame,
-    targets: pd.DataFrame,
-    epochs: int,
-    optimizer: Adam,
-    device: torch.device,
-    batch_size: int,
-    loss_fn: LossFn,
-) -> pd.Series:
-    """Train the neural network using mini-batch gradient descent.
-
-    Args:
-        aprox: Neural network model to train.
-        features: Input features as a tensor.
-        targets: Target values as a tensor.
-        epochs: Number of training epochs.
-        optimizer: Optimizer for training.
-        device: torch device
-        batch_size: Size of mini-batches for training.
-        loss_fn: Loss function
-
-    Returns:
-        pd.Series: Series containing the training loss history.
-
-    """
-    rng = np.random.default_rng()
-    losses = {}
-    for i in tqdm.trange(epochs):
-        idxs = rng.choice(features.index, size=batch_size)
-        X = torch.Tensor(features.iloc[idxs].to_numpy(), device=device)
-        Y = torch.Tensor(targets.iloc[idxs].to_numpy(), device=device)
-        optimizer.zero_grad()
-        loss = loss_fn(aprox(X), Y)
-        loss.backward()
-        optimizer.step()
-        losses[i] = loss.detach().numpy()
-    return pd.Series(losses, dtype=float)
-
-
-def _train_full(
-    aprox: nn.Module,
-    features: pd.DataFrame,
-    targets: pd.DataFrame,
-    epochs: int,
-    optimizer: Adam,
-    device: torch.device,
-    loss_fn: Callable,
-) -> pd.Series:
-    """Train the neural network using full-batch gradient descent.
-
-    Args:
-        aprox: Neural network model to train.
-        features: Input features as a tensor.
-        targets: Target values as a tensor.
-        epochs: Number of training epochs.
-        optimizer: Optimizer for training.
-        device: Torch device
-        loss_fn: Loss function
-
-    Returns:
-        pd.Series: Series containing the training loss history.
-
-    """
-    X = torch.Tensor(features.to_numpy(), device=device)
-    Y = torch.Tensor(targets.to_numpy(), device=device)
-
-    losses = {}
-    for i in tqdm.trange(epochs):
-        optimizer.zero_grad()
-        loss = loss_fn(aprox(X), Y)
-        loss.backward()
-        optimizer.step()
-        losses[i] = loss.detach().numpy()
-    return pd.Series(losses, dtype=float)
-
-
-def train_torch(
+def train(
     features: pd.DataFrame,
     targets: pd.DataFrame,
     epochs: int,
     surrogate_args: list[str] | None = None,
     surrogate_outputs: list[str] | None = None,
-    surrogate_stoichiometries: dict[str, dict[str, float]] | None = None,
+    surrogate_stoichiometries: dict[str, dict[str, float | Derived]] | None = None,
     batch_size: int | None = None,
-    approximator: nn.Module | None = None,
-    …
+    model: nn.Module | None = None,
+    optimizer_cls: Callable[[ParamsT], Adam] = Adam,
     device: torch.device = DefaultDevice,
     loss_fn: LossFn = _mean_abs,
-) -> tuple[Torch, pd.Series]:
+) -> tuple[Surrogate, pd.Series]:
     """Train a PyTorch surrogate model.
 
     Examples:
@@ -263,8 +187,8 @@ def train_torch(
         surrogate_outputs: Names of output arguments from the surrogate.
         surrogate_stoichiometries: Mapping of variables to their stoichiometries
         batch_size: Size of mini-batches for training (None for full-batch).
-        …
-        …
+        model: Predefined neural network model (None to use default MLP features-50-50-output).
+        optimizer_cls: Optimizer class to use for training (default: Adam).
        device: Device to run the training on (default: DefaultDevice).
        loss_fn: Custom loss function or instance of torch loss object
 
@@ -272,11 +196,11 @@ def train_torch(
         tuple[TorchSurrogate, pd.Series]: Trained surrogate model and loss history.
 
     """
-    trainer = TorchTrainer(
+    trainer = Trainer(
         features=features,
         targets=targets,
-        approximator=approximator,
-        …
+        model=model,
+        optimizer_cls=optimizer_cls,
         device=device,
         loss_fn=loss_fn,
    ).train(
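With these changes, the PyTorch path constructs a Trainer (or calls train) with a model argument in place of the old approximator, and delegates the loop to mxlpy.nn._torch.train. A usage sketch; the data, column names, and hyperparameters are invented for illustration:

import numpy as np
import pandas as pd

from mxlpy.surrogates import _torch

# Hypothetical training set: two named inputs, one named output.
rng = np.random.default_rng(seed=0)
features = pd.DataFrame(rng.uniform(size=(256, 2)), columns=["s1", "s2"])
targets = pd.DataFrame({"v1": features["s1"] * features["s2"]})

surrogate, losses = _torch.train(
    features=features,
    targets=targets,
    epochs=100,
    surrogate_args=["s1", "s2"],
    surrogate_outputs=["v1"],
    batch_size=64,  # None trains full-batch
)
print(surrogate.predict({"s1": 0.2, "s2": 0.4}))
print(losses.tail())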
mxlpy/symbolic/symbolic_model.py
CHANGED
@@ -70,6 +70,6 @@ def to_symbolic_model(model: Model) -> SymbolicModel:
         variables=variables,
         parameters=parameters,
         eqs=[eqs[i] for i in cache.var_names],
-        initial_conditions=model.…
-        parameter_values=model.parameters
+        initial_conditions=model.get_initial_conditions(),
+        parameter_values=model.parameters,
     )
mxlpy/types.py
CHANGED
@@ -46,10 +46,7 @@ __all__ = [
     "unwrap2",
 ]
 
-…
-# changed between Python versions and I have no interest in
-# fixing it in every file
-from collections.abc import Callable, Iterator, Mapping
+from collections.abc import Callable, Iterable, Iterator, Mapping
 from typing import TYPE_CHECKING, Any, ParamSpec, Protocol, TypeVar, cast
 
 import numpy as np
@@ -162,7 +159,7 @@ class Derived:
     fn: RateFn
     args: list[str]
 
-    def calculate(self, dependent: dict[str, float]) -> float:
+    def calculate(self, dependent: dict[str, Any]) -> float:
         """Calculate the derived value.
 
         Args:
@@ -174,7 +171,7 @@ class Derived:
         """
         return cast(float, self.fn(*(dependent[arg] for arg in self.args)))
 
-    def calculate_inpl(self, name: str, dependent: dict[str, float]) -> None:
+    def calculate_inpl(self, name: str, dependent: dict[str, Any]) -> None:
         """Calculate the derived value in place.
 
         Args:
@@ -184,20 +181,6 @@ class Derived:
         """
         dependent[name] = cast(float, self.fn(*(dependent[arg] for arg in self.args)))
 
-    def calculate_inpl_time_course(self, name: str, dependent: pd.DataFrame) -> None:
-        """Calculate the derived value in place.
-
-        Args:
-            name: Name of the derived variable.
-            dependent: Dictionary of dependent variables.
-
-        """
-        try:
-            dependent[name] = self.fn(*dependent.loc[:, self.args].to_numpy().T)
-        except ValueError:  # e.g. numpy.where
-            sub = dependent.loc[:, self.args].to_numpy()
-            dependent[name] = [self.fn(*row) for row in sub]
-
 
 @dataclass(kw_only=True, slots=True)
 class Readout:
@@ -206,7 +189,7 @@ class Readout:
     fn: RateFn
     args: list[str]
 
-    def calculate(self, dependent: dict[str, float]) -> float:
+    def calculate(self, dependent: dict[str, Any]) -> float:
         """Calculate the derived value.
 
         Args:
@@ -218,8 +201,8 @@ class Readout:
         """
         return cast(float, self.fn(*(dependent[arg] for arg in self.args)))
 
-    def calculate_inpl(self, name: str, dependent: dict[str, float]) -> None:
-        """Calculate the derived value in place.
+    def calculate_inpl(self, name: str, dependent: dict[str, Any]) -> None:
+        """Calculate the reaction in place.
 
         Args:
             name: Name of the derived variable.
@@ -228,20 +211,6 @@ class Readout:
         """
         dependent[name] = cast(float, self.fn(*(dependent[arg] for arg in self.args)))
 
-    def calculate_inpl_time_course(self, name: str, dependent: pd.DataFrame) -> None:
-        """Calculate the derived value in place.
-
-        Args:
-            name: Name of the derived variable.
-            dependent: Dictionary of dependent variables.
-
-        """
-        try:
-            dependent[name] = self.fn(*dependent.loc[:, self.args].to_numpy().T)
-        except ValueError:  # e.g. numpy.where
-            sub = dependent.loc[:, self.args].to_numpy()
-            dependent[name] = [self.fn(*row) for row in sub]
-
 
 @dataclass(kw_only=True, slots=True)
 class Reaction:
@@ -258,7 +227,7 @@ class Reaction:
 
         return [k for k in self.args if k in include and k not in exclude]
 
-    def calculate(self, dependent: dict[str, float]) -> float:
+    def calculate(self, dependent: dict[str, Any]) -> float:
         """Calculate the derived value.
 
         Args:
@@ -270,7 +239,7 @@ class Reaction:
         """
         return cast(float, self.fn(*(dependent[arg] for arg in self.args)))
 
-    def calculate_inpl(self, name: str, dependent: dict[str, float]) -> None:
+    def calculate_inpl(self, name: str, dependent: dict[str, Any]) -> None:
         """Calculate the reaction in place.
 
         Args:
@@ -280,19 +249,57 @@ class Reaction:
         """
         dependent[name] = cast(float, self.fn(*(dependent[arg] for arg in self.args)))
 
-    def calculate_inpl_time_course(self, name: str, dependent: pd.DataFrame) -> None:
-        """Calculate the derived value in place.
 
-        Args:
-            name: Name of the derived variable.
-            dependent: Dictionary of dependent variables.
+@dataclass(kw_only=True)
+class AbstractSurrogate:
+    """Abstract base class for surrogate models.
 
-        """
-        try:
-            dependent[name] = self.fn(*dependent.loc[:, self.args].to_numpy().T)
-        except ValueError:  # e.g. numpy.where
-            sub = dependent.loc[:, self.args].to_numpy()
-            dependent[name] = [self.fn(*row) for row in sub]
+    Attributes:
+        inputs: List of input variable names.
+        stoichiometries: Dictionary mapping reaction names to stoichiometries.
+
+    Methods:
+        predict: Abstract method to predict outputs based on input data.
+
+    """
+
+    args: list[str]
+    outputs: list[str]
+    stoichiometries: dict[str, dict[str, float | Derived]] = field(default_factory=dict)
+
+    @abstractmethod
+    def predict(
+        self, args: dict[str, float | pd.Series | pd.DataFrame]
+    ) -> dict[str, float]:
+        """Predict outputs based on input data."""
+
+    def calculate_inpl(
+        self,
+        name: str,  # noqa: ARG002, for API compatibility
+        args: dict[str, float | pd.Series | pd.DataFrame],
+    ) -> None:
+        """Predict outputs based on input data."""
+        args |= self.predict(args=args)
+
+
+@dataclass(kw_only=True)
+class MockSurrogate(AbstractSurrogate):
+    """Mock surrogate model for testing purposes."""
+
+    fn: Callable[..., Iterable[float]]
+
+    def predict(
+        self,
+        args: dict[str, float | pd.Series | pd.DataFrame],
+    ) -> dict[str, float]:
+        """Predict outputs based on input data."""
+        return dict(
+            zip(
+                self.outputs,
+                self.fn(*(args[i] for i in self.args)),
+                strict=True,
+            )
+        )  # type: ignore
 
 
 @dataclass(kw_only=True, slots=True)
@@ -432,70 +439,6 @@ class ProtocolByPars:
         return cast(pd.DataFrame, mean.unstack().T)
 
 
-@dataclass(kw_only=True)
-class AbstractSurrogate:
-    """Abstract base class for surrogate models.
-
-    Attributes:
-        inputs: List of input variable names.
-        stoichiometries: Dictionary mapping reaction names to stoichiometries.
-
-    Methods:
-        predict: Abstract method to predict outputs based on input data.
-
-    """
-
-    args: list[str]
-    outputs: list[str]
-    stoichiometries: dict[str, dict[str, float]] = field(default_factory=dict)
-
-    @abstractmethod
-    def predict_raw(self, y: np.ndarray) -> np.ndarray:
-        """Predict outputs based on input data."""
-
-    def predict(self, y: np.ndarray) -> dict[str, float]:
-        """Predict outputs based on input data."""
-        return dict(
-            zip(
-                self.outputs,
-                self.predict_raw(y),
-                strict=True,
-            )
-        )
-
-    def calculate_inpl(
-        self,
-        name: str,  # noqa: ARG002, for API compatibility
-        args: dict[str, float],
-    ) -> None:
-        """Predict outputs based on input data."""
-        args |= self.predict(np.array([args[arg] for arg in self.args]))
-
-    def calculate_inpl_time_course(
-        self,
-        name: str,  # noqa: ARG002, for API compatibility
-        args: pd.DataFrame,
-    ) -> None:
-        """Predict outputs based on input data."""
-        args[self.outputs] = pd.DataFrame(
-            [self.predict(y) for y in args.loc[:, self.args].to_numpy()],
-            index=args.index,
-            dtype=float,
-        )
-
-
-@dataclass(kw_only=True)
-class MockSurrogate(AbstractSurrogate):
-    """Mock surrogate model for testing purposes."""
-
-    def predict(
-        self,
-        y: np.ndarray,
-    ) -> dict[str, float]:
-        """Predict outputs based on input data."""
-        return dict(zip(self.outputs, y, strict=True))
-
-
 @dataclass(kw_only=True)
 class AbstractEstimator:
     """Abstract class for parameter estimation using neural networks."""