modelbase2-0.2.0-py3-none-any.whl → modelbase2-0.4.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- modelbase2/__init__.py +12 -1
- modelbase2/distributions.py +33 -0
- modelbase2/experimental/__init__.py +2 -0
- modelbase2/experimental/_backup.py +1017 -0
- modelbase2/experimental/strikepy.py +562 -0
- modelbase2/experimental/symbolic.py +286 -0
- modelbase2/fit.py +6 -6
- modelbase2/model.py +0 -1
- modelbase2/nnarchitectures.py +128 -0
- modelbase2/npe.py +15 -82
- modelbase2/plot.py +4 -1
- modelbase2/simulator.py +7 -3
- modelbase2/surrogates/__init__.py +1 -2
- modelbase2/surrogates/_poly.py +32 -5
- modelbase2/surrogates/_torch.py +8 -64
- modelbase2/surrogates.py +7 -1
- {modelbase2-0.2.0.dist-info → modelbase2-0.4.0.dist-info}/METADATA +14 -1
- {modelbase2-0.2.0.dist-info → modelbase2-0.4.0.dist-info}/RECORD +20 -16
- {modelbase2-0.2.0.dist-info → modelbase2-0.4.0.dist-info}/WHEEL +0 -0
- {modelbase2-0.2.0.dist-info → modelbase2-0.4.0.dist-info}/licenses/LICENSE +0 -0
modelbase2/surrogates/__init__.py
CHANGED
@@ -19,12 +19,11 @@ from __future__ import annotations
 import contextlib

 with contextlib.suppress(ImportError):
-    from ._torch import
+    from ._torch import TorchSurrogate, train_torch_surrogate

 from ._poly import PolySurrogate, train_polynomial_surrogate

 __all__ = [
-    "Dense",
     "PolySurrogate",
     "TorchSurrogate",
     "train_polynomial_surrogate",
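For readers tracking the public API: after this change the subpackage only re-exports the surrogate classes and their training helpers, and `Dense` is gone from `__all__`. A minimal import sketch, assuming the optional torch dependency is installed (the `contextlib.suppress(ImportError)` guard means the torch-based names are simply absent otherwise):

```python
# Sketch of the 0.4.0 surface of modelbase2.surrogates, as shown in the diff above.
from modelbase2.surrogates import (
    PolySurrogate,
    TorchSurrogate,  # only importable when torch is installed
    train_polynomial_surrogate,
    train_torch_surrogate,
)
```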
modelbase2/surrogates/_poly.py
CHANGED
@@ -3,16 +3,27 @@ from dataclasses import dataclass

 import numpy as np
 import pandas as pd
-from numpy
+from numpy import polynomial
+from typing import Union, Literal

 from modelbase2.types import AbstractSurrogate, ArrayLike

-__all__ = ["PolySurrogate", "train_polynomial_surrogate"]
+__all__ = ["PolySurrogate", "PolynomialExpansion", "train_polynomial_surrogate"]
+
+# define custom type
+PolynomialExpansion = (
+    polynomial.polynomial.Polynomial
+    | polynomial.chebyshev.Chebyshev
+    | polynomial.legendre.Legendre
+    | polynomial.laguerre.Laguerre
+    | polynomial.hermite.Hermite
+    | polynomial.hermite_e.HermiteE
+)


 @dataclass(kw_only=True)
 class PolySurrogate(AbstractSurrogate):
-    model:
+    model: PolynomialExpansion

     def predict_raw(self, y: np.ndarray) -> np.ndarray:
         return self.model(y)
@@ -21,15 +32,19 @@ class PolySurrogate(AbstractSurrogate):
 def train_polynomial_surrogate(
     feature: ArrayLike,
     target: ArrayLike,
+    series: Literal[
+        "Power", "Chebyshev", "Legendre", "Laguerre", "Hermite", "HermiteE"
+    ] = "Power",
     degrees: Iterable[int] = (1, 2, 3, 4, 5, 6, 7),
     surrogate_args: list[str] | None = None,
     surrogate_stoichiometries: dict[str, dict[str, float]] | None = None,
 ) -> tuple[PolySurrogate, pd.DataFrame]:
-    """Train a
+    """Train a surrogate model based on function series expansion.

     Args:
         feature: Input data as a numpy array.
         target: Output data as a numpy array.
+        series: Base functions for the surrogate model
         degrees: Degrees of the polynomial to fit to the data.
         surrogate_args: Additional arguments for the surrogate model.
         surrogate_stoichiometries: Stoichiometries for the surrogate model.
@@ -41,7 +56,19 @@ def train_polynomial_surrogate(
     feature = np.array(feature, dtype=float)
     target = np.array(target, dtype=float)

-
+    # Choose numpy polynomial convenience classes
+    series_dictionary = {
+        "Power": polynomial.polynomial.Polynomial,
+        "Chebyshev": polynomial.chebyshev.Chebyshev,
+        "Legendre": polynomial.legendre.Legendre,
+        "Laguerre": polynomial.laguerre.Laguerre,
+        "Hermite": polynomial.hermite.Hermite,
+        "HermiteE": polynomial.hermite_e.HermiteE,
+    }
+
+    fn_series = series_dictionary[series]
+
+    models = [fn_series.fit(feature, target, degree) for degree in degrees]
     predictions = np.array([model(feature) for model in models], dtype=float)
     errors = np.sqrt(np.mean(np.square(predictions - target.reshape(1, -1)), axis=1))
     log_likelihood = -0.5 * np.sum(
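The `_poly.py` changes above add a `series` argument so the expansion basis can be chosen at training time. A hedged usage sketch based on the signature in the diff; the toy data is illustrative, and per the return annotation the helper returns a `PolySurrogate` together with a `pd.DataFrame` summarising the candidate degrees:

```python
import numpy as np

from modelbase2.surrogates import train_polynomial_surrogate

# Toy one-dimensional training data (illustrative only).
feature = np.linspace(0.0, 2.0, 50)
target = feature**2 + 0.1 * np.sin(5.0 * feature)

# New in 0.4.0: pick the basis family; "Power" remains the default.
surrogate, degree_stats = train_polynomial_surrogate(
    feature,
    target,
    series="Chebyshev",
    degrees=(1, 2, 3, 4, 5),
)

print(degree_stats)                                  # per-degree fit summary
print(surrogate.predict_raw(np.array([0.5, 1.5])))   # evaluate the fitted expansion
```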
modelbase2/surrogates/_torch.py
CHANGED
@@ -1,3 +1,4 @@
+from collections.abc import Callable
 from dataclasses import dataclass

 import numpy as np
@@ -6,12 +7,12 @@ import torch
 import tqdm
 from torch import nn
 from torch.optim.adam import Adam
+from torch.optim.optimizer import ParamsT

+from modelbase2.nnarchitectures import MLP, DefaultDevice
 from modelbase2.types import AbstractSurrogate

-__all__ = ["
-
-DefaultDevice = torch.device("cpu")
+__all__ = ["TorchSurrogate", "train_torch_surrogate"]


 @dataclass(kw_only=True)
@@ -44,63 +45,6 @@ class TorchSurrogate(AbstractSurrogate):
         ).numpy()


-class Dense(nn.Module):
-    """Neural network approximator for surrogate modeling.
-
-    Attributes:
-        net: Sequential neural network model.
-
-    Methods:
-        forward: Forward pass through the neural network.
-
-    """
-
-    def __init__(self, n_inputs: int, n_outputs: int) -> None:
-        """Initializes the surrogate model with the given number of inputs and outputs.
-
-        Args:
-            n_inputs (int): The number of input features.
-            n_outputs (int): The number of output features.
-
-        Initializes a neural network with the following architecture:
-        - Linear layer with `n_inputs` inputs and 50 outputs
-        - ReLU activation
-        - Linear layer with 50 inputs and 50 outputs
-        - ReLU activation
-        - Linear layer with 50 inputs and `n_outputs` outputs
-
-        The weights of the linear layers are initialized with a normal distribution
-        (mean=0, std=0.1) and the biases are initialized to 0.
-
-        """
-        super().__init__()
-
-        self.net = nn.Sequential(
-            nn.Linear(n_inputs, 50),
-            nn.ReLU(),
-            nn.Linear(50, 50),
-            nn.ReLU(),
-            nn.Linear(50, n_outputs),
-        )
-
-        for m in self.net.modules():
-            if isinstance(m, nn.Linear):
-                nn.init.normal_(m.weight, mean=0, std=0.1)
-                nn.init.constant_(m.bias, val=0)
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        """Forward pass through the neural network.
-
-        Args:
-            x: Input tensor.
-
-        Returns:
-            torch.Tensor: Output tensor.
-
-        """
-        return self.net(x)
-
-
 def _train_batched(
     aprox: nn.Module,
     features: pd.DataFrame,
@@ -182,7 +126,7 @@ def train_torch_surrogate(
     surrogate_stoichiometries: dict[str, dict[str, float]] | None = None,
     batch_size: int | None = None,
     approximator: nn.Module | None = None,
-    optimimzer_cls:
+    optimimzer_cls: Callable[[ParamsT], Adam] = Adam,
     device: torch.device = DefaultDevice,
 ) -> tuple[TorchSurrogate, pd.Series]:
     """Train a PyTorch surrogate model.
@@ -205,7 +149,7 @@ def train_torch_surrogate(
         surrogate_args: List of input variable names for the surrogate model.
         surrogate_stoichiometries: Dictionary mapping reaction names to stoichiometries.
         batch_size: Size of mini-batches for training (None for full-batch).
-        approximator: Predefined neural network model (None to use default).
+        approximator: Predefined neural network model (None to use default MLP features-50-50-output).
         optimimzer_cls: Optimizer class to use for training (default: Adam).
         device: Device to run the training on (default: DefaultDevice).

@@ -214,9 +158,9 @@ def train_torch_surrogate(

     """
     if approximator is None:
-        approximator =
+        approximator = MLP(
             n_inputs=len(features.columns),
-
+            layers=[50, 50, len(targets.columns)],
         ).to(device)

     optimizer = optimimzer_cls(approximator.parameters())
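With `Dense` removed, the default approximator now comes from `modelbase2.nnarchitectures.MLP` with two hidden layers of 50 units, and callers can substitute their own network. The sketch below is hedged: the DataFrames are placeholders, only parameters visible in these hunks are used, and `train_torch_surrogate` may require further arguments that are not shown in this diff:

```python
import pandas as pd
import torch

from modelbase2.nnarchitectures import MLP
from modelbase2.surrogates import train_torch_surrogate

# Placeholder training data; in practice these come from model scans.
features = pd.DataFrame({"x1": [0.0, 0.5, 1.0], "x2": [1.0, 0.5, 0.0]})
targets = pd.DataFrame({"v1": [0.1, 0.4, 0.9]})

# Leaving approximator=None reproduces the default shown in the diff:
# MLP(n_inputs=len(features.columns), layers=[50, 50, len(targets.columns)]).
# Here a wider, hypothetical network is supplied instead.
net = MLP(
    n_inputs=len(features.columns),
    layers=[100, 100, len(targets.columns)],
).to(torch.device("cpu"))

# Per the return annotation, this yields a TorchSurrogate and a pd.Series of losses.
surrogate, losses = train_torch_surrogate(
    features=features,
    targets=targets,
    approximator=net,
)
```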
modelbase2/surrogates.py
CHANGED
@@ -19,6 +19,7 @@ from __future__ import annotations
 from abc import abstractmethod
 from dataclasses import dataclass
 from pathlib import Path
+from typing import TYPE_CHECKING

 import numpy as np
 import pandas as pd
@@ -29,6 +30,11 @@ from torch.optim.adam import Adam

 from modelbase2.parallel import Cache

+if TYPE_CHECKING:
+    from collections.abc import Callable
+
+    from torch.optim.optimizer import ParamsT
+
 __all__ = [
     "AbstractSurrogate",
     "Approximator",
@@ -251,7 +257,7 @@ def train_torch_surrogate(
     surrogate_stoichiometries: dict[str, dict[str, float]],
     batch_size: int | None = None,
     approximator: nn.Module | None = None,
-    optimimzer_cls:
+    optimimzer_cls: Callable[[ParamsT], Adam] = Adam,
     device: torch.device = DefaultDevice,
 ) -> tuple[TorchSurrogate, pd.Series]:
     """Train a PyTorch surrogate model.
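Both copies of `train_torch_surrogate` now annotate `optimimzer_cls` as `Callable[[ParamsT], Adam]`, and the training code calls it as `optimimzer_cls(approximator.parameters())`. Any factory that maps model parameters to an optimizer therefore works; a small sketch using `functools.partial` to preset a learning rate (the value is illustrative, not from the diff):

```python
from functools import partial

from torch import nn
from torch.optim.adam import Adam

# partial(Adam, lr=...) satisfies Callable[[ParamsT], Adam]: it accepts the
# model parameters and returns a configured Adam instance.
make_optimizer = partial(Adam, lr=1e-3)

# This mirrors what the trainer does internally with optimimzer_cls.
net = nn.Linear(2, 1)
optimizer = make_optimizer(net.parameters())

# In practice it would be passed as
# train_torch_surrogate(..., optimimzer_cls=make_optimizer).
```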
{modelbase2-0.2.0.dist-info → modelbase2-0.4.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: modelbase2
-Version: 0.2.0
+Version: 0.4.0
 Summary: A package to build metabolic models
 Author-email: Marvin van Aalst <marvin.vanaalst@gmail.com>
 Maintainer-email: Marvin van Aalst <marvin.vanaalst@gmail.com>
@@ -33,6 +33,7 @@ Requires-Dist: pebble>=5.0.7
 Requires-Dist: python-libsbml>=5.20.4
 Requires-Dist: scipy>=1.14.1
 Requires-Dist: seaborn>=0.13.2
+Requires-Dist: symbtools>=0.4.0
 Requires-Dist: sympy>=1.13.1
 Requires-Dist: tabulate>=0.9.0
 Requires-Dist: tqdm>=4.66.6
@@ -59,6 +60,18 @@ Description-Content-Type: text/markdown

 # modelbase

+## Installation
+
+You can install modelbase using pip: `pip install modelbase2`
+
+If you want access to the sundials solver suite via the [assimulo](https://jmodelica.org/assimulo/) package, we recommend setting up a virtual environment via [pixi](https://pixi.sh/) or [mamba / conda](https://mamba.readthedocs.io/en/latest/) using the [conda-forge](https://conda-forge.org/) channel.
+
+```bash
+pixi init
+pixi add python assimulo
+pixi add --pypi modelbase2
+```
+

 ## Development setup

{modelbase2-0.2.0.dist-info → modelbase2-0.4.0.dist-info}/RECORD
CHANGED
@@ -1,27 +1,31 @@
-modelbase2/__init__.py,sha256=
-modelbase2/distributions.py,sha256=
-modelbase2/fit.py,sha256=
+modelbase2/__init__.py,sha256=ArYJZoCTulkjFctJzxk7c9CDBXYRl2J9_LXr1EORilk,4048
+modelbase2/distributions.py,sha256=i456B0KYKdiGwuoVjoj3rey5lKdJmu9EVvacnfWXHGA,8744
+modelbase2/fit.py,sha256=WEI2lxLhdHFr6ax5xXrrkHUTxcEhmXyBHebfHEcXwCY,8172
 modelbase2/fns.py,sha256=8JtIzPk3DAnNHz3LoJ1ukLFTjPNO1rNCeZ7VnRmJY2o,4503
 modelbase2/label_map.py,sha256=LUwcOHQWiGfBGV5XUmPM_SOwM9IyDVcQVJ11DPfVpAo,17774
 modelbase2/linear_label_map.py,sha256=gA8CHxcehgtI6ovwZ9qNUPDvxfqbO1J1kBC_mltD4TY,10225
 modelbase2/mc.py,sha256=zlDL7e_udpIMRhSjfFJo5AwkigD0B_3H2rQxyelBuzI,16285
 modelbase2/mca.py,sha256=nMS2VnzR2VEujCFUaj9WL82CNd-oxTb3jCHP8IlJvxA,8845
-modelbase2/model.py,sha256=
-modelbase2/
+modelbase2/model.py,sha256=mrc2Wz1IkINX1TQvuLZFdxSINqOaZGkn8FOd40qn5BY,53842
+modelbase2/nnarchitectures.py,sha256=OA1X4UHrn7gsLuuqxK6Dhv5aiKnQflhHezYCUV-NuO8,4012
+modelbase2/npe.py,sha256=o876zHjyfJelGijSmCL0vUBfWbIhcbQyyPkwp8hZ4NA,8743
 modelbase2/parallel.py,sha256=kX4Td5YoovDwZp6kX_3cfO6QtHSS9ieJ0bMZiKs3Xv8,5002
 modelbase2/parameterise.py,sha256=7VrYxrQv0visraqUthWSnWfx-cxh2evlXbszIY5031U,690
 modelbase2/paths.py,sha256=uatKXDa79uniUB2Z3dr8eBJVuUPXDI-o_bf-DqPKq1Y,1039
-modelbase2/plot.py,sha256=
+modelbase2/plot.py,sha256=txKF6Xnyh2JPJ06Wu803Wn7_VijMMJ1Kbq4WQB-xKE8,22720
 modelbase2/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 modelbase2/scan.py,sha256=PvWZA4EgNS0JVMvm87bF72hSmhvo6O2KGg4vRzxve_8,18104
 modelbase2/scope.py,sha256=4twnEh8LrTmlLE-uRvubVkE3SSWejlLvtBzTCPqG3Aw,3710
-modelbase2/simulator.py,sha256=
-modelbase2/surrogates.py,sha256=
+modelbase2/simulator.py,sha256=0_KXL3QUsMHtckdH4nPlzJoGNLoQaVttV7PUt2RU1m8,19823
+modelbase2/surrogates.py,sha256=Dk-YEXb-ot22W3r5Bl3rxApbCyvNdBnNCvRi_1f_NnA,9276
 modelbase2/types.py,sha256=N74pWUZDcGlSMfDkjqqwHn5dkozlqgS6Wqg-58YKdvg,9904
-modelbase2/experimental/__init__.py,sha256=
+modelbase2/experimental/__init__.py,sha256=IzgcQ7MtMII7n6cg1PMp-oZm06CNVR19ax2exvb9Ny0,444
+modelbase2/experimental/_backup.py,sha256=As-L75tMQjWfkheRPk8xcroQPKDsSSejBJ2dXqPXmVo,46644
 modelbase2/experimental/codegen.py,sha256=J_0iCtojwjmDXAfC8EqiXP0gmWaSH8MPkWvdLsZWsXU,6962
 modelbase2/experimental/diff.py,sha256=e7fjD9kqxkRUNxSevbAznd5cOlEdWJ6pj0y7Kd5KKrw,8973
 modelbase2/experimental/notes.md,sha256=YlM2biTzub6jSlx-aDZaBYsvQcGwb7NHyVAbbl2acGE,238
+modelbase2/experimental/strikepy.py,sha256=cKBs9InXR9mEPgx70Ynv0qkmAGfloksqinbpppTiC6U,19464
+modelbase2/experimental/symbolic.py,sha256=QT82TSW42RSVsvXK2WTQW1XluXnflWzrHbx6RFr_YmY,9953
 modelbase2/experimental/tex.py,sha256=M-Pdq3eQw1Huo-z1gv8EhWVO5ecJyFS8MMy9yoX81VI,13634
 modelbase2/integrators/__init__.py,sha256=kqmV6a0TRyLGR_XqbyAI652AfptYnXAUpqbSFg0CpP8,450
 modelbase2/integrators/int_assimulo.py,sha256=VEQIZFZcEovLPy8i_jR8H8XcxBRQoRVmNzzCYzInPc0,4611
@@ -33,10 +37,10 @@ modelbase2/sbml/_import.py,sha256=uT5JpFvCKjQNBFmGPba61xYShHmjzjczqnaYflilSMI,21
 modelbase2/sbml/_mathml.py,sha256=bNk9RQ_NQFDhY1R354p-gwqqHaIiyAwZ1xLPHHhiguQ,24436
 modelbase2/sbml/_name_conversion.py,sha256=XK9DEyzhrD0GBBwwjK9RA0yORrDX5c-Uvx0VtKMR5rA,1325
 modelbase2/sbml/_unit_conversion.py,sha256=dW_I6_Ou09ccwnp6LIdrPriIQnQUK5lJcjzM2Fawm6U,1927
-modelbase2/surrogates/__init__.py,sha256=
-modelbase2/surrogates/_poly.py,sha256=
-modelbase2/surrogates/_torch.py,sha256=
-modelbase2-0.
-modelbase2-0.
-modelbase2-0.
-modelbase2-0.
+modelbase2/surrogates/__init__.py,sha256=N_iXERECKvmrHiihwnyQEKOSBsmlGEuQhEotn-mWKdk,924
+modelbase2/surrogates/_poly.py,sha256=zjlNL4iYR9G51gjvZPHe3CAYF-tgACGdIBe4QUYXLQk,3110
+modelbase2/surrogates/_torch.py,sha256=CBS_3JzSgI2-xQrbq9CIXY0fJQsxbhBnWkG2TQyj7Zs,5885
+modelbase2-0.4.0.dist-info/METADATA,sha256=esuEQXsT-gP3-f_UJe65bcmXTVO5J6D-MiegzUtmxak,3344
+modelbase2-0.4.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+modelbase2-0.4.0.dist-info/licenses/LICENSE,sha256=qvG2VolmSkrcocL34V1ieOx-Rn-fpVcUbb25gHzVgZw,35079
+modelbase2-0.4.0.dist-info/RECORD,,
{modelbase2-0.2.0.dist-info → modelbase2-0.4.0.dist-info}/WHEEL
File without changes
{modelbase2-0.2.0.dist-info → modelbase2-0.4.0.dist-info}/licenses/LICENSE
File without changes