mxlpy-0.8.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. mxlpy/__init__.py +165 -0
  2. mxlpy/distributions.py +339 -0
  3. mxlpy/experimental/__init__.py +12 -0
  4. mxlpy/experimental/diff.py +226 -0
  5. mxlpy/fit.py +291 -0
  6. mxlpy/fns.py +191 -0
  7. mxlpy/integrators/__init__.py +19 -0
  8. mxlpy/integrators/int_assimulo.py +146 -0
  9. mxlpy/integrators/int_scipy.py +146 -0
  10. mxlpy/label_map.py +610 -0
  11. mxlpy/linear_label_map.py +303 -0
  12. mxlpy/mc.py +548 -0
  13. mxlpy/mca.py +280 -0
  14. mxlpy/meta/__init__.py +11 -0
  15. mxlpy/meta/codegen_latex.py +516 -0
  16. mxlpy/meta/codegen_modebase.py +110 -0
  17. mxlpy/meta/codegen_py.py +107 -0
  18. mxlpy/meta/source_tools.py +320 -0
  19. mxlpy/model.py +1737 -0
  20. mxlpy/nn/__init__.py +10 -0
  21. mxlpy/nn/_tensorflow.py +0 -0
  22. mxlpy/nn/_torch.py +129 -0
  23. mxlpy/npe.py +277 -0
  24. mxlpy/parallel.py +171 -0
  25. mxlpy/parameterise.py +27 -0
  26. mxlpy/paths.py +36 -0
  27. mxlpy/plot.py +875 -0
  28. mxlpy/py.typed +0 -0
  29. mxlpy/sbml/__init__.py +14 -0
  30. mxlpy/sbml/_data.py +77 -0
  31. mxlpy/sbml/_export.py +644 -0
  32. mxlpy/sbml/_import.py +599 -0
  33. mxlpy/sbml/_mathml.py +691 -0
  34. mxlpy/sbml/_name_conversion.py +52 -0
  35. mxlpy/sbml/_unit_conversion.py +74 -0
  36. mxlpy/scan.py +629 -0
  37. mxlpy/simulator.py +655 -0
  38. mxlpy/surrogates/__init__.py +31 -0
  39. mxlpy/surrogates/_poly.py +97 -0
  40. mxlpy/surrogates/_torch.py +196 -0
  41. mxlpy/symbolic/__init__.py +10 -0
  42. mxlpy/symbolic/strikepy.py +582 -0
  43. mxlpy/symbolic/symbolic_model.py +75 -0
  44. mxlpy/types.py +474 -0
  45. mxlpy-0.8.0.dist-info/METADATA +106 -0
  46. mxlpy-0.8.0.dist-info/RECORD +48 -0
  47. mxlpy-0.8.0.dist-info/WHEEL +4 -0
  48. mxlpy-0.8.0.dist-info/licenses/LICENSE +674 -0
mxlpy/surrogates/_poly.py
@@ -0,0 +1,97 @@
+ from collections.abc import Iterable
+ from dataclasses import dataclass
+ from typing import Literal
+
+ import numpy as np
+ import pandas as pd
+ from numpy import polynomial
+
+ from mxlpy.types import AbstractSurrogate, ArrayLike
+
+ __all__ = [
+     "PolySurrogate",
+     "PolynomialExpansion",
+     "train_polynomial_surrogate",
+ ]
+
+ # Custom type: union of the numpy polynomial convenience classes
+ PolynomialExpansion = (
+     polynomial.polynomial.Polynomial
+     | polynomial.chebyshev.Chebyshev
+     | polynomial.legendre.Legendre
+     | polynomial.laguerre.Laguerre
+     | polynomial.hermite.Hermite
+     | polynomial.hermite_e.HermiteE
+ )
+
+
+ @dataclass(kw_only=True)
+ class PolySurrogate(AbstractSurrogate):
+     model: PolynomialExpansion
+
+     def predict_raw(self, y: np.ndarray) -> np.ndarray:
+         return self.model(y)
+
+
+ def train_polynomial_surrogate(
+     feature: ArrayLike,
+     target: ArrayLike,
+     series: Literal[
+         "Power", "Chebyshev", "Legendre", "Laguerre", "Hermite", "HermiteE"
+     ] = "Power",
+     degrees: Iterable[int] = (1, 2, 3, 4, 5, 6, 7),
+     surrogate_args: list[str] | None = None,
+     surrogate_stoichiometries: dict[str, dict[str, float]] | None = None,
+ ) -> tuple[PolySurrogate, pd.DataFrame]:
+     """Train a surrogate model based on function series expansion.
+
+     Args:
+         feature: Input data as a numpy array.
+         target: Output data as a numpy array.
+         series: Base functions for the surrogate model.
+         degrees: Degrees of the polynomial to fit to the data.
+         surrogate_args: Additional arguments for the surrogate model.
+         surrogate_stoichiometries: Stoichiometries for the surrogate model.
+
+     Returns:
+         tuple[PolySurrogate, pd.DataFrame]: Lowest-AIC surrogate and a per-degree report of models, errors, and scores.
+
+     """
+     feature = np.array(feature, dtype=float)
+     target = np.array(target, dtype=float)
+
+     # Choose numpy polynomial convenience classes
+     series_dictionary = {
+         "Power": polynomial.polynomial.Polynomial,
+         "Chebyshev": polynomial.chebyshev.Chebyshev,
+         "Legendre": polynomial.legendre.Legendre,
+         "Laguerre": polynomial.laguerre.Laguerre,
+         "Hermite": polynomial.hermite.Hermite,
+         "HermiteE": polynomial.hermite_e.HermiteE,
+     }
+
+     fn_series = series_dictionary[series]
+
+     models = [fn_series.fit(feature, target, degree) for degree in degrees]
+     predictions = np.array([model(feature) for model in models], dtype=float)
+     errors = np.sqrt(np.mean(np.square(predictions - target.reshape(1, -1)), axis=1))
+     log_likelihood = -0.5 * np.sum(
+         np.square(predictions - target.reshape(1, -1)), axis=1
+     )
+     score = 2 * np.array(degrees) - 2 * log_likelihood  # AIC = 2k - 2 ln(L)
+
+     # Choose the model with the lowest AIC
+     model = models[np.argmin(score)]
+     return (
+         PolySurrogate(
+             model=model,
+             args=surrogate_args if surrogate_args is not None else [],
+             stoichiometries=surrogate_stoichiometries
+             if surrogate_stoichiometries is not None
+             else {},
+         ),
+         pd.DataFrame(
+             {"models": models, "error": errors, "score": score},
+             index=pd.Index(np.array(degrees), name="degree"),
+         ),
+     )
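
Read as a whole, train_polynomial_surrogate fits one expansion per candidate degree and keeps the lowest-scoring one, where the score is AIC = 2k - 2 ln(L), with the degree standing in for the parameter count k and ln(L) taken as a unit-variance Gaussian log-likelihood up to a constant (dropped constants do not affect the argmin). A minimal usage sketch against this API follows; the data, seed, and degree choices are invented for illustration, and the import targets the private module from this hunk rather than whatever mxlpy/surrogates/__init__.py re-exports.

import numpy as np

from mxlpy.surrogates._poly import train_polynomial_surrogate

# Synthetic 1D data: a noisy cubic (illustrative only).
rng = np.random.default_rng(seed=0)
x = np.linspace(-1.0, 1.0, 200)
y = x**3 - 0.5 * x + rng.normal(scale=0.05, size=x.size)

surrogate, report = train_polynomial_surrogate(
    feature=x,
    target=y,
    series="Chebyshev",       # any of the six supported bases
    degrees=(1, 2, 3, 4, 5),  # candidate degrees; lowest AIC wins
)

print(report[["error", "score"]])             # per-degree RMSE and AIC
print(surrogate.predict_raw(np.array([0.25])))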
mxlpy/surrogates/_torch.py
@@ -0,0 +1,196 @@
+ from collections.abc import Callable
+ from dataclasses import dataclass
+
+ import numpy as np
+ import pandas as pd
+ import torch
+ import tqdm
+ from torch import nn
+ from torch.optim.adam import Adam
+ from torch.optim.optimizer import ParamsT
+
+ from mxlpy.nn._torch import MLP, DefaultDevice
+ from mxlpy.types import AbstractSurrogate
+
+ __all__ = [
+     "TorchSurrogate",
+     "train_torch_surrogate",
+ ]
+
+
+ @dataclass(kw_only=True)
+ class TorchSurrogate(AbstractSurrogate):
+     """Surrogate model using PyTorch.
+
+     Attributes:
+         model: PyTorch neural network model.
+
+     Methods:
+         predict: Predict outputs based on input data using the PyTorch model.
+
+     """
+
+     model: torch.nn.Module
+
+     def predict_raw(self, y: np.ndarray) -> np.ndarray:
+         """Predict outputs based on input data using the PyTorch model.
+
+         Args:
+             y: Input data as a numpy array.
+
+         Returns:
+             np.ndarray: Predicted outputs as a numpy array.
+
+         """
+         with torch.no_grad():
+             return self.model(
+                 torch.tensor(y, dtype=torch.float32),
+             ).numpy()
+
+
+ def _train_batched(
+     aprox: nn.Module,
+     features: pd.DataFrame,
+     targets: pd.DataFrame,
+     epochs: int,
+     optimizer: Adam,
+     device: torch.device,
+     batch_size: int,
+ ) -> pd.Series:
+     """Train the neural network using mini-batch gradient descent.
+
+     Args:
+         aprox: Neural network model to train.
+         features: Input features as a DataFrame.
+         targets: Target values as a DataFrame.
+         epochs: Number of training epochs.
+         optimizer: Optimizer for training.
+         device: Torch device to train on.
+         batch_size: Size of mini-batches for training.
+
+     Returns:
+         pd.Series: Series containing the training loss history.
+
+     """
+     rng = np.random.default_rng()
+     losses = {}
+     for i in tqdm.trange(epochs):
+         idxs = rng.choice(features.index, size=batch_size)
+         X = torch.Tensor(features.iloc[idxs].to_numpy(), device=device)
+         Y = torch.Tensor(targets.iloc[idxs].to_numpy(), device=device)
+         optimizer.zero_grad()
+         loss = torch.mean(torch.abs(aprox(X) - Y))  # mean absolute error
+         loss.backward()
+         optimizer.step()
+         losses[i] = loss.detach().numpy()
+     return pd.Series(losses, dtype=float)
+
+
+ def _train_full(
+     aprox: nn.Module,
+     features: pd.DataFrame,
+     targets: pd.DataFrame,
+     epochs: int,
+     optimizer: Adam,
+     device: torch.device,
+ ) -> pd.Series:
+     """Train the neural network using full-batch gradient descent.
+
+     Args:
+         aprox: Neural network model to train.
+         features: Input features as a DataFrame.
+         targets: Target values as a DataFrame.
+         epochs: Number of training epochs.
+         optimizer: Optimizer for training.
+         device: Torch device to train on.
+
+     Returns:
+         pd.Series: Series containing the training loss history.
+
+     """
+     X = torch.Tensor(features.to_numpy(), device=device)
+     Y = torch.Tensor(targets.to_numpy(), device=device)
+
+     losses = {}
+     for i in tqdm.trange(epochs):
+         optimizer.zero_grad()
+         loss = torch.mean(torch.abs(aprox(X) - Y))  # mean absolute error
+         loss.backward()
+         optimizer.step()
+         losses[i] = loss.detach().numpy()
+     return pd.Series(losses, dtype=float)
+
+
+ def train_torch_surrogate(
+     features: pd.DataFrame,
+     targets: pd.DataFrame,
+     epochs: int,
+     surrogate_args: list[str] | None = None,
+     surrogate_stoichiometries: dict[str, dict[str, float]] | None = None,
+     batch_size: int | None = None,
+     approximator: nn.Module | None = None,
+     optimimzer_cls: Callable[[ParamsT], Adam] = Adam,
+     device: torch.device = DefaultDevice,
+ ) -> tuple[TorchSurrogate, pd.Series]:
+     """Train a PyTorch surrogate model.
+
+     Examples:
+         >>> train_torch_surrogate(
+         ...     features,
+         ...     targets,
+         ...     epochs=100,
+         ...     surrogate_args=["x1", "x2"],
+         ...     surrogate_stoichiometries={
+         ...         "v1": {"x1": -1, "x2": 1, "ATP": -1},
+         ...     },
+         ... )
+
+     Args:
+         features: DataFrame containing the input features for training.
+         targets: DataFrame containing the target values for training.
+         epochs: Number of training epochs.
+         surrogate_args: List of input variable names for the surrogate model.
+         surrogate_stoichiometries: Dictionary mapping reaction names to stoichiometries.
+         batch_size: Size of mini-batches for training (None for full-batch).
+         approximator: Predefined neural network model (None to use default MLP features-50-50-output).
+         optimimzer_cls: Optimizer class to use for training (default: Adam).
+         device: Device to run the training on (default: DefaultDevice).
+
+     Returns:
+         tuple[TorchSurrogate, pd.Series]: Trained surrogate model and loss history.
+
+     """
+     if approximator is None:
+         approximator = MLP(
+             n_inputs=len(features.columns),
+             neurons_per_layer=[50, 50, len(targets.columns)],
+         ).to(device)
+
+     optimizer = optimimzer_cls(approximator.parameters())
+     if batch_size is None:
+         losses = _train_full(
+             aprox=approximator,
+             features=features,
+             targets=targets,
+             epochs=epochs,
+             optimizer=optimizer,
+             device=device,
+         )
+     else:
+         losses = _train_batched(
+             aprox=approximator,
+             features=features,
+             targets=targets,
+             epochs=epochs,
+             optimizer=optimizer,
+             device=device,
+             batch_size=batch_size,
+         )
+     surrogate = TorchSurrogate(
+         model=approximator,
+         args=surrogate_args if surrogate_args is not None else [],
+         stoichiometries=surrogate_stoichiometries
+         if surrogate_stoichiometries is not None
+         else {},
+     )
+     return surrogate, losses
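
For orientation, a hedged end-to-end sketch of train_torch_surrogate with small synthetic DataFrames. Column names, shapes, and the epoch count are invented for illustration, and the import again targets the private module from this hunk. Passing batch_size=None selects the full-batch path (_train_full), which minimizes a mean-absolute-error loss.

import numpy as np
import pandas as pd

from mxlpy.surrogates._torch import train_torch_surrogate

# Synthetic training data: two inputs, one target column (illustrative only).
rng = np.random.default_rng(seed=0)
features = pd.DataFrame(rng.uniform(size=(256, 2)), columns=["x1", "x2"])
targets = pd.DataFrame({"v1": features["x1"] - 2.0 * features["x2"]})

surrogate, losses = train_torch_surrogate(
    features=features,
    targets=targets,
    epochs=200,
    surrogate_args=["x1", "x2"],
    surrogate_stoichiometries={"v1": {"x1": -1, "x2": 1}},
    batch_size=None,  # None selects the full-batch training path
)

print(losses.tail())  # pd.Series of per-epoch training losses
print(surrogate.predict_raw(np.array([[0.3, 0.7]])))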
mxlpy/symbolic/__init__.py
@@ -0,0 +1,10 @@
+ """Symbolic utilities."""
+
+ __all__ = [
+     "SymbolicModel",
+     "check_identifiability",
+     "to_symbolic_model",
+ ]
+
+ from .strikepy import check_identifiability
+ from .symbolic_model import SymbolicModel, to_symbolic_model
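
For completeness, the import surface this __init__.py establishes; the names come from __all__ above, while the callables themselves are defined in strikepy.py and symbolic_model.py elsewhere in this diff (their signatures are not shown here).

# Public symbolic API as re-exported by mxlpy.symbolic (names from __all__ above).
from mxlpy.symbolic import SymbolicModel, check_identifiability, to_symbolic_model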