mxlpy 0.18.0__py3-none-any.whl → 0.19.0__py3-none-any.whl
This diff shows the changes between two publicly released versions of the mxlpy package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
- mxlpy/__init__.py +14 -4
- mxlpy/experimental/diff.py +1 -1
- mxlpy/integrators/int_assimulo.py +10 -3
- mxlpy/integrators/int_scipy.py +7 -3
- mxlpy/label_map.py +3 -1
- mxlpy/meta/codegen_latex.py +1 -1
- mxlpy/meta/source_tools.py +1 -1
- mxlpy/model.py +146 -87
- mxlpy/nn/__init__.py +24 -5
- mxlpy/nn/_keras.py +85 -0
- mxlpy/nn/_torch.py +18 -17
- mxlpy/npe/__init__.py +21 -16
- mxlpy/npe/_keras.py +326 -0
- mxlpy/npe/_torch.py +56 -60
- mxlpy/plot.py +2 -2
- mxlpy/sbml/_export.py +8 -1
- mxlpy/surrogates/__init__.py +25 -17
- mxlpy/surrogates/_keras.py +137 -0
- mxlpy/surrogates/_poly.py +19 -8
- mxlpy/surrogates/_qss.py +31 -0
- mxlpy/surrogates/_torch.py +44 -30
- mxlpy/symbolic/symbolic_model.py +2 -2
- mxlpy/types.py +57 -111
- {mxlpy-0.18.0.dist-info → mxlpy-0.19.0.dist-info}/METADATA +21 -22
- {mxlpy-0.18.0.dist-info → mxlpy-0.19.0.dist-info}/RECORD +27 -24
- mxlpy/nn/_tensorflow.py +0 -0
- {mxlpy-0.18.0.dist-info → mxlpy-0.19.0.dist-info}/WHEEL +0 -0
- {mxlpy-0.18.0.dist-info → mxlpy-0.19.0.dist-info}/licenses/LICENSE +0 -0
mxlpy/surrogates/_torch.py
CHANGED
@@ -9,16 +9,17 @@ from torch import nn
 from torch.optim.adam import Adam
 from torch.optim.optimizer import ParamsT

-from mxlpy.nn._torch import MLP, DefaultDevice
-from mxlpy.
+from mxlpy.nn._torch import MLP, DefaultDevice
+from mxlpy.nn._torch import train as _train
+from mxlpy.types import AbstractSurrogate, Derived

 type LossFn = Callable[[torch.Tensor, torch.Tensor], torch.Tensor]

 __all__ = [
     "LossFn",
-    "
-    "
-    "
+    "Surrogate",
+    "Trainer",
+    "train",
 ]


@@ -37,7 +38,7 @@ def _mean_abs(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:


 @dataclass(kw_only=True)
-class
+class Surrogate(AbstractSurrogate):
     """Surrogate model using PyTorch.

     Attributes:
@@ -65,12 +66,25 @@ class Torch(AbstractSurrogate):
             torch.tensor(y, dtype=torch.float32),
         ).numpy()

+    def predict(
+        self,
+        args: dict[str, float | pd.Series | pd.DataFrame],
+    ) -> dict[str, float]:
+        """Predict outputs based on input data."""
+        return dict(
+            zip(
+                self.outputs,
+                self.predict_raw(np.array([args[arg] for arg in self.args])),
+                strict=True,
+            )
+        )
+

 @dataclass(init=False)
-class
+class Trainer:
     features: pd.DataFrame
     targets: pd.DataFrame
-
+    model: nn.Module
     optimizer: Adam
     device: torch.device
     losses: list[pd.Series]
@@ -80,22 +94,22 @@ class TorchTrainer:
         self,
         features: pd.DataFrame,
         targets: pd.DataFrame,
-
-
+        model: nn.Module | None = None,
+        optimizer_cls: Callable[[ParamsT], Adam] = Adam,
         device: torch.device = DefaultDevice,
         loss_fn: LossFn = _mean_abs,
     ) -> None:
         self.features = features
         self.targets = targets

-        if
-
+        if model is None:
+            model = MLP(
                 n_inputs=len(features.columns),
                 neurons_per_layer=[50, 50, len(targets.columns)],
             )
-        self.
+        self.model = model.to(device)

-        self.optimizer =
+        self.optimizer = optimizer_cls(model.parameters())
         self.device = device
         self.loss_fn = loss_fn
         self.losses = []
@@ -105,8 +119,8 @@ class TorchTrainer:
         epochs: int,
         batch_size: int | None = None,
     ) -> Self:
-        losses =
-
+        losses = _train(
+            model=self.model,
             features=self.features.to_numpy(),
             targets=self.targets.to_numpy(),
             epochs=epochs,
@@ -127,10 +141,10 @@ class TorchTrainer:
         self,
         surrogate_args: list[str] | None = None,
         surrogate_outputs: list[str] | None = None,
-        surrogate_stoichiometries: dict[str, dict[str, float]] | None = None,
-    ) ->
-        return
-            model=self.
+        surrogate_stoichiometries: dict[str, dict[str, float | Derived]] | None = None,
+    ) -> Surrogate:
+        return Surrogate(
+            model=self.model,
             args=surrogate_args if surrogate_args is not None else [],
             outputs=surrogate_outputs if surrogate_outputs is not None else [],
             stoichiometries=surrogate_stoichiometries
@@ -139,19 +153,19 @@ class TorchTrainer:
         )


-def
+def train(
     features: pd.DataFrame,
     targets: pd.DataFrame,
     epochs: int,
     surrogate_args: list[str] | None = None,
     surrogate_outputs: list[str] | None = None,
-    surrogate_stoichiometries: dict[str, dict[str, float]] | None = None,
+    surrogate_stoichiometries: dict[str, dict[str, float | Derived]] | None = None,
     batch_size: int | None = None,
-
-
+    model: nn.Module | None = None,
+    optimizer_cls: Callable[[ParamsT], Adam] = Adam,
     device: torch.device = DefaultDevice,
     loss_fn: LossFn = _mean_abs,
-) -> tuple[
+) -> tuple[Surrogate, pd.Series]:
     """Train a PyTorch surrogate model.

     Examples:
@@ -173,8 +187,8 @@ def train_torch(
         surrogate_outputs: Names of output arguments from the surrogate.
        surrogate_stoichiometries: Mapping of variables to their stoichiometries
         batch_size: Size of mini-batches for training (None for full-batch).
-
-
+        model: Predefined neural network model (None to use default MLP features-50-50-output).
+        optimizer_cls: Optimizer class to use for training (default: Adam).
         device: Device to run the training on (default: DefaultDevice).
         loss_fn: Custom loss function or instance of torch loss object

@@ -182,11 +196,11 @@ def train_torch(
         tuple[TorchSurrogate, pd.Series]: Trained surrogate model and loss history.

     """
-    trainer =
+    trainer = Trainer(
         features=features,
         targets=targets,
-
-
+        model=model,
+        optimizer_cls=optimizer_cls,
         device=device,
         loss_fn=loss_fn,
     ).train(
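The hunks above rename the torch-surrogate API from Torch/TorchTrainer/train_torch to Surrogate/Trainer/train and move prediction to named inputs and outputs. The following is a minimal sketch of how the renamed entry point could be called; the direct import of the private module `mxlpy.surrogates._torch` and the toy dataset are assumptions made for illustration, not taken from the package documentation.

```python
# Hedged sketch of the renamed training API (train_torch -> train);
# the toy data and the private-module import are illustrative assumptions.
import numpy as np
import pandas as pd

from mxlpy.surrogates import _torch as torch_surrogate

# Toy regression problem: learn v1 = 2 * x.
features = pd.DataFrame({"x": np.linspace(0.0, 1.0, 16)})
targets = pd.DataFrame({"v1": 2.0 * features["x"]})

# train() returns the trained Surrogate plus the loss history.
surrogate, losses = torch_surrogate.train(
    features=features,
    targets=targets,
    epochs=10,
    surrogate_args=["x"],
    surrogate_outputs=["v1"],
    batch_size=None,  # full-batch training
)

# Surrogate.predict now maps named inputs to named outputs.
print(surrogate.predict({"x": 0.5}))
print(losses.tail())
```

The dict-in/dict-out `predict` matches the relocated AbstractSurrogate interface shown in the mxlpy/types.py hunks below.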
mxlpy/symbolic/symbolic_model.py
CHANGED
@@ -70,6 +70,6 @@ def to_symbolic_model(model: Model) -> SymbolicModel:
         variables=variables,
         parameters=parameters,
         eqs=[eqs[i] for i in cache.var_names],
-        initial_conditions=model.
-        parameter_values=model.parameters
+        initial_conditions=model.get_initial_conditions(),
+        parameter_values=model.parameters,
     )
mxlpy/types.py
CHANGED
@@ -46,7 +46,7 @@ __all__ = [
     "unwrap2",
 ]

-from collections.abc import Callable, Iterator, Mapping
+from collections.abc import Callable, Iterable, Iterator, Mapping
 from typing import TYPE_CHECKING, Any, ParamSpec, Protocol, TypeVar, cast

 import numpy as np
@@ -159,7 +159,7 @@ class Derived:
     fn: RateFn
     args: list[str]

-    def calculate(self, dependent: dict[str,
+    def calculate(self, dependent: dict[str, Any]) -> float:
         """Calculate the derived value.

         Args:
@@ -171,7 +171,7 @@ class Derived:
         """
         return cast(float, self.fn(*(dependent[arg] for arg in self.args)))

-    def calculate_inpl(self, name: str, dependent: dict[str,
+    def calculate_inpl(self, name: str, dependent: dict[str, Any]) -> None:
         """Calculate the derived value in place.

         Args:
@@ -181,20 +181,6 @@ class Derived:
         """
         dependent[name] = cast(float, self.fn(*(dependent[arg] for arg in self.args)))

-    def calculate_inpl_time_course(self, name: str, dependent: pd.DataFrame) -> None:
-        """Calculate the derived value in place.
-
-        Args:
-            name: Name of the derived variable.
-            dependent: Dictionary of dependent variables.
-
-        """
-        try:
-            dependent[name] = self.fn(*dependent.loc[:, self.args].to_numpy().T)
-        except ValueError:  # e.g. numpy.where
-            sub = dependent.loc[:, self.args].to_numpy()
-            dependent[name] = [self.fn(*row) for row in sub]
-

 @dataclass(kw_only=True, slots=True)
 class Readout:
@@ -203,7 +189,7 @@ class Readout:
     fn: RateFn
     args: list[str]

-    def calculate(self, dependent: dict[str,
+    def calculate(self, dependent: dict[str, Any]) -> float:
         """Calculate the derived value.

         Args:
@@ -215,8 +201,8 @@ class Readout:
         """
         return cast(float, self.fn(*(dependent[arg] for arg in self.args)))

-    def calculate_inpl(self, name: str, dependent: dict[str,
-        """Calculate the
+    def calculate_inpl(self, name: str, dependent: dict[str, Any]) -> None:
+        """Calculate the reaction in place.

         Args:
             name: Name of the derived variable.
@@ -225,20 +211,6 @@ class Readout:
         """
         dependent[name] = cast(float, self.fn(*(dependent[arg] for arg in self.args)))

-    def calculate_inpl_time_course(self, name: str, dependent: pd.DataFrame) -> None:
-        """Calculate the derived value in place.
-
-        Args:
-            name: Name of the derived variable.
-            dependent: Dictionary of dependent variables.
-
-        """
-        try:
-            dependent[name] = self.fn(*dependent.loc[:, self.args].to_numpy().T)
-        except ValueError:  # e.g. numpy.where
-            sub = dependent.loc[:, self.args].to_numpy()
-            dependent[name] = [self.fn(*row) for row in sub]
-

 @dataclass(kw_only=True, slots=True)
 class Reaction:
@@ -255,7 +227,7 @@ class Reaction:

         return [k for k in self.args if k in include and k not in exclude]

-    def calculate(self, dependent: dict[str,
+    def calculate(self, dependent: dict[str, Any]) -> float:
         """Calculate the derived value.

         Args:
@@ -267,7 +239,7 @@ class Reaction:
         """
         return cast(float, self.fn(*(dependent[arg] for arg in self.args)))

-    def calculate_inpl(self, name: str, dependent: dict[str,
+    def calculate_inpl(self, name: str, dependent: dict[str, Any]) -> None:
         """Calculate the reaction in place.

         Args:
@@ -277,19 +249,57 @@ class Reaction:
         """
         dependent[name] = cast(float, self.fn(*(dependent[arg] for arg in self.args)))

-    def calculate_inpl_time_course(self, name: str, dependent: pd.DataFrame) -> None:
-        """Calculate the derived value in place.

-
-
-
+@dataclass(kw_only=True)
+class AbstractSurrogate:
+    """Abstract base class for surrogate models.

-
-
-
-
-
-
+    Attributes:
+        inputs: List of input variable names.
+        stoichiometries: Dictionary mapping reaction names to stoichiometries.
+
+    Methods:
+        predict: Abstract method to predict outputs based on input data.
+
+    """
+
+    args: list[str]
+    outputs: list[str]
+    stoichiometries: dict[str, dict[str, float | Derived]] = field(default_factory=dict)
+
+    @abstractmethod
+    def predict(
+        self, args: dict[str, float | pd.Series | pd.DataFrame]
+    ) -> dict[str, float]:
+        """Predict outputs based on input data."""
+
+    def calculate_inpl(
+        self,
+        name: str,  # noqa: ARG002, for API compatibility
+        args: dict[str, float | pd.Series | pd.DataFrame],
+    ) -> None:
+        """Predict outputs based on input data."""
+        args |= self.predict(args=args)
+
+
+@dataclass(kw_only=True)
+class MockSurrogate(AbstractSurrogate):
+    """Mock surrogate model for testing purposes."""
+
+    fn: Callable[..., Iterable[float]]
+
+    def predict(
+        self,
+        args: dict[str, float | pd.Series | pd.DataFrame],
+    ) -> dict[str, float]:
+        """Predict outputs based on input data."""
+        return dict(
+            zip(
+                self.outputs,
+                self.fn(*(args[i] for i in self.args)),
+                strict=True,
+            )
+        )  # type: ignore


 @dataclass(kw_only=True, slots=True)
@@ -429,70 +439,6 @@ class ProtocolByPars:
         return cast(pd.DataFrame, mean.unstack().T)


-@dataclass(kw_only=True)
-class AbstractSurrogate:
-    """Abstract base class for surrogate models.
-
-    Attributes:
-        inputs: List of input variable names.
-        stoichiometries: Dictionary mapping reaction names to stoichiometries.
-
-    Methods:
-        predict: Abstract method to predict outputs based on input data.
-
-    """
-
-    args: list[str]
-    outputs: list[str]
-    stoichiometries: dict[str, dict[str, float]] = field(default_factory=dict)
-
-    @abstractmethod
-    def predict_raw(self, y: np.ndarray) -> np.ndarray:
-        """Predict outputs based on input data."""
-
-    def predict(self, y: np.ndarray) -> dict[str, float]:
-        """Predict outputs based on input data."""
-        return dict(
-            zip(
-                self.outputs,
-                self.predict_raw(y),
-                strict=True,
-            )
-        )
-
-    def calculate_inpl(
-        self,
-        name: str,  # noqa: ARG002, for API compatibility
-        args: dict[str, float],
-    ) -> None:
-        """Predict outputs based on input data."""
-        args |= self.predict(np.array([args[arg] for arg in self.args]))
-
-    def calculate_inpl_time_course(
-        self,
-        name: str,  # noqa: ARG002, for API compatibility
-        args: pd.DataFrame,
-    ) -> None:
-        """Predict outputs based on input data."""
-        args[self.outputs] = pd.DataFrame(
-            [self.predict(y) for y in args.loc[:, self.args].to_numpy()],
-            index=args.index,
-            dtype=float,
-        )
-
-
-@dataclass(kw_only=True)
-class MockSurrogate(AbstractSurrogate):
-    """Mock surrogate model for testing purposes."""
-
-    def predict(
-        self,
-        y: np.ndarray,
-    ) -> dict[str, float]:
-        """Predict outputs based on input data."""
-        return dict(zip(self.outputs, y, strict=True))
-
-
 @dataclass(kw_only=True)
 class AbstractEstimator:
     """Abstract class for parameter estimation using neural networks."""
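The relocated AbstractSurrogate now asks subclasses for a single dict-based `predict` instead of the old `predict_raw`/`predict` pair. Below is a minimal sketch of a custom subclass under that interface; the `LinearSurrogate` class and its `scale` factor are invented for illustration and are not part of mxlpy.

```python
# Hedged sketch of subclassing the relocated AbstractSurrogate from
# mxlpy.types; everything named "Linear*" below is hypothetical.
from dataclasses import dataclass

import pandas as pd

from mxlpy.types import AbstractSurrogate


@dataclass(kw_only=True)
class LinearSurrogate(AbstractSurrogate):
    """Toy surrogate: every output is `scale` times the first input."""

    scale: float = 2.0

    def predict(
        self,
        args: dict[str, float | pd.Series | pd.DataFrame],
    ) -> dict[str, float]:
        # New-style hook: named inputs in, named outputs out,
        # replacing the removed predict_raw(np.ndarray) entry point.
        first = float(args[self.args[0]])
        return {name: self.scale * first for name in self.outputs}


surr = LinearSurrogate(args=["x"], outputs=["v1"])
print(surr.predict({"x": 1.5}))  # {'v1': 3.0}

# calculate_inpl merges the prediction back into the args dict in place.
state = {"x": 1.5}
surr.calculate_inpl("v1", state)
print(state)  # {'x': 1.5, 'v1': 3.0}
```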
{mxlpy-0.18.0.dist-info → mxlpy-0.19.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mxlpy
-Version: 0.18.0
+Version: 0.19.0
 Summary: A package to build metabolic models
 Author-email: Marvin van Aalst <marvin.vanaalst@gmail.com>
 Maintainer-email: Marvin van Aalst <marvin.vanaalst@gmail.com>
@@ -24,6 +24,7 @@ Classifier: Topic :: Software Development
 Requires-Python: >=3.12
 Requires-Dist: dill>=0.3.9
 Requires-Dist: latexify-py>=0.4.4
+Requires-Dist: lazy-import>=0.2.2
 Requires-Dist: matplotlib>=3.9.2
 Requires-Dist: mike>=2.1.3
 Requires-Dist: more-itertools>=10.5.0
@@ -41,20 +42,10 @@ Requires-Dist: tabulate>=0.9.0
 Requires-Dist: toml>=0.10.2
 Requires-Dist: tqdm>=4.66.6
 Requires-Dist: typing-extensions>=4.12.2
-Provides-Extra:
-Requires-Dist:
-
-Requires-Dist:
-Requires-Dist: mkdocs-material>=9.5.42; extra == 'dev'
-Requires-Dist: mkdocs>=1.6.1; extra == 'dev'
-Requires-Dist: pre-commit>=4.0.1; extra == 'dev'
-Requires-Dist: pyright>=1.1.387; extra == 'dev'
-Requires-Dist: pytest-cov>=5.0.0; extra == 'dev'
-Requires-Dist: pytest>=8.3.3; extra == 'dev'
-Requires-Dist: requests>=2.32.3; extra == 'dev'
-Requires-Dist: ruff>=0.7.1; extra == 'dev'
-Requires-Dist: ssort>=0.13.0; extra == 'dev'
-Requires-Dist: toml-sort<0.24,>=0.23.1; extra == 'dev'
+Provides-Extra: keras
+Requires-Dist: keras>=3.9.2; extra == 'keras'
+Provides-Extra: tensorflow
+Requires-Dist: tensorflow>=2.19.0; extra == 'tensorflow'
 Provides-Extra: torch
 Requires-Dist: torch>=2.5.1; extra == 'torch'
 Description-Content-Type: text/markdown
@@ -78,14 +69,26 @@ Description-Content-Type: text/markdown

 ## Installation

-You can install mxlpy using pip: `pip install mxlpy
+You can install mxlpy using pip: `pip install mxlpy`.
+
+Due to their sizes, the machine learning are optional dependencies. You cann install them using
+
+```shell
+# One of them respectively
+pip install mxlpy[torch]
+pip install mxlpy[tensorflow]
+pip install mxlpy[keras]
+
+# together
+pip install mxlpy[torch, tensorflow, keras]
+```

 If you want access to the sundials solver suite via the [assimulo](https://jmodelica.org/assimulo/) package, we recommend setting up a virtual environment via [pixi](https://pixi.sh/) or [mamba / conda](https://mamba.readthedocs.io/en/latest/) using the [conda-forge](https://conda-forge.org/) channel.

 ```bash
 pixi init
 pixi add python assimulo
-pixi add --pypi mxlpy
+pixi add --pypi mxlpy
 ```

 ## How to cite
@@ -103,7 +106,7 @@ You have two choices here, using `uv` (pypi-only) or using `pixi` (conda-forge,
 ### uv

 - Install `uv` as described in [the docs](https://docs.astral.sh/uv/getting-started/installation/).
-- Run `uv sync --
+- Run `uv sync --all-extras --all-groups` to install dependencies locally

 ### pixi

@@ -111,7 +114,3 @@ You have two choices here, using `uv` (pypi-only) or using `pixi` (conda-forge,
 - Run `pixi install --frozen`


-## Notes
-
-- `uv add $package`
-- `uv add --optional dev $package`
{mxlpy-0.18.0.dist-info → mxlpy-0.19.0.dist-info}/RECORD
CHANGED
@@ -1,51 +1,54 @@
-mxlpy/__init__.py,sha256=
+mxlpy/__init__.py,sha256=ODndmfAX0zFTSqK1kVQN2s4W3FDqxuu5Mvf1pnv0IdE,4421
 mxlpy/distributions.py,sha256=ce6RTqn19YzMMec-u09fSIUA8A92M6rehCuHuXWcX7A,8734
 mxlpy/fit.py,sha256=WNg98wW47xkd4gNEgj3t8eNNTqfVpHEJTbXMRQBe22o,12457
 mxlpy/fns.py,sha256=VxDDyEdtGD7fEoT5LiiEaRqFk-0fIunRXHr1dCMpCdE,14002
 mxlpy/identify.py,sha256=veYYCjTDAlzibrWtciX2egfEWWgosOpqgLBgbfVj42g,2130
-mxlpy/label_map.py,sha256=
+mxlpy/label_map.py,sha256=_bStQJtQ4RlmTQyrW34W62pyXFF_XFVbTXv-ybdly0s,17816
 mxlpy/linear_label_map.py,sha256=DqzN_akacPccZwzYAR3ANIdzAU_GU6Xe6gWV9DHAAWU,10282
 mxlpy/mc.py,sha256=oYd8a3ycyZLyh-ZxTYUjDRNfsCcwSQaLWssxv0yC5Cc,17399
 mxlpy/mca.py,sha256=1_qBX9lHI6svXSebtwvMldAMwPlLqMylAPmxMbMQdWw,9359
-mxlpy/model.py,sha256=
+mxlpy/model.py,sha256=ykdFmh7AejqN8qJUXddc9tFVhDWED4CZ8QgRyyCrmK0,60784
 mxlpy/parallel.py,sha256=kX4Td5YoovDwZp6kX_3cfO6QtHSS9ieJ0bMZiKs3Xv8,5002
 mxlpy/parameterise.py,sha256=2jMhhO-bHTFP_0kXercJekeATAZYBg5FrK1MQ_mWGpk,654
 mxlpy/paths.py,sha256=TK2wO4N9lG-UV1JGfeB64q48JVDbwqIUj63rl55MKuQ,1022
-mxlpy/plot.py,sha256=
+mxlpy/plot.py,sha256=MiV2Oi5kvI04VjjWiiVbAzRCGaZWue58_hQIRGzS4ik,28879
 mxlpy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mxlpy/report.py,sha256=ZwnjquPAvo4A8UqK-BT19SZFSEUOy1FALqoh7uTmbAI,7793
 mxlpy/scan.py,sha256=FBPpjv66v4IWZ5OwG_EWUdrucLWR9gq_XEsLFC-otaw,18969
 mxlpy/simulator.py,sha256=9Ne4P5Jrwgx4oAlljPvCqSCCy98_5Lv1B87y1AkbI4c,21041
-mxlpy/types.py,sha256=
+mxlpy/types.py,sha256=swhRTCRNbmI8-2Z3Y0YA9QkxrTxwzYJlMz2qFcfpRsA,12698
 mxlpy/experimental/__init__.py,sha256=kZTE-92OErpHzNRqmgSQYH4CGXrogGJ5EL35XGZQ81M,206
-mxlpy/experimental/diff.py,sha256=
+mxlpy/experimental/diff.py,sha256=cxr3GkZOkhUaWDj1eXHbjKSVKciGIp_IUf7Jkh0FdEE,8968
 mxlpy/integrators/__init__.py,sha256=kqmV6a0TRyLGR_XqbyAI652AfptYnXAUpqbSFg0CpP8,450
-mxlpy/integrators/int_assimulo.py,sha256=
-mxlpy/integrators/int_scipy.py,sha256=
+mxlpy/integrators/int_assimulo.py,sha256=H092idSPeFkPKAOVbr8M8iGdDpfc-MlPeDvh3-tP4rY,5013
+mxlpy/integrators/int_scipy.py,sha256=87nAVMY0DLQ_RAIDNa7pN6Xw8NFv5ifyZd8EmPT9rgY,4649
 mxlpy/meta/__init__.py,sha256=Jyy4063fZy6iT4LSwjPyEAVr4N_3xxcLc8wDBoDPyKc,278
-mxlpy/meta/codegen_latex.py,sha256=
+mxlpy/meta/codegen_latex.py,sha256=OjDgfRRvibs9Bg75J_FSl9fh_MTs8Vqu4Wm9L2JTOCk,19982
 mxlpy/meta/codegen_modebase.py,sha256=_ZAW4NvXhKwJQLGz5hkwwZpL2JMAJlfG-GUWkYIiNvw,3124
 mxlpy/meta/codegen_py.py,sha256=xSdeuEGPGc-QKRMgJO4VSPGMlxCPEV5prkKjNQ2D2hg,3483
-mxlpy/meta/source_tools.py,sha256=
-mxlpy/nn/__init__.py,sha256=
-mxlpy/nn/
-mxlpy/nn/_torch.py,sha256=
-mxlpy/npe/__init__.py,sha256=
-mxlpy/npe/
+mxlpy/meta/source_tools.py,sha256=GSSFgH2lZ24e6eQxJ-lx4WSwawoPYdIwj_klt5Kr0h8,13464
+mxlpy/nn/__init__.py,sha256=Qjr-ERsY2lbD75sFBOhCUwEasQDSJKcpBn_kReLZ6oA,633
+mxlpy/nn/_keras.py,sha256=wffBYvQDNGp5me6x2yW4EwpKsnMojCJbXHfKE156a-w,2175
+mxlpy/nn/_torch.py,sha256=Omq7iMx2kbUXht2It-egiIYT2DzLGPbkpTCX-h17teI,5752
+mxlpy/npe/__init__.py,sha256=hBHCUD2JYDBBGS2kTY8mTCfWB3u1R7m5l--wUupZt6o,1270
+mxlpy/npe/_keras.py,sha256=ytvXMPK9KUCGOzTQm08_SgafiMb-MOIUdZQV7JjAO40,9721
+mxlpy/npe/_torch.py,sha256=v3joh6lFJJxvYJj--wzmKXL9UMTaIN3h6hPNq0uX9NU,11250
 mxlpy/sbml/__init__.py,sha256=AS7IwrBzBgN8coUZkyBEtiYa9ICWyY1wzp1ujVm5ItA,226
 mxlpy/sbml/_data.py,sha256=XwT1sSxn6KLTXYMbk4ORbEAEgZhQDBfoyrjMBDAoY_s,1135
-mxlpy/sbml/_export.py,sha256=
+mxlpy/sbml/_export.py,sha256=4tU3SVxfEvl0E1urZWHyphkiAeH5HeRO1cODvvrczAQ,20342
 mxlpy/sbml/_import.py,sha256=5odQBdpD93mQJp2bVIabmPo6NK60nxqrdSVB8fEsF_A,22099
 mxlpy/sbml/_mathml.py,sha256=bNk9RQ_NQFDhY1R354p-gwqqHaIiyAwZ1xLPHHhiguQ,24436
 mxlpy/sbml/_name_conversion.py,sha256=XK9DEyzhrD0GBBwwjK9RA0yORrDX5c-Uvx0VtKMR5rA,1325
 mxlpy/sbml/_unit_conversion.py,sha256=dW_I6_Ou09ccwnp6LIdrPriIQnQUK5lJcjzM2Fawm6U,1927
-mxlpy/surrogates/__init__.py,sha256=
-mxlpy/surrogates/
-mxlpy/surrogates/
+mxlpy/surrogates/__init__.py,sha256=cz9qr0ToYSutIK45IvKrMe1mPP7Lj0I_V0HYGixfpZU,916
+mxlpy/surrogates/_keras.py,sha256=y4nW626Nr4OQrTdZneDf-Ox2sKmqKmGmEbjpuQMEl10,3816
+mxlpy/surrogates/_poly.py,sha256=qhwiWMQsQGq6qBEzagR-riM7Yp33FilsudMkGZ7mkEU,3598
+mxlpy/surrogates/_qss.py,sha256=q-CoULIntdXclArm7eHGHlZpgBKQmJrZ0ZaG2Q3B_Pg,712
+mxlpy/surrogates/_torch.py,sha256=lGKjLgHmUsD0iAbKzQUeKfN8C88ChQPg1pqidiYucWg,6315
 mxlpy/symbolic/__init__.py,sha256=3hQjCMw8-6iOxeUdfnCg8449fF_BRF2u6lCM1GPpkRY,222
 mxlpy/symbolic/strikepy.py,sha256=UMx2LMRwCkASKjdCYEvh9tKlW9dk3nDoWM9NNJXWL_8,19960
-mxlpy/symbolic/symbolic_model.py,sha256=
-mxlpy-0.
-mxlpy-0.
-mxlpy-0.
-mxlpy-0.
+mxlpy/symbolic/symbolic_model.py,sha256=lxkDW7L00Og_8Rwf7j0Gek0nQxRu7kyboioY4ZUqfx0,2435
+mxlpy-0.19.0.dist-info/METADATA,sha256=YValpHti39pA32FnMfkI0zLgmD7yVG8M_WGWNytSrm8,4307
+mxlpy-0.19.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+mxlpy-0.19.0.dist-info/licenses/LICENSE,sha256=lHX9Eu70g3Iv1aOxXTWNHa3vq9vaVYSPQx4jOLYmDpw,1096
+mxlpy-0.19.0.dist-info/RECORD,,
mxlpy/nn/_tensorflow.py
DELETED
File without changes

{mxlpy-0.18.0.dist-info → mxlpy-0.19.0.dist-info}/WHEEL
File without changes

{mxlpy-0.18.0.dist-info → mxlpy-0.19.0.dist-info}/licenses/LICENSE
File without changes