mxlpy 0.16.0__py3-none-any.whl → 0.17.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mxlpy/sbml/_import.py CHANGED
@@ -12,7 +12,7 @@ import libsbml
 import numpy as np  # noqa: F401  # models might need it
 import sympy
 
-from mxlpy.model import Model, _sort_dependencies
+from mxlpy.model import Dependency, Model, _sort_dependencies
 from mxlpy.paths import default_tmp_dir
 from mxlpy.sbml._data import (
     AtomicUnit,
@@ -522,7 +522,10 @@ def _codgen(name: str, sbml: Parser) -> Path:
         ^ set(variables)
         ^ set(sbml.derived)
         | {"time"},
-        elements=[(k, set(v.args)) for k, v in sbml.initial_assignment.items()],
+        elements=[
+            Dependency(name=k, required=set(v.args), provided={k})
+            for k, v in sbml.initial_assignment.items()
+        ],
     )
 
     if len(initial_assignment_order) > 0:
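With 0.17.0, `_sort_dependencies` consumes structured `Dependency` records rather than `(name, args)` tuples, making required and provided symbols explicit. A minimal sketch of the new element shape (the field names come from the diff above; the concrete values are hypothetical):

```python
from mxlpy.model import Dependency

# Hypothetical initial assignment k_eq = k_fwd / k_bwd expressed as a
# Dependency: it requires k_fwd and k_bwd and provides k_eq.
elements = [
    Dependency(name="k_eq", required={"k_fwd", "k_bwd"}, provided={"k_eq"}),
]
```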
mxlpy/surrogates/__init__.py CHANGED
@@ -19,13 +19,14 @@ from __future__ import annotations
 import contextlib
 
 with contextlib.suppress(ImportError):
-    from ._torch import TorchSurrogate, train_torch_surrogate
+    from ._torch import Torch, TorchTrainer, train_torch
 
-from ._poly import PolySurrogate, train_polynomial_surrogate
+from ._poly import Polynomial, train_polynomial
 
 __all__ = [
-    "PolySurrogate",
-    "TorchSurrogate",
-    "train_polynomial_surrogate",
-    "train_torch_surrogate",
+    "Polynomial",
+    "Torch",
+    "TorchTrainer",
+    "train_polynomial",
+    "train_torch",
 ]
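The public surrogate API drops the `*Surrogate`/`*_surrogate` suffixes in 0.17.0, so imports written against 0.16.0 stop working. A before/after sketch (the `Torch` names only import when torch is installed, since that import is guarded by `contextlib.suppress(ImportError)`):

```python
# 0.16.0
# from mxlpy.surrogates import PolySurrogate, TorchSurrogate
# from mxlpy.surrogates import train_polynomial_surrogate, train_torch_surrogate

# 0.17.0
from mxlpy.surrogates import Polynomial, Torch, TorchTrainer
from mxlpy.surrogates import train_polynomial, train_torch
```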
mxlpy/surrogates/_poly.py CHANGED
@@ -9,9 +9,9 @@ from numpy import polynomial
 from mxlpy.types import AbstractSurrogate, ArrayLike
 
 __all__ = [
-    "PolySurrogate",
+    "Polynomial",
     "PolynomialExpansion",
-    "train_polynomial_surrogate",
+    "train_polynomial",
 ]
 
 # define custom type
@@ -26,23 +26,24 @@ PolynomialExpansion = (
 
 
 @dataclass(kw_only=True)
-class PolySurrogate(AbstractSurrogate):
+class Polynomial(AbstractSurrogate):
     model: PolynomialExpansion
 
     def predict_raw(self, y: np.ndarray) -> np.ndarray:
         return self.model(y)
 
 
-def train_polynomial_surrogate(
-    feature: ArrayLike,
-    target: ArrayLike,
+def train_polynomial(
+    feature: ArrayLike | pd.Series,
+    target: ArrayLike | pd.Series,
     series: Literal[
         "Power", "Chebyshev", "Legendre", "Laguerre", "Hermite", "HermiteE"
     ] = "Power",
     degrees: Iterable[int] = (1, 2, 3, 4, 5, 6, 7),
     surrogate_args: list[str] | None = None,
+    surrogate_outputs: list[str] | None = None,
     surrogate_stoichiometries: dict[str, dict[str, float]] | None = None,
-) -> tuple[PolySurrogate, pd.DataFrame]:
+) -> tuple[Polynomial, pd.DataFrame]:
     """Train a surrogate model based on function series expansion.
 
     Args:
@@ -51,7 +52,8 @@ def train_polynomial_surrogate(
         series: Base functions for the surrogate model
         degrees: Degrees of the polynomial to fit to the data.
         surrogate_args: Additional arguments for the surrogate model.
-        surrogate_stoichiometries: Stoichiometries for the surrogate model.
+        surrogate_outputs: Names of the surrogate model outputs.
+        surrogate_stoichiometries: Mapping of variables to their stoichiometries
 
     Returns:
         PolySurrogate: Polynomial surrogate model.
@@ -83,9 +85,10 @@ def train_polynomial_surrogate(
     # Choose the model with the lowest AIC
     model = models[np.argmin(score)]
     return (
-        PolySurrogate(
+        Polynomial(
             model=model,
             args=surrogate_args if surrogate_args is not None else [],
+            outputs=surrogate_outputs if surrogate_outputs is not None else [],
             stoichiometries=surrogate_stoichiometries
             if surrogate_stoichiometries is not None
             else {},
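`train_polynomial` now also accepts `pd.Series` inputs and takes the new `surrogate_outputs` argument, mirroring the `outputs` field that `AbstractSurrogate` gained in this release. A usage sketch with hypothetical one-dimensional data:

```python
import numpy as np

from mxlpy.surrogates import train_polynomial

# Hypothetical training data: approximate y = x**2 on [0, 1].
x = np.linspace(0.0, 1.0, 50)
surrogate, fit_info = train_polynomial(
    feature=x,
    target=x**2,
    series="Chebyshev",
    degrees=(1, 2, 3, 4),
    surrogate_args=["x"],
    surrogate_outputs=["y"],  # new in 0.17.0: outputs are named explicitly
)
print(surrogate.predict_raw(np.array([0.5])))
```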
mxlpy/surrogates/_torch.py CHANGED
@@ -1,5 +1,6 @@
 from collections.abc import Callable
 from dataclasses import dataclass
+from typing import Self
 
 import numpy as np
 import pandas as pd
@@ -12,14 +13,32 @@ from torch.optim.optimizer import ParamsT
 from mxlpy.nn._torch import MLP, DefaultDevice
 from mxlpy.types import AbstractSurrogate
 
+type LossFn = Callable[[torch.Tensor, torch.Tensor], torch.Tensor]
+
 __all__ = [
-    "TorchSurrogate",
-    "train_torch_surrogate",
+    "LossFn",
+    "Torch",
+    "TorchTrainer",
+    "train_torch",
 ]
 
 
+def _mean_abs(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
+    """Standard loss for surrogates.
+
+    Args:
+        x: Predictions of a model.
+        y: Targets.
+
+    Returns:
+        torch.Tensor: loss.
+
+    """
+    return torch.mean(torch.abs(x - y))
+
+
 @dataclass(kw_only=True)
-class TorchSurrogate(AbstractSurrogate):
+class Torch(AbstractSurrogate):
     """Surrogate model using PyTorch.
 
     Attributes:
@@ -48,6 +67,91 @@ class TorchSurrogate(AbstractSurrogate):
         ).numpy()
 
 
+@dataclass(init=False)
+class TorchTrainer:
+    features: pd.DataFrame
+    targets: pd.DataFrame
+    approximator: nn.Module
+    optimizer: Adam
+    device: torch.device
+    losses: list[pd.Series]
+    loss_fn: LossFn
+
+    def __init__(
+        self,
+        features: pd.DataFrame,
+        targets: pd.DataFrame,
+        approximator: nn.Module | None = None,
+        optimimzer_cls: Callable[[ParamsT], Adam] = Adam,
+        device: torch.device = DefaultDevice,
+        loss_fn: LossFn = _mean_abs,
+    ) -> None:
+        self.features = features
+        self.targets = targets
+
+        if approximator is None:
+            approximator = MLP(
+                n_inputs=len(features.columns),
+                neurons_per_layer=[50, 50, len(targets.columns)],
+            )
+        self.approximator = approximator.to(device)
+
+        self.optimizer = optimimzer_cls(approximator.parameters())
+        self.device = device
+        self.loss_fn = loss_fn
+        self.losses = []
+
+    def train(
+        self,
+        epochs: int,
+        batch_size: int | None = None,
+    ) -> Self:
+        if batch_size is None:
+            losses = _train_full(
+                aprox=self.approximator,
+                features=self.features,
+                targets=self.targets,
+                epochs=epochs,
+                optimizer=self.optimizer,
+                device=self.device,
+                loss_fn=self.loss_fn,
+            )
+        else:
+            losses = _train_batched(
+                aprox=self.approximator,
+                features=self.features,
+                targets=self.targets,
+                epochs=epochs,
+                optimizer=self.optimizer,
+                device=self.device,
+                batch_size=batch_size,
+                loss_fn=self.loss_fn,
+            )
+
+        if len(self.losses) > 0:
+            losses.index += self.losses[-1].index[-1]
+        self.losses.append(losses)
+        return self
+
+    def get_loss(self) -> pd.Series:
+        return pd.concat(self.losses)
+
+    def get_surrogate(
+        self,
+        surrogate_args: list[str] | None = None,
+        surrogate_outputs: list[str] | None = None,
+        surrogate_stoichiometries: dict[str, dict[str, float]] | None = None,
+    ) -> Torch:
+        return Torch(
+            model=self.approximator,
+            args=surrogate_args if surrogate_args is not None else [],
+            outputs=surrogate_outputs if surrogate_outputs is not None else [],
+            stoichiometries=surrogate_stoichiometries
+            if surrogate_stoichiometries is not None
+            else {},
+        )
+
+
 def _train_batched(
     aprox: nn.Module,
     features: pd.DataFrame,
@@ -56,6 +160,7 @@ def _train_batched(
     optimizer: Adam,
     device: torch.device,
     batch_size: int,
+    loss_fn: LossFn,
 ) -> pd.Series:
     """Train the neural network using mini-batch gradient descent.
 
@@ -67,6 +172,7 @@ def _train_batched(
         optimizer: Optimizer for training.
         device: torch device
         batch_size: Size of mini-batches for training.
+        loss_fn: Loss function
 
     Returns:
         pd.Series: Series containing the training loss history.
@@ -79,7 +185,7 @@ def _train_batched(
         X = torch.Tensor(features.iloc[idxs].to_numpy(), device=device)
         Y = torch.Tensor(targets.iloc[idxs].to_numpy(), device=device)
         optimizer.zero_grad()
-        loss = torch.mean(torch.abs(aprox(X) - Y))
+        loss = loss_fn(aprox(X), Y)
         loss.backward()
         optimizer.step()
         losses[i] = loss.detach().numpy()
@@ -93,6 +199,7 @@ def _train_full(
     epochs: int,
     optimizer: Adam,
     device: torch.device,
+    loss_fn: Callable,
 ) -> pd.Series:
     """Train the neural network using full-batch gradient descent.
 
@@ -103,6 +210,7 @@ def _train_full(
         epochs: Number of training epochs.
         optimizer: Optimizer for training.
         device: Torch device
+        loss_fn: Loss function
 
     Returns:
         pd.Series: Series containing the training loss history.
@@ -114,24 +222,26 @@ def _train_full(
     losses = {}
     for i in tqdm.trange(epochs):
         optimizer.zero_grad()
-        loss = torch.mean(torch.abs(aprox(X) - Y))
+        loss = loss_fn(aprox(X), Y)
         loss.backward()
         optimizer.step()
         losses[i] = loss.detach().numpy()
     return pd.Series(losses, dtype=float)
 
 
-def train_torch_surrogate(
+def train_torch(
     features: pd.DataFrame,
     targets: pd.DataFrame,
     epochs: int,
     surrogate_args: list[str] | None = None,
+    surrogate_outputs: list[str] | None = None,
     surrogate_stoichiometries: dict[str, dict[str, float]] | None = None,
     batch_size: int | None = None,
     approximator: nn.Module | None = None,
     optimimzer_cls: Callable[[ParamsT], Adam] = Adam,
     device: torch.device = DefaultDevice,
-) -> tuple[TorchSurrogate, pd.Series]:
+    loss_fn: LossFn = _mean_abs,
+) -> tuple[Torch, pd.Series]:
     """Train a PyTorch surrogate model.
 
     Examples:
@@ -143,54 +253,38 @@ def train_torch_surrogate(
         ...     surrogate_stoichiometries={
         ...         "v1": {"x1": -1, "x2": 1, "ATP": -1},
         ...     },
-        ...)
+        ...)surrogate_stoichiometries
 
     Args:
         features: DataFrame containing the input features for training.
         targets: DataFrame containing the target values for training.
         epochs: Number of training epochs.
-        surrogate_args: List of input variable names for the surrogate model.
-        surrogate_stoichiometries: Dictionary mapping reaction names to stoichiometries.
+        surrogate_args: Names of inputs arguments for the surrogate model.
+        surrogate_outputs: Names of output arguments from the surrogate.
+        surrogate_stoichiometries: Mapping of variables to their stoichiometries
         batch_size: Size of mini-batches for training (None for full-batch).
         approximator: Predefined neural network model (None to use default MLP features-50-50-output).
         optimimzer_cls: Optimizer class to use for training (default: Adam).
         device: Device to run the training on (default: DefaultDevice).
+        loss_fn: Custom loss function or instance of torch loss object
 
     Returns:
         tuple[TorchSurrogate, pd.Series]: Trained surrogate model and loss history.
 
     """
-    if approximator is None:
-        approximator = MLP(
-            n_inputs=len(features.columns),
-            neurons_per_layer=[50, 50, len(targets.columns)],
-        ).to(device)
-
-    optimizer = optimimzer_cls(approximator.parameters())
-    if batch_size is None:
-        losses = _train_full(
-            aprox=approximator,
-            features=features,
-            targets=targets,
-            epochs=epochs,
-            optimizer=optimizer,
-            device=device,
-        )
-    else:
-        losses = _train_batched(
-            aprox=approximator,
-            features=features,
-            targets=targets,
-            epochs=epochs,
-            optimizer=optimizer,
-            device=device,
-            batch_size=batch_size,
-        )
-    surrogate = TorchSurrogate(
-        model=approximator,
-        args=surrogate_args if surrogate_args is not None else [],
-        stoichiometries=surrogate_stoichiometries
-        if surrogate_stoichiometries is not None
-        else {},
+    trainer = TorchTrainer(
+        features=features,
+        targets=targets,
+        approximator=approximator,
+        optimimzer_cls=optimimzer_cls,
+        device=device,
+        loss_fn=loss_fn,
+    ).train(
+        epochs=epochs,
+        batch_size=batch_size,
     )
-    return surrogate, losses
+    return trainer.get_surrogate(
+        surrogate_args=surrogate_args,
+        surrogate_outputs=surrogate_outputs,
+        surrogate_stoichiometries=surrogate_stoichiometries,
+    ), trainer.get_loss()
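`train_torch` is now a thin wrapper around the new `TorchTrainer`, which keeps the optimizer and loss history alive between calls so training can be resumed, and accepts a custom `loss_fn` in place of the previously hard-coded mean absolute error. A sketch with a hypothetical dataset and loss function:

```python
import pandas as pd
import torch

from mxlpy.surrogates import TorchTrainer

# Hypothetical feature/target frames.
features = pd.DataFrame({"x1": [0.0, 0.5, 1.0], "x2": [1.0, 0.5, 0.0]})
targets = pd.DataFrame({"v1": [0.1, 0.4, 0.9]})


def mse(pred: torch.Tensor, true: torch.Tensor) -> torch.Tensor:
    """Mean squared error instead of the default mean absolute error."""
    return torch.mean((pred - true) ** 2)


trainer = TorchTrainer(features=features, targets=targets, loss_fn=mse)
trainer.train(epochs=100)  # full-batch training
trainer.train(epochs=50, batch_size=2)  # resume with mini-batches

surrogate = trainer.get_surrogate(
    surrogate_args=["x1", "x2"],
    surrogate_outputs=["v1"],  # new in 0.17.0
)
loss_history = trainer.get_loss()  # indices continue across train() calls
```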
mxlpy/symbolic/strikepy.py CHANGED
@@ -27,9 +27,7 @@ import tqdm
 from sympy import Matrix
 from sympy.matrices import zeros
 
-from mxlpy.model import Model
-
-from .symbolic_model import SymbolicModel, to_symbolic_model
+from .symbolic_model import SymbolicModel
 
 __all__ = [
     "Options",
mxlpy/types.py CHANGED
@@ -22,6 +22,7 @@ from dataclasses import dataclass, field
 import pandas as pd
 
 __all__ = [
+    "AbstractEstimator",
     "AbstractSurrogate",
     "Array",
     "ArrayLike",
@@ -444,7 +445,8 @@ class AbstractSurrogate:
 
     """
 
-    args: list[str] = field(default_factory=list)
+    args: list[str]
+    outputs: list[str]
     stoichiometries: dict[str, dict[str, float]] = field(default_factory=dict)
 
     @abstractmethod
@@ -455,7 +457,7 @@ class AbstractSurrogate:
         """Predict outputs based on input data."""
         return dict(
             zip(
-                self.stoichiometries,
+                self.outputs,
                 self.predict_raw(y),
                 strict=True,
             )
@@ -475,7 +477,7 @@ class AbstractSurrogate:
         args: pd.DataFrame,
     ) -> None:
         """Predict outputs based on input data."""
-        args[list(self.stoichiometries)] = pd.DataFrame(
+        args[self.outputs] = pd.DataFrame(
             [self.predict(y) for y in args.loc[:, self.args].to_numpy()],
             index=args.index,
             dtype=float,
@@ -491,4 +493,15 @@ class MockSurrogate(AbstractSurrogate):
         y: np.ndarray,
     ) -> dict[str, float]:
         """Predict outputs based on input data."""
-        return dict(zip(self.stoichiometries, y, strict=True))
+        return dict(zip(self.outputs, y, strict=True))
+
+
+@dataclass(kw_only=True)
+class AbstractEstimator:
+    """Abstract class for parameter estimation using neural networks."""
+
+    parameter_names: list[str]
+
+    @abstractmethod
+    def predict(self, features: pd.Series | pd.DataFrame) -> pd.DataFrame:
+        """Predict the target values for the given features."""
mxlpy-0.16.0.dist-info/METADATA → mxlpy-0.17.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mxlpy
-Version: 0.16.0
+Version: 0.17.0
 Summary: A package to build metabolic models
 Author-email: Marvin van Aalst <marvin.vanaalst@gmail.com>
 Maintainer-email: Marvin van Aalst <marvin.vanaalst@gmail.com>
@@ -47,7 +47,6 @@ Requires-Dist: jupyter>=1.1.1; extra == 'dev'
 Requires-Dist: mkdocs-jupyter>=0.25.1; extra == 'dev'
 Requires-Dist: mkdocs-material>=9.5.42; extra == 'dev'
 Requires-Dist: mkdocs>=1.6.1; extra == 'dev'
-Requires-Dist: mypy>=1.13.0; extra == 'dev'
 Requires-Dist: pre-commit>=4.0.1; extra == 'dev'
 Requires-Dist: pyright>=1.1.387; extra == 'dev'
 Requires-Dist: pytest-cov>=5.0.0; extra == 'dev'
mxlpy-0.16.0.dist-info/RECORD → mxlpy-0.17.0.dist-info/RECORD CHANGED
@@ -1,50 +1,51 @@
-mxlpy/__init__.py,sha256=XZYNFyDC5rWcKi6139mq04cROI7LwJvxB2_3ApKwcvY,4194
+mxlpy/__init__.py,sha256=lGo7XQTVuR1p8rW1J6gZsgdQWRqfYa9AWbvZQwT8oLQ,4236
 mxlpy/distributions.py,sha256=ce6RTqn19YzMMec-u09fSIUA8A92M6rehCuHuXWcX7A,8734
 mxlpy/fit.py,sha256=LwSoLfNVrqSlTtuUApwH36LjzGU0HLs4C_2qqTTjXFE,7859
-mxlpy/fns.py,sha256=ct_RFj9koW8vXHyr27GnbZUHUS_zfs4rDysybuFiOaU,4599
+mxlpy/fns.py,sha256=VxDDyEdtGD7fEoT5LiiEaRqFk-0fIunRXHr1dCMpCdE,14002
 mxlpy/identify.py,sha256=af52SCG4nlY9sSw22goaIheuvXR09QYK4ksCT24QHWI,1946
 mxlpy/label_map.py,sha256=urv-QTb0MUEKjwWvKtJSB8H2kvhLn1EKfRIH7awQQ8Y,17769
 mxlpy/linear_label_map.py,sha256=DqzN_akacPccZwzYAR3ANIdzAU_GU6Xe6gWV9DHAAWU,10282
 mxlpy/mc.py,sha256=oYd8a3ycyZLyh-ZxTYUjDRNfsCcwSQaLWssxv0yC5Cc,17399
 mxlpy/mca.py,sha256=1_qBX9lHI6svXSebtwvMldAMwPlLqMylAPmxMbMQdWw,9359
-mxlpy/model.py,sha256=qzol8nDSbM3HdESh50c4UFjn6Pw5JwcvhQ5AyKnbyvc,57576
-mxlpy/npe.py,sha256=oiRLA43-qf-AcS2KpQfJIOt7-Ev9Aj5sF6TMq9bJn84,8747
+mxlpy/model.py,sha256=H1rAKaB5pAQcMuBh5GnXuBReADTx5IDa1x0CdUZ6VlI,58411
 mxlpy/parallel.py,sha256=kX4Td5YoovDwZp6kX_3cfO6QtHSS9ieJ0bMZiKs3Xv8,5002
 mxlpy/parameterise.py,sha256=2jMhhO-bHTFP_0kXercJekeATAZYBg5FrK1MQ_mWGpk,654
 mxlpy/paths.py,sha256=TK2wO4N9lG-UV1JGfeB64q48JVDbwqIUj63rl55MKuQ,1022
 mxlpy/plot.py,sha256=4uu-6d8LH-GWX-sG_TlSpkSsnikv1DLTtnjJzA7nuRA,24670
 mxlpy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-mxlpy/report.py,sha256=h7dhcBzPFydLPxdsEXokzDf7Ce4PirXMsvLqlDZLSWM,7181
+mxlpy/report.py,sha256=ZwnjquPAvo4A8UqK-BT19SZFSEUOy1FALqoh7uTmbAI,7793
 mxlpy/scan.py,sha256=FBPpjv66v4IWZ5OwG_EWUdrucLWR9gq_XEsLFC-otaw,18969
 mxlpy/simulator.py,sha256=9Ne4P5Jrwgx4oAlljPvCqSCCy98_5Lv1B87y1AkbI4c,21041
-mxlpy/types.py,sha256=GbdyzEDTN8QfUH6-XXdNgf_TzqIXaYvcZGxaXc5kVio,14509
+mxlpy/types.py,sha256=fB8-oTJkIpkGP0affoVx1ak2zOuTpT6xH-w62oSJxiU,14814
 mxlpy/experimental/__init__.py,sha256=kZTE-92OErpHzNRqmgSQYH4CGXrogGJ5EL35XGZQ81M,206
 mxlpy/experimental/diff.py,sha256=4bztagJzFMsQJM7dlun_kv-WrWssM8CIw7gcL63hFf8,8952
 mxlpy/integrators/__init__.py,sha256=kqmV6a0TRyLGR_XqbyAI652AfptYnXAUpqbSFg0CpP8,450
 mxlpy/integrators/int_assimulo.py,sha256=TCBWQd558ZeRdBba1jCNsFyLBOssKvm8dXK36Aqg4_k,4817
 mxlpy/integrators/int_scipy.py,sha256=dFHlYTeb2zX97f3VuNdMJdI7WEYshF4JAIgprKKk2z4,4581
 mxlpy/meta/__init__.py,sha256=Jyy4063fZy6iT4LSwjPyEAVr4N_3xxcLc8wDBoDPyKc,278
-mxlpy/meta/codegen_latex.py,sha256=R0OJqzE7PnOCWLk52C3XWuRb-zI2eYTvV2oJZJvPsOE,13414
+mxlpy/meta/codegen_latex.py,sha256=vONj--_wmFM_FJpe15aAYyT06-kolqQwSe2NEbKrQWo,19934
 mxlpy/meta/codegen_modebase.py,sha256=_ZAW4NvXhKwJQLGz5hkwwZpL2JMAJlfG-GUWkYIiNvw,3124
 mxlpy/meta/codegen_py.py,sha256=xSdeuEGPGc-QKRMgJO4VSPGMlxCPEV5prkKjNQ2D2hg,3483
-mxlpy/meta/source_tools.py,sha256=EN3OoGQaXeIsDTJvA7S15-xDBra3DCIyFZEJ6h0Fy0k,11125
+mxlpy/meta/source_tools.py,sha256=IyiCLZ1KScSqADC9p_QSRgedoHGibs9U1RGJuXm827U,13464
 mxlpy/nn/__init__.py,sha256=yUc4o-iqfVVzkq9tZCstWwizrCqNlMft0YUwWGFFO-E,219
 mxlpy/nn/_tensorflow.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mxlpy/nn/_torch.py,sha256=_4Rw87zpIlCnrOsXC7iFp1c64_FcpfVmgBXBU0p8mlg,4063
+mxlpy/npe/__init__.py,sha256=IQmqUPJc5A8QXJLzp6Dq6Sjm8Hh2KAYZLrMxXQVeQP8,1181
+mxlpy/npe/_torch.py,sha256=pMU4PL3eO9Aqdn9waDUpvvDRdmUlmaOFtREwSkZbvNs,13874
 mxlpy/sbml/__init__.py,sha256=AS7IwrBzBgN8coUZkyBEtiYa9ICWyY1wzp1ujVm5ItA,226
 mxlpy/sbml/_data.py,sha256=XwT1sSxn6KLTXYMbk4ORbEAEgZhQDBfoyrjMBDAoY_s,1135
 mxlpy/sbml/_export.py,sha256=Q6B9rxy-yt73DORzAYu4BpfkZXxCS3MDSDUXwpoXV6Q,19970
-mxlpy/sbml/_import.py,sha256=5DJklsAe2sMV1CFxAbkSFRT3amPzOZmpo53y9NYv6TY,22015
+mxlpy/sbml/_import.py,sha256=5odQBdpD93mQJp2bVIabmPo6NK60nxqrdSVB8fEsF_A,22099
 mxlpy/sbml/_mathml.py,sha256=bNk9RQ_NQFDhY1R354p-gwqqHaIiyAwZ1xLPHHhiguQ,24436
 mxlpy/sbml/_name_conversion.py,sha256=XK9DEyzhrD0GBBwwjK9RA0yORrDX5c-Uvx0VtKMR5rA,1325
 mxlpy/sbml/_unit_conversion.py,sha256=dW_I6_Ou09ccwnp6LIdrPriIQnQUK5lJcjzM2Fawm6U,1927
-mxlpy/surrogates/__init__.py,sha256=N_iXERECKvmrHiihwnyQEKOSBsmlGEuQhEotn-mWKdk,924
-mxlpy/surrogates/_poly.py,sha256=E54CFscQBCcYMrty1X2ynl9GlS9uoEeAUgBPnhm3iIA,3113
-mxlpy/surrogates/_torch.py,sha256=E_1eDUlPSVFwROkdMDCqYwwHE-61pjNMJWotnhjzge0,5891
+mxlpy/surrogates/__init__.py,sha256=ofHPNwe0LAILP2ZUWivAQpOv9LyHHzLZc6iu1cV2LeQ,894
+mxlpy/surrogates/_poly.py,sha256=n1pe4xuD2A4BK8jJagzZ-17WW3kqvFBO-ZYuznmfosw,3303
+mxlpy/surrogates/_torch.py,sha256=Ep5e5oDyUsUdUpEqCY7WKLKKuwbPu0gcmVTiRabNzQ4,8593
 mxlpy/symbolic/__init__.py,sha256=3hQjCMw8-6iOxeUdfnCg8449fF_BRF2u6lCM1GPpkRY,222
-mxlpy/symbolic/strikepy.py,sha256=r6nRtckV1nxKq3i1bYYWZOkzwZ5XeKQuZM5ck44vUo0,20010
+mxlpy/symbolic/strikepy.py,sha256=UMx2LMRwCkASKjdCYEvh9tKlW9dk3nDoWM9NNJXWL_8,19960
 mxlpy/symbolic/symbolic_model.py,sha256=YL9noEeP3_0DoKXwMPELtfmPuP6mgNcLIJgDRCkyB7A,2434
-mxlpy-0.16.0.dist-info/METADATA,sha256=ySMK4udu6wgaUpG7Wn0sa-XYUkaVz4u19C5PjMEM5p0,4551
-mxlpy-0.16.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-mxlpy-0.16.0.dist-info/licenses/LICENSE,sha256=bEzjyjy1stQhfRDVaVHa3xV1x-V8emwdlbMvYO8Zo84,35073
-mxlpy-0.16.0.dist-info/RECORD,,
+mxlpy-0.17.0.dist-info/METADATA,sha256=8fzqS2MFlBN-JkidtjpM3i5DyfooGggrRv3AylMRIVQ,4507
+mxlpy-0.17.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+mxlpy-0.17.0.dist-info/licenses/LICENSE,sha256=bEzjyjy1stQhfRDVaVHa3xV1x-V8emwdlbMvYO8Zo84,35073
+mxlpy-0.17.0.dist-info/RECORD,,