modelbase2 0.2.0__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
modelbase2/__init__.py CHANGED
@@ -53,6 +53,7 @@ __all__ = [
     "make_protocol",
     "mc",
     "mca",
+    "nnarchitectures",
    "plot",
    "sbml",
    "steady_state",
@@ -70,7 +71,17 @@ import pandas as pd
 if TYPE_CHECKING:
     from modelbase2.types import ArrayLike

-    from . import distributions, experimental, fit, mc, mca, plot, sbml, surrogates
+    from . import (
+        distributions,
+        experimental,
+        fit,
+        mc,
+        mca,
+        nnarchitectures,
+        plot,
+        sbml,
+        surrogates,
+    )
     from .integrators import DefaultIntegrator, Scipy
     from .label_map import LabelMapper
     from .linear_label_map import LinearLabelMapper
modelbase2/distributions.py CHANGED
@@ -7,6 +7,7 @@ Classes:
     Distribution (Protocol): Base protocol for all distribution classes
     Beta: Beta distribution for parameters bounded between 0 and 1
     Uniform: Uniform distribution for parameters with simple bounds
+    LogUniform: LogUniform distribution for parameters with simple bounds
     Normal: Normal (Gaussian) distribution for unbounded parameters
     LogNormal: Log-normal distribution for strictly positive parameters
     Skewnorm: Skewed normal distribution for asymmetric parameter distributions
@@ -35,6 +36,7 @@ __all__ = [
     "Distribution",
     "GaussianKde",
     "LogNormal",
+    "LogUniform",
     "Normal",
     "RNG",
     "Skewnorm",
@@ -121,6 +123,34 @@ class Uniform:
         return rng.uniform(self.lower_bound, self.upper_bound, num)


+@dataclass
+class LogUniform:
+    """LogUniform distribution for parameters with simple bounds.
+
+    Args:
+        lower_bound: Minimum value
+        upper_bound: Maximum value
+
+    """
+
+    lower_bound: float
+    upper_bound: float
+
+    def sample(self, num: int, rng: np.random.Generator | None = None) -> Array:
+        """Generate random samples from the loguniform distribution.
+
+        Args:
+            num: Number of samples to generate
+            rng: Random number generator
+
+        """
+        if rng is None:
+            rng = RNG
+        return stats.loguniform.rvs(
+            self.lower_bound, self.upper_bound, size=num, random_state=rng
+        )
+
+
 @dataclass
 class Normal:
     """Normal (Gaussian) distribution for unbounded parameters.
modelbase2/nnarchitectures.py ADDED
@@ -0,0 +1,128 @@
+"""Neural network architectures.
+
+This module provides implementations of neural network architectures used for mechanistic learning.
+
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, cast
+
+import torch
+from torch import nn
+
+if TYPE_CHECKING:
+    from collections.abc import Callable
+
+__all__ = ["DefaultDevice", "LSTMnn", "MLP"]
+
+DefaultDevice = torch.device("cpu")
+
+
+class MLP(nn.Module):
+    """Multilayer Perceptron (MLP) for surrogate modeling and neural posterior estimation.
+
+    Attributes:
+        net: Sequential neural network model.
+
+    Methods:
+        forward: Forward pass through the neural network.
+
+    """
+
+    def __init__(
+        self,
+        n_inputs: int,
+        layers: list[int],
+        activation: Callable | None = nn.ReLU(),
+        output_activation: Callable | None = None,
+    ) -> None:
+        """Initializes the MLP with the given number of inputs and list of (hidden) layers.
+
+        Args:
+            n_inputs (int): The number of input features.
+            layers (list[int]): A list containing the number of neurons in each hidden layer and the output layer.
+            activation (Callable | None, default nn.ReLU()): The activation function applied after each hidden layer.
+            output_activation (Callable | None, default None): The activation function applied after the final (output) layer.
+
+        For instance, MLP(10, layers=[50, 50, 10]) initializes a neural network with the following architecture:
+        - Linear layer with `n_inputs` inputs and 50 outputs
+        - ReLU activation
+        - Linear layer with 50 inputs and 50 outputs
+        - ReLU activation
+        - Linear layer with 50 inputs and 10 outputs
+
+        The weights of the linear layers are initialized with a normal distribution
+        (mean=0, std=0.1) and the biases are initialized to 0.
+
+        """
+        super().__init__()
+        self.layers = layers
+        self.activation = activation
+        self.output_activation = output_activation
+
+        levels = []
+        previous_neurons = n_inputs
+
+        for idx, neurons in enumerate(self.layers):
+            if idx == (len(self.layers) - 1):
+                levels.append(nn.Linear(previous_neurons, neurons))
+
+                if self.output_activation:
+                    levels.append(self.output_activation)
+
+            else:
+                levels.append(nn.Linear(previous_neurons, neurons))
+
+                if self.activation:
+                    levels.append(self.activation)
+
+            previous_neurons = neurons
+
+        self.net = nn.Sequential(*levels)
+
+        for m in self.net.modules():
+            if isinstance(m, nn.Linear):
+                nn.init.normal_(m.weight, mean=0, std=0.1)
+                nn.init.constant_(m.bias, val=0)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        """Forward pass through the neural network.
+
+        Args:
+            x: Input tensor.
+
+        Returns:
+            torch.Tensor: Output tensor.
+
+        """
+        return self.net(x)
+
+
+class LSTMnn(nn.Module):
+    """Default LSTM neural network model for time-series approximation."""
+
+    def __init__(self, n_inputs: int, n_outputs: int, n_hidden: int) -> None:
+        """Initializes the neural network model.
+
+        Args:
+            n_inputs (int): Number of input features.
+            n_outputs (int): Number of output features.
+            n_hidden (int): Number of hidden units in the LSTM layer.
+
+        """
+        super().__init__()
+
+        self.n_hidden = n_hidden
+
+        self.lstm = nn.LSTM(n_inputs, n_hidden)
+        self.to_out = nn.Linear(n_hidden, n_outputs)
+
+        nn.init.normal_(self.to_out.weight, mean=0, std=0.1)
+        nn.init.constant_(self.to_out.bias, val=0)
+
+    def forward(self, x: torch.Tensor) -> torch.Tensor:
+        """Forward pass through the neural network."""
+        # lstm_out, (hidden_state, cell_state)
+        _, (hn, _) = self.lstm(x)
+        return cast(torch.Tensor, self.to_out(hn[-1]))  # Use last hidden state
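
The new module consolidates the networks that later hunks remove: MLP generalizes the fixed two-hidden-layer `DefaultSSAproximator` and `Dense` classes, and LSTMnn replaces `DefaultTimeSeriesApproximator`. A hedged construction-and-call sketch (tensor shapes are illustrative assumptions):

```python
# Sketch of the new architectures; assumes torch is installed and
# modelbase2.nnarchitectures is importable as shipped in this wheel.
import torch
from modelbase2.nnarchitectures import MLP, LSTMnn, DefaultDevice

mlp = MLP(n_inputs=10, layers=[50, 50, 3]).to(DefaultDevice)
y = mlp(torch.rand(8, 10))    # batch of 8 feature vectors -> shape (8, 3)

lstm = LSTMnn(n_inputs=4, n_outputs=2, n_hidden=16)
z = lstm(torch.rand(20, 4))   # unbatched sequence of 20 steps -> shape (2,)
```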
modelbase2/npe.py CHANGED
@@ -4,10 +4,6 @@ This module provides classes and functions for training neural network models to
 parameters in metabolic models. It includes functionality for both steady-state and
 time-series data.

-Classes:
-    DefaultSSAproximator: Default neural network model for steady-state approximation
-    DefaultTimeSeriesApproximator: Default neural network model for time-series approximation
-
 Functions:
     train_torch_surrogate: Train a PyTorch surrogate model
     train_torch_time_course_estimator: Train a PyTorch time course estimator
@@ -18,9 +14,6 @@ from __future__ import annotations
 __all__ = [
     "AbstractEstimator",
     "DefaultCache",
-    "DefaultDevice",
-    "DefaultSSAproximator",
-    "DefaultTimeSeriesApproximator",
     "TorchSSEstimator",
     "TorchTimeCourseEstimator",
     "train_torch_ss_estimator",
@@ -39,77 +32,12 @@ import tqdm
 from torch import nn
 from torch.optim.adam import Adam

+from modelbase2.nnarchitectures import MLP, DefaultDevice, LSTMnn
 from modelbase2.parallel import Cache

-DefaultDevice = torch.device("cpu")
 DefaultCache = Cache(Path(".cache"))


-class DefaultSSAproximator(nn.Module):
-    """Default neural network model for steady-state approximation."""
-
-    def __init__(self, n_inputs: int, n_outputs: int, n_hidden: int = 50) -> None:
-        """Initializes the neural network with the specified number of inputs and outputs.
-
-        Args:
-            n_inputs (int): The number of input features.
-            n_outputs (int): The number of output features.
-            n_hidden (int): The number of hidden units in the fully connected layers
-
-        The network consists of three fully connected layers with ReLU activations in between.
-        The weights of the linear layers are initialized with a normal distribution (mean=0, std=0.1),
-        and the biases are initialized to zero.
-
-        """
-        super().__init__()
-
-        self.net = nn.Sequential(
-            nn.Linear(n_inputs, n_hidden),
-            nn.ReLU(),
-            nn.Linear(n_hidden, n_hidden),
-            nn.ReLU(),
-            nn.Linear(n_hidden, n_outputs),
-        )
-
-        for m in self.net.modules():
-            if isinstance(m, nn.Linear):
-                nn.init.normal_(m.weight, mean=0, std=0.1)
-                nn.init.constant_(m.bias, val=0)
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        """Forward pass through the neural network."""
-        return cast(torch.Tensor, self.net(x))
-
-
-class DefaultTimeSeriesApproximator(nn.Module):
-    """Default neural network model for time-series approximation."""
-
-    def __init__(self, n_inputs: int, n_outputs: int, n_hidden: int) -> None:
-        """Initializes the neural network model.
-
-        Args:
-            n_inputs (int): Number of input features.
-            n_outputs (int): Number of output features.
-            n_hidden (int): Number of hidden units in the LSTM layer.
-
-        """
-        super().__init__()
-
-        self.n_hidden = n_hidden
-
-        self.lstm = nn.LSTM(n_inputs, n_hidden)
-        self.to_out = nn.Linear(n_hidden, n_outputs)
-
-        nn.init.normal_(self.to_out.weight, mean=0, std=0.1)
-        nn.init.constant_(self.to_out.bias, val=0)
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        """Forward pass through the neural network."""
-        # lstm_out, (hidden_state, cell_state)
-        _, (hn, _) = self.lstm(x)
-        return cast(torch.Tensor, self.to_out(hn[-1]))  # Use last hidden state
-
-
 @dataclass(kw_only=True)
 class AbstractEstimator:
     """Abstract class for parameter estimation using neural networks."""
@@ -229,7 +157,7 @@ def train_torch_ss_estimator(
         targets: DataFrame containing the target values for training
         epochs: Number of training epochs
         batch_size: Size of mini-batches for training (None for full-batch)
-        approximator: Predefined neural network model (None to use default)
+        approximator: Predefined neural network model (None to use default MLP)
         optimimzer_cls: Optimizer class to use for training (default: Adam)
         device: Device to run the training on (default: DefaultDevice)

@@ -238,10 +166,10 @@

     """
     if approximator is None:
-        approximator = DefaultSSAproximator(
-            n_inputs=len(features.columns),
-            n_outputs=len(targets.columns),
-            n_hidden=max(2 * len(features.columns) * len(targets.columns), 10),
+        n_hidden = max(2 * len(features.columns) * len(targets.columns), 10)
+        n_outputs = len(targets.columns)
+        approximator = MLP(
+            n_inputs=len(features.columns), layers=[n_hidden, n_hidden, n_outputs]
         ).to(device)

     features_ = torch.Tensor(features.to_numpy(), device=device)
@@ -295,7 +223,7 @@ def train_torch_time_course_estimator(
         targets: DataFrame containing the target values for training
         epochs: Number of training epochs
         batch_size: Size of mini-batches for training (None for full-batch)
-        approximator: Predefined neural network model (None to use default)
+        approximator: Predefined neural network model (None to use default LSTM)
         optimimzer_cls: Optimizer class to use for training (default: Adam)
         device: Device to run the training on (default: DefaultDevice)

@@ -304,7 +232,7 @@

     """
     if approximator is None:
-        approximator = DefaultTimeSeriesApproximator(
+        approximator = LSTMnn(
             n_inputs=len(features.columns),
             n_outputs=len(targets.columns),
             n_hidden=1,
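
The swap preserves the old default architecture. A hedged sketch reconstructing the removed DefaultSSAproximator from the new MLP, using the same sizing rule as the hunk above (the toy DataFrames are illustrative assumptions):

```python
# Sketch: how npe.py now builds its default steady-state approximator.
import pandas as pd
from modelbase2.nnarchitectures import MLP

features = pd.DataFrame({"k1": [0.1, 0.2], "k2": [1.0, 2.0]})  # 2 inputs
targets = pd.DataFrame({"v1": [0.5, 0.9]})                     # 1 output

n_hidden = max(2 * len(features.columns) * len(targets.columns), 10)
approximator = MLP(
    n_inputs=len(features.columns),
    layers=[n_hidden, n_hidden, len(targets.columns)],
)
# Equivalent to the removed DefaultSSAproximator(n_inputs=2, n_outputs=1, n_hidden=10):
# Linear(2, 10) -> ReLU -> Linear(10, 10) -> ReLU -> Linear(10, 1)
```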
modelbase2/plot.py CHANGED
@@ -399,7 +399,10 @@ def lines(
     fig, ax = _default_fig_ax(ax=ax, grid=grid)
     ax.plot(x.index, x)
     _default_labels(ax, xlabel=x.index.name, ylabel=None)
-    ax.legend(x.columns)
+    if isinstance(x, pd.Series):
+        ax.legend([str(x.name)])
+    else:
+        ax.legend(x.columns)
     return fig, ax


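A hedged example of the behavior this hunk fixes; the call signature of `lines` beyond the first argument is assumed from the context lines above:

```python
# Before this change, passing a pandas Series made ax.legend(x.columns)
# raise AttributeError (Series has no .columns); a Series is now labeled
# by its name instead.
import pandas as pd
from modelbase2 import plot

series = pd.Series([1.0, 2.0, 4.0], index=[0.0, 1.0, 2.0], name="ATP")
fig, ax = plot.lines(series)  # legend entry: "ATP"
```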
modelbase2/surrogates/__init__.py CHANGED
@@ -19,12 +19,11 @@ from __future__ import annotations
 import contextlib

 with contextlib.suppress(ImportError):
-    from ._torch import Dense, TorchSurrogate, train_torch_surrogate
+    from ._torch import TorchSurrogate, train_torch_surrogate

 from ._poly import PolySurrogate, train_polynomial_surrogate

 __all__ = [
-    "Dense",
     "PolySurrogate",
     "TorchSurrogate",
     "train_polynomial_surrogate",
modelbase2/surrogates/_poly.py CHANGED
@@ -3,16 +3,27 @@ from dataclasses import dataclass

 import numpy as np
 import pandas as pd
-from numpy.polynomial.polynomial import Polynomial
+from numpy import polynomial
+from typing import Literal

 from modelbase2.types import AbstractSurrogate, ArrayLike

-__all__ = ["PolySurrogate", "train_polynomial_surrogate"]
+__all__ = ["PolySurrogate", "PolynomialExpansion", "train_polynomial_surrogate"]
+
+# Custom union type covering numpy's polynomial convenience classes
+PolynomialExpansion = (
+    polynomial.polynomial.Polynomial
+    | polynomial.chebyshev.Chebyshev
+    | polynomial.legendre.Legendre
+    | polynomial.laguerre.Laguerre
+    | polynomial.hermite.Hermite
+    | polynomial.hermite_e.HermiteE
+)


 @dataclass(kw_only=True)
 class PolySurrogate(AbstractSurrogate):
-    model: Polynomial
+    model: PolynomialExpansion

     def predict_raw(self, y: np.ndarray) -> np.ndarray:
         return self.model(y)
@@ -21,15 +32,17 @@ class PolySurrogate(AbstractSurrogate):
 def train_polynomial_surrogate(
     feature: ArrayLike,
     target: ArrayLike,
+    series: Literal["Power", "Chebyshev", "Legendre", "Laguerre", "Hermite", "HermiteE"] = "Power",
     degrees: Iterable[int] = (1, 2, 3, 4, 5, 6, 7),
     surrogate_args: list[str] | None = None,
     surrogate_stoichiometries: dict[str, dict[str, float]] | None = None,
 ) -> tuple[PolySurrogate, pd.DataFrame]:
-    """Train a polynomial surrogate model.
+    """Train a surrogate model based on a function series expansion.

     Args:
         feature: Input data as a numpy array.
         target: Output data as a numpy array.
+        series: Base functions for the surrogate model.
         degrees: Degrees of the polynomial to fit to the data.
         surrogate_args: Additional arguments for the surrogate model.
         surrogate_stoichiometries: Stoichiometries for the surrogate model.
@@ -41,7 +54,19 @@ def train_polynomial_surrogate(
     feature = np.array(feature, dtype=float)
     target = np.array(target, dtype=float)

-    models = [Polynomial.fit(feature, target, degree) for degree in degrees]
+    # Choose numpy polynomial convenience classes
+    series_dictionary = {
+        "Power": polynomial.polynomial.Polynomial,
+        "Chebyshev": polynomial.chebyshev.Chebyshev,
+        "Legendre": polynomial.legendre.Legendre,
+        "Laguerre": polynomial.laguerre.Laguerre,
+        "Hermite": polynomial.hermite.Hermite,
+        "HermiteE": polynomial.hermite_e.HermiteE,
+    }
+
+    fn_series = series_dictionary[series]
+
+    models = [fn_series.fit(feature, target, degree) for degree in degrees]
     predictions = np.array([model(feature) for model in models], dtype=float)
     errors = np.sqrt(np.mean(np.square(predictions - target.reshape(1, -1)), axis=1))
     log_likelihood = -0.5 * np.sum(
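
A hedged usage sketch of the new `series` option; the import path assumes the re-export from `modelbase2.surrogates` shown in the `__init__.py` hunk above:

```python
# Fit the same data with two different basis expansions and compare.
import numpy as np
from modelbase2.surrogates import train_polynomial_surrogate

x = np.linspace(0.1, 2.0, 50)
y = 1.0 / x  # toy rate law to approximate

power_fit, power_stats = train_polynomial_surrogate(x, y)  # default "Power"
cheb_fit, cheb_stats = train_polynomial_surrogate(x, y, series="Chebyshev")
# Each call returns the trained PolySurrogate plus a DataFrame of per-degree
# fit statistics (the errors / log-likelihoods computed in the hunk above).
```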
modelbase2/surrogates/_torch.py CHANGED
@@ -8,10 +8,9 @@ from torch import nn
 from torch.optim.adam import Adam

 from modelbase2.types import AbstractSurrogate
+from modelbase2.nnarchitectures import MLP, DefaultDevice

-__all__ = ["DefaultDevice", "Dense", "TorchSurrogate", "train_torch_surrogate"]
-
-DefaultDevice = torch.device("cpu")
+__all__ = ["TorchSurrogate", "train_torch_surrogate"]


 @dataclass(kw_only=True)
@@ -44,63 +43,6 @@ class TorchSurrogate(AbstractSurrogate):
     ).numpy()


-class Dense(nn.Module):
-    """Neural network approximator for surrogate modeling.
-
-    Attributes:
-        net: Sequential neural network model.
-
-    Methods:
-        forward: Forward pass through the neural network.
-
-    """
-
-    def __init__(self, n_inputs: int, n_outputs: int) -> None:
-        """Initializes the surrogate model with the given number of inputs and outputs.
-
-        Args:
-            n_inputs (int): The number of input features.
-            n_outputs (int): The number of output features.
-
-        Initializes a neural network with the following architecture:
-        - Linear layer with `n_inputs` inputs and 50 outputs
-        - ReLU activation
-        - Linear layer with 50 inputs and 50 outputs
-        - ReLU activation
-        - Linear layer with 50 inputs and `n_outputs` outputs
-
-        The weights of the linear layers are initialized with a normal distribution
-        (mean=0, std=0.1) and the biases are initialized to 0.
-
-        """
-        super().__init__()
-
-        self.net = nn.Sequential(
-            nn.Linear(n_inputs, 50),
-            nn.ReLU(),
-            nn.Linear(50, 50),
-            nn.ReLU(),
-            nn.Linear(50, n_outputs),
-        )
-
-        for m in self.net.modules():
-            if isinstance(m, nn.Linear):
-                nn.init.normal_(m.weight, mean=0, std=0.1)
-                nn.init.constant_(m.bias, val=0)
-
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        """Forward pass through the neural network.
-
-        Args:
-            x: Input tensor.
-
-        Returns:
-            torch.Tensor: Output tensor.
-
-        """
-        return self.net(x)
-
-
 def _train_batched(
     aprox: nn.Module,
     features: pd.DataFrame,
@@ -205,7 +147,7 @@ def train_torch_surrogate(
         surrogate_args: List of input variable names for the surrogate model.
         surrogate_stoichiometries: Dictionary mapping reaction names to stoichiometries.
         batch_size: Size of mini-batches for training (None for full-batch).
-        approximator: Predefined neural network model (None to use default).
+        approximator: Predefined neural network model (None to use the default MLP with layers [50, 50, n_outputs]).
         optimimzer_cls: Optimizer class to use for training (default: Adam).
         device: Device to run the training on (default: DefaultDevice).

@@ -214,9 +156,9 @@

     """
     if approximator is None:
-        approximator = Dense(
+        approximator = MLP(
             n_inputs=len(features.columns),
-            n_outputs=len(targets.columns),
+            layers=[50, 50, len(targets.columns)],
         ).to(device)

     optimizer = optimimzer_cls(approximator.parameters())
modelbase2-0.2.0.dist-info/METADATA → modelbase2-0.3.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: modelbase2
-Version: 0.2.0
+Version: 0.3.0
 Summary: A package to build metabolic models
 Author-email: Marvin van Aalst <marvin.vanaalst@gmail.com>
 Maintainer-email: Marvin van Aalst <marvin.vanaalst@gmail.com>
@@ -59,6 +59,18 @@ Description-Content-Type: text/markdown

 # modelbase

+## Installation
+
+You can install modelbase using pip: `pip install modelbase2`
+
+If you want access to the sundials solver suite via the [assimulo](https://jmodelica.org/assimulo/) package, we recommend setting up a virtual environment via [pixi](https://pixi.sh/) or [mamba / conda](https://mamba.readthedocs.io/en/latest/) using the [conda-forge](https://conda-forge.org/) channel.
+
+```bash
+pixi init
+pixi add python assimulo
+pixi add --pypi modelbase2
+```
+

 ## Development setup

modelbase2-0.2.0.dist-info/RECORD → modelbase2-0.3.0.dist-info/RECORD CHANGED
@@ -1,5 +1,5 @@
-modelbase2/__init__.py,sha256=mGA_ckCmdG8Bi1i_eiES7JShez_4qwobwRVYaMBe27E,3967
-modelbase2/distributions.py,sha256=sbqmkw3PTPK0vWOcIFqP3WXH_2Q3i-_pl_NmvAYCngM,7927
+modelbase2/__init__.py,sha256=ArYJZoCTulkjFctJzxk7c9CDBXYRl2J9_LXr1EORilk,4048
+modelbase2/distributions.py,sha256=biNi8bUdWNxtUWFF4A1HaPcaDYtjdi-FkBF0OmATD3c,8688
 modelbase2/fit.py,sha256=kCuwsuUs9DSRyeQXmP6RSclGGMVL1Q7XD3KLhu073yo,8010
 modelbase2/fns.py,sha256=8JtIzPk3DAnNHz3LoJ1ukLFTjPNO1rNCeZ7VnRmJY2o,4503
 modelbase2/label_map.py,sha256=LUwcOHQWiGfBGV5XUmPM_SOwM9IyDVcQVJ11DPfVpAo,17774
@@ -7,11 +7,12 @@ modelbase2/linear_label_map.py,sha256=gA8CHxcehgtI6ovwZ9qNUPDvxfqbO1J1kBC_mltD4T
 modelbase2/mc.py,sha256=zlDL7e_udpIMRhSjfFJo5AwkigD0B_3H2rQxyelBuzI,16285
 modelbase2/mca.py,sha256=nMS2VnzR2VEujCFUaj9WL82CNd-oxTb3jCHP8IlJvxA,8845
 modelbase2/model.py,sha256=SJloqNi8C5oSrIyknMkXfPFDBHPW-ybrT_F9nsbREzQ,53854
-modelbase2/npe.py,sha256=fSnEyXvsS1h3S7rIbPtRW2oeJc917yt73rSw75Het3o,11199
+modelbase2/nnarchitectures.py,sha256=OA1X4UHrn7gsLuuqxK6Dhv5aiKnQflhHezYCUV-NuO8,4012
+modelbase2/npe.py,sha256=PJO5OiUfaeklkk9HnQ3-uJ1GLsZjE_k5MDA2tM-xhV0,8591
 modelbase2/parallel.py,sha256=kX4Td5YoovDwZp6kX_3cfO6QtHSS9ieJ0bMZiKs3Xv8,5002
 modelbase2/parameterise.py,sha256=7VrYxrQv0visraqUthWSnWfx-cxh2evlXbszIY5031U,690
 modelbase2/paths.py,sha256=uatKXDa79uniUB2Z3dr8eBJVuUPXDI-o_bf-DqPKq1Y,1039
-modelbase2/plot.py,sha256=tsQRUeKFKpthWOw8JqKhqcxBTBaMscJlemJzNpENAnc,22640
+modelbase2/plot.py,sha256=txKF6Xnyh2JPJ06Wu803Wn7_VijMMJ1Kbq4WQB-xKE8,22720
 modelbase2/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 modelbase2/scan.py,sha256=PvWZA4EgNS0JVMvm87bF72hSmhvo6O2KGg4vRzxve_8,18104
 modelbase2/scope.py,sha256=4twnEh8LrTmlLE-uRvubVkE3SSWejlLvtBzTCPqG3Aw,3710
@@ -33,10 +34,10 @@ modelbase2/sbml/_import.py,sha256=uT5JpFvCKjQNBFmGPba61xYShHmjzjczqnaYflilSMI,21
 modelbase2/sbml/_mathml.py,sha256=bNk9RQ_NQFDhY1R354p-gwqqHaIiyAwZ1xLPHHhiguQ,24436
 modelbase2/sbml/_name_conversion.py,sha256=XK9DEyzhrD0GBBwwjK9RA0yORrDX5c-Uvx0VtKMR5rA,1325
 modelbase2/sbml/_unit_conversion.py,sha256=dW_I6_Ou09ccwnp6LIdrPriIQnQUK5lJcjzM2Fawm6U,1927
-modelbase2/surrogates/__init__.py,sha256=0OH8BmdB5toHo7sXcZHZVASx3EEnpzcc7TeTP2tF5Ek,944
-modelbase2/surrogates/_poly.py,sha256=zKeoj3FchMAIgbnq99VkH_RfLsdy6N5lZaV0AeG9JKs,2185
-modelbase2/surrogates/_torch.py,sha256=7XHES1NyVmGTc8qmcScsfroai6rJVeV4eAGU2-oqH9A,7402
-modelbase2-0.2.0.dist-info/METADATA,sha256=f3af5XY5IQ0GkvPWYqUcnFJ79EKxxDjufw8Z2yCwk3o,2853
-modelbase2-0.2.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-modelbase2-0.2.0.dist-info/licenses/LICENSE,sha256=qvG2VolmSkrcocL34V1ieOx-Rn-fpVcUbb25gHzVgZw,35079
-modelbase2-0.2.0.dist-info/RECORD,,
+modelbase2/surrogates/__init__.py,sha256=N_iXERECKvmrHiihwnyQEKOSBsmlGEuQhEotn-mWKdk,924
+modelbase2/surrogates/_poly.py,sha256=IRVpuTg5fN8QFQfTdJWpKYoBDhhY8x3BwHWz8fofY3A,3096
+modelbase2/surrogates/_torch.py,sha256=b8kJJjTPLZLgP81ezVo-J2HmaPjyVhTQzDVkzBfkmAQ,5791
+modelbase2-0.3.0.dist-info/METADATA,sha256=EsTsuziNBP-fjSMujjRuHss6XwvxvjDskFkgQhZQeFg,3312
+modelbase2-0.3.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+modelbase2-0.3.0.dist-info/licenses/LICENSE,sha256=qvG2VolmSkrcocL34V1ieOx-Rn-fpVcUbb25gHzVgZw,35079
+modelbase2-0.3.0.dist-info/RECORD,,