mxlpy 0.18.0__py3-none-any.whl → 0.20.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (51)
  1. mxlpy/__init__.py +13 -9
  2. mxlpy/compare.py +240 -0
  3. mxlpy/experimental/diff.py +16 -4
  4. mxlpy/fit.py +6 -11
  5. mxlpy/fns.py +37 -42
  6. mxlpy/identify.py +10 -3
  7. mxlpy/integrators/__init__.py +4 -3
  8. mxlpy/integrators/int_assimulo.py +16 -9
  9. mxlpy/integrators/int_scipy.py +13 -9
  10. mxlpy/label_map.py +7 -3
  11. mxlpy/linear_label_map.py +4 -2
  12. mxlpy/mc.py +5 -14
  13. mxlpy/mca.py +4 -4
  14. mxlpy/meta/__init__.py +6 -4
  15. mxlpy/meta/codegen_latex.py +180 -87
  16. mxlpy/meta/codegen_modebase.py +3 -1
  17. mxlpy/meta/codegen_py.py +11 -3
  18. mxlpy/meta/source_tools.py +9 -5
  19. mxlpy/model.py +187 -100
  20. mxlpy/nn/__init__.py +24 -5
  21. mxlpy/nn/_keras.py +92 -0
  22. mxlpy/nn/_torch.py +25 -18
  23. mxlpy/npe/__init__.py +21 -16
  24. mxlpy/npe/_keras.py +326 -0
  25. mxlpy/npe/_torch.py +56 -60
  26. mxlpy/parallel.py +5 -2
  27. mxlpy/parameterise.py +11 -3
  28. mxlpy/plot.py +205 -52
  29. mxlpy/report.py +33 -8
  30. mxlpy/sbml/__init__.py +3 -3
  31. mxlpy/sbml/_data.py +7 -6
  32. mxlpy/sbml/_export.py +8 -1
  33. mxlpy/sbml/_mathml.py +8 -7
  34. mxlpy/sbml/_name_conversion.py +5 -1
  35. mxlpy/scan.py +14 -19
  36. mxlpy/simulator.py +34 -31
  37. mxlpy/surrogates/__init__.py +25 -17
  38. mxlpy/surrogates/_keras.py +139 -0
  39. mxlpy/surrogates/_poly.py +25 -10
  40. mxlpy/surrogates/_qss.py +34 -0
  41. mxlpy/surrogates/_torch.py +50 -32
  42. mxlpy/symbolic/__init__.py +5 -3
  43. mxlpy/symbolic/strikepy.py +5 -2
  44. mxlpy/symbolic/symbolic_model.py +14 -5
  45. mxlpy/types.py +61 -120
  46. {mxlpy-0.18.0.dist-info → mxlpy-0.20.0.dist-info}/METADATA +25 -24
  47. mxlpy-0.20.0.dist-info/RECORD +55 -0
  48. mxlpy/nn/_tensorflow.py +0 -0
  49. mxlpy-0.18.0.dist-info/RECORD +0 -51
  50. {mxlpy-0.18.0.dist-info → mxlpy-0.20.0.dist-info}/WHEEL +0 -0
  51. {mxlpy-0.18.0.dist-info → mxlpy-0.20.0.dist-info}/licenses/LICENSE +0 -0
mxlpy/simulator.py CHANGED
@@ -21,14 +21,17 @@ from sympy import lambdify
 from mxlpy.integrators import DefaultIntegrator
 from mxlpy.symbolic import to_symbolic_model
 
-__all__ = ["Result", "Simulator"]
-
 if TYPE_CHECKING:
     from collections.abc import Iterator
 
     from mxlpy.model import Model
     from mxlpy.types import Array, ArrayLike, IntegratorProtocol, IntegratorType
 
+__all__ = [
+    "Result",
+    "Simulator",
+]
+
 
 def _normalise_split_results(
     results: list[pd.DataFrame],
@@ -500,6 +503,35 @@ class Simulator:
         self._handle_simulation_results(time, results, skipfirst=True)
         return self
 
+    def simulate_over_protocol(
+        self,
+        protocol: pd.DataFrame,
+        time_points_per_step: int = 10,
+    ) -> Self:
+        """Simulate the model over a given protocol.
+
+        Examples:
+            >>> Simulator(model).simulate_over_protocol(
+            ...     protocol,
+            ...     time_points_per_step=10
+            ... )
+
+        Args:
+            protocol: DataFrame containing the protocol.
+            time_points_per_step: Number of time points per step.
+
+        Returns:
+            The Simulator instance with updated results.
+
+        """
+        for t_end, pars in protocol.iterrows():
+            t_end = cast(pd.Timedelta, t_end)
+            self.model.update_parameters(pars.to_dict())
+            self.simulate(t_end.total_seconds(), steps=time_points_per_step)
+            if self.variables is None:
+                break
+        return self
+
     def simulate_to_steady_state(
         self,
         tolerance: float = 1e-6,
@@ -535,35 +567,6 @@
         )
         return self
 
-    def simulate_over_protocol(
-        self,
-        protocol: pd.DataFrame,
-        time_points_per_step: int = 10,
-    ) -> Self:
-        """Simulate the model over a given protocol.
-
-        Examples:
-            >>> Simulator(model).simulate_over_protocol(
-            ...     protocol,
-            ...     time_points_per_step=10
-            ... )
-
-        Args:
-            protocol: DataFrame containing the protocol.
-            time_points_per_step: Number of time points per step.
-
-        Returns:
-            The Simulator instance with updated results.
-
-        """
-        for t_end, pars in protocol.iterrows():
-            t_end = cast(pd.Timedelta, t_end)
-            self.model.update_parameters(pars.to_dict())
-            self.simulate(t_end.total_seconds(), steps=time_points_per_step)
-            if self.variables is None:
-                break
-        return self
-
     def get_result(self) -> Result | None:
         """Get result of the simulation.
 
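The move is behaviour-preserving: `simulate_over_protocol` now sits before `simulate_to_steady_state` instead of after it. The expected protocol layout is easy to miss in the diff, so here is a minimal sketch derived from the loop body (the `k_in` parameter name is a placeholder): the DataFrame index holds each step's end time as a `pd.Timedelta`, and each column is a parameter applied during that step.

import pandas as pd

# Hypothetical two-step protocol. Index entries are cumulative end times:
# step one runs to t=10 s with k_in=1.0, step two to t=20 s with k_in=0.5.
# Each row's columns are passed to model.update_parameters().
protocol = pd.DataFrame(
    {"k_in": [1.0, 0.5]},
    index=[pd.Timedelta(seconds=10), pd.Timedelta(seconds=20)],
)

# Simulator(model).simulate_over_protocol(protocol, time_points_per_step=10)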
mxlpy/surrogates/__init__.py CHANGED
@@ -4,29 +4,37 @@ This module provides classes and functions for creating and training surrogate m
 for metabolic simulations. It includes functionality for both steady-state and time-series
 data using neural networks.
 
-Classes:
-    AbstractSurrogate: Abstract base class for surrogate models.
-    TorchSurrogate: Surrogate model using PyTorch.
-    Approximator: Neural network approximator for surrogate modeling.
-
-Functions:
-    train_torch_surrogate: Train a PyTorch surrogate model.
-    train_torch_time_course_estimator: Train a PyTorch time course estimator.
 """
 
 from __future__ import annotations
 
-import contextlib
+from typing import TYPE_CHECKING
 
-with contextlib.suppress(ImportError):
-    from ._torch import Torch, TorchTrainer, train_torch
+if TYPE_CHECKING:
+    import contextlib
 
-from ._poly import Polynomial, train_polynomial
+    with contextlib.suppress(ImportError):
+        from . import _keras as keras
+        from . import _torch as torch
+else:
+    from lazy_import import lazy_module
+
+    keras = lazy_module(
+        "mxlpy.surrogates._keras",
+        error_strings={"module": "keras", "install_name": "mxlpy[tf]"},
+    )
+    torch = lazy_module(
+        "mxlpy.surrogates._torch",
+        error_strings={"module": "torch", "install_name": "mxlpy[torch]"},
+    )
+
+
+from . import _poly as poly
+from . import _qss as qss
 
 __all__ = [
-    "Polynomial",
-    "Torch",
-    "TorchTrainer",
-    "train_polynomial",
-    "train_torch",
+    "keras",
+    "poly",
+    "qss",
+    "torch",
 ]
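The upshot for callers: backends are now namespaces (`poly`, `qss`, `torch`, `keras`) instead of flat names, and the optional deep-learning backends only import on first attribute access, raising an install hint (`mxlpy[torch]` / `mxlpy[tf]`) when the extra is missing. A migration sketch grounded in the renames above:

from mxlpy.surrogates import poly, qss

# Old flat names map onto the new namespaces:
#   train_polynomial -> poly.train        Polynomial -> poly.Surrogate
#   train_torch      -> torch.train       Torch      -> torch.Surrogate

# torch and keras are lazy modules: the import below is cheap, and the
# real module (or its install hint if missing) resolves on first use.
from mxlpy.surrogates import torch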
mxlpy/surrogates/_keras.py ADDED
@@ -0,0 +1,139 @@
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Self, cast
+
+import keras
+import numpy as np
+import pandas as pd
+
+from mxlpy.nn._keras import MLP
+from mxlpy.nn._keras import train as _train
+from mxlpy.types import AbstractSurrogate, Array, Derived
+
+__all__ = [
+    "DefaultLoss",
+    "DefaultOptimizer",
+    "LossFn",
+    "Optimizer",
+    "Surrogate",
+    "Trainer",
+    "train",
+]
+
+type Optimizer = keras.optimizers.Optimizer | str
+type LossFn = keras.losses.Loss | str
+
+DefaultOptimizer = keras.optimizers.Adam()
+DefaultLoss = keras.losses.MeanAbsoluteError()
+
+
+@dataclass(kw_only=True)
+class Surrogate(AbstractSurrogate):
+    model: keras.Model
+
+    def predict_raw(self, y: Array) -> Array:
+        return np.atleast_1d(np.squeeze(self.model.predict(y)))
+
+    def predict(
+        self, args: dict[str, float | pd.Series | pd.DataFrame]
+    ) -> dict[str, float]:
+        return dict(
+            zip(
+                self.outputs,
+                self.predict_raw(np.array([args[arg] for arg in self.args])),
+                strict=True,
+            )
+        )
+
+
+@dataclass(init=False)
+class Trainer:
+    features: pd.DataFrame
+    targets: pd.DataFrame
+    model: keras.Model
+    optimizer: Optimizer | str
+    losses: list[pd.Series]
+    loss_fn: LossFn
+
+    def __init__(
+        self,
+        features: pd.DataFrame,
+        targets: pd.DataFrame,
+        model: keras.Model | None = None,
+        optimizer: Optimizer = DefaultOptimizer,
+        loss: LossFn = DefaultLoss,
+    ) -> None:
+        self.features = features
+        self.targets = targets
+        if model is None:
+            model = MLP(
+                n_inputs=len(features.columns),
+                neurons_per_layer=[50, 50, len(targets.columns)],
+            )
+        self.model = model
+        model.compile(optimizer=cast(str, optimizer), loss=loss)
+
+        self.losses = []
+
+    def train(self, epochs: int, batch_size: int | None = None) -> Self:
+        losses = _train(
+            model=self.model,
+            features=self.features,
+            targets=self.targets,
+            epochs=epochs,
+            batch_size=batch_size,
+        )
+
+        if len(self.losses) > 0:
+            losses.index += self.losses[-1].index[-1]
+        self.losses.append(losses)
+
+        return self
+
+    def get_loss(self) -> pd.Series:
+        return pd.concat(self.losses)
+
+    def get_surrogate(
+        self,
+        surrogate_args: list[str] | None = None,
+        surrogate_outputs: list[str] | None = None,
+        surrogate_stoichiometries: dict[str, dict[str, float | Derived]] | None = None,
+    ) -> Surrogate:
+        return Surrogate(
+            model=self.model,
+            args=surrogate_args if surrogate_args is not None else [],
+            outputs=surrogate_outputs if surrogate_outputs is not None else [],
+            stoichiometries=surrogate_stoichiometries
+            if surrogate_stoichiometries is not None
+            else {},
+        )
+
+
+def train(
+    features: pd.DataFrame,
+    targets: pd.DataFrame,
+    epochs: int,
+    surrogate_args: list[str] | None = None,
+    surrogate_outputs: list[str] | None = None,
+    surrogate_stoichiometries: dict[str, dict[str, float | Derived]] | None = None,
+    batch_size: int | None = None,
+    model: keras.Model | None = None,
+    optimizer: Optimizer = DefaultOptimizer,
+    loss: LossFn = DefaultLoss,
+) -> tuple[Surrogate, pd.Series]:
+    trainer = Trainer(
+        features=features,
+        targets=targets,
+        model=model,
+        optimizer=optimizer,
+        loss=loss,
+    ).train(
+        epochs=epochs,
+        batch_size=batch_size,
+    )
+    return trainer.get_surrogate(
+        surrogate_args=surrogate_args,
+        surrogate_outputs=surrogate_outputs,
+        surrogate_stoichiometries=surrogate_stoichiometries,
+    ), trainer.get_loss()
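A minimal end-to-end sketch of the new Keras backend, assuming synthetic data and placeholder column names; the default MLP (features-50-50-outputs) and the Adam/MAE defaults come from the module above:

import numpy as np
import pandas as pd

from mxlpy.surrogates import keras as keras_backend

# Hypothetical training set: learn y = 2x from 100 samples.
features = pd.DataFrame({"x": np.linspace(0.0, 1.0, 100)})
targets = pd.DataFrame({"y": 2.0 * features["x"]})

surrogate, losses = keras_backend.train(
    features=features,
    targets=targets,
    epochs=25,
    surrogate_args=["x"],
    surrogate_outputs=["y"],
)
surrogate.predict({"x": 0.5})  # roughly {"y": 1.0} once trained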
mxlpy/surrogates/_poly.py CHANGED
@@ -1,17 +1,21 @@
-from collections.abc import Iterable
+from __future__ import annotations
+
 from dataclasses import dataclass
-from typing import Literal
+from typing import TYPE_CHECKING, Literal
 
 import numpy as np
 import pandas as pd
 from numpy import polynomial
 
-from mxlpy.types import AbstractSurrogate, ArrayLike
+from mxlpy.types import AbstractSurrogate, ArrayLike, Derived
+
+if TYPE_CHECKING:
+    from collections.abc import Iterable
 
 __all__ = [
-    "Polynomial",
     "PolynomialExpansion",
-    "train_polynomial",
+    "Surrogate",
+    "train",
 ]
 
 # define custom type
@@ -26,14 +30,25 @@ PolynomialExpansion = (
 
 
 @dataclass(kw_only=True)
-class Polynomial(AbstractSurrogate):
+class Surrogate(AbstractSurrogate):
     model: PolynomialExpansion
 
     def predict_raw(self, y: np.ndarray) -> np.ndarray:
         return self.model(y)
 
+    def predict(
+        self, args: dict[str, float | pd.Series | pd.DataFrame]
+    ) -> dict[str, float]:
+        return dict(
+            zip(
+                self.outputs,
+                self.model(np.array([args[arg] for arg in self.args])),
+                strict=True,
+            )
+        )
+
 
-def train_polynomial(
+def train(
     feature: ArrayLike | pd.Series,
     target: ArrayLike | pd.Series,
     series: Literal[
@@ -42,8 +57,8 @@ def train_polynomial(
     degrees: Iterable[int] = (1, 2, 3, 4, 5, 6, 7),
    surrogate_args: list[str] | None = None,
     surrogate_outputs: list[str] | None = None,
-    surrogate_stoichiometries: dict[str, dict[str, float]] | None = None,
-) -> tuple[Polynomial, pd.DataFrame]:
+    surrogate_stoichiometries: dict[str, dict[str, float | Derived]] | None = None,
+) -> tuple[Surrogate, pd.DataFrame]:
     """Train a surrogate model based on function series expansion.
 
     Args:
@@ -85,7 +100,7 @@
     # Choose the model with the lowest AIC
     model = models[np.argmin(score)]
     return (
-        Polynomial(
+        Surrogate(
            model=model,
             args=surrogate_args if surrogate_args is not None else [],
             outputs=surrogate_outputs if surrogate_outputs is not None else [],
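For the polynomial backend the rename is `train_polynomial` → `train` and `Polynomial` → `Surrogate`; the behaviour (fitting one expansion per degree and keeping the lowest-AIC model) is unchanged. A sketch with synthetic data; the `series` literal values are elided from this hunk, so `"Power"` below is an assumption:

import numpy as np

from mxlpy.surrogates import poly

feature = np.linspace(-1.0, 1.0, 50)
target = feature**2  # hypothetical quadratic relationship

surrogate, scores = poly.train(
    feature,
    target,
    series="Power",  # assumed member of the Literal elided above
    degrees=(1, 2, 3),
    surrogate_args=["x"],
    surrogate_outputs=["y"],
)
# scores is the DataFrame returned alongside the surrogate, comparing
# the per-degree fits from which the lowest-AIC model was chosen.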
mxlpy/surrogates/_qss.py ADDED
@@ -0,0 +1,34 @@
+from __future__ import annotations
+
+from collections.abc import Callable, Iterable
+from dataclasses import dataclass
+from typing import TYPE_CHECKING
+
+from mxlpy.types import AbstractSurrogate, Array
+
+if TYPE_CHECKING:
+    import pandas as pd
+
+__all__ = [
+    "QSSFn",
+    "Surrogate",
+]
+
+type QSSFn = Callable[..., Iterable[float] | Array]
+
+
+@dataclass(kw_only=True)
+class Surrogate(AbstractSurrogate):
+    model: QSSFn
+
+    def predict(
+        self,
+        args: dict[str, float | pd.Series | pd.DataFrame],
+    ) -> dict[str, float]:
+        return dict(
+            zip(
+                self.outputs,
+                self.model(*(args[arg] for arg in self.args)),
+                strict=True,
+            )
+        )
mxlpy/surrogates/_torch.py CHANGED
@@ -1,24 +1,29 @@
+from __future__ import annotations
+
 from collections.abc import Callable
 from dataclasses import dataclass
-from typing import Self
+from typing import TYPE_CHECKING, Self
 
 import numpy as np
 import pandas as pd
 import torch
 from torch import nn
 from torch.optim.adam import Adam
-from torch.optim.optimizer import ParamsT
 
-from mxlpy.nn._torch import MLP, DefaultDevice, train
-from mxlpy.types import AbstractSurrogate
+from mxlpy.nn._torch import MLP, DefaultDevice
+from mxlpy.nn._torch import train as _train
+from mxlpy.types import AbstractSurrogate, Derived
+
+if TYPE_CHECKING:
+    from torch.optim.optimizer import ParamsT
 
 type LossFn = Callable[[torch.Tensor, torch.Tensor], torch.Tensor]
 
 __all__ = [
     "LossFn",
-    "Torch",
-    "TorchTrainer",
-    "train_torch",
+    "Surrogate",
+    "Trainer",
+    "train",
 ]
 
 
@@ -37,7 +42,7 @@ def _mean_abs(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
 
 
 @dataclass(kw_only=True)
-class Torch(AbstractSurrogate):
+class Surrogate(AbstractSurrogate):
     """Surrogate model using PyTorch.
 
     Attributes:
@@ -65,12 +70,25 @@ class Torch(AbstractSurrogate):
             torch.tensor(y, dtype=torch.float32),
         ).numpy()
 
+    def predict(
+        self,
+        args: dict[str, float | pd.Series | pd.DataFrame],
+    ) -> dict[str, float]:
+        """Predict outputs based on input data."""
+        return dict(
+            zip(
+                self.outputs,
+                self.predict_raw(np.array([args[arg] for arg in self.args])),
+                strict=True,
+            )
+        )
+
 
 @dataclass(init=False)
-class TorchTrainer:
+class Trainer:
     features: pd.DataFrame
     targets: pd.DataFrame
-    approximator: nn.Module
+    model: nn.Module
     optimizer: Adam
     device: torch.device
     losses: list[pd.Series]
@@ -80,22 +98,22 @@
         self,
         features: pd.DataFrame,
         targets: pd.DataFrame,
-        approximator: nn.Module | None = None,
-        optimimzer_cls: Callable[[ParamsT], Adam] = Adam,
+        model: nn.Module | None = None,
+        optimizer_cls: Callable[[ParamsT], Adam] = Adam,
         device: torch.device = DefaultDevice,
         loss_fn: LossFn = _mean_abs,
     ) -> None:
         self.features = features
         self.targets = targets
 
-        if approximator is None:
-            approximator = MLP(
+        if model is None:
+            model = MLP(
                 n_inputs=len(features.columns),
                 neurons_per_layer=[50, 50, len(targets.columns)],
             )
-        self.approximator = approximator.to(device)
+        self.model = model.to(device)
 
-        self.optimizer = optimimzer_cls(approximator.parameters())
+        self.optimizer = optimizer_cls(model.parameters())
         self.device = device
         self.loss_fn = loss_fn
         self.losses = []
@@ -105,8 +123,8 @@
         epochs: int,
         batch_size: int | None = None,
     ) -> Self:
-        losses = train(
-            aprox=self.approximator,
+        losses = _train(
+            model=self.model,
             features=self.features.to_numpy(),
             targets=self.targets.to_numpy(),
             epochs=epochs,
@@ -127,10 +145,10 @@
         self,
         surrogate_args: list[str] | None = None,
         surrogate_outputs: list[str] | None = None,
-        surrogate_stoichiometries: dict[str, dict[str, float]] | None = None,
-    ) -> Torch:
-        return Torch(
-            model=self.approximator,
+        surrogate_stoichiometries: dict[str, dict[str, float | Derived]] | None = None,
+    ) -> Surrogate:
+        return Surrogate(
+            model=self.model,
             args=surrogate_args if surrogate_args is not None else [],
             outputs=surrogate_outputs if surrogate_outputs is not None else [],
             stoichiometries=surrogate_stoichiometries
@@ -139,19 +157,19 @@
     )
 
 
-def train_torch(
+def train(
     features: pd.DataFrame,
     targets: pd.DataFrame,
     epochs: int,
     surrogate_args: list[str] | None = None,
     surrogate_outputs: list[str] | None = None,
-    surrogate_stoichiometries: dict[str, dict[str, float]] | None = None,
+    surrogate_stoichiometries: dict[str, dict[str, float | Derived]] | None = None,
     batch_size: int | None = None,
-    approximator: nn.Module | None = None,
-    optimimzer_cls: Callable[[ParamsT], Adam] = Adam,
+    model: nn.Module | None = None,
+    optimizer_cls: Callable[[ParamsT], Adam] = Adam,
     device: torch.device = DefaultDevice,
     loss_fn: LossFn = _mean_abs,
-) -> tuple[Torch, pd.Series]:
+) -> tuple[Surrogate, pd.Series]:
     """Train a PyTorch surrogate model.
 
     Examples:
@@ -173,8 +191,8 @@ def train_torch(
         surrogate_outputs: Names of output arguments from the surrogate.
         surrogate_stoichiometries: Mapping of variables to their stoichiometries
         batch_size: Size of mini-batches for training (None for full-batch).
-        approximator: Predefined neural network model (None to use default MLP features-50-50-output).
-        optimimzer_cls: Optimizer class to use for training (default: Adam).
+        model: Predefined neural network model (None to use default MLP features-50-50-output).
+        optimizer_cls: Optimizer class to use for training (default: Adam).
         device: Device to run the training on (default: DefaultDevice).
         loss_fn: Custom loss function or instance of torch loss object
 
@@ -182,11 +200,11 @@ def train_torch(
         tuple[TorchSurrogate, pd.Series]: Trained surrogate model and loss history.
 
     """
-    trainer = TorchTrainer(
+    trainer = Trainer(
        features=features,
         targets=targets,
-        approximator=approximator,
-        optimimzer_cls=optimimzer_cls,
+        model=model,
+        optimizer_cls=optimizer_cls,
         device=device,
         loss_fn=loss_fn,
     ).train(
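For the Torch backend the migration is purely mechanical: `train_torch` → `train`, `Torch` → `Surrogate`, `TorchTrainer` → `Trainer`, `approximator` → `model`, and the misspelled `optimimzer_cls` → `optimizer_cls`. A before/after sketch with placeholder data:

import numpy as np
import pandas as pd

from mxlpy.surrogates import torch as torch_backend

features = pd.DataFrame({"x": np.linspace(0.0, 1.0, 100)})
targets = pd.DataFrame({"y": 2.0 * features["x"]})

# 0.18.0: train_torch(..., approximator=None, optimimzer_cls=Adam)
# 0.20.0:
surrogate, losses = torch_backend.train(
    features=features,
    targets=targets,
    epochs=25,
    surrogate_args=["x"],
    surrogate_outputs=["y"],
)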
mxlpy/symbolic/__init__.py CHANGED
@@ -1,10 +1,12 @@
 """Symbolic utilities."""
 
+from __future__ import annotations
+
+from .strikepy import check_identifiability
+from .symbolic_model import SymbolicModel, to_symbolic_model
+
 __all__ = [
     "SymbolicModel",
     "check_identifiability",
     "to_symbolic_model",
 ]
-
-from .strikepy import check_identifiability
-from .symbolic_model import SymbolicModel, to_symbolic_model
mxlpy/symbolic/strikepy.py CHANGED
@@ -10,13 +10,15 @@ FIXME:
 - performance issues of generic_rank
 """
 
+from __future__ import annotations
+
 import textwrap
 from concurrent.futures import ProcessPoolExecutor
 from dataclasses import dataclass, field
 from functools import partial
 from math import ceil, inf
 from time import time
-from typing import cast
+from typing import TYPE_CHECKING, cast
 
 import numpy as np
 import numpy.typing as npt
@@ -27,7 +29,8 @@ import tqdm
 from sympy import Matrix
 from sympy.matrices import zeros
 
-from .symbolic_model import SymbolicModel
+if TYPE_CHECKING:
+    from .symbolic_model import SymbolicModel
 
 __all__ = [
     "Options",
mxlpy/symbolic/symbolic_model.py CHANGED
@@ -1,15 +1,24 @@
 # ruff: noqa: D100, D101, D102, D103, D104, D105, D106, D107, D200, D203, D400, D401
 
 
-from collections.abc import Iterable
+from __future__ import annotations
+
 from dataclasses import dataclass
+from typing import TYPE_CHECKING
 
 import sympy
 
 from mxlpy.meta.source_tools import fn_to_sympy
-from mxlpy.model import Model
 
-__all__ = ["SymbolicModel", "to_symbolic_model"]
+if TYPE_CHECKING:
+    from collections.abc import Iterable
+
+    from mxlpy.model import Model
+
+__all__ = [
+    "SymbolicModel",
+    "to_symbolic_model",
+]
 
 
 @dataclass
@@ -70,6 +79,6 @@ def to_symbolic_model(model: Model) -> SymbolicModel:
         variables=variables,
         parameters=parameters,
         eqs=[eqs[i] for i in cache.var_names],
-        initial_conditions=model.variables.copy(),
-        parameter_values=model.parameters.copy(),
+        initial_conditions=model.get_initial_conditions(),
+        parameter_values=model.parameters,
     )