modelbase2 0.1.79__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. modelbase2/__init__.py +148 -25
  2. modelbase2/distributions.py +336 -0
  3. modelbase2/experimental/__init__.py +17 -0
  4. modelbase2/experimental/codegen.py +239 -0
  5. modelbase2/experimental/diff.py +227 -0
  6. modelbase2/experimental/notes.md +4 -0
  7. modelbase2/experimental/tex.py +521 -0
  8. modelbase2/fit.py +284 -0
  9. modelbase2/fns.py +185 -0
  10. modelbase2/integrators/__init__.py +19 -0
  11. modelbase2/integrators/int_assimulo.py +146 -0
  12. modelbase2/integrators/int_scipy.py +147 -0
  13. modelbase2/label_map.py +610 -0
  14. modelbase2/linear_label_map.py +301 -0
  15. modelbase2/mc.py +548 -0
  16. modelbase2/mca.py +280 -0
  17. modelbase2/model.py +1621 -0
  18. modelbase2/nnarchitectures.py +128 -0
  19. modelbase2/npe.py +271 -0
  20. modelbase2/parallel.py +171 -0
  21. modelbase2/parameterise.py +28 -0
  22. modelbase2/paths.py +36 -0
  23. modelbase2/plot.py +832 -0
  24. modelbase2/sbml/__init__.py +14 -0
  25. modelbase2/sbml/_data.py +77 -0
  26. modelbase2/sbml/_export.py +656 -0
  27. modelbase2/sbml/_import.py +585 -0
  28. modelbase2/sbml/_mathml.py +691 -0
  29. modelbase2/sbml/_name_conversion.py +52 -0
  30. modelbase2/sbml/_unit_conversion.py +74 -0
  31. modelbase2/scan.py +616 -0
  32. modelbase2/scope.py +96 -0
  33. modelbase2/simulator.py +635 -0
  34. modelbase2/surrogates/__init__.py +31 -0
  35. modelbase2/surrogates/_poly.py +91 -0
  36. modelbase2/surrogates/_torch.py +191 -0
  37. modelbase2/surrogates.py +316 -0
  38. modelbase2/types.py +352 -11
  39. modelbase2-0.3.0.dist-info/METADATA +93 -0
  40. modelbase2-0.3.0.dist-info/RECORD +43 -0
  41. {modelbase2-0.1.79.dist-info → modelbase2-0.3.0.dist-info}/WHEEL +1 -1
  42. modelbase2/core/__init__.py +0 -29
  43. modelbase2/core/algebraic_module_container.py +0 -130
  44. modelbase2/core/constant_container.py +0 -113
  45. modelbase2/core/data.py +0 -109
  46. modelbase2/core/name_container.py +0 -29
  47. modelbase2/core/reaction_container.py +0 -115
  48. modelbase2/core/utils.py +0 -28
  49. modelbase2/core/variable_container.py +0 -24
  50. modelbase2/ode/__init__.py +0 -13
  51. modelbase2/ode/integrator.py +0 -80
  52. modelbase2/ode/mca.py +0 -270
  53. modelbase2/ode/model.py +0 -470
  54. modelbase2/ode/simulator.py +0 -153
  55. modelbase2/utils/__init__.py +0 -0
  56. modelbase2/utils/plotting.py +0 -372
  57. modelbase2-0.1.79.dist-info/METADATA +0 -44
  58. modelbase2-0.1.79.dist-info/RECORD +0 -22
  59. {modelbase2-0.1.79.dist-info → modelbase2-0.3.0.dist-info/licenses}/LICENSE +0 -0
modelbase2/simulator.py
@@ -0,0 +1,635 @@
+"""Simulation Module.
+
+This module provides classes and functions for simulating metabolic models.
+It includes functionality for running simulations, normalizing results, and
+retrieving simulation data.
+
+Classes:
+    Simulator: Class for running simulations on a metabolic model.
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, Literal, Self, cast, overload
+
+import numpy as np
+import pandas as pd
+
+from modelbase2.integrators import DefaultIntegrator
+
+__all__ = ["Simulator"]
+
+if TYPE_CHECKING:
+    from modelbase2.model import Model
+    from modelbase2.types import ArrayLike, IntegratorProtocol
+
+
+def _normalise_split_results(
+    results: list[pd.DataFrame],
+    normalise: float | ArrayLike,
+) -> list[pd.DataFrame]:
+    """Normalize split results by a given factor or array.
+
+    Args:
+        results: List of DataFrames containing the results to normalize.
+        normalise: Normalization factor or array.
+
+    Returns:
+        list[pd.DataFrame]: List of normalized DataFrames.
+
+    """
+    if isinstance(normalise, int | float):
+        return [i / normalise for i in results]
+    if len(normalise) == len(results):
+        return [(i.T / j).T for i, j in zip(results, normalise, strict=True)]
+
+    normalised = []
+    start = 0
+    end = 0
+    for i in results:
+        end += len(i)
+        normalised.append(i / np.reshape(normalise[start:end], (len(i), 1)))  # type: ignore
+        start = end
+    return normalised
+
+
+@dataclass(
+    init=False,
+    slots=True,
+    eq=False,
+)
+class Simulator:
+    """Simulator class for running simulations on a metabolic model.
+
+    Attributes:
+        model: Model instance to simulate.
+        y0: Initial conditions for the simulation.
+        integrator: Integrator protocol to use for the simulation.
+        concs: List of DataFrames containing concentration results.
+        args: List of DataFrames containing argument values.
+        simulation_parameters: List of dictionaries containing simulation parameters.
+
+    """
+
+    model: Model
+    y0: ArrayLike
+    integrator: IntegratorProtocol
+    concs: list[pd.DataFrame] | None
+    args: list[pd.DataFrame] | None
+    simulation_parameters: list[dict[str, float]] | None
+
+    def __init__(
+        self,
+        model: Model,
+        y0: dict[str, float] | None = None,
+        integrator: type[IntegratorProtocol] = DefaultIntegrator,
+        *,
+        test_run: bool = True,
+    ) -> None:
+        """Initialize the Simulator.
+
+        Args:
+            model (Model): The model to be simulated.
+            y0 (dict[str, float] | None, optional): Initial conditions for the model variables.
+                If None, the initial conditions are obtained from the model. Defaults to None.
+            integrator (type[IntegratorProtocol], optional): The integrator to use for the simulation.
+                Defaults to DefaultIntegrator.
+            test_run (bool, optional): If True, performs a test run to ensure the model's methods
+                (get_full_concs, get_fluxes, get_right_hand_side) work correctly with the initial conditions.
+                Defaults to True.
+
+        """
+        self.model = model
+        y0 = model.get_initial_conditions() if y0 is None else y0
+        self.y0 = [y0[k] for k in model.get_variable_names()]
+
+        self.integrator = integrator(self.model, y0=self.y0)
+        self.concs = None
+        self.args = None
+        self.simulation_parameters = None
+
+        if test_run:
+            y0 = dict(zip(model.get_variable_names(), self.y0, strict=True))
+            self.model.get_full_concs(y0, 0)
+            self.model.get_fluxes(y0, 0)
+            self.model.get_right_hand_side(y0, 0)
+
+    def _save_simulation_results(
+        self,
+        *,
+        results: pd.DataFrame,
+        skipfirst: bool,
+    ) -> None:
+        """Save simulation results.
+
+        Args:
+            results: DataFrame containing the simulation results.
+            skipfirst: Whether to skip the first row of results.
+
+        """
+        if self.concs is None:
+            self.concs = [results]
+        elif skipfirst:
+            self.concs.append(results.iloc[1:, :])
+        else:
+            self.concs.append(results)
+
+        if self.simulation_parameters is None:
+            self.simulation_parameters = []
+        self.simulation_parameters.append(self.model.parameters)
+
+    def clear_results(self) -> None:
+        """Clear simulation results."""
+        self.concs = None
+        self.args = None
+        self.simulation_parameters = None
+        if self.integrator is not None:
+            self.integrator.reset()
+
+    def _handle_simulation_results(
+        self,
+        time: ArrayLike | None,
+        results: ArrayLike | None,
+        *,
+        skipfirst: bool,
+    ) -> None:
+        """Handle simulation results.
+
+        Args:
+            time: Array of time points for the simulation.
+            results: Array of results for the simulation.
+            skipfirst: Whether to skip the first row of results.
+
+        """
+        if time is None or results is None:
+            # Need to clear results in case continued integration fails
+            # to keep expectation that failure = None
+            self.clear_results()
+            return
+
+        # NOTE: IMPORTANT!
+        # model._get_rhs sorts the return array by model.get_compounds()
+        # Do NOT change this ordering
+        results_df = pd.DataFrame(
+            results,
+            index=time,
+            columns=self.model.get_variable_names(),
+        )
+        self._save_simulation_results(results=results_df, skipfirst=skipfirst)
+
+    def simulate(
+        self,
+        t_end: float,
+        steps: int | None = None,
+    ) -> Self:
+        """Simulate the model.
+
+        Examples:
+            >>> s.simulate(t_end=100)
+            >>> s.simulate(t_end=100, steps=100)
+
+        You can either supply only a terminal time point, or additionally also the
+        number of steps for which values should be returned.
+
+        Args:
+            t_end: Terminal time point for the simulation.
+            steps: Number of steps for the simulation.
+
+        Returns:
+            Self: The Simulator instance with updated results.
+
+        """
+        time, results = self.integrator.integrate(t_end=t_end, steps=steps)
+        self._handle_simulation_results(time, results, skipfirst=True)
+        return self
+
+    def simulate_time_course(self, time_points: ArrayLike) -> Self:
+        """Simulate the model over a given set of time points.
+
+        Examples:
+            >>> Simulator(model).simulate_time_course([1, 2, 3])
+
+        Values are returned exactly at the supplied time points.
+
+        Args:
+            time_points: Exact time points for which values should be returned.
+
+        Returns:
+            Self: The Simulator instance with updated results.
+
+        """
+        time, results = self.integrator.integrate_time_course(time_points=time_points)
+        self._handle_simulation_results(time, results, skipfirst=True)
+        return self
+
+    def simulate_to_steady_state(
+        self,
+        tolerance: float = 1e-6,
+        *,
+        rel_norm: bool = False,
+    ) -> Self:
+        """Simulate the model to steady state.
+
+        Examples:
+            >>> Simulator(model).simulate_to_steady_state()
+            >>> Simulator(model).simulate_to_steady_state(tolerance=1e-8)
+            >>> Simulator(model).simulate_to_steady_state(rel_norm=True)
+
+        Args:
+            tolerance: Tolerance for the steady-state calculation.
+            rel_norm: Whether to use relative norm for the steady-state calculation.
+
+        Returns:
+            Self: The Simulator instance with updated results.
+
+        """
+        time, results = self.integrator.integrate_to_steady_state(
+            tolerance=tolerance,
+            rel_norm=rel_norm,
+        )
+        self._handle_simulation_results(
+            [time] if time is not None else None,
+            [results] if results is not None else None,  # type: ignore
+            skipfirst=False,
+        )
+        return self
+
+    def simulate_over_protocol(
+        self,
+        protocol: pd.DataFrame,
+        time_points_per_step: int = 10,
+    ) -> Self:
+        """Simulate the model over a given protocol.
+
+        Examples:
+            >>> Simulator(model).simulate_over_protocol(
+            ...     protocol,
+            ...     time_points_per_step=10
+            ... )
+
+        Args:
+            protocol: DataFrame containing the protocol.
+            time_points_per_step: Number of time points per step.
+
+        Returns:
+            The Simulator instance with updated results.
+
+        """
+        for t_end, pars in protocol.iterrows():
+            t_end = cast(pd.Timedelta, t_end)
+            self.model.update_parameters(pars.to_dict())
+            self.simulate(t_end.total_seconds(), steps=time_points_per_step)
+            if self.concs is None:
+                break
+        return self
+
+    def _get_args_vectorised(
+        self,
+        concs: list[pd.DataFrame],
+        params: list[dict[str, float]],
+        *,
+        include_readouts: bool = True,
+    ) -> list[pd.DataFrame]:
+        """Recompute the model arguments for each stored simulation segment."""
+        args: list[pd.DataFrame] = []
+
+        for res, p in zip(concs, params, strict=True):
+            self.model.update_parameters(p)
+            args.append(
+                self.model.get_args_time_course(
+                    concs=res,
+                    include_readouts=include_readouts,
+                )
+            )
+        return args
+
+    @overload
+    def get_concs(  # type: ignore
+        self,
+        *,
+        normalise: float | ArrayLike | None = None,
+        concatenated: Literal[False],
+    ) -> None | list[pd.DataFrame]: ...
+
+    @overload
+    def get_concs(
+        self,
+        *,
+        normalise: float | ArrayLike | None = None,
+        concatenated: Literal[True],
+    ) -> None | pd.DataFrame: ...
+
+    @overload
+    def get_concs(
+        self,
+        *,
+        normalise: float | ArrayLike | None = None,
+        concatenated: Literal[True] = True,
+    ) -> None | pd.DataFrame: ...
+
+    def get_concs(
+        self,
+        *,
+        normalise: float | ArrayLike | None = None,
+        concatenated: bool = True,
+    ) -> None | pd.DataFrame | list[pd.DataFrame]:
+        """Get the concentration results.
+
+        Examples:
+            >>> Simulator(model).get_concs()
+            Time            ATP      NADPH
+            0.000000   1.000000   1.000000
+            0.000100   0.999900   0.999900
+            0.000200   0.999800   0.999800
+
+        Returns:
+            pd.DataFrame: DataFrame of concentrations.
+
+        """
+        if self.concs is None:
+            return None
+
+        results = self.concs.copy()
+        if normalise is not None:
+            results = _normalise_split_results(results=results, normalise=normalise)
+        if concatenated:
+            return pd.concat(results, axis=0)
+
+        return results
+
+    @overload
+    def get_full_concs(  # type: ignore
+        self,
+        *,
+        normalise: float | ArrayLike | None = None,
+        concatenated: Literal[False],
+        include_readouts: bool = True,
+    ) -> list[pd.DataFrame] | None: ...
+
+    @overload
+    def get_full_concs(
+        self,
+        *,
+        normalise: float | ArrayLike | None = None,
+        concatenated: Literal[True],
+        include_readouts: bool = True,
+    ) -> pd.DataFrame | None: ...
+
+    @overload
+    def get_full_concs(
+        self,
+        *,
+        normalise: float | ArrayLike | None = None,
+        concatenated: bool = True,
+        include_readouts: bool = True,
+    ) -> pd.DataFrame | None: ...
+
+    def get_full_concs(
+        self,
+        *,
+        normalise: float | ArrayLike | None = None,
+        concatenated: bool = True,
+        include_readouts: bool = True,
+    ) -> pd.DataFrame | list[pd.DataFrame] | None:
+        """Get the full concentration results, including derived quantities.
+
+        Examples:
+            >>> Simulator(model).get_full_concs()
+            Time            ATP      NADPH
+            0.000000   1.000000   1.000000
+            0.000100   0.999900   0.999900
+            0.000200   0.999800   0.999800
+
+        Returns:
+            DataFrame of full concentrations.
+
+        """
+        if (concs := self.concs) is None:
+            return None
+        if (params := self.simulation_parameters) is None:
+            return None
+        if (args := self.args) is None:
+            args = self._get_args_vectorised(concs, params)
+
+        names = (
+            self.model.get_variable_names() + self.model.get_derived_variable_names()
+        )
+        if include_readouts:
+            names.extend(self.model.get_readout_names())
+        full_concs = [i.loc[:, names] for i in args]
+        if normalise is not None:
+            full_concs = _normalise_split_results(
+                results=full_concs,
+                normalise=normalise,
+            )
+        if concatenated:
+            return pd.concat(full_concs, axis=0)
+        return full_concs
+
+    @overload
+    def get_fluxes(  # type: ignore
+        self,
+        *,
+        normalise: float | ArrayLike | None = None,
+        concatenated: Literal[False],
+    ) -> list[pd.DataFrame] | None: ...
+
+    @overload
+    def get_fluxes(
+        self,
+        *,
+        normalise: float | ArrayLike | None = None,
+        concatenated: Literal[True],
+    ) -> pd.DataFrame | None: ...
+
+    @overload
+    def get_fluxes(
+        self,
+        *,
+        normalise: float | ArrayLike | None = None,
+        concatenated: bool = True,
+    ) -> pd.DataFrame | None: ...
+
+    def get_fluxes(
+        self,
+        *,
+        normalise: float | ArrayLike | None = None,
+        concatenated: bool = True,
+    ) -> pd.DataFrame | list[pd.DataFrame] | None:
+        """Get the flux results.
+
+        Examples:
+            >>> Simulator(model).get_fluxes()
+            Time             v1        v2
+            0.000000   1.000000  10.00000
+            0.000100   0.999900   9.999000
+            0.000200   0.999800   9.998000
+
+        Returns:
+            pd.DataFrame: DataFrame of fluxes.
+
+        """
+        if (concs := self.concs) is None:
+            return None
+        if (params := self.simulation_parameters) is None:
+            return None
+        if (args := self.args) is None:
+            args = self._get_args_vectorised(concs, params)
+
+        fluxes: list[pd.DataFrame] = []
+        for y, p in zip(args, params, strict=True):
+            self.model.update_parameters(p)
+            fluxes.append(self.model.get_fluxes_time_course(args=y))
+
+        if normalise is not None:
+            fluxes = _normalise_split_results(
+                results=fluxes,
+                normalise=normalise,
+            )
+        if concatenated:
+            return pd.concat(fluxes, axis=0)
+        return fluxes
+
+    def get_concs_and_fluxes(self) -> tuple[pd.DataFrame | None, pd.DataFrame | None]:
+        """Get the concentrations and fluxes.
+
+        Examples:
+            >>> Simulator(model).get_concs_and_fluxes()
+            (concs, fluxes)
+
+        Returns:
+            Tuple of concentrations and fluxes; each entry is None if no results are available.
+
+        """
+        return self.get_concs(), self.get_fluxes()
+
+    def get_full_concs_and_fluxes(
+        self,
+        *,
+        include_readouts: bool = True,
+    ) -> tuple[pd.DataFrame | None, pd.DataFrame | None]:
+        """Get the full concentrations and fluxes.
+
+        Examples:
+            >>> Simulator(model).get_full_concs_and_fluxes()
+            (full_concs, full_fluxes)
+
+        Args:
+            include_readouts: Whether to include readouts in the results.
+
+        Returns:
+            Full concentrations and fluxes
+
+        """
+        return (
+            self.get_full_concs(include_readouts=include_readouts),
+            self.get_fluxes(),
+        )
+
+    def get_results(self) -> pd.DataFrame | None:
+        """Get the combined results of concentrations and fluxes.
+
+        Examples:
+            >>> Simulator(model).get_results()
+            Time            ATP      NADPH         v1         v2
+            0.000000   1.000000   1.000000   1.000000   1.000000
+            0.000100   0.999900   0.999900   0.999900   0.999900
+            0.000200   0.999800   0.999800   0.999800   0.999800
+
+        Returns:
+            pd.DataFrame: Combined DataFrame of concentrations and fluxes.
+
+        """
+        c, v = self.get_concs_and_fluxes()
+        if c is None or v is None:
+            return None
+        return pd.concat((c, v), axis=1)
+
+    def get_full_results(self) -> pd.DataFrame | None:
+        """Get the combined full results of concentrations and fluxes.
+
+        Examples:
+            >>> Simulator(model).get_full_results()
+            Time            ATP      NADPH         v1         v2
+            0.000000   1.000000   1.000000   1.000000   1.000000
+            0.000100   0.999900   0.999900   0.999900   0.999900
+            0.000200   0.999800   0.999800   0.999800   0.999800
+
+        """
+        c, v = self.get_full_concs_and_fluxes()
+        if c is None or v is None:
+            return None
+        return pd.concat((c, v), axis=1)
+
+    def get_new_y0(self) -> dict[str, float] | None:
+        """Get the new initial conditions after the simulation.
+
+        Examples:
+            >>> Simulator(model).get_new_y0()
+            {"ATP": 1.0, "NADPH": 1.0}
+
+        """
+        if (res := self.get_concs()) is None:
+            return None
+        return dict(res.iloc[-1])
+
+    def update_parameter(self, parameter: str, value: float) -> Self:
+        """Updates the value of a specified parameter in the model.
+
+        Examples:
+            >>> Simulator(model).update_parameter("k1", 0.1)
+
+        Args:
+            parameter: The name of the parameter to update.
+            value: The new value to set for the parameter.
+
+        """
+        self.model.update_parameter(parameter, value)
+        return self
+
+    def update_parameters(self, parameters: dict[str, float]) -> Self:
+        """Updates the model parameters with the provided dictionary of parameters.
+
+        Examples:
+            >>> Simulator(model).update_parameters({"k1": 0.1, "k2": 0.2})
+
+        Args:
+            parameters: A dictionary where the keys are parameter names
+                and the values are the new parameter values.
+
+        """
+        self.model.update_parameters(parameters)
+        return self
+
+    def scale_parameter(self, parameter: str, factor: float) -> Self:
+        """Scales the value of a specified parameter in the model.
+
+        Examples:
+            >>> Simulator(model).scale_parameter("k1", 0.1)
+
+        Args:
+            parameter: The name of the parameter to scale.
+            factor: The factor by which to scale the parameter.
+
+        """
+        self.model.scale_parameter(parameter, factor)
+        return self
+
+    def scale_parameters(self, parameters: dict[str, float]) -> Self:
+        """Scales the values of specified parameters in the model.
+
+        Examples:
+            >>> Simulator(model).scale_parameters({"k1": 0.1, "k2": 0.2})
+
+        Args:
+            parameters: A dictionary where the keys are parameter names
+                and the values are the scaling factors.
+
+        """
+        self.model.scale_parameters(parameters)
+        return self
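
The Simulator API added above is fluent: every mutating call returns self, and the getters return None when integration failed. A minimal usage sketch based only on the docstrings shown in this diff; the model construction is elided and the parameter name "k1" is purely illustrative:

    from modelbase2.model import Model
    from modelbase2.simulator import Simulator

    model: Model = ...  # build a modelbase2 Model here; construction is outside this file

    # Chained calls: update a parameter, integrate to t=100 with 100 output steps,
    # then collect concentrations and fluxes (each is None if integration failed).
    concs, fluxes = (
        Simulator(model)
        .update_parameter("k1", 0.1)
        .simulate(t_end=100, steps=100)
        .get_concs_and_fluxes()
    )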
modelbase2/surrogates/__init__.py
@@ -0,0 +1,31 @@
+"""Surrogate Models Module.
+
+This module provides classes and functions for creating and training surrogate models
+for metabolic simulations. It includes functionality for both steady-state and time-series
+data using neural networks.
+
+Classes:
+    AbstractSurrogate: Abstract base class for surrogate models.
+    TorchSurrogate: Surrogate model using PyTorch.
+    Approximator: Neural network approximator for surrogate modeling.
+
+Functions:
+    train_torch_surrogate: Train a PyTorch surrogate model.
+    train_torch_time_course_estimator: Train a PyTorch time course estimator.
+"""
+
+from __future__ import annotations
+
+import contextlib
+
+with contextlib.suppress(ImportError):
+    from ._torch import TorchSurrogate, train_torch_surrogate
+
+from ._poly import PolySurrogate, train_polynomial_surrogate
+
+__all__ = [
+    "PolySurrogate",
+    "TorchSurrogate",
+    "train_polynomial_surrogate",
+    "train_torch_surrogate",
+]
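
The contextlib.suppress(ImportError) block above makes the torch backend optional: without PyTorch installed, modelbase2.surrogates still imports, but the torch names are simply absent even though they remain listed in __all__. A hedged sketch of how downstream code might guard for that; the fallback handling is illustrative, not part of the package:

    # Polynomial surrogates are always available.
    from modelbase2.surrogates import PolySurrogate, train_polynomial_surrogate

    try:
        # Present only when PyTorch is installed; otherwise this raises ImportError.
        from modelbase2.surrogates import TorchSurrogate, train_torch_surrogate
    except ImportError:
        TorchSurrogate = None
        train_torch_surrogate = None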