modelbase2 0.1.79__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59)
  1. modelbase2/__init__.py +148 -25
  2. modelbase2/distributions.py +336 -0
  3. modelbase2/experimental/__init__.py +17 -0
  4. modelbase2/experimental/codegen.py +239 -0
  5. modelbase2/experimental/diff.py +227 -0
  6. modelbase2/experimental/notes.md +4 -0
  7. modelbase2/experimental/tex.py +521 -0
  8. modelbase2/fit.py +284 -0
  9. modelbase2/fns.py +185 -0
  10. modelbase2/integrators/__init__.py +19 -0
  11. modelbase2/integrators/int_assimulo.py +146 -0
  12. modelbase2/integrators/int_scipy.py +147 -0
  13. modelbase2/label_map.py +610 -0
  14. modelbase2/linear_label_map.py +301 -0
  15. modelbase2/mc.py +548 -0
  16. modelbase2/mca.py +280 -0
  17. modelbase2/model.py +1621 -0
  18. modelbase2/nnarchitectures.py +128 -0
  19. modelbase2/npe.py +271 -0
  20. modelbase2/parallel.py +171 -0
  21. modelbase2/parameterise.py +28 -0
  22. modelbase2/paths.py +36 -0
  23. modelbase2/plot.py +832 -0
  24. modelbase2/sbml/__init__.py +14 -0
  25. modelbase2/sbml/_data.py +77 -0
  26. modelbase2/sbml/_export.py +656 -0
  27. modelbase2/sbml/_import.py +585 -0
  28. modelbase2/sbml/_mathml.py +691 -0
  29. modelbase2/sbml/_name_conversion.py +52 -0
  30. modelbase2/sbml/_unit_conversion.py +74 -0
  31. modelbase2/scan.py +616 -0
  32. modelbase2/scope.py +96 -0
  33. modelbase2/simulator.py +635 -0
  34. modelbase2/surrogates/__init__.py +31 -0
  35. modelbase2/surrogates/_poly.py +91 -0
  36. modelbase2/surrogates/_torch.py +191 -0
  37. modelbase2/surrogates.py +316 -0
  38. modelbase2/types.py +352 -11
  39. modelbase2-0.3.0.dist-info/METADATA +93 -0
  40. modelbase2-0.3.0.dist-info/RECORD +43 -0
  41. {modelbase2-0.1.79.dist-info → modelbase2-0.3.0.dist-info}/WHEEL +1 -1
  42. modelbase2/core/__init__.py +0 -29
  43. modelbase2/core/algebraic_module_container.py +0 -130
  44. modelbase2/core/constant_container.py +0 -113
  45. modelbase2/core/data.py +0 -109
  46. modelbase2/core/name_container.py +0 -29
  47. modelbase2/core/reaction_container.py +0 -115
  48. modelbase2/core/utils.py +0 -28
  49. modelbase2/core/variable_container.py +0 -24
  50. modelbase2/ode/__init__.py +0 -13
  51. modelbase2/ode/integrator.py +0 -80
  52. modelbase2/ode/mca.py +0 -270
  53. modelbase2/ode/model.py +0 -470
  54. modelbase2/ode/simulator.py +0 -153
  55. modelbase2/utils/__init__.py +0 -0
  56. modelbase2/utils/plotting.py +0 -372
  57. modelbase2-0.1.79.dist-info/METADATA +0 -44
  58. modelbase2-0.1.79.dist-info/RECORD +0 -22
  59. {modelbase2-0.1.79.dist-info → modelbase2-0.3.0.dist-info/licenses}/LICENSE +0 -0
modelbase2/mc.py ADDED
@@ -0,0 +1,548 @@
1
+ """Monte Carlo Analysis (MC) Module for Metabolic Models.
2
+
3
+ This module provides functions for performing Monte Carlo analysis on metabolic models.
4
+ It includes functionality for steady-state and time-course simulations, as well as
5
+ response coefficient calculations.
6
+
7
+ Functions:
8
+ steady_state: Perform Monte Carlo analysis for steady-state simulations
9
+ time_course: Perform Monte Carlo analysis for time-course simulations
10
+ time_course_over_protocol: Perform Monte Carlo analysis for time-course simulations over a protocol
11
+ parameter_scan_ss: Perform Monte Carlo analysis for steady-state parameter scans
12
+ compound_elasticities: Calculate compound elasticities using Monte Carlo analysis
13
+ parameter_elasticities: Calculate parameter elasticities using Monte Carlo analysis
14
+ response_coefficients: Calculate response coefficients using Monte Carlo analysis
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ from functools import partial
20
+ from typing import TYPE_CHECKING, Protocol, cast
21
+
22
+ import pandas as pd
23
+
24
+ from modelbase2 import mca, scan
25
+ from modelbase2.parallel import Cache, parallelise
26
+ from modelbase2.scan import (
27
+ ProtocolWorker,
28
+ SteadyStateWorker,
29
+ TimeCourseWorker,
30
+ _protocol_worker,
31
+ _steady_state_worker,
32
+ _time_course_worker,
33
+ _update_parameters_and,
34
+ )
35
+ from modelbase2.types import (
36
+ McSteadyStates,
37
+ ProtocolByPars,
38
+ ResponseCoefficientsByPars,
39
+ SteadyStates,
40
+ TimeCourseByPars,
41
+ )
42
+
43
+ __all__ = [
44
+ "ParameterScanWorker",
45
+ "parameter_elasticities",
46
+ "response_coefficients",
47
+ "scan_steady_state",
48
+ "steady_state",
49
+ "time_course",
50
+ "time_course_over_protocol",
51
+ "variable_elasticities",
52
+ ]
53
+
54
+ if TYPE_CHECKING:
55
+ from modelbase2.model import Model
56
+ from modelbase2.types import Array
57
+
58
# NOTE(review): a stale duplicate export list (`__ALL__`) previously lived here.
# Python only recognises `__all__` (defined above), so this list had no effect,
# and it referenced names that do not exist in this module
# (`parameter_scan_ss`, `compound_elasticities`). Removed to avoid confusion.
67
+
68
+
69
class ParameterScanWorker(Protocol):
    """Protocol for the parameter scan worker function.

    Structural type for callables that compute steady states for every row of
    a parameter DataFrame. `_parameter_scan_worker` is the default
    implementation, used by `scan_steady_state`.
    """

    def __call__(
        self,
        model: Model,
        y0: dict[str, float] | None,
        *,
        parameters: pd.DataFrame,
        rel_norm: bool,
    ) -> SteadyStates:
        """Call the worker function.

        Args:
            model: Model to simulate.
            y0: Initial conditions for the solver, or None for model defaults.
            parameters: Parameter combinations to scan over, one set per row.
            rel_norm: Whether to use relative normalization in the steady
                state calculations.

        Returns:
            SteadyStates: Steady-state solutions for the given parameter
            combinations.

        """
        ...
82
+
83
+
84
def _parameter_scan_worker(
    model: Model,
    y0: dict[str, float] | None,
    *,
    parameters: pd.DataFrame,
    rel_norm: bool,
) -> SteadyStates:
    """Worker function for parallel steady state scanning across parameter sets.

    This function executes a parameter scan for steady state solutions for a
    given model and parameter combinations. It is designed to be used as a
    worker in parallel processing, so it calls `scan.steady_state` with
    `parallel=False` — each invocation already runs inside a worker process,
    and nested parallelism would oversubscribe the pool.

    Args:
        model: The model object to analyze.
        y0: Initial conditions for the solver. If None, default initial
            conditions are used.
        parameters: DataFrame containing parameter combinations to scan over.
            Each row represents one parameter set.
        rel_norm: Whether to use relative normalization in the steady state
            calculations.

    Returns:
        SteadyStates: Object containing the steady state solutions for the
        given parameter combinations.

    """
    return scan.steady_state(
        model,
        parameters=parameters,
        y0=y0,
        parallel=False,
        rel_norm=rel_norm,
    )
121
+
122
+
123
def steady_state(
    model: Model,
    mc_parameters: pd.DataFrame,
    *,
    y0: dict[str, float] | None = None,
    max_workers: int | None = None,
    cache: Cache | None = None,
    rel_norm: bool = False,
    worker: SteadyStateWorker = _steady_state_worker,
) -> SteadyStates:
    """Monte-Carlo scan of steady states, one solve per parameter row.

    Args:
        model: The model to analyze.
        mc_parameters: DataFrame of Monte Carlo parameter sets, one per row.
        y0: Initial conditions for the solver, or None for model defaults.
        max_workers: Maximum number of workers for parallel processing.
        cache: Cache object for storing results.
        rel_norm: Whether to use relative normalization in the steady state
            calculations.
        worker: Worker function computing a single steady state.

    Examples:
        >>> steady_state(model, mc_parameters)
        p    t     x     y
        0  0.0   0.1  0.00
           1.0   0.2  0.01
           2.0   0.3  0.02
           3.0   0.4  0.03
        ...  ...   ...
        1  0.0   0.1  0.00
           1.0   0.2  0.01
           2.0   0.3  0.02
           3.0   0.4  0.03

    Returns:
        SteadyStates: Steady-state concentrations and fluxes for every
        Monte Carlo parameter set.

    """
    # Bind the per-row solver first, then wrap it so each parameter set is
    # applied to (a copy of) the model before solving.
    solve = partial(worker, y0=y0, rel_norm=rel_norm)
    task = partial(_update_parameters_and, fn=solve, model=model)
    results = parallelise(
        task,
        inputs=list(mc_parameters.iterrows()),
        max_workers=max_workers,
        cache=cache,
    )
    conc_by_run = {idx: run.concs for idx, run in results.items()}
    flux_by_run = {idx: run.fluxes for idx, run in results.items()}
    return SteadyStates(
        concs=pd.concat(conc_by_run, axis=1).T,
        fluxes=pd.concat(flux_by_run, axis=1).T,
        parameters=mc_parameters,
    )
173
+
174
+
175
def time_course(
    model: Model,
    time_points: Array,
    mc_parameters: pd.DataFrame,
    y0: dict[str, float] | None = None,
    max_workers: int | None = None,
    cache: Cache | None = None,
    worker: TimeCourseWorker = _time_course_worker,
) -> TimeCourseByPars:
    """Monte-Carlo time course: one integration per parameter row.

    Args:
        model: The model to analyze.
        time_points: Time points at which to report the solution.
        mc_parameters: DataFrame of Monte Carlo parameter sets, one per row.
        y0: Initial conditions for the solver, or None for model defaults.
        max_workers: Maximum number of workers for parallel processing.
        cache: Cache object for storing results.
        worker: Worker function computing a single time course.

    Examples:
        >>> time_course(model, time_points, mc_parameters)
        p    t     x     y
        0  0.0   0.1  0.00
           1.0   0.2  0.01
           2.0   0.3  0.02
           3.0   0.4  0.03
        ...  ...   ...
        1  0.0   0.1  0.00
           1.0   0.2  0.01
           2.0   0.3  0.02
           3.0   0.4  0.03

    Returns:
        TimeCourseByPars: Concentrations and fluxes on a pandas MultiIndex;
        both frames have shape (#time_points * #mc_parameters, #variables).

    """
    simulate = partial(worker, time_points=time_points, y0=y0)
    task = partial(_update_parameters_and, fn=simulate, model=model)
    outputs = parallelise(
        task,
        inputs=list(mc_parameters.iterrows()),
        max_workers=max_workers,
        cache=cache,
    )
    conc_frames = {idx: run.concs.T for idx, run in outputs.items()}
    flux_frames = {idx: run.fluxes.T for idx, run in outputs.items()}
    return TimeCourseByPars(
        parameters=mc_parameters,
        concs=pd.concat(conc_frames, axis=1).T,
        fluxes=pd.concat(flux_frames, axis=1).T,
    )
224
+
225
+
226
def time_course_over_protocol(
    model: Model,
    protocol: pd.DataFrame,
    mc_parameters: pd.DataFrame,
    y0: dict[str, float] | None = None,
    time_points_per_step: int = 10,
    max_workers: int | None = None,
    cache: Cache | None = None,
    worker: ProtocolWorker = _protocol_worker,
) -> ProtocolByPars:
    """Monte-Carlo time course over a stepwise protocol.

    Args:
        model: The model to analyze.
        protocol: DataFrame describing the protocol steps.
        mc_parameters: DataFrame of Monte Carlo parameter sets, one per row.
        y0: Initial conditions for the solver, or None for model defaults.
        time_points_per_step: Number of reported time points per protocol step.
        max_workers: Maximum number of workers for parallel processing.
        cache: Cache object for storing results.
        worker: Worker function computing a single protocol time course.

    Examples:
        >>> time_course_over_protocol(model, protocol, mc_parameters)
        p    t     x     y
        0  0.0   0.1  0.00
           1.0   0.2  0.01
           2.0   0.3  0.02
           3.0   0.4  0.03
        ...  ...   ...
        1  0.0   0.1  0.00
           1.0   0.2  0.01
           2.0   0.3  0.02
           3.0   0.4  0.03

    Returns:
        ProtocolByPars: Concentrations and fluxes on a pandas MultiIndex;
        both frames have shape (#time_points * #mc_parameters, #variables).

    """
    simulate = partial(
        worker,
        protocol=protocol,
        y0=y0,
        time_points_per_step=time_points_per_step,
    )
    task = partial(_update_parameters_and, fn=simulate, model=model)
    outputs = parallelise(
        task,
        inputs=list(mc_parameters.iterrows()),
        max_workers=max_workers,
        cache=cache,
    )
    conc_frames = {idx: run.concs.T for idx, run in outputs.items()}
    flux_frames = {idx: run.fluxes.T for idx, run in outputs.items()}
    return ProtocolByPars(
        concs=pd.concat(conc_frames, axis=1).T,
        fluxes=pd.concat(flux_frames, axis=1).T,
        parameters=mc_parameters,
        protocol=protocol,
    )
279
+
280
+
281
def scan_steady_state(
    model: Model,
    parameters: pd.DataFrame,
    mc_parameters: pd.DataFrame,
    *,
    y0: dict[str, float] | None = None,
    max_workers: int | None = None,
    cache: Cache | None = None,
    rel_norm: bool = False,
    worker: ParameterScanWorker = _parameter_scan_worker,
) -> McSteadyStates:
    """Steady-state parameter scan repeated for every Monte Carlo draw.

    Examples:
        >>> scan_steady_state(
        ...     model,
        ...     parameters=pd.DataFrame({"k1": np.linspace(0, 1, 3)}),
        ...     mc_parameters=mc_parameters,
        ... ).concs
                  x     y
          k1
        0 0.0 -0.00 -0.00
          0.5  0.44  0.20
          1.0  0.88  0.40
        1 0.0 -0.00 -0.00
          0.5  0.45  0.14
          1.0  0.90  0.28

    Args:
        model: The model to analyze.
        parameters: DataFrame containing parameter combinations to scan over.
        mc_parameters: DataFrame containing Monte Carlo parameter sets.
        y0: Initial conditions for the solver.
        max_workers: Maximum number of workers for parallel processing.
        cache: Cache object for storing results.
        rel_norm: Whether to use relative normalization in the steady state
            calculations.
        worker: Worker function scanning steady states across parameter sets.

    Returns:
        McSteadyStates: Steady-state solutions for every (scan row,
        Monte Carlo draw) combination.

    """
    # One scan over `parameters` is executed per Monte Carlo row.
    scan_once = partial(worker, parameters=parameters, y0=y0, rel_norm=rel_norm)
    task = partial(_update_parameters_and, fn=scan_once, model=model)
    scans = parallelise(
        task,
        inputs=list(mc_parameters.iterrows()),
        cache=cache,
        max_workers=max_workers,
    )
    conc_frames = {idx: run.concs.T for idx, run in scans.items()}
    flux_frames = {idx: run.fluxes.T for idx, run in scans.items()}
    return McSteadyStates(
        concs=pd.concat(conc_frames, axis=1).T,
        fluxes=pd.concat(flux_frames, axis=1).T,
        parameters=parameters,
        mc_parameters=mc_parameters,
    )
348
+
349
+
350
def variable_elasticities(
    model: Model,
    variables: list[str],
    concs: dict[str, float],
    mc_parameters: pd.DataFrame,
    *,
    time: float = 0,
    cache: Cache | None = None,
    max_workers: int | None = None,
    normalized: bool = True,
    displacement: float = 1e-4,
) -> pd.DataFrame:
    """Calculate variable elasticities using Monte Carlo analysis.

    Examples:
        >>> variable_elasticities(
        ...     model,
        ...     variables=["x1", "x2"],
        ...     concs={"x1": 1, "x2": 2},
        ...     mc_parameters=mc_parameters
        ... )
                 x1    x2
        0 v1    0.0   0.0
          v2    1.0   0.0
          v3    0.0  -1.4
        1 v1    0.0   0.0
          v2    1.0   0.0
          v3    0.0  -1.4

    Args:
        model: The model to analyze.
        variables: List of variables for which to calculate elasticities.
        concs: Dictionary of concentrations for the model.
        mc_parameters: DataFrame containing Monte Carlo parameter sets.
        time: Time point for the analysis.
        cache: Cache object for storing results.
        max_workers: Maximum number of workers for parallel processing.
        normalized: Whether to use normalized elasticities.
        displacement: Displacement for finite difference calculations.

    Returns:
        pd.DataFrame: Variable elasticities, indexed by Monte Carlo draw.

    """
    elasticity_fn = partial(
        mca.variable_elasticities,
        variables=variables,
        concs=concs,
        time=time,
        displacement=displacement,
        normalized=normalized,
    )
    per_draw = parallelise(
        partial(_update_parameters_and, fn=elasticity_fn, model=model),
        inputs=list(mc_parameters.iterrows()),
        cache=cache,
        max_workers=max_workers,
    )
    # Concatenating the dict keys the result on the Monte Carlo draw index.
    return cast(pd.DataFrame, pd.concat(per_draw))
412
+
413
+
414
def parameter_elasticities(
    model: Model,
    parameters: list[str],
    concs: dict[str, float],
    mc_parameters: pd.DataFrame,
    *,
    time: float = 0,
    cache: Cache | None = None,
    max_workers: int | None = None,
    normalized: bool = True,
    displacement: float = 1e-4,
) -> pd.DataFrame:
    """Calculate parameter elasticities using Monte Carlo analysis.

    Examples:
        >>> parameter_elasticities(
        ...     model,
        ...     parameters=["p1", "p2"],
        ...     concs={"x1": 1, "x2": 2},
        ...     mc_parameters=mc_parameters
        ... )
                 p1    p2
        0 v1    0.0   0.0
          v2    1.0   0.0
          v3    0.0  -1.4
        1 v1    0.0   0.0
          v2    1.0   0.0
          v3    0.0  -1.4

    Args:
        model: The model to analyze.
        parameters: List of parameters for which to calculate elasticities.
        concs: Dictionary of concentrations for the model.
        mc_parameters: DataFrame containing Monte Carlo parameter sets.
        time: Time point for the analysis.
        cache: Cache object for storing results.
        max_workers: Maximum number of workers for parallel processing.
        normalized: Whether to use normalized elasticities.
        displacement: Displacement for finite difference calculations.

    Returns:
        pd.DataFrame: Parameter elasticities, indexed by Monte Carlo draw.

    """
    # FIX: the docstring example previously called this function with a
    # `variables=` keyword, which is not part of the signature; it now uses
    # the correct `parameters=` keyword.
    res = parallelise(
        partial(
            _update_parameters_and,
            fn=partial(
                mca.parameter_elasticities,
                parameters=parameters,
                concs=concs,
                time=time,
                displacement=displacement,
                normalized=normalized,
            ),
            model=model,
        ),
        inputs=list(mc_parameters.iterrows()),
        cache=cache,
        max_workers=max_workers,
    )
    # Concatenating the dict keys the result on the Monte Carlo draw index.
    return cast(pd.DataFrame, pd.concat(res))
476
+
477
+
478
def response_coefficients(
    model: Model,
    parameters: list[str],
    mc_parameters: pd.DataFrame,
    *,
    y0: dict[str, float] | None = None,
    cache: Cache | None = None,
    normalized: bool = True,
    displacement: float = 1e-4,
    disable_tqdm: bool = False,
    max_workers: int | None = None,
    rel_norm: bool = False,
) -> ResponseCoefficientsByPars:
    """Calculate response coefficients using Monte Carlo analysis.

    Examples:
        >>> response_coefficients(
        ...     model,
        ...     parameters=["vmax1", "vmax2"],
        ...     mc_parameters=mc_parameters,
        ... ).concs
                     x1    x2
        0 vmax_1  0.01  0.01
          vmax_2  0.02  0.02
        1 vmax_1  0.03  0.03
          vmax_2  0.04  0.04

    Args:
        model: The model to analyze.
        parameters: List of parameters for which to calculate the
            response coefficients.
        mc_parameters: DataFrame containing Monte Carlo parameter sets.
        y0: Initial conditions for the solver.
        cache: Cache object for storing results.
        normalized: Whether to use normalized coefficients.
        displacement: Displacement for finite difference calculations.
        disable_tqdm: Whether to disable the tqdm progress bar.
        max_workers: Maximum number of workers for parallel processing.
        rel_norm: Whether to use relative normalization in the steady state
            calculations.

    Returns:
        ResponseCoefficientsByPars: Response coefficients per Monte Carlo
        parameter set.

    """
    # `parallel=False` in the inner call: each draw already runs in a worker.
    coeff_fn = partial(
        mca.response_coefficients,
        parameters=parameters,
        y0=y0,
        normalized=normalized,
        displacement=displacement,
        rel_norm=rel_norm,
        disable_tqdm=disable_tqdm,
        parallel=False,
    )
    draws = parallelise(
        fn=partial(_update_parameters_and, fn=coeff_fn, model=model),
        inputs=list(mc_parameters.iterrows()),
        cache=cache,
        max_workers=max_workers,
    )

    conc_rcs = {idx: run.concs for idx, run in draws.items()}
    flux_rcs = {idx: run.fluxes for idx, run in draws.items()}

    return ResponseCoefficientsByPars(
        concs=cast(pd.DataFrame, pd.concat(conc_rcs)),
        fluxes=cast(pd.DataFrame, pd.concat(flux_rcs)),
        parameters=mc_parameters,
    )