mxlpy 0.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. mxlpy/__init__.py +165 -0
  2. mxlpy/distributions.py +339 -0
  3. mxlpy/experimental/__init__.py +12 -0
  4. mxlpy/experimental/diff.py +226 -0
  5. mxlpy/fit.py +291 -0
  6. mxlpy/fns.py +191 -0
  7. mxlpy/integrators/__init__.py +19 -0
  8. mxlpy/integrators/int_assimulo.py +146 -0
  9. mxlpy/integrators/int_scipy.py +146 -0
  10. mxlpy/label_map.py +610 -0
  11. mxlpy/linear_label_map.py +303 -0
  12. mxlpy/mc.py +548 -0
  13. mxlpy/mca.py +280 -0
  14. mxlpy/meta/__init__.py +11 -0
  15. mxlpy/meta/codegen_latex.py +516 -0
  16. mxlpy/meta/codegen_modebase.py +110 -0
  17. mxlpy/meta/codegen_py.py +107 -0
  18. mxlpy/meta/source_tools.py +320 -0
  19. mxlpy/model.py +1737 -0
  20. mxlpy/nn/__init__.py +10 -0
  21. mxlpy/nn/_tensorflow.py +0 -0
  22. mxlpy/nn/_torch.py +129 -0
  23. mxlpy/npe.py +277 -0
  24. mxlpy/parallel.py +171 -0
  25. mxlpy/parameterise.py +27 -0
  26. mxlpy/paths.py +36 -0
  27. mxlpy/plot.py +875 -0
  28. mxlpy/py.typed +0 -0
  29. mxlpy/sbml/__init__.py +14 -0
  30. mxlpy/sbml/_data.py +77 -0
  31. mxlpy/sbml/_export.py +644 -0
  32. mxlpy/sbml/_import.py +599 -0
  33. mxlpy/sbml/_mathml.py +691 -0
  34. mxlpy/sbml/_name_conversion.py +52 -0
  35. mxlpy/sbml/_unit_conversion.py +74 -0
  36. mxlpy/scan.py +629 -0
  37. mxlpy/simulator.py +655 -0
  38. mxlpy/surrogates/__init__.py +31 -0
  39. mxlpy/surrogates/_poly.py +97 -0
  40. mxlpy/surrogates/_torch.py +196 -0
  41. mxlpy/symbolic/__init__.py +10 -0
  42. mxlpy/symbolic/strikepy.py +582 -0
  43. mxlpy/symbolic/symbolic_model.py +75 -0
  44. mxlpy/types.py +474 -0
  45. mxlpy-0.8.0.dist-info/METADATA +106 -0
  46. mxlpy-0.8.0.dist-info/RECORD +48 -0
  47. mxlpy-0.8.0.dist-info/WHEEL +4 -0
  48. mxlpy-0.8.0.dist-info/licenses/LICENSE +674 -0
mxlpy/scan.py ADDED
@@ -0,0 +1,629 @@
"""Parameter Scanning Module.

This module provides functions and classes for performing parameter scans on metabolic models.
It includes functionality for steady-state and time-course simulations, as well as protocol-based simulations.

Classes:
    TimePoint: Represents a single time point in a simulation.
    TimeCourse: Represents a time course in a simulation.

Functions:
    steady_state: Get steady-state results over supplied parameters.
    time_course: Get time course for each supplied parameter.
    time_course_over_protocol: Get time course over a protocol for each supplied parameter.
"""

from __future__ import annotations

__all__ = [
    "ProtocolWorker",
    "SteadyStateWorker",
    "TimeCourse",
    "TimeCourseWorker",
    "TimePoint",
    "steady_state",
    "time_course",
    "time_course_over_protocol",
]

from dataclasses import dataclass
from functools import partial
from typing import TYPE_CHECKING, Protocol, Self, cast

import numpy as np
import pandas as pd

from mxlpy.parallel import Cache, parallelise
from mxlpy.simulator import Result, Simulator
from mxlpy.types import ProtocolByPars, SteadyStates, TimeCourseByPars

if TYPE_CHECKING:
    from collections.abc import Callable

    from mxlpy.model import Model
    from mxlpy.types import Array


def _update_parameters_and[T](
    pars: pd.Series,
    fn: Callable[[Model], T],
    model: Model,
) -> T:
    """Update model parameters and execute a function.

    Args:
        pars: Series containing parameter values to update.
        fn: Function to execute after updating parameters.
        model: Model instance to update.

    Returns:
        Result of the function execution.

    """
    model.update_parameters(pars.to_dict())
    return fn(model)


def _empty_conc_series(model: Model) -> pd.Series:
    """Create an empty concentration series for the model.

    Args:
        model: Model instance to generate the series for.

    Returns:
        pd.Series: Series with NaN values for each model variable.

    """
    return pd.Series(
        data=np.full(shape=len(model.get_variable_names()), fill_value=np.nan),
        index=model.get_variable_names(),
    )


def _empty_flux_series(model: Model) -> pd.Series:
    """Create an empty flux series for the model.

    Args:
        model: Model instance to generate the series for.

    Returns:
        pd.Series: Series with NaN values for each model reaction.

    """
    return pd.Series(
        data=np.full(shape=len(model.get_reaction_names()), fill_value=np.nan),
        index=model.get_reaction_names(),
    )


def _empty_conc_df(model: Model, time_points: Array) -> pd.DataFrame:
    """Create an empty concentration DataFrame for the model over given time points.

    Args:
        model: Model instance to generate the DataFrame for.
        time_points: Array of time points.

    Returns:
        pd.DataFrame: DataFrame with NaN values for each model variable at each time point.

    """
    return pd.DataFrame(
        data=np.full(
            shape=(len(time_points), len(model.get_variable_names())),
            fill_value=np.nan,
        ),
        index=time_points,
        columns=model.get_variable_names(),
    )


def _empty_flux_df(model: Model, time_points: Array) -> pd.DataFrame:
    """Create an empty flux DataFrame for the model over given time points.

    Args:
        model: Model instance to generate the DataFrame for.
        time_points: Array of time points.

    Returns:
        pd.DataFrame: DataFrame with NaN values for each model reaction at each time point.

    """
    return pd.DataFrame(
        data=np.full(
            shape=(len(time_points), len(model.get_reaction_names())),
            fill_value=np.nan,
        ),
        index=time_points,
        columns=model.get_reaction_names(),
    )


###############################################################################
# Single returns
###############################################################################


@dataclass(slots=True)
class TimePoint:
    """Represents a single time point in a simulation.

    Attributes:
        variables: Series of variable values at the time point.
        fluxes: Series of fluxes at the time point.

    """

    variables: pd.Series
    fluxes: pd.Series

    @classmethod
    def from_result(
        cls,
        *,
        model: Model,
        result: Result | None,
        idx: int = -1,
    ) -> Self:
        """Create a TimePoint from a simulation result.

        Args:
            model: The model object.
            result: Result of the simulation.
            idx: Index to select a specific row from the result DataFrames (default: -1).

        """
        if result is None:
            return cls(
                variables=_empty_conc_series(model),
                fluxes=_empty_flux_series(model),
            )

        return cls(
            variables=result.variables.iloc[idx],
            fluxes=result.fluxes.iloc[idx],
        )

    @property
    def results(self) -> pd.Series:
        """Get the combined results of concentrations and fluxes.

        Example:
            >>> time_point.results
            x1    1.0
            x2    0.5
            v1    0.1
            v2    0.2

        Returns:
            pd.Series: Combined series of concentrations and fluxes.

        """
        return pd.concat((self.variables, self.fluxes), axis=0)


@dataclass(slots=True)
class TimeCourse:
    """Represents a time course in a simulation.

    Attributes:
        variables: DataFrame of concentrations over time.
        fluxes: DataFrame of fluxes over time.

    """

    variables: pd.DataFrame
    fluxes: pd.DataFrame

    @classmethod
    def from_scan(
        cls,
        *,
        model: Model,
        time_points: Array,
        result: Result | None,
    ) -> Self:
        """Create a TimeCourse from a simulation result.

        Args:
            model: The model object.
            time_points: Array of time points.
            result: Result of the simulation.

        """
        if result is None:
            return cls(
                _empty_conc_df(model, time_points),
                _empty_flux_df(model, time_points),
            )
        return cls(
            result.variables,
            result.fluxes,
        )

    @property
    def results(self) -> pd.DataFrame:
        """Get the combined results of concentrations and fluxes over time.

        Examples:
            >>> time_course.results
            Time    x1    x2    v1    v2
            0.0    1.0  1.00  1.00  1.00
            0.1    0.9  0.99  0.99  0.99
            0.2    0.8  0.99  0.99  0.99

        Returns:
            pd.DataFrame: Combined DataFrame of concentrations and fluxes.

        """
        return pd.concat((self.variables, self.fluxes), axis=1)


###############################################################################
# Workers
###############################################################################


class SteadyStateWorker(Protocol):
    """Worker function for steady-state simulations."""

    def __call__(
        self,
        model: Model,
        y0: dict[str, float] | None,
        *,
        rel_norm: bool,
    ) -> TimePoint:
        """Call the worker function."""
        ...


class TimeCourseWorker(Protocol):
    """Worker function for time-course simulations."""

    def __call__(
        self,
        model: Model,
        y0: dict[str, float] | None,
        time_points: Array,
    ) -> TimeCourse:
        """Call the worker function."""
        ...


class ProtocolWorker(Protocol):
    """Worker function for protocol-based simulations."""

    def __call__(
        self,
        model: Model,
        y0: dict[str, float] | None,
        protocol: pd.DataFrame,
        time_points_per_step: int = 10,
    ) -> TimeCourse:
        """Call the worker function."""
        ...


def _steady_state_worker(
    model: Model,
    y0: dict[str, float] | None,
    *,
    rel_norm: bool,
) -> TimePoint:
    """Simulate the model to steady state and return concentrations and fluxes.

    Args:
        model: Model instance to simulate.
        y0: Initial conditions as a dictionary {species: value}.
        rel_norm: Whether to use relative normalization.

    Returns:
        TimePoint: Object containing steady-state concentrations and fluxes.

    """
    try:
        res = (
            Simulator(model, y0=y0)
            .simulate_to_steady_state(rel_norm=rel_norm)
            .get_result()
        )
    except ZeroDivisionError:
        res = None
    return TimePoint.from_result(model=model, result=res)


def _time_course_worker(
    model: Model,
    y0: dict[str, float] | None,
    time_points: Array,
) -> TimeCourse:
    """Simulate the model over the given time points and return concentrations and fluxes.

    Args:
        model: Model instance to simulate.
        y0: Initial conditions as a dictionary {species: value}.
        time_points: Array of time points for the simulation.

    Returns:
        TimeCourse: Object containing time-course concentrations and fluxes.

    """
    try:
        res = (
            Simulator(model, y0=y0)
            .simulate_time_course(time_points=time_points)
            .get_result()
        )
    except ZeroDivisionError:
        res = None
    return TimeCourse.from_scan(
        model=model,
        time_points=time_points,
        result=res,
    )


def _protocol_worker(
    model: Model,
    y0: dict[str, float] | None,
    protocol: pd.DataFrame,
    time_points_per_step: int = 10,
) -> TimeCourse:
    """Simulate the model over a protocol and return concentrations and fluxes.

    Args:
        model: Model instance to simulate.
        y0: Initial conditions as a dictionary {species: value}.
        protocol: DataFrame containing the protocol steps.
        time_points_per_step: Number of time points per protocol step (default: 10).

    Returns:
        TimeCourse: Object containing protocol series concentrations and fluxes.

    """
    try:
        res = (
            Simulator(model, y0=y0)
            .simulate_over_protocol(
                protocol=protocol,
                time_points_per_step=time_points_per_step,
            )
            .get_result()
        )
    except ZeroDivisionError:
        res = None

    time_points = np.linspace(
        0,
        protocol.index[-1].total_seconds(),
        len(protocol) * time_points_per_step,
    )
    return TimeCourse.from_scan(
        model=model,
        time_points=time_points,
        result=res,
    )


def steady_state(
    model: Model,
    parameters: pd.DataFrame,
    y0: dict[str, float] | None = None,
    *,
    parallel: bool = True,
    rel_norm: bool = False,
    cache: Cache | None = None,
    worker: SteadyStateWorker = _steady_state_worker,
) -> SteadyStates:
    """Get steady-state results over supplied parameters.

    Args:
        model: Model instance to simulate.
        parameters: DataFrame containing parameter values to scan.
        y0: Initial conditions as a dictionary {variable: value}.
        parallel: Whether to execute in parallel (default: True).
        rel_norm: Whether to use relative normalization (default: False).
        cache: Optional cache to store and retrieve results.
        worker: Worker function to use for the simulation.

    Returns:
        SteadyStates: Steady-state results for each parameter set.

    Examples:
        >>> steady_state(
        ...     model,
        ...     parameters=pd.DataFrame({"k1": np.linspace(1, 2, 3)})
        ... ).variables
        idx    x     y
        1.0  0.50  1.00
        1.5  0.75  1.50
        2.0  1.00  2.00

        >>> steady_state(
        ...     model,
        ...     parameters=cartesian_product({"k1": [1, 2], "k2": [3, 4]})
        ... ).variables

        | idx    | x    | y |
        |:-------|-----:|--:|
        | (1, 3) | 0.33 | 1 |
        | (1, 4) | 0.25 | 1 |
        | (2, 3) | 0.66 | 2 |
        | (2, 4) | 0.5  | 2 |

    """
    res = parallelise(
        partial(
            _update_parameters_and,
            fn=partial(
                worker,
                y0=y0,
                rel_norm=rel_norm,
            ),
            model=model,
        ),
        inputs=list(parameters.iterrows()),
        cache=cache,
        parallel=parallel,
    )
    concs = pd.DataFrame({k: v.variables.T for k, v in res.items()}).T
    fluxes = pd.DataFrame({k: v.fluxes.T for k, v in res.items()}).T
    idx = (
        pd.Index(parameters.iloc[:, 0])
        if parameters.shape[1] == 1
        else pd.MultiIndex.from_frame(parameters)
    )
    concs.index = idx
    fluxes.index = idx
    return SteadyStates(variables=concs, fluxes=fluxes, parameters=parameters)


def time_course(
    model: Model,
    parameters: pd.DataFrame,
    time_points: Array,
    y0: dict[str, float] | None = None,
    *,
    parallel: bool = True,
    cache: Cache | None = None,
    worker: TimeCourseWorker = _time_course_worker,
) -> TimeCourseByPars:
    """Get time course for each supplied parameter.

    Examples:
        >>> time_course(
        ...     model,
        ...     parameters=pd.DataFrame({"k1": [1, 1.5, 2]}),
        ...     time_points=np.linspace(0, 1, 3)
        ... ).variables

        | (n, time) | x        | y       |
        |:----------|---------:|--------:|
        | (0, 0.0)  | 1        | 1       |
        | (0, 0.5)  | 0.68394  | 1.23865 |
        | (0, 1.0)  | 0.567668 | 1.23254 |
        | (1, 0.0)  | 1        | 1       |
        | (1, 0.5)  | 0.84197  | 1.31606 |
        | (1, 1.0)  | 0.783834 | 1.43233 |
        | (2, 0.0)  | 1        | 1       |
        | (2, 0.5)  | 1        | 1.39347 |
        | (2, 1.0)  | 1        | 1.63212 |

        >>> time_course(
        ...     model,
        ...     parameters=cartesian_product({"k1": [1, 2], "k2": [3, 4]}),
        ...     time_points=[0.0, 0.5, 1.0],
        ... ).variables

        | (n, time) | x        | y      |
        |:----------|---------:|-------:|
        | (0, 0.0)  | 1        | 1      |
        | (0, 0.5)  | 0.482087 | 1.3834 |
        | (1, 0.0)  | 1        | 1      |
        | (1, 0.5)  | 0.351501 | 1.4712 |
        | (2, 0.0)  | 1        | 1      |

    Args:
        model: Model instance to simulate.
        parameters: DataFrame containing parameter values to scan.
        time_points: Array of time points for the simulation.
        y0: Initial conditions as a dictionary {variable: value}.
        cache: Optional cache to store and retrieve results.
        parallel: Whether to execute in parallel (default: True).
        worker: Worker function to use for the simulation.

    Returns:
        TimeCourseByPars: Time series results for each parameter set.

    """
    res = parallelise(
        partial(
            _update_parameters_and,
            fn=partial(
                worker,
                time_points=time_points,
                y0=y0,
            ),
            model=model,
        ),
        inputs=list(parameters.iterrows()),
        cache=cache,
        parallel=parallel,
    )
    concs = cast(dict, {k: v.variables for k, v in res.items()})
    fluxes = cast(dict, {k: v.fluxes for k, v in res.items()})
    return TimeCourseByPars(
        parameters=parameters,
        variables=pd.concat(concs, names=["n", "time"]),
        fluxes=pd.concat(fluxes, names=["n", "time"]),
    )


def time_course_over_protocol(
    model: Model,
    parameters: pd.DataFrame,
    protocol: pd.DataFrame,
    time_points_per_step: int = 10,
    y0: dict[str, float] | None = None,
    *,
    parallel: bool = True,
    cache: Cache | None = None,
    worker: ProtocolWorker = _protocol_worker,
) -> ProtocolByPars:
    """Get protocol series for each supplied parameter.

    Examples:
        >>> scan.time_course_over_protocol(
        ...     model,
        ...     parameters=pd.DataFrame({"k2": np.linspace(1, 2, 11)}),
        ...     protocol=make_protocol(
        ...         {
        ...             1: {"k1": 1},
        ...             2: {"k1": 2},
        ...         }
        ...     ),
        ... )

    Args:
        model: Model instance to simulate.
        parameters: DataFrame containing parameter values to scan.
        protocol: Protocol to follow for the simulation.
        time_points_per_step: Number of time points per protocol step (default: 10).
        y0: Initial conditions as a dictionary {variable: value}.
        parallel: Whether to execute in parallel (default: True).
        cache: Optional cache to store and retrieve results.
        worker: Worker function to use for the simulation.

    Returns:
        ProtocolByPars: Protocol series results for each parameter set.

    """
    res = parallelise(
        partial(
            _update_parameters_and,
            fn=partial(
                worker,
                protocol=protocol,
                y0=y0,
                time_points_per_step=time_points_per_step,
            ),
            model=model,
        ),
        inputs=list(parameters.iterrows()),
        cache=cache,
        parallel=parallel,
    )
    concs = cast(dict, {k: v.variables for k, v in res.items()})
    fluxes = cast(dict, {k: v.fluxes for k, v in res.items()})
    return ProtocolByPars(
        parameters=parameters,
        protocol=protocol,
        variables=pd.concat(concs, names=["n", "time"]),
        fluxes=pd.concat(fluxes, names=["n", "time"]),
    )
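
The three public entry points above (steady_state, time_course, time_course_over_protocol) share one pattern: each row of the parameters DataFrame is applied to the model via _update_parameters_and and dispatched through parallelise, and the per-row results are reassembled into SteadyStates, TimeCourseByPars, or ProtocolByPars containers. A minimal usage sketch, not part of the wheel; get_model() is a hypothetical stand-in for whatever builds your mxlpy Model:

import numpy as np
import pandas as pd

from mxlpy import scan

model = get_model()  # hypothetical helper; any mxlpy.model.Model instance works here

# Steady state for three values of an assumed parameter "k1".
ss = scan.steady_state(
    model,
    parameters=pd.DataFrame({"k1": np.linspace(1, 2, 3)}),
)
print(ss.variables)  # one row per parameter value

# Time course over the same parameter values.
tc = scan.time_course(
    model,
    parameters=pd.DataFrame({"k1": np.linspace(1, 2, 3)}),
    time_points=np.linspace(0, 1, 11),
)
print(tc.variables)  # MultiIndex (n, time) rows

Both calls also accept parallel and cache arguments; their exact behaviour is defined in mxlpy/parallel.py, which ships in the same wheel.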