mxlpy 0.20.0__py3-none-any.whl → 0.21.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mxlpy/carousel.py +166 -0
- mxlpy/fit.py +274 -28
- mxlpy/identify.py +6 -7
- mxlpy/mc.py +21 -20
- mxlpy/mca.py +7 -5
- mxlpy/parallel.py +7 -6
- mxlpy/scan.py +31 -19
- mxlpy/simulator.py +2 -2
- {mxlpy-0.20.0.dist-info → mxlpy-0.21.0.dist-info}/METADATA +3 -1
- {mxlpy-0.20.0.dist-info → mxlpy-0.21.0.dist-info}/RECORD +12 -11
- {mxlpy-0.20.0.dist-info → mxlpy-0.21.0.dist-info}/WHEEL +0 -0
- {mxlpy-0.20.0.dist-info → mxlpy-0.21.0.dist-info}/licenses/LICENSE +0 -0
mxlpy/carousel.py
ADDED
@@ -0,0 +1,166 @@
+"""Reaction carousel."""
+
+from __future__ import annotations
+
+import itertools as it
+from copy import deepcopy
+from dataclasses import dataclass, field
+from functools import partial
+from typing import TYPE_CHECKING
+
+import pandas as pd
+
+from mxlpy import parallel, scan
+
+__all__ = ["Carousel", "CarouselSteadyState", "CarouselTimeCourse", "ReactionTemplate"]
+
+if TYPE_CHECKING:
+    from collections.abc import Iterable, Mapping
+
+    from mxlpy import Model
+    from mxlpy.types import Array, IntegratorType, RateFn
+
+
+@dataclass
+class ReactionTemplate:
+    """Template for a reaction in a model."""
+
+    fn: RateFn
+    args: list[str]
+    additional_parameters: dict[str, float] = field(default_factory=dict)
+
+
+@dataclass
+class CarouselSteadyState:
+    """Time course of a carousel simulation."""
+
+    carousel: list[Model]
+    results: list[scan.TimePoint]
+
+    def get_variables_by_model(self) -> pd.DataFrame:
+        """Get the variables of the time course results, indexed by model."""
+        return pd.DataFrame({i: r.variables for i, r in enumerate(self.results)}).T
+
+
+@dataclass
+class CarouselTimeCourse:
+    """Time course of a carousel simulation."""
+
+    carousel: list[Model]
+    results: list[scan.TimeCourse]
+
+    def get_variables_by_model(self) -> pd.DataFrame:
+        """Get the variables of the time course results, indexed by model."""
+        return pd.concat({i: r.variables for i, r in enumerate(self.results)})
+
+
+def _dict_product[T1, T2](d: Mapping[T1, Iterable[T2]]) -> Iterable[dict[T1, T2]]:
+    yield from (dict(zip(d.keys(), x, strict=True)) for x in it.product(*d.values()))
+
+
+def _make_reaction_carousel(
+    model: Model, rxns: dict[str, list[ReactionTemplate]]
+) -> Iterable[Model]:
+    for d in _dict_product(rxns):
+        new = deepcopy(model)
+        for rxn, template in d.items():
+            new.add_parameters(template.additional_parameters)
+            new.update_reaction(name=rxn, fn=template.fn, args=template.args)
+        yield new
+
+
+class Carousel:
+    """A carousel of models with different reaction templates."""
+
+    variants: list[Model]
+
+    def __init__(
+        self,
+        model: Model,
+        variants: dict[str, list[ReactionTemplate]],
+    ) -> None:
+        """Initialize the carousel with a model and reaction templates."""
+        self.variants = list(
+            _make_reaction_carousel(
+                model=model,
+                rxns=variants,
+            )
+        )
+
+    def time_course(
+        self,
+        time_points: Array,
+        *,
+        y0: dict[str, float] | None = None,
+        integrator: IntegratorType | None = None,
+    ) -> CarouselTimeCourse:
+        """Simulate the carousel of models over a time course."""
+        results = [
+            i[1]
+            for i in parallel.parallelise(
+                partial(
+                    scan._time_course_worker,  # noqa: SLF001
+                    time_points=time_points,
+                    integrator=integrator,
+                    y0=y0,
+                ),
+                list(enumerate(self.variants)),
+            )
+        ]
+
+        return CarouselTimeCourse(
+            carousel=self.variants,
+            results=results,
+        )
+
+    def protocol_time_course(
+        self,
+        protocol: pd.DataFrame,
+        *,
+        y0: dict[str, float] | None = None,
+        integrator: IntegratorType | None = None,
+    ) -> CarouselTimeCourse:
+        """Simulate the carousel of models over a protocol time course."""
+        results = [
+            i[1]
+            for i in parallel.parallelise(
+                partial(
+                    scan._protocol_worker,  # noqa: SLF001
+                    protocol=protocol,
+                    integrator=integrator,
+                    y0=y0,
+                ),
+                list(enumerate(self.variants)),
+            )
+        ]
+
+        return CarouselTimeCourse(
+            carousel=self.variants,
+            results=results,
+        )
+
+    def steady_state(
+        self,
+        *,
+        y0: dict[str, float] | None = None,
+        integrator: IntegratorType | None = None,
+        rel_norm: bool = False,
+    ) -> CarouselSteadyState:
+        """Simulate the carousel of models over a time course."""
+        results = [
+            i[1]
+            for i in parallel.parallelise(
+                partial(
+                    scan._steady_state_worker,  # noqa: SLF001
+                    integrator=integrator,
+                    rel_norm=rel_norm,
+                    y0=y0,
+                ),
+                list(enumerate(self.variants)),
+            )
+        ]
+
+        return CarouselSteadyState(
+            carousel=self.variants,
+            results=results,
+        )
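For orientation, a minimal usage sketch of the new carousel API. This is hedged: the pre-built model, its reaction name "v1", and the variable/parameter names passed via args are assumptions for illustration only and are not part of this diff.

import numpy as np

from mxlpy import Model
from mxlpy.carousel import Carousel, ReactionTemplate


def mass_action(s: float, k: float) -> float:
    # simple first-order rate law used as one candidate kinetics
    return k * s


def michaelis_menten(s: float, vmax: float, km: float) -> float:
    # saturating rate law used as the alternative candidate kinetics
    return vmax * s / (km + s)


model: Model = ...  # an existing mxlpy Model with a reaction "v1" acting on variable "S" (assumed)

carousel = Carousel(
    model,
    variants={
        "v1": [
            ReactionTemplate(fn=mass_action, args=["S", "k1"]),
            ReactionTemplate(
                fn=michaelis_menten,
                args=["S", "vmax_v1", "km_v1"],
                additional_parameters={"vmax_v1": 1.0, "km_v1": 0.5},
            ),
        ],
    },
)

# One model variant per combination of templates, simulated in parallel
tc = carousel.time_course(np.linspace(0.0, 10.0, 101))
print(tc.get_variables_by_model())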
mxlpy/fit.py
CHANGED
@@ -10,29 +10,42 @@ Functions:
 
 from __future__ import annotations
 
+import logging
+from copy import deepcopy
+from dataclasses import dataclass
 from functools import partial
 from typing import TYPE_CHECKING, Protocol
 
 import numpy as np
 from scipy.optimize import minimize
 
-from mxlpy
+from mxlpy import parallel
 from mxlpy.simulator import Simulator
 from mxlpy.types import Array, ArrayLike, Callable, IntegratorType, cast
 
 if TYPE_CHECKING:
     import pandas as pd
 
+    from mxlpy.carousel import Carousel
     from mxlpy.model import Model
 
+LOGGER = logging.getLogger(__name__)
+
 __all__ = [
+    "CarouselFit",
+    "FitResult",
     "InitialGuess",
+    "LOGGER",
     "LossFn",
+    "MinResult",
     "MinimizeFn",
     "ProtocolResidualFn",
     "ResidualFn",
     "SteadyStateResidualFn",
     "TimeSeriesResidualFn",
+    "carousel_steady_state",
+    "carousel_time_course",
+    "carousel_time_course_over_protocol",
     "rmse",
     "steady_state",
     "time_course",
@@ -40,9 +53,37 @@ __all__ = [
 ]
 
 
+@dataclass
+class MinResult:
+    """Result of a minimization operation."""
+
+    parameters: dict[str, float]
+    residual: float
+
+
+@dataclass
+class FitResult:
+    """Result of a fit operation."""
+
+    model: Model
+    best_pars: dict[str, float]
+    loss: float
+
+
+@dataclass
+class CarouselFit:
+    """Result of a carousel fit operation."""
+
+    fits: list[FitResult]
+
+    def get_best_fit(self) -> FitResult:
+        """Get the best fit from the carousel."""
+        return min(self.fits, key=lambda x: x.loss)
+
+
 type InitialGuess = dict[str, float]
 type ResidualFn = Callable[[Array], float]
-type MinimizeFn = Callable[[ResidualFn, InitialGuess],
+type MinimizeFn = Callable[[ResidualFn, InitialGuess], MinResult | None]
 type LossFn = Callable[
     [
         pd.DataFrame | pd.Series,
@@ -70,7 +111,7 @@ class SteadyStateResidualFn(Protocol):
         par_names: list[str],
         data: pd.Series,
         model: Model,
-        y0: dict[str, float],
+        y0: dict[str, float] | None,
         integrator: IntegratorType,
         loss_fn: LossFn,
     ) -> float:
@@ -88,7 +129,7 @@ class TimeSeriesResidualFn(Protocol):
         par_names: list[str],
         data: pd.DataFrame,
         model: Model,
-        y0: dict[str, float],
+        y0: dict[str, float] | None,
         integrator: IntegratorType,
         loss_fn: LossFn,
     ) -> float:
@@ -106,7 +147,7 @@ class ProtocolResidualFn(Protocol):
         par_names: list[str],
         data: pd.DataFrame,
         model: Model,
-        y0: dict[str, float],
+        y0: dict[str, float] | None,
         integrator: IntegratorType,
         loss_fn: LossFn,
         protocol: pd.DataFrame,
@@ -119,22 +160,27 @@ class ProtocolResidualFn(Protocol):
 def _default_minimize_fn(
     residual_fn: ResidualFn,
     p0: dict[str, float],
-) ->
+) -> MinResult | None:
     res = minimize(
         residual_fn,
         x0=list(p0.values()),
-        bounds=[(
+        bounds=[(0, None) for _ in range(len(p0))],
         method="L-BFGS-B",
     )
     if res.success:
-        return
-
-
-
-
-
+        return MinResult(
+            parameters=dict(
+                zip(
+                    p0,
+                    res.x,
+                    strict=True,
+                ),
+            ),
+            residual=res.fun,
         )
-
+
+    LOGGER.warning("Minimisation failed.")
+    return None
 
 
 def _steady_state_residual(
@@ -285,14 +331,15 @@ def _protocol_residual(
 
 def steady_state(
     model: Model,
+    *,
     p0: dict[str, float],
     data: pd.Series,
     y0: dict[str, float] | None = None,
     minimize_fn: MinimizeFn = _default_minimize_fn,
     residual_fn: SteadyStateResidualFn = _steady_state_residual,
-    integrator: IntegratorType =
+    integrator: IntegratorType | None = None,
     loss_fn: LossFn = rmse,
-) ->
+) -> FitResult | None:
     """Fit model parameters to steady-state experimental data.
 
     Examples:
@@ -333,23 +380,30 @@ def steady_state(
             loss_fn=loss_fn,
         ),
     )
-
-
-    # Restore
+    min_result = minimize_fn(fn, p0)
+    # Restore original model
     model.update_parameters(p_orig)
-
+    if min_result is None:
+        return min_result
+
+    return FitResult(
+        model=deepcopy(model).update_parameters(min_result.parameters),
+        best_pars=min_result.parameters,
+        loss=min_result.residual,
+    )
 
 
 def time_course(
     model: Model,
+    *,
     p0: dict[str, float],
     data: pd.DataFrame,
     y0: dict[str, float] | None = None,
     minimize_fn: MinimizeFn = _default_minimize_fn,
     residual_fn: TimeSeriesResidualFn = _time_course_residual,
-    integrator: IntegratorType =
+    integrator: IntegratorType | None = None,
     loss_fn: LossFn = rmse,
-) ->
+) -> FitResult | None:
     """Fit model parameters to time course of experimental data.
 
     Examples:
@@ -388,23 +442,33 @@ def time_course(
             loss_fn=loss_fn,
         ),
     )
-
+
+    min_result = minimize_fn(fn, p0)
+    # Restore original model
     model.update_parameters(p_orig)
-
+    if min_result is None:
+        return min_result
+
+    return FitResult(
+        model=deepcopy(model).update_parameters(min_result.parameters),
+        best_pars=min_result.parameters,
+        loss=min_result.residual,
+    )
 
 
 def time_course_over_protocol(
     model: Model,
+    *,
     p0: dict[str, float],
     data: pd.DataFrame,
     protocol: pd.DataFrame,
     y0: dict[str, float] | None = None,
     minimize_fn: MinimizeFn = _default_minimize_fn,
     residual_fn: ProtocolResidualFn = _protocol_residual,
-    integrator: IntegratorType =
+    integrator: IntegratorType | None = None,
     loss_fn: LossFn = rmse,
     time_points_per_step: int = 10,
-) ->
+) -> FitResult | None:
     """Fit model parameters to time course of experimental data.
 
     Examples:
@@ -447,6 +511,188 @@ def time_course_over_protocol(
             time_points_per_step=time_points_per_step,
         ),
     )
-
+
+    min_result = minimize_fn(fn, p0)
+    # Restore original model
     model.update_parameters(p_orig)
-
+    if min_result is None:
+        return min_result
+
+    return FitResult(
+        model=deepcopy(model).update_parameters(min_result.parameters),
+        best_pars=min_result.parameters,
+        loss=min_result.residual,
+    )
+
+
+def _carousel_steady_state_worker(
+    model: Model,
+    p0: dict[str, float],
+    data: pd.Series,
+    y0: dict[str, float] | None,
+    integrator: IntegratorType | None,
+    loss_fn: LossFn,
+    minimize_fn: MinimizeFn,
+    residual_fn: SteadyStateResidualFn,
+) -> FitResult | None:
+    model_pars = model.parameters
+
+    return steady_state(
+        model,
+        p0={k: v for k, v in p0.items() if k in model_pars},
+        y0=y0,
+        data=data,
+        minimize_fn=minimize_fn,
+        residual_fn=residual_fn,
+        integrator=integrator,
+        loss_fn=loss_fn,
+    )
+
+
+def _carousel_time_course_worker(
+    model: Model,
+    p0: dict[str, float],
+    data: pd.DataFrame,
+    y0: dict[str, float] | None,
+    integrator: IntegratorType | None,
+    loss_fn: LossFn,
+    minimize_fn: MinimizeFn,
+    residual_fn: TimeSeriesResidualFn,
+) -> FitResult | None:
+    model_pars = model.parameters
+    return time_course(
+        model,
+        p0={k: v for k, v in p0.items() if k in model_pars},
+        y0=y0,
+        data=data,
+        minimize_fn=minimize_fn,
+        residual_fn=residual_fn,
+        integrator=integrator,
+        loss_fn=loss_fn,
+    )
+
+
+def _carousel_protocol_worker(
+    model: Model,
+    p0: dict[str, float],
+    data: pd.DataFrame,
+    protocol: pd.DataFrame,
+    y0: dict[str, float] | None,
+    integrator: IntegratorType | None,
+    loss_fn: LossFn,
+    minimize_fn: MinimizeFn,
+    residual_fn: ProtocolResidualFn,
+) -> FitResult | None:
+    model_pars = model.parameters
+    return time_course_over_protocol(
+        model,
+        p0={k: v for k, v in p0.items() if k in model_pars},
+        y0=y0,
+        protocol=protocol,
+        data=data,
+        minimize_fn=minimize_fn,
+        residual_fn=residual_fn,
+        integrator=integrator,
+        loss_fn=loss_fn,
+    )
+
+
+def carousel_steady_state(
+    carousel: Carousel,
+    *,
+    p0: dict[str, float],
+    data: pd.Series,
+    y0: dict[str, float] | None = None,
+    minimize_fn: MinimizeFn = _default_minimize_fn,
+    residual_fn: SteadyStateResidualFn = _steady_state_residual,
+    integrator: IntegratorType | None = None,
+    loss_fn: LossFn = rmse,
+) -> CarouselFit:
+    """Fit model parameters to steady-state experimental data over a carousel."""
+    return CarouselFit(
+        [
+            fit
+            for i in parallel.parallelise(
+                partial(
+                    _carousel_steady_state_worker,
+                    p0=p0,
+                    data=data,
+                    y0=y0,
+                    integrator=integrator,
+                    loss_fn=loss_fn,
+                    minimize_fn=minimize_fn,
+                    residual_fn=residual_fn,
+                ),
+                inputs=list(enumerate(carousel.variants)),
+            )
+            if (fit := i[1]) is not None
+        ]
+    )
+
+
+def carousel_time_course(
+    carousel: Carousel,
+    *,
+    p0: dict[str, float],
+    data: pd.DataFrame,
+    y0: dict[str, float] | None = None,
+    minimize_fn: MinimizeFn = _default_minimize_fn,
+    residual_fn: TimeSeriesResidualFn = _time_course_residual,
+    integrator: IntegratorType | None = None,
+    loss_fn: LossFn = rmse,
+) -> CarouselFit:
+    """Fit model parameters to time course of experimental data over a carousel."""
+    return CarouselFit(
+        [
+            fit
+            for i in parallel.parallelise(
+                partial(
+                    _carousel_time_course_worker,
+                    p0=p0,
+                    data=data,
+                    y0=y0,
+                    integrator=integrator,
+                    loss_fn=loss_fn,
+                    minimize_fn=minimize_fn,
+                    residual_fn=residual_fn,
+                ),
+                inputs=list(enumerate(carousel.variants)),
+            )
+            if (fit := i[1]) is not None
+        ]
+    )
+
+
+def carousel_time_course_over_protocol(
+    carousel: Carousel,
+    *,
+    p0: dict[str, float],
+    data: pd.DataFrame,
+    protocol: pd.DataFrame,
+    y0: dict[str, float] | None = None,
+    minimize_fn: MinimizeFn = _default_minimize_fn,
+    residual_fn: ProtocolResidualFn = _protocol_residual,
+    integrator: IntegratorType | None = None,
+    loss_fn: LossFn = rmse,
+) -> CarouselFit:
+    """Fit model parameters to time course of experimental data over a protocol."""
+    return CarouselFit(
+        [
+            fit
+            for i in parallel.parallelise(
+                partial(
+                    _carousel_protocol_worker,
+                    p0=p0,
+                    data=data,
+                    protocol=protocol,
+                    y0=y0,
+                    integrator=integrator,
+                    loss_fn=loss_fn,
+                    minimize_fn=minimize_fn,
+                    residual_fn=residual_fn,
+                ),
+                inputs=list(enumerate(carousel.variants)),
+            )
+            if (fit := i[1]) is not None
+        ]
+    )
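Together with carousel.py, the new fitting entry points fit every reaction variant against the same data set and keep the variants that converged. A usage sketch under the same assumptions as the carousel example above (the carousel and the measured steady-state series are placeholders, not part of this diff):

import pandas as pd

from mxlpy import fit
from mxlpy.carousel import Carousel

carousel: Carousel = ...  # built as in the carousel.py sketch (assumed)
data: pd.Series = ...  # measured steady-state values, indexed by variable name (assumed)

carousel_fit = fit.carousel_steady_state(
    carousel,
    p0={"k1": 1.0, "vmax_v1": 1.0, "km_v1": 0.5},  # initial guesses, filtered per variant
    data=data,
)

best = carousel_fit.get_best_fit()  # FitResult with the lowest loss
print(best.best_pars, best.loss)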
mxlpy/identify.py
CHANGED
@@ -28,16 +28,15 @@ def _mc_fit_time_course_worker(
     data: pd.DataFrame,
     loss_fn: fit.LossFn,
 ) -> float:
-
-    return fit._time_course_residual(  # noqa: SLF001
-        par_values=list(p_fit.values()),
-        par_names=list(p_fit.keys()),
-        data=data,
+    fit_result = fit.time_course(
         model=model,
-
-
+        p0=p0.to_dict(),
+        data=data,
         loss_fn=loss_fn,
     )
+    if fit_result is None:
+        return np.inf
+    return fit_result.loss
 
 
 def profile_likelihood(
mxlpy/mc.py
CHANGED
@@ -22,7 +22,6 @@ from typing import TYPE_CHECKING, Protocol, cast
 import pandas as pd
 
 from mxlpy import mca, scan
-from mxlpy.integrators import DefaultIntegrator
 from mxlpy.parallel import Cache, parallelise
 from mxlpy.scan import (
     ProtocolWorker,
@@ -122,7 +121,7 @@ def steady_state(
     cache: Cache | None = None,
     rel_norm: bool = False,
     worker: SteadyStateWorker = _steady_state_worker,
-    integrator: IntegratorType =
+    integrator: IntegratorType | None = None,
 ) -> SteadyStates:
     """Monte-carlo scan of steady states.
 
@@ -153,6 +152,7 @@ def steady_state(
                 worker,
                 rel_norm=rel_norm,
                 integrator=integrator,
+                y0=None,
             ),
             model=model,
         ),
@@ -161,8 +161,8 @@ def steady_state(
         cache=cache,
     )
     return SteadyStates(
-        variables=pd.concat({k: v.variables for k, v in res
-        fluxes=pd.concat({k: v.fluxes for k, v in res
+        variables=pd.concat({k: v.variables for k, v in res}, axis=1).T,
+        fluxes=pd.concat({k: v.fluxes for k, v in res}, axis=1).T,
         parameters=mc_to_scan,
     )
 
@@ -176,7 +176,7 @@ def time_course(
     max_workers: int | None = None,
     cache: Cache | None = None,
     worker: TimeCourseWorker = _time_course_worker,
-    integrator: IntegratorType =
+    integrator: IntegratorType | None = None,
 ) -> TimeCourseByPars:
     """MC time course.
 
@@ -207,6 +207,7 @@ def time_course(
                 worker,
                 time_points=time_points,
                 integrator=integrator,
+                y0=None,
             ),
             model=model,
         ),
@@ -217,8 +218,8 @@ def time_course(
 
     return TimeCourseByPars(
         parameters=mc_to_scan,
-        variables=pd.concat({k: v.variables.T for k, v in res
-        fluxes=pd.concat({k: v.fluxes.T for k, v in res
+        variables=pd.concat({k: v.variables.T for k, v in res}, axis=1).T,
+        fluxes=pd.concat({k: v.fluxes.T for k, v in res}, axis=1).T,
     )
 
 
@@ -232,7 +233,7 @@ def time_course_over_protocol(
     max_workers: int | None = None,
     cache: Cache | None = None,
     worker: ProtocolWorker = _protocol_worker,
-    integrator: IntegratorType =
+    integrator: IntegratorType | None = None,
 ) -> ProtocolByPars:
     """MC time course.
 
@@ -264,6 +265,7 @@ def time_course_over_protocol(
                 worker,
                 protocol=protocol,
                 integrator=integrator,
+                y0=None,
                 time_points_per_step=time_points_per_step,
             ),
             model=model,
@@ -272,8 +274,8 @@ def time_course_over_protocol(
         max_workers=max_workers,
         cache=cache,
     )
-    concs = {k: v.variables.T for k, v in res
-    fluxes = {k: v.fluxes.T for k, v in res
+    concs = {k: v.variables.T for k, v in res}
+    fluxes = {k: v.fluxes.T for k, v in res}
     return ProtocolByPars(
         variables=pd.concat(concs, axis=1).T,
         fluxes=pd.concat(fluxes, axis=1).T,
@@ -292,7 +294,7 @@ def scan_steady_state(
     cache: Cache | None = None,
     rel_norm: bool = False,
     worker: ParameterScanWorker = _parameter_scan_worker,
-    integrator: IntegratorType =
+    integrator: IntegratorType | None = None,
 ) -> McSteadyStates:
     """Parameter scan of mc distributed steady states.
 
@@ -339,6 +341,7 @@ def scan_steady_state(
                 parameters=to_scan,
                 rel_norm=rel_norm,
                 integrator=integrator,
+                y0=None,
             ),
             model=model,
         ),
@@ -346,8 +349,8 @@ def scan_steady_state(
         cache=cache,
         max_workers=max_workers,
     )
-    concs = {k: v.variables.T for k, v in res
-    fluxes = {k: v.fluxes.T for k, v in res
+    concs = {k: v.variables.T for k, v in res}
+    fluxes = {k: v.fluxes.T for k, v in res}
    return McSteadyStates(
         variables=pd.concat(concs, axis=1).T,
         fluxes=pd.concat(fluxes, axis=1).T,
@@ -422,7 +425,7 @@ def variable_elasticities(
         cache=cache,
         max_workers=max_workers,
     )
-    return cast(pd.DataFrame, pd.concat(res))
+    return cast(pd.DataFrame, pd.concat(dict(res)))
 
 
 def parameter_elasticities(
@@ -486,7 +489,7 @@ def parameter_elasticities(
         cache=cache,
         max_workers=max_workers,
     )
-    return cast(pd.DataFrame, pd.concat(res))
+    return cast(pd.DataFrame, pd.concat(dict(res)))
 
 
 def response_coefficients(
@@ -501,7 +504,7 @@ def response_coefficients(
     disable_tqdm: bool = False,
     max_workers: int | None = None,
     rel_norm: bool = False,
-    integrator: IntegratorType =
+    integrator: IntegratorType | None = None,
 ) -> ResponseCoefficientsByPars:
     """Calculate response coefficients using Monte Carlo analysis.
 
@@ -558,9 +561,7 @@ def response_coefficients(
     )
 
     return ResponseCoefficientsByPars(
-        variables=cast(
-
-        ),
-        fluxes=cast(pd.DataFrame, pd.concat({k: v.fluxes for k, v in res.items()})),
+        variables=cast(pd.DataFrame, pd.concat({k: v.variables for k, v in res})),
+        fluxes=cast(pd.DataFrame, pd.concat({k: v.fluxes for k, v in res})),
         parameters=mc_to_scan,
     )
mxlpy/mca.py
CHANGED
@@ -22,7 +22,6 @@ from typing import TYPE_CHECKING
 
 import pandas as pd
 
-from mxlpy.integrators import DefaultIntegrator
 from mxlpy.parallel import parallelise
 from mxlpy.scan import _steady_state_worker
 from mxlpy.types import ResponseCoefficients
@@ -46,7 +45,7 @@ def _response_coefficient_worker(
     normalized: bool,
     rel_norm: bool,
     displacement: float = 1e-4,
-    integrator: IntegratorType,
+    integrator: IntegratorType | None,
 ) -> tuple[pd.Series, pd.Series]:
     """Calculate response coefficients for a single parameter.
 
@@ -81,6 +80,7 @@ def _response_coefficient_worker(
         model,
         rel_norm=rel_norm,
         integrator=integrator,
+        y0=None,
     )
 
     model.update_parameters({parameter: old * (1 - displacement)})
@@ -88,6 +88,7 @@ def _response_coefficient_worker(
         model,
         rel_norm=rel_norm,
         integrator=integrator,
+        y0=None,
     )
 
     conc_resp = (upper.variables - lower.variables) / (2 * displacement * old)
@@ -99,6 +100,7 @@ def _response_coefficient_worker(
         model,
         rel_norm=rel_norm,
         integrator=integrator,
+        y0=None,
     )
     conc_resp *= old / norm.variables
     flux_resp *= old / norm.fluxes
@@ -237,7 +239,7 @@ def response_coefficients(
     parallel: bool = True,
     max_workers: int | None = None,
     rel_norm: bool = False,
-    integrator: IntegratorType =
+    integrator: IntegratorType | None = None,
 ) -> ResponseCoefficients:
     """Calculate response coefficients.
 
@@ -284,6 +286,6 @@ def response_coefficients(
         max_workers=max_workers,
     )
     return ResponseCoefficients(
-        variables=pd.DataFrame({k: v[0] for k, v in res
-        fluxes=pd.DataFrame({k: v[1] for k, v in res
+        variables=pd.DataFrame({k: v[0] for k, v in res}),
+        fluxes=pd.DataFrame({k: v[1] for k, v in res}),
     )
mxlpy/parallel.py
CHANGED
@@ -103,7 +103,7 @@ def parallelise[K: Hashable, Tin, Tout](
     timeout: float | None = None,
     disable_tqdm: bool = False,
     tqdm_desc: str | None = None,
-) ->
+) -> list[tuple[K, Tout]]:
     """Execute a function in parallel over a collection of inputs.
 
     Examples:
@@ -136,9 +136,9 @@ def parallelise[K: Hashable, Tin, Tout](
         cache=cache,
     )  # type: ignore
 
-    results:
+    results: list[tuple[K, Tout]]
     if parallel:
-        results =
+        results = []
         max_workers = (
             multiprocessing.cpu_count() if max_workers is None else max_workers
         )
@@ -157,18 +157,19 @@ def parallelise[K: Hashable, Tin, Tout](
                 try:
                     key, value = next(it)
                     pbar.update(1)
-                    results
+                    results.append((key, value))
                 except StopIteration:
                     break
                 except TimeoutError:
                     pbar.update(1)
     else:
-        results =
+        results = list(
             tqdm(
                 map(worker, inputs),  # type: ignore
                 total=len(inputs),
                 disable=disable_tqdm,
                 desc=tqdm_desc,
-            )
+            )
         )  # type: ignore
+
     return results
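The practical effect of this change is that parallelise now returns a list of (key, result) tuples (the call sites previously iterated res.items()), which is why mc.py, mca.py and scan.py above drop .items() or wrap the result in dict(...). A small consumption sketch; the worker and inputs here are placeholders, not part of this diff:

from mxlpy.parallel import parallelise


def square(x: int) -> int:
    # trivial stand-in for a simulation worker
    return x * x


pairs = parallelise(square, inputs=[("a", 2), ("b", 3)])  # list of (key, result) tuples
as_dict = dict(pairs)  # {"a": 4, "b": 9}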
mxlpy/scan.py
CHANGED
@@ -22,7 +22,6 @@ from typing import TYPE_CHECKING, Protocol, Self, cast
 import numpy as np
 import pandas as pd
 
-from mxlpy.integrators import DefaultIntegrator
 from mxlpy.parallel import Cache, parallelise
 from mxlpy.simulator import Result, Simulator
 from mxlpy.types import IntegratorType, ProtocolByPars, SteadyStates, TimeCourseByPars
@@ -281,7 +280,8 @@ class SteadyStateWorker(Protocol):
         model: Model,
         *,
         rel_norm: bool,
-        integrator: IntegratorType,
+        integrator: IntegratorType | None,
+        y0: dict[str, float] | None,
     ) -> TimePoint:
         """Call the worker function."""
         ...
@@ -295,7 +295,8 @@ class TimeCourseWorker(Protocol):
         model: Model,
         time_points: Array,
         *,
-        integrator: IntegratorType,
+        integrator: IntegratorType | None,
+        y0: dict[str, float] | None,
     ) -> TimeCourse:
         """Call the worker function."""
         ...
@@ -309,7 +310,8 @@ class ProtocolWorker(Protocol):
         model: Model,
         protocol: pd.DataFrame,
         *,
-        integrator: IntegratorType,
+        integrator: IntegratorType | None,
+        y0: dict[str, float] | None,
         time_points_per_step: int = 10,
     ) -> TimeCourse:
         """Call the worker function."""
@@ -320,7 +322,8 @@ def _steady_state_worker(
     model: Model,
     *,
     rel_norm: bool,
-    integrator: IntegratorType,
+    integrator: IntegratorType | None,
+    y0: dict[str, float] | None,
 ) -> TimePoint:
     """Simulate the model to steady state and return concentrations and fluxes.
 
@@ -336,7 +339,7 @@ def _steady_state_worker(
     """
     try:
         res = (
-            Simulator(model, integrator=integrator)
+            Simulator(model, integrator=integrator, y0=y0)
             .simulate_to_steady_state(rel_norm=rel_norm)
             .get_result()
         )
@@ -348,7 +351,8 @@ def _steady_state_worker(
 def _time_course_worker(
     model: Model,
     time_points: Array,
-
+    y0: dict[str, float] | None,
+    integrator: IntegratorType | None,
 ) -> TimeCourse:
     """Simulate the model to steady state and return concentrations and fluxes.
 
@@ -364,7 +368,7 @@ def _time_course_worker(
     """
     try:
         res = (
-            Simulator(model, integrator=integrator)
+            Simulator(model, integrator=integrator, y0=y0)
             .simulate_time_course(time_points=time_points)
             .get_result()
         )
@@ -381,7 +385,8 @@ def _protocol_worker(
     model: Model,
     protocol: pd.DataFrame,
     *,
-    integrator: IntegratorType
+    integrator: IntegratorType | None,
+    y0: dict[str, float] | None,
     time_points_per_step: int = 10,
 ) -> TimeCourse:
     """Simulate the model over a protocol and return concentrations and fluxes.
@@ -399,7 +404,7 @@ def _protocol_worker(
     """
     try:
         res = (
-            Simulator(model, integrator=integrator)
+            Simulator(model, integrator=integrator, y0=y0)
             .simulate_over_protocol(
                 protocol=protocol,
                 time_points_per_step=time_points_per_step,
@@ -430,7 +435,7 @@ def steady_state(
     rel_norm: bool = False,
     cache: Cache | None = None,
     worker: SteadyStateWorker = _steady_state_worker,
-    integrator: IntegratorType =
+    integrator: IntegratorType | None = None,
 ) -> SteadyStates:
     """Get steady-state results over supplied values.
 
@@ -479,6 +484,7 @@ def steady_state(
                 worker,
                 rel_norm=rel_norm,
                 integrator=integrator,
+                y0=None,
             ),
             model=model,
         ),
@@ -486,8 +492,8 @@ def steady_state(
         cache=cache,
         parallel=parallel,
     )
-    concs = pd.DataFrame({k: v.variables.T for k, v in res
-    fluxes = pd.DataFrame({k: v.fluxes.T for k, v in res
+    concs = pd.DataFrame({k: v.variables.T for k, v in res}).T
+    fluxes = pd.DataFrame({k: v.fluxes.T for k, v in res}).T
     idx = (
         pd.Index(to_scan.iloc[:, 0])
         if to_scan.shape[1] == 1
@@ -506,8 +512,8 @@ def time_course(
     y0: dict[str, float] | None = None,
     parallel: bool = True,
     cache: Cache | None = None,
+    integrator: IntegratorType | None = None,
     worker: TimeCourseWorker = _time_course_worker,
-    integrator: IntegratorType = DefaultIntegrator,
 ) -> TimeCourseByPars:
     """Get time course for each supplied parameter.
 
@@ -559,6 +565,8 @@ def time_course(
 
 
     """
+    # We update the initial conditions separately here, because `to_scan` might also
+    # contain initial conditions.
     if y0 is not None:
         model.update_variables(y0)
 
@@ -569,6 +577,7 @@ def time_course(
                 worker,
                 time_points=time_points,
                 integrator=integrator,
+                y0=None,  # See comment above
             ),
             model=model,
         ),
@@ -576,8 +585,8 @@ def time_course(
         cache=cache,
         parallel=parallel,
     )
-    concs = cast(dict, {k: v.variables for k, v in res
-    fluxes = cast(dict, {k: v.fluxes for k, v in res
+    concs = cast(dict, {k: v.variables for k, v in res})
+    fluxes = cast(dict, {k: v.fluxes for k, v in res})
     return TimeCourseByPars(
         parameters=to_scan,
         variables=pd.concat(concs, names=["n", "time"]),
@@ -595,7 +604,7 @@ def time_course_over_protocol(
     parallel: bool = True,
     cache: Cache | None = None,
     worker: ProtocolWorker = _protocol_worker,
-    integrator: IntegratorType =
+    integrator: IntegratorType | None = None,
 ) -> ProtocolByPars:
     """Get protocol series for each supplied parameter.
 
@@ -626,6 +635,8 @@ def time_course_over_protocol(
         TimeCourseByPars: Protocol series results for each parameter set.
 
     """
+    # We update the initial conditions separately here, because `to_scan` might also
+    # contain initial conditions.
     if y0 is not None:
         model.update_variables(y0)
 
@@ -637,6 +648,7 @@ def time_course_over_protocol(
                 protocol=protocol,
                 time_points_per_step=time_points_per_step,
                 integrator=integrator,
+                y0=None,
             ),
             model=model,
         ),
@@ -644,8 +656,8 @@ def time_course_over_protocol(
         cache=cache,
         parallel=parallel,
     )
-    concs = cast(dict, {k: v.variables for k, v in res
-    fluxes = cast(dict, {k: v.fluxes for k, v in res
+    concs = cast(dict, {k: v.variables for k, v in res})
+    fluxes = cast(dict, {k: v.fluxes for k, v in res})
     return ProtocolByPars(
         parameters=to_scan,
         protocol=protocol,
mxlpy/simulator.py
CHANGED
@@ -335,7 +335,7 @@ class Simulator:
         self,
         model: Model,
         y0: dict[str, float] | None = None,
-        integrator: IntegratorType =
+        integrator: IntegratorType | None = None,
         *,
         use_jacobian: bool = False,
         test_run: bool = True,
@@ -354,7 +354,7 @@ class Simulator:
         self.model = model
         self.y0 = model.get_initial_conditions() if y0 is None else y0
 
-        self._integrator_type = integrator
+        self._integrator_type = DefaultIntegrator if integrator is None else integrator
         self._time_shift = None
         self.variables = None
         self.dependent = None
{mxlpy-0.20.0.dist-info → mxlpy-0.21.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mxlpy
-Version: 0.20.0
+Version: 0.21.0
 Summary: A package to build metabolic models
 Author-email: Marvin van Aalst <marvin.vanaalst@gmail.com>
 Maintainer-email: Marvin van Aalst <marvin.vanaalst@gmail.com>
@@ -69,6 +69,8 @@ Description-Content-Type: text/markdown
 [docs-badge]: https://img.shields.io/badge/docs-main-green.svg?style=flat-square
 [docs]: https://computational-biology-aachen.github.io/mxlpy/
 
+MxlPy (pronounced "em axe el pie") is a Python package for mechanistic learning (Mxl) - the combination of mechanistic modeling and machine learning to deliver explainable, data-informed solutions.
+
 ## Installation
 
 You can install mxlpy using pip: `pip install mxlpy`.
{mxlpy-0.20.0.dist-info → mxlpy-0.21.0.dist-info}/RECORD
CHANGED
@@ -1,22 +1,23 @@
 mxlpy/__init__.py,sha256=4pbDeyLhQjfL76h2oXdodARzKkrkX5wESV7kEjwC3K8,4399
+mxlpy/carousel.py,sha256=o72YKzfPCDhT5oHhow4oNvIShG-i3-Z0UMEQLt2iE5A,4699
 mxlpy/compare.py,sha256=6iIl6yKXP9guSVLgqqnaqILP_BF_oqyx7DTGbdpwAjM,7800
 mxlpy/distributions.py,sha256=ce6RTqn19YzMMec-u09fSIUA8A92M6rehCuHuXWcX7A,8734
-mxlpy/fit.py,sha256=
+mxlpy/fit.py,sha256=vJGd_kl-MqyI7db96uM9STeBnvPVu1VGLCiGuH8mEKc,19009
 mxlpy/fns.py,sha256=NLxYwa3ylS7SkISBjw_TgQSKEm7WnkZF9wPigX_ZCAM,13915
-mxlpy/identify.py,sha256=
+mxlpy/identify.py,sha256=yU6ccd0yDJhtyo5gkemMuecZALzjR1KzT0vKPmlL4kg,2107
 mxlpy/label_map.py,sha256=Zla9tey-7_POTE57XNEuCSeTqdAbMWZdj_j_OwokngY,17823
 mxlpy/linear_label_map.py,sha256=5FyD0MMdSGsC3eKeBnpd1LBHyVBqIDWCDjgR8_q6XZo,10289
-mxlpy/mc.py,sha256=
-mxlpy/mca.py,sha256=
+mxlpy/mc.py,sha256=bt2DrMaovWO_LM3PfkVr0cvK6k_ZSjLRnudasgC5SRM,17132
+mxlpy/mca.py,sha256=B1bRb_EHim3uJ90KJkTZ5HXrVOBjrctcRCsQG4PXU-U,9351
 mxlpy/model.py,sha256=14gncyYft39rwoiJPb5AynL3whXnZrJY3N7SLExH0Qk,62056
-mxlpy/parallel.py,sha256=
+mxlpy/parallel.py,sha256=yLQLw5O4vnPVp_Zmtw1UhPWtB3483foimxQB-TwFKPg,5016
 mxlpy/parameterise.py,sha256=IgbvfEnkmaqVq_5GgFjHqApGUN9CJrsVD3Fr7pg9iFA,758
 mxlpy/paths.py,sha256=TK2wO4N9lG-UV1JGfeB64q48JVDbwqIUj63rl55MKuQ,1022
 mxlpy/plot.py,sha256=PA7tAmy2XXACxBLtdnfpxKUFRzi-lnCQjr7gw_nzxKU,32544
 mxlpy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 mxlpy/report.py,sha256=6V_kH5usFtar2lUGLjG5k7WIJjUi1TD5qIO7V_6V3Gc,8773
-mxlpy/scan.py,sha256=
-mxlpy/simulator.py,sha256=
+mxlpy/scan.py,sha256=_MwwTiL-IQPlUU9e2L1Wc-V95sElcaPw-DkxoXYanzc,19421
+mxlpy/simulator.py,sha256=tdxTUbsWo3SdYi7a1lsMah9D_Dz_FqnMjTfK-3oT2Gk,21091
 mxlpy/types.py,sha256=FXBkwHgQ3v_k4ER49hDqyFMIA6i1BQf8isPma97LJdg,12605
 mxlpy/experimental/__init__.py,sha256=kZTE-92OErpHzNRqmgSQYH4CGXrogGJ5EL35XGZQ81M,206
 mxlpy/experimental/diff.py,sha256=MoM15rbMAHa7p9Zva8NxIc7K585kHJYKFaD1LnN5e10,9088
@@ -49,7 +50,7 @@ mxlpy/surrogates/_torch.py,sha256=gU0secuRBYgewhNqZmSo6_Xf804dSzwWwIYmdKA7y60,63
 mxlpy/symbolic/__init__.py,sha256=_vM5YM5I6OH0QDbFt9uGYKO8Z5Vly0wbGuvUScVrPRU,258
 mxlpy/symbolic/strikepy.py,sha256=tzo3uvPpXLDex09hWTuitVzuTNwbgl7jWGjD8g6a8iI,20033
 mxlpy/symbolic/symbolic_model.py,sha256=JFzcIdyfJihvKjef748DMXU6WI8nHjgjIk5BwUuB4HQ,2543
-mxlpy-0.
-mxlpy-0.
-mxlpy-0.
-mxlpy-0.
+mxlpy-0.21.0.dist-info/METADATA,sha256=ZKxvZTNcrNCaadkRmKSU_6bQlSsJdTtuz_idNVEVbVs,4601
+mxlpy-0.21.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+mxlpy-0.21.0.dist-info/licenses/LICENSE,sha256=lHX9Eu70g3Iv1aOxXTWNHa3vq9vaVYSPQx4jOLYmDpw,1096
+mxlpy-0.21.0.dist-info/RECORD,,
{mxlpy-0.20.0.dist-info → mxlpy-0.21.0.dist-info}/WHEEL
File without changes
{mxlpy-0.20.0.dist-info → mxlpy-0.21.0.dist-info}/licenses/LICENSE
File without changes