mxlpy 0.14.0__py3-none-any.whl → 0.16.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry and is provided for informational purposes only.
- mxlpy/integrators/int_assimulo.py +2 -1
- mxlpy/linear_label_map.py +2 -2
- mxlpy/mc.py +84 -70
- mxlpy/mca.py +97 -98
- mxlpy/meta/codegen_latex.py +1 -3
- mxlpy/meta/codegen_modebase.py +1 -1
- mxlpy/model.py +9 -13
- mxlpy/scan.py +40 -38
- mxlpy/simulator.py +19 -24
- mxlpy/types.py +32 -27
- {mxlpy-0.14.0.dist-info → mxlpy-0.16.0.dist-info}/METADATA +5 -3
- {mxlpy-0.14.0.dist-info → mxlpy-0.16.0.dist-info}/RECORD +14 -14
- {mxlpy-0.14.0.dist-info → mxlpy-0.16.0.dist-info}/WHEEL +0 -0
- {mxlpy-0.14.0.dist-info → mxlpy-0.16.0.dist-info}/licenses/LICENSE +0 -0
mxlpy/mca.py
CHANGED
@@ -38,6 +38,73 @@ if TYPE_CHECKING:
     from mxlpy.types import IntegratorType


+def _response_coefficient_worker(
+    parameter: str,
+    *,
+    model: Model,
+    y0: dict[str, float] | None,
+    normalized: bool,
+    rel_norm: bool,
+    displacement: float = 1e-4,
+    integrator: IntegratorType,
+) -> tuple[pd.Series, pd.Series]:
+    """Calculate response coefficients for a single parameter.
+
+    Internal helper function that computes concentration and flux response
+    coefficients using finite differences. The function:
+    1. Perturbs the parameter up and down by a small displacement
+    2. Calculates steady states for each perturbation
+    3. Computes response coefficients from the differences
+    4. Optionally normalizes the results
+
+    Args:
+        parameter: Name of the parameter to analyze
+        model: Metabolic model instance
+        y0: Initial conditions as a dictionary {species: value}
+        normalized: Whether to normalize the coefficients
+        rel_norm: Whether to use relative normalization
+        displacement: Relative perturbation size (default: 1e-4)
+        integrator: Integrator function to use for steady state calculation
+
+    Returns:
+        tuple[pd.Series, pd.Series]: Tuple containing:
+            - Series of concentration response coefficients
+            - Series of flux response coefficients
+
+    """
+    old = model.parameters[parameter]
+    if y0 is not None:
+        model.update_variables(y0)
+
+    model.update_parameters({parameter: old * (1 + displacement)})
+    upper = _steady_state_worker(
+        model,
+        rel_norm=rel_norm,
+        integrator=integrator,
+    )
+
+    model.update_parameters({parameter: old * (1 - displacement)})
+    lower = _steady_state_worker(
+        model,
+        rel_norm=rel_norm,
+        integrator=integrator,
+    )
+
+    conc_resp = (upper.variables - lower.variables) / (2 * displacement * old)
+    flux_resp = (upper.fluxes - lower.fluxes) / (2 * displacement * old)
+    # Reset
+    model.update_parameters({parameter: old})
+    if normalized:
+        norm = _steady_state_worker(
+            model,
+            rel_norm=rel_norm,
+            integrator=integrator,
+        )
+        conc_resp *= old / norm.variables
+        flux_resp *= old / norm.fluxes
+    return conc_resp, flux_resp
+
+
 ###############################################################################
 # Non-steady state
 ###############################################################################
@@ -46,8 +113,8 @@ if TYPE_CHECKING:
 def variable_elasticities(
     model: Model,
     *,
-    …
-    variables: …
+    to_scan: list[str] | None = None,
+    variables: dict[str, float] | None = None,
     time: float = 0,
     normalized: bool = True,
     displacement: float = 1e-4,
@@ -67,8 +134,8 @@ def variable_elasticities(

     Args:
         model: Metabolic model instance
-        …
-        variables: …
+        to_scan: List of variables to analyze. Uses all if None
+        variables: Custom variable values. Defaults to initial conditions.
         time: Time point for evaluation
         normalized: Whether to normalize coefficients
         displacement: Relative perturbation size
@@ -77,23 +144,23 @@ def variable_elasticities(
         DataFrame with elasticity coefficients (reactions x metabolites)

     """
-    …
-    …
+    variables = model.get_initial_conditions() if variables is None else variables
+    to_scan = model.get_variable_names() if to_scan is None else to_scan
     elasticities = {}

-    for var in …
-        old = …
+    for var in to_scan:
+        old = variables[var]

         upper = model.get_fluxes(
-            variables=…
+            variables=variables | {var: old * (1 + displacement)}, time=time
         )
         lower = model.get_fluxes(
-            variables=…
+            variables=variables | {var: old * (1 - displacement)}, time=time
         )

         elasticity_coef = (upper - lower) / (2 * displacement * old)
         if normalized:
-            elasticity_coef *= old / model.get_fluxes(variables=…
+            elasticity_coef *= old / model.get_fluxes(variables=variables, time=time)
         elasticities[var] = elasticity_coef

     return pd.DataFrame(data=elasticities)
@@ -101,10 +168,10 @@ def variable_elasticities(

 def parameter_elasticities(
     model: Model,
-    parameters: list[str] | None = None,
-    concs: dict[str, float] | None = None,
-    time: float = 0,
     *,
+    to_scan: list[str] | None = None,
+    variables: dict[str, float] | None = None,
+    time: float = 0,
     normalized: bool = True,
     displacement: float = 1e-4,
 ) -> pd.DataFrame:
@@ -119,8 +186,8 @@ def parameter_elasticities(

     Args:
         model: Metabolic model instance
-        …
-        …
+        to_scan: List of parameters to analyze. Uses all if None
+        variables: Custom variable values. Defaults to initial conditions.
         time: Time point for evaluation
         normalized: Whether to normalize coefficients
         displacement: Relative perturbation size
@@ -129,26 +196,26 @@ def parameter_elasticities(
         DataFrame with parameter elasticities (reactions x parameters)

     """
-    …
-    …
+    variables = model.get_initial_conditions() if variables is None else variables
+    to_scan = model.get_parameter_names() if to_scan is None else to_scan

     elasticities = {}

-    …
-    for par in …
+    variables = model.get_initial_conditions() if variables is None else variables
+    for par in to_scan:
         old = model.parameters[par]

         model.update_parameters({par: old * (1 + displacement)})
-        upper = model.get_fluxes(variables=…
+        upper = model.get_fluxes(variables=variables, time=time)

         model.update_parameters({par: old * (1 - displacement)})
-        lower = model.get_fluxes(variables=…
+        lower = model.get_fluxes(variables=variables, time=time)

         # Reset
         model.update_parameters({par: old})
         elasticity_coef = (upper - lower) / (2 * displacement * old)
         if normalized:
-            elasticity_coef *= old / model.get_fluxes(variables=…
+            elasticity_coef *= old / model.get_fluxes(variables=variables, time=time)
         elasticities[par] = elasticity_coef

     return pd.DataFrame(data=elasticities)
@@ -159,79 +226,11 @@ def parameter_elasticities(
 # ###############################################################################


-def _response_coefficient_worker(
-    parameter: str,
-    *,
-    model: Model,
-    y0: dict[str, float] | None,
-    normalized: bool,
-    rel_norm: bool,
-    displacement: float = 1e-4,
-    integrator: IntegratorType,
-) -> tuple[pd.Series, pd.Series]:
-    """Calculate response coefficients for a single parameter.
-
-    Internal helper function that computes concentration and flux response
-    coefficients using finite differences. The function:
-    1. Perturbs the parameter up and down by a small displacement
-    2. Calculates steady states for each perturbation
-    3. Computes response coefficients from the differences
-    4. Optionally normalizes the results
-
-    Args:
-        parameter: Name of the parameter to analyze
-        model: Metabolic model instance
-        y0: Initial conditions as a dictionary {species: value}
-        normalized: Whether to normalize the coefficients
-        rel_norm: Whether to use relative normalization
-        displacement: Relative perturbation size (default: 1e-4)
-        integrator: Integrator function to use for steady state calculation
-
-    Returns:
-        tuple[pd.Series, pd.Series]: Tuple containing:
-            - Series of concentration response coefficients
-            - Series of flux response coefficients
-
-    """
-    old = model.parameters[parameter]
-
-    model.update_parameters({parameter: old * (1 + displacement)})
-    upper = _steady_state_worker(
-        model,
-        y0=y0,
-        rel_norm=rel_norm,
-        integrator=integrator,
-    )
-
-    model.update_parameters({parameter: old * (1 - displacement)})
-    lower = _steady_state_worker(
-        model,
-        y0=y0,
-        rel_norm=rel_norm,
-        integrator=integrator,
-    )
-
-    conc_resp = (upper.variables - lower.variables) / (2 * displacement * old)
-    flux_resp = (upper.fluxes - lower.fluxes) / (2 * displacement * old)
-    # Reset
-    model.update_parameters({parameter: old})
-    if normalized:
-        norm = _steady_state_worker(
-            model,
-            y0=y0,
-            rel_norm=rel_norm,
-            integrator=integrator,
-        )
-        conc_resp *= old / norm.variables
-        flux_resp *= old / norm.fluxes
-    return conc_resp, flux_resp
-
-
 def response_coefficients(
     model: Model,
-    parameters: list[str] | None = None,
     *,
-    …
+    to_scan: list[str] | None = None,
+    variables: dict[str, float] | None = None,
     normalized: bool = True,
     displacement: float = 1e-4,
     disable_tqdm: bool = False,
@@ -250,8 +249,8 @@ def response_coefficients(

     Args:
         model: Metabolic model instance
-        …
-        …
+        to_scan: Parameters to analyze. Uses all if None
+        variables: Custom variable values. Defaults to initial conditions.
         normalized: Whether to normalize coefficients
         displacement: Relative perturbation size
         disable_tqdm: Disable progress bar
@@ -266,19 +265,19 @@ def response_coefficients(
         - Concentration response coefficients

     """
-    …
+    to_scan = model.get_parameter_names() if to_scan is None else to_scan

     res = parallelise(
         partial(
             _response_coefficient_worker,
             model=model,
-            y0=…
+            y0=variables,
             normalized=normalized,
             displacement=displacement,
             rel_norm=rel_norm,
             integrator=integrator,
         ),
-        inputs=list(zip(…
+        inputs=list(zip(to_scan, to_scan, strict=True)),
         cache=None,
         disable_tqdm=disable_tqdm,
         parallel=parallel,
mxlpy/meta/codegen_latex.py
CHANGED
@@ -326,9 +326,7 @@ class TexExport:
             parameters={gls.get(k, k): v for k, v in self.parameters.items()},
             variables={gls.get(k, k): v for k, v in self.variables.items()},
             derived={
-                gls.get(k, k): Derived(
-                    name=k, fn=v.fn, args=[gls.get(i, i) for i in v.args]
-                )
+                gls.get(k, k): Derived(fn=v.fn, args=[gls.get(i, i) for i in v.args])
                 for k, v in self.derived.items()
             },
             reactions={
mxlpy/meta/codegen_modebase.py
CHANGED
@@ -64,7 +64,7 @@ def generate_mxlpy_code(model: Model) -> str:
         )
         args = ", ".join(f'"{k}"' for k in stoich.args)
         stoich = (  # noqa: PLW2901
-            f"""Derived(…
+            f"""Derived(fn={fn.__name__}, args=[{args}])"""
         )
         stoichiometry.append(f""""{var}": {stoich}""")

mxlpy/model.py
CHANGED
@@ -908,7 +908,7 @@ class Model:

         """
         self._insert_id(name=name, ctx="derived")
-        self._derived[name] = Derived(…
+        self._derived[name] = Derived(fn=fn, args=args)
         return self

     def get_derived_parameter_names(self) -> list[str]:
@@ -1058,12 +1058,10 @@ class Model:
         self._insert_id(name=name, ctx="reaction")

         stoich: dict[str, Derived | float] = {
-            k: Derived(…
+            k: Derived(fn=fns.constant, args=[v]) if isinstance(v, str) else v
             for k, v in stoichiometry.items()
         }
-        self._reactions[name] = Reaction(
-            name=name, fn=fn, stoichiometry=stoich, args=args
-        )
+        self._reactions[name] = Reaction(fn=fn, stoichiometry=stoich, args=args)
         return self

     def get_reaction_names(self) -> list[str]:
@@ -1112,9 +1110,7 @@ class Model:

         if stoichiometry is not None:
             stoich = {
-                k: Derived(…
-                if isinstance(v, str)
-                else v
+                k: Derived(fn=fns.constant, args=[v]) if isinstance(v, str) else v
                 for k, v in stoichiometry.items()
             }
             rxn.stoichiometry = stoich
@@ -1188,7 +1184,7 @@ class Model:

         """
         self._insert_id(name=name, ctx="readout")
-        self._readouts[name] = Readout(…
+        self._readouts[name] = Readout(fn=fn, args=args)
         return self

     def get_readout_names(self) -> list[str]:
@@ -1354,7 +1350,7 @@ class Model:

         containers = self._derived | self._reactions | self._surrogates
         for name in cache.order:
-            containers[name].calculate_inpl(args)
+            containers[name].calculate_inpl(name, args)

         return args

@@ -1399,8 +1395,8 @@ class Model:
         )

         if include_readouts:
-            for ro in self._readouts.…
-                ro.calculate_inpl(args)
+            for name, ro in self._readouts.items():  # FIXME: order?
+                ro.calculate_inpl(name, args)

         return pd.Series(args, dtype=float)

@@ -1449,7 +1445,7 @@ class Model:

         containers = self._derived | self._reactions | self._surrogates
         for name in cache.order:
-            containers[name].calculate_inpl_time_course(args)
+            containers[name].calculate_inpl_time_course(name, args)

         if include_readouts:
             for name, ro in self._readouts.items():
mxlpy/scan.py
CHANGED
@@ -51,7 +51,7 @@ if TYPE_CHECKING:
     from mxlpy.types import Array


-def …
+def _update_parameters_and_initial_conditions[T](
     pars: pd.Series,
     fn: Callable[[Model], T],
     model: Model,
@@ -67,7 +67,9 @@ def _update_parameters_and[T](
         Result of the function execution.

     """
-    …
+    pd = pars.to_dict()
+    model.update_variables({k: v for k, v in pd.items() if k in model._variables})  # noqa: SLF001
+    model.update_parameters({k: v for k, v in pd.items() if k in model._parameters})  # noqa: SLF001
     return fn(model)


@@ -282,7 +284,6 @@ class SteadyStateWorker(Protocol):
     def __call__(
         self,
         model: Model,
-        y0: dict[str, float] | None,
         *,
         rel_norm: bool,
         integrator: IntegratorType,
@@ -297,7 +298,6 @@ class TimeCourseWorker(Protocol):
     def __call__(
         self,
         model: Model,
-        y0: dict[str, float] | None,
         time_points: Array,
         *,
         integrator: IntegratorType,
@@ -312,7 +312,6 @@ class ProtocolWorker(Protocol):
     def __call__(
         self,
         model: Model,
-        y0: dict[str, float] | None,
         protocol: pd.DataFrame,
         *,
         integrator: IntegratorType,
@@ -324,7 +323,6 @@ class ProtocolWorker(Protocol):

 def _steady_state_worker(
     model: Model,
-    y0: dict[str, float] | None,
     *,
     rel_norm: bool,
     integrator: IntegratorType,
@@ -343,7 +341,7 @@ def _steady_state_worker(
     """
     try:
         res = (
-            Simulator(model, …
+            Simulator(model, integrator=integrator)
             .simulate_to_steady_state(rel_norm=rel_norm)
             .get_result()
         )
@@ -354,7 +352,6 @@ def _steady_state_worker(

 def _time_course_worker(
     model: Model,
-    y0: dict[str, float] | None,
     time_points: Array,
     integrator: IntegratorType,
 ) -> TimeCourse:
@@ -372,7 +369,7 @@ def _time_course_worker(
     """
     try:
         res = (
-            Simulator(model, …
+            Simulator(model, integrator=integrator)
             .simulate_time_course(time_points=time_points)
             .get_result()
         )
@@ -387,7 +384,6 @@ def _time_course_worker(

 def _protocol_worker(
     model: Model,
-    y0: dict[str, float] | None,
     protocol: pd.DataFrame,
     *,
     integrator: IntegratorType = DefaultIntegrator,
@@ -408,7 +404,7 @@ def _protocol_worker(
     """
     try:
         res = (
-            Simulator(model, …
+            Simulator(model, integrator=integrator)
             .simulate_over_protocol(
                 protocol=protocol,
                 time_points_per_step=time_points_per_step,
@@ -432,20 +428,20 @@ def _protocol_worker(

 def steady_state(
     model: Model,
-    parameters: pd.DataFrame,
-    y0: dict[str, float] | None = None,
     *,
+    to_scan: pd.DataFrame,
+    y0: dict[str, float] | None = None,
     parallel: bool = True,
     rel_norm: bool = False,
     cache: Cache | None = None,
     worker: SteadyStateWorker = _steady_state_worker,
     integrator: IntegratorType = DefaultIntegrator,
 ) -> SteadyStates:
-    """Get steady-state results over supplied …
+    """Get steady-state results over supplied values.

     Args:
         model: Model instance to simulate.
-        …
+        to_scan: DataFrame containing parameter or initial values to scan.
         y0: Initial conditions as a dictionary {variable: value}.
         parallel: Whether to execute in parallel (default: True).
         rel_norm: Whether to use relative normalization (default: False).
@@ -478,39 +474,41 @@ def steady_state(
     | (2, 4) | 0.5 | 2 |

     """
+    if y0 is not None:
+        model.update_variables(y0)
+
     res = parallelise(
         partial(
-            …
+            _update_parameters_and_initial_conditions,
             fn=partial(
                 worker,
-                y0=y0,
                 rel_norm=rel_norm,
                 integrator=integrator,
             ),
             model=model,
         ),
-        inputs=list(…
+        inputs=list(to_scan.iterrows()),
         cache=cache,
         parallel=parallel,
     )
     concs = pd.DataFrame({k: v.variables.T for k, v in res.items()}).T
     fluxes = pd.DataFrame({k: v.fluxes.T for k, v in res.items()}).T
     idx = (
-        pd.Index(…
-        if …
-        else pd.MultiIndex.from_frame(…
+        pd.Index(to_scan.iloc[:, 0])
+        if to_scan.shape[1] == 1
+        else pd.MultiIndex.from_frame(to_scan)
     )
     concs.index = idx
     fluxes.index = idx
-    return SteadyStates(variables=concs, fluxes=fluxes, parameters=…
+    return SteadyStates(variables=concs, fluxes=fluxes, parameters=to_scan)


 def time_course(
     model: Model,
-    …
+    *,
+    to_scan: pd.DataFrame,
     time_points: Array,
     y0: dict[str, float] | None = None,
-    *,
     parallel: bool = True,
     cache: Cache | None = None,
     worker: TimeCourseWorker = _time_course_worker,
@@ -521,7 +519,7 @@ def time_course(
     Examples:
         >>> time_course(
         >>>     model,
-        >>>     …
+        >>>     to_scan=pd.DataFrame({"k1": [1, 1.5, 2]}),
         >>>     time_points=np.linspace(0, 1, 3)
         >>> ).variables

@@ -539,7 +537,7 @@ def time_course(

         >>> time_course(
         >>>     model,
-        >>>     …
+        >>>     to_scan=cartesian_product({"k1": [1, 2], "k2": [3, 4]}),
         >>>     time_points=[0.0, 0.5, 1.0],
         >>> ).variables

@@ -553,7 +551,7 @@ def time_course(

     Args:
         model: Model instance to simulate.
-        …
+        to_scan: DataFrame containing parameter or initial values to scan.
         time_points: Array of time points for the simulation.
         y0: Initial conditions as a dictionary {variable: value}.
         cache: Optional cache to store and retrieve results.
@@ -566,25 +564,27 @@ def time_course(


     """
+    if y0 is not None:
+        model.update_variables(y0)
+
     res = parallelise(
         partial(
-            …
+            _update_parameters_and_initial_conditions,
             fn=partial(
                 worker,
                 time_points=time_points,
-                y0=y0,
                 integrator=integrator,
             ),
             model=model,
         ),
-        inputs=list(…
+        inputs=list(to_scan.iterrows()),
         cache=cache,
         parallel=parallel,
     )
     concs = cast(dict, {k: v.variables for k, v in res.items()})
     fluxes = cast(dict, {k: v.fluxes for k, v in res.items()})
     return TimeCourseByPars(
-        parameters=…
+        parameters=to_scan,
         variables=pd.concat(concs, names=["n", "time"]),
         fluxes=pd.concat(fluxes, names=["n", "time"]),
     )
@@ -592,11 +592,11 @@ def time_course(

 def time_course_over_protocol(
     model: Model,
-    …
+    *,
+    to_scan: pd.DataFrame,
     protocol: pd.DataFrame,
     time_points_per_step: int = 10,
     y0: dict[str, float] | None = None,
-    *,
     parallel: bool = True,
     cache: Cache | None = None,
     worker: ProtocolWorker = _protocol_worker,
@@ -618,7 +618,7 @@ def time_course_over_protocol(

     Args:
         model: Model instance to simulate.
-        …
+        to_scan: DataFrame containing parameter or initial values to scan.
         protocol: Protocol to follow for the simulation.
         time_points_per_step: Number of time points per protocol step (default: 10).
         y0: Initial conditions as a dictionary {variable: value}.
@@ -631,26 +631,28 @@ def time_course_over_protocol(
         TimeCourseByPars: Protocol series results for each parameter set.

     """
+    if y0 is not None:
+        model.update_variables(y0)
+
     res = parallelise(
         partial(
-            …
+            _update_parameters_and_initial_conditions,
             fn=partial(
                 worker,
                 protocol=protocol,
-                y0=y0,
                 time_points_per_step=time_points_per_step,
                 integrator=integrator,
             ),
             model=model,
         ),
-        inputs=list(…
+        inputs=list(to_scan.iterrows()),
         cache=cache,
         parallel=parallel,
     )
     concs = cast(dict, {k: v.variables for k, v in res.items()})
     fluxes = cast(dict, {k: v.fluxes for k, v in res.items()})
     return ProtocolByPars(
-        parameters=…
+        parameters=to_scan,
         protocol=protocol,
         variables=pd.concat(concs, names=["n", "time"]),
         fluxes=pd.concat(fluxes, names=["n", "time"]),