modelbase2 0.1.79__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. modelbase2/__init__.py +138 -26
  2. modelbase2/distributions.py +306 -0
  3. modelbase2/experimental/__init__.py +17 -0
  4. modelbase2/experimental/codegen.py +239 -0
  5. modelbase2/experimental/diff.py +227 -0
  6. modelbase2/experimental/notes.md +4 -0
  7. modelbase2/experimental/tex.py +521 -0
  8. modelbase2/fit.py +284 -0
  9. modelbase2/fns.py +185 -0
  10. modelbase2/integrators/__init__.py +19 -0
  11. modelbase2/integrators/int_assimulo.py +146 -0
  12. modelbase2/integrators/int_scipy.py +147 -0
  13. modelbase2/label_map.py +610 -0
  14. modelbase2/linear_label_map.py +301 -0
  15. modelbase2/mc.py +548 -0
  16. modelbase2/mca.py +280 -0
  17. modelbase2/model.py +1621 -0
  18. modelbase2/npe.py +343 -0
  19. modelbase2/parallel.py +171 -0
  20. modelbase2/parameterise.py +28 -0
  21. modelbase2/paths.py +36 -0
  22. modelbase2/plot.py +829 -0
  23. modelbase2/sbml/__init__.py +14 -0
  24. modelbase2/sbml/_data.py +77 -0
  25. modelbase2/sbml/_export.py +656 -0
  26. modelbase2/sbml/_import.py +585 -0
  27. modelbase2/sbml/_mathml.py +691 -0
  28. modelbase2/sbml/_name_conversion.py +52 -0
  29. modelbase2/sbml/_unit_conversion.py +74 -0
  30. modelbase2/scan.py +616 -0
  31. modelbase2/scope.py +96 -0
  32. modelbase2/simulator.py +635 -0
  33. modelbase2/surrogates/__init__.py +32 -0
  34. modelbase2/surrogates/_poly.py +66 -0
  35. modelbase2/surrogates/_torch.py +249 -0
  36. modelbase2/surrogates.py +316 -0
  37. modelbase2/types.py +352 -11
  38. modelbase2-0.2.0.dist-info/METADATA +81 -0
  39. modelbase2-0.2.0.dist-info/RECORD +42 -0
  40. {modelbase2-0.1.79.dist-info → modelbase2-0.2.0.dist-info}/WHEEL +1 -1
  41. modelbase2/core/__init__.py +0 -29
  42. modelbase2/core/algebraic_module_container.py +0 -130
  43. modelbase2/core/constant_container.py +0 -113
  44. modelbase2/core/data.py +0 -109
  45. modelbase2/core/name_container.py +0 -29
  46. modelbase2/core/reaction_container.py +0 -115
  47. modelbase2/core/utils.py +0 -28
  48. modelbase2/core/variable_container.py +0 -24
  49. modelbase2/ode/__init__.py +0 -13
  50. modelbase2/ode/integrator.py +0 -80
  51. modelbase2/ode/mca.py +0 -270
  52. modelbase2/ode/model.py +0 -470
  53. modelbase2/ode/simulator.py +0 -153
  54. modelbase2/utils/__init__.py +0 -0
  55. modelbase2/utils/plotting.py +0 -372
  56. modelbase2-0.1.79.dist-info/METADATA +0 -44
  57. modelbase2-0.1.79.dist-info/RECORD +0 -22
  58. {modelbase2-0.1.79.dist-info → modelbase2-0.2.0.dist-info/licenses}/LICENSE +0 -0
modelbase2/scan.py ADDED
@@ -0,0 +1,616 @@
1
+ """Parameter Scanning Module.
2
+
3
+ This module provides functions and classes for performing parameter scans on metabolic models.
4
+ It includes functionality for steady-state and time-course simulations, as well as protocol-based simulations.
5
+
6
+ Classes:
7
+ TimePoint: Represents a single time point in a simulation.
8
+ TimeCourse: Represents a time course in a simulation.
9
+
10
+ Functions:
11
+ steady_state: Get steady-state results over supplied parameters.
12
+ time_course: Get time course for each supplied parameter.
13
+ time_course_over_protocol: Get time course over a protocol for each supplied parameter.
14
+ """
15
+
16
+ from __future__ import annotations
17
+
18
+ __all__ = [
19
+ "ProtocolWorker",
20
+ "SteadyStateWorker",
21
+ "TimeCourse",
22
+ "TimeCourseWorker",
23
+ "TimePoint",
24
+ "steady_state",
25
+ "time_course",
26
+ "time_course_over_protocol",
27
+ ]
28
+
29
+ from dataclasses import dataclass
30
+ from functools import partial
31
+ from typing import TYPE_CHECKING, Protocol, Self, cast
32
+
33
+ import numpy as np
34
+ import pandas as pd
35
+
36
+ from modelbase2.parallel import Cache, parallelise
37
+ from modelbase2.simulator import Simulator
38
+ from modelbase2.types import ProtocolByPars, SteadyStates, TimeCourseByPars
39
+
40
+ if TYPE_CHECKING:
41
+ from collections.abc import Callable
42
+
43
+ from modelbase2.model import Model
44
+ from modelbase2.types import Array
45
+
46
+
47
+ def _update_parameters_and[T](
48
+ pars: pd.Series,
49
+ fn: Callable[[Model], T],
50
+ model: Model,
51
+ ) -> T:
52
+ """Update model parameters and execute a function.
53
+
54
+ Args:
55
+ pars: Series containing parameter values to update.
56
+ fn: Function to execute after updating parameters.
57
+ model: Model instance to update.
58
+
59
+ Returns:
60
+ Result of the function execution.
61
+
62
+ """
63
+ model.update_parameters(pars.to_dict())
64
+ return fn(model)
65
+
66
+
67
+ def _empty_conc_series(model: Model) -> pd.Series:
68
+ """Create an empty concentration series for the model.
69
+
70
+ Args:
71
+ model: Model instance to generate the series for.
72
+
73
+ Returns:
74
+ pd.Series: Series with NaN values for each model variable.
75
+
76
+ """
77
+ return pd.Series(
78
+ data=np.full(shape=len(model.get_variable_names()), fill_value=np.nan),
79
+ index=model.get_variable_names(),
80
+ )
81
+
82
+
83
+ def _empty_flux_series(model: Model) -> pd.Series:
84
+ """Create an empty flux series for the model.
85
+
86
+ Args:
87
+ model: Model instance to generate the series for.
88
+
89
+ Returns:
90
+ pd.Series: Series with NaN values for each model reaction.
91
+
92
+ """
93
+ return pd.Series(
94
+ data=np.full(shape=len(model.get_reaction_names()), fill_value=np.nan),
95
+ index=model.get_reaction_names(),
96
+ )
97
+
98
+
99
+ def _empty_conc_df(model: Model, time_points: Array) -> pd.DataFrame:
100
+ """Create an empty concentration DataFrame for the model over given time points.
101
+
102
+ Args:
103
+ model: Model instance to generate the DataFrame for.
104
+ time_points: Array of time points.
105
+
106
+ Returns:
107
+ pd.DataFrame: DataFrame with NaN values for each model variable at each time point.
108
+
109
+ """
110
+ return pd.DataFrame(
111
+ data=np.full(
112
+ shape=(len(time_points), len(model.get_variable_names())),
113
+ fill_value=np.nan,
114
+ ),
115
+ index=time_points,
116
+ columns=model.get_variable_names(),
117
+ )
118
+
119
+
120
+ def _empty_flux_df(model: Model, time_points: Array) -> pd.DataFrame:
121
+ """Create an empty concentration DataFrame for the model over given time points.
122
+
123
+ Args:
124
+ model: Model instance to generate the DataFrame for.
125
+ time_points: Array of time points.
126
+
127
+ Returns:
128
+ pd.DataFrame: DataFrame with NaN values for each model reaction at each time point.
129
+
130
+ """
131
+ return pd.DataFrame(
132
+ data=np.full(
133
+ shape=(len(time_points), len(model.get_reaction_names())),
134
+ fill_value=np.nan,
135
+ ),
136
+ index=time_points,
137
+ columns=model.get_reaction_names(),
138
+ )
139
+
140
+
141
+ ###############################################################################
142
+ # Single returns
143
+ ###############################################################################
144
+
145
+
146
+ @dataclass(slots=True)
147
+ class TimePoint:
148
+ """Represents a single time point in a simulation.
149
+
150
+ Attributes:
151
+ concs: Series of concentrations at the time point.
152
+ fluxes: Series of fluxes at the time point.
153
+
154
+ Args:
155
+ model: Model instance to generate the time point for.
156
+ concs: DataFrame of concentrations (default: None).
157
+ fluxes: DataFrame of fluxes (default: None).
158
+ idx: Index of the time point in the DataFrame (default: -1).
159
+
160
+ """
161
+
162
+ concs: pd.Series
163
+ fluxes: pd.Series
164
+
165
+ @classmethod
166
+ def from_scan(
167
+ cls,
168
+ model: Model,
169
+ concs: pd.DataFrame | None,
170
+ fluxes: pd.DataFrame | None,
171
+ idx: int = -1,
172
+ ) -> Self:
173
+ """Initialize the Scan object.
174
+
175
+ Args:
176
+ model (Model): The model object.
177
+ concs (pd.DataFrame | None): DataFrame containing concentration data. If None, an empty concentration series is created.
178
+ fluxes (pd.DataFrame | None): DataFrame containing flux data. If None, an empty flux series is created.
179
+ idx (int, optional): Index to select specific row from concs and fluxes DataFrames. Defaults to -1.
180
+
181
+ """
182
+ return cls(
183
+ concs=_empty_conc_series(model) if concs is None else concs.iloc[idx],
184
+ fluxes=_empty_flux_series(model) if fluxes is None else fluxes.iloc[idx],
185
+ )
186
+
187
+ @property
188
+ def results(self) -> pd.Series:
189
+ """Get the combined results of concentrations and fluxes.
190
+
191
+ Example:
192
+ >>> time_point.results
193
+ x1 1.0
194
+ x2 0.5
195
+ v1 0.1
196
+ v2 0.2
197
+
198
+ Returns:
199
+ pd.Series: Combined series of concentrations and fluxes.
200
+
201
+ """
202
+ return pd.concat((self.concs, self.fluxes), axis=0)
203
+
204
+
205
+ @dataclass(slots=True)
206
+ class TimeCourse:
207
+ """Represents a time course in a simulation.
208
+
209
+ Attributes:
210
+ concs: DataFrame of concentrations over time.
211
+ fluxes: DataFrame of fluxes over time.
212
+
213
+ Args:
214
+ model: Model instance to generate the time course for.
215
+ time_points: Array of time points.
216
+ concs: DataFrame of concentrations (default: None).
217
+ fluxes: DataFrame of fluxes (default: None).
218
+
219
+ """
220
+
221
+ concs: pd.DataFrame
222
+ fluxes: pd.DataFrame
223
+
224
+ @classmethod
225
+ def from_scan(
226
+ cls,
227
+ model: Model,
228
+ time_points: Array,
229
+ concs: pd.DataFrame | None,
230
+ fluxes: pd.DataFrame | None,
231
+ ) -> Self:
232
+ """Initialize the Scan object.
233
+
234
+ Args:
235
+ model (Model): The model object.
236
+ time_points (Array): Array of time points.
237
+ concs (pd.DataFrame | None): DataFrame containing concentration data. If None, an empty DataFrame is created.
238
+ fluxes (pd.DataFrame | None): DataFrame containing flux data. If None, an empty DataFrame is created.
239
+
240
+ """
241
+ return cls(
242
+ _empty_conc_df(model, time_points) if concs is None else concs,
243
+ _empty_flux_df(model, time_points) if fluxes is None else fluxes,
244
+ )
245
+
246
+ @property
247
+ def results(self) -> pd.DataFrame:
248
+ """Get the combined results of concentrations and fluxes over time.
249
+
250
+ Examples:
251
+ >>> time_course.results
252
+ Time x1 x2 v1 v2
253
+ 0.0 1.0 1.00 1.00 1.00
254
+ 0.1 0.9 0.99 0.99 0.99
255
+ 0.2 0.8 0.99 0.99 0.99
256
+
257
+ Returns:
258
+ pd.DataFrame: Combined DataFrame of concentrations and fluxes.
259
+
260
+ """
261
+ return pd.concat((self.concs, self.fluxes), axis=1)
262
+
263
+
264
+ ###############################################################################
265
+ # Workers
266
+ ###############################################################################
267
+
268
+
269
+ class SteadyStateWorker(Protocol):
270
+ """Worker function for steady-state simulations."""
271
+
272
+ def __call__(
273
+ self,
274
+ model: Model,
275
+ y0: dict[str, float] | None,
276
+ *,
277
+ rel_norm: bool,
278
+ ) -> TimePoint:
279
+ """Call the worker function."""
280
+ ...
281
+
282
+
283
+ class TimeCourseWorker(Protocol):
284
+ """Worker function for time-course simulations."""
285
+
286
+ def __call__(
287
+ self,
288
+ model: Model,
289
+ y0: dict[str, float] | None,
290
+ time_points: Array,
291
+ ) -> TimeCourse:
292
+ """Call the worker function."""
293
+ ...
294
+
295
+
296
+ class ProtocolWorker(Protocol):
297
+ """Worker function for protocol-based simulations."""
298
+
299
+ def __call__(
300
+ self,
301
+ model: Model,
302
+ y0: dict[str, float] | None,
303
+ protocol: pd.DataFrame,
304
+ time_points_per_step: int = 10,
305
+ ) -> TimeCourse:
306
+ """Call the worker function."""
307
+ ...
308
+
309
+
310
+ def _steady_state_worker(
311
+ model: Model,
312
+ y0: dict[str, float] | None,
313
+ *,
314
+ rel_norm: bool,
315
+ ) -> TimePoint:
316
+ """Simulate the model to steady state and return concentrations and fluxes.
317
+
318
+ Args:
319
+ model: Model instance to simulate.
320
+ y0: Initial conditions as a dictionary {species: value}.
321
+ rel_norm: Whether to use relative normalization.
322
+
323
+ Returns:
324
+ TimePoint: Object containing steady-state concentrations and fluxes.
325
+
326
+ """
327
+ try:
328
+ c, v = (
329
+ Simulator(model, y0=y0)
330
+ .simulate_to_steady_state(rel_norm=rel_norm)
331
+ .get_full_concs_and_fluxes()
332
+ )
333
+ except ZeroDivisionError:
334
+ c = None
335
+ v = None
336
+ return TimePoint.from_scan(model, c, v)
337
+
338
+
339
+ def _time_course_worker(
340
+ model: Model,
341
+ y0: dict[str, float] | None,
342
+ time_points: Array,
343
+ ) -> TimeCourse:
344
+ """Simulate the model to steady state and return concentrations and fluxes.
345
+
346
+ Args:
347
+ model: Model instance to simulate.
348
+ y0: Initial conditions as a dictionary {species: value}.
349
+ time_points: Array of time points for the simulation.
350
+
351
+ Returns:
352
+ TimeCourse: Object containing time-course concentrations and fluxes.
353
+
354
+ """
355
+ try:
356
+ c, v = (
357
+ Simulator(model, y0=y0)
358
+ .simulate_time_course(time_points=time_points)
359
+ .get_full_concs_and_fluxes()
360
+ )
361
+ except ZeroDivisionError:
362
+ c = None
363
+ v = None
364
+ return TimeCourse.from_scan(model, time_points, c, v)
365
+
366
+
367
+ def _protocol_worker(
368
+ model: Model,
369
+ y0: dict[str, float] | None,
370
+ protocol: pd.DataFrame,
371
+ time_points_per_step: int = 10,
372
+ ) -> TimeCourse:
373
+ """Simulate the model over a protocol and return concentrations and fluxes.
374
+
375
+ Args:
376
+ model: Model instance to simulate.
377
+ y0: Initial conditions as a dictionary {species: value}.
378
+ protocol: DataFrame containing the protocol steps.
379
+ time_points_per_step: Number of time points per protocol step (default: 10).
380
+
381
+ Returns:
382
+ TimeCourse: Object containing protocol series concentrations and fluxes.
383
+
384
+ """
385
+ c, v = (
386
+ Simulator(model, y0=y0)
387
+ .simulate_over_protocol(
388
+ protocol=protocol,
389
+ time_points_per_step=time_points_per_step,
390
+ )
391
+ .get_full_concs_and_fluxes()
392
+ )
393
+ time_points = np.linspace(
394
+ 0,
395
+ protocol.index[-1].total_seconds(),
396
+ len(protocol) * time_points_per_step,
397
+ )
398
+ return TimeCourse.from_scan(model, time_points, c, v)
399
+
400
+
401
+ def steady_state(
402
+ model: Model,
403
+ parameters: pd.DataFrame,
404
+ y0: dict[str, float] | None = None,
405
+ *,
406
+ parallel: bool = True,
407
+ rel_norm: bool = False,
408
+ cache: Cache | None = None,
409
+ worker: SteadyStateWorker = _steady_state_worker,
410
+ ) -> SteadyStates:
411
+ """Get steady-state results over supplied parameters.
412
+
413
+ Args:
414
+ model: Model instance to simulate.
415
+ parameters: DataFrame containing parameter values to scan.
416
+ y0: Initial conditions as a dictionary {variable: value}.
417
+ parallel: Whether to execute in parallel (default: True).
418
+ rel_norm: Whether to use relative normalization (default: False).
419
+ cache: Optional cache to store and retrieve results.
420
+ worker: Worker function to use for the simulation.
421
+
422
+ Returns:
423
+ SteadyStates: Steady-state results for each parameter set.
424
+
425
+ Examples:
426
+ >>> steady_state(
427
+ ...     model,
428
+ ...     parameters=pd.DataFrame({"k1": np.linspace(1, 2, 3)})
429
+ ... ).concs
430
+ idx x y
431
+ 1.0 0.50 1.00
432
+ 1.5 0.75 1.50
433
+ 2.0 1.00 2.00
434
+
435
+ >>> steady_state(
436
+ ...     model,
437
+ ...     parameters=cartesian_product({"k1": [1, 2], "k2": [3, 4]})
438
+ ... ).concs
439
+
440
+ | idx    |    x | y |
+ |:-------|-----:|--:|
441
+ | (1, 3) | 0.33 | 1 |
442
+ | (1, 4) | 0.25 | 1 |
443
+ | (2, 3) | 0.66 | 2 |
444
+ | (2, 4) | 0.5 | 2 |
445
+
446
+ """
447
+ res = parallelise(
448
+ partial(
449
+ _update_parameters_and,
450
+ fn=partial(
451
+ worker,
452
+ y0=y0,
453
+ rel_norm=rel_norm,
454
+ ),
455
+ model=model,
456
+ ),
457
+ inputs=list(parameters.iterrows()),
458
+ cache=cache,
459
+ parallel=parallel,
460
+ )
461
+ concs = pd.DataFrame({k: v.concs.T for k, v in res.items()}).T
462
+ fluxes = pd.DataFrame({k: v.fluxes.T for k, v in res.items()}).T
463
+ idx = (
464
+ pd.Index(parameters.iloc[:, 0])
465
+ if parameters.shape[1] == 1
466
+ else pd.MultiIndex.from_frame(parameters)
467
+ )
468
+ concs.index = idx
469
+ fluxes.index = idx
470
+ return SteadyStates(concs, fluxes, parameters=parameters)
471
+
472
+
473
+ def time_course(
474
+ model: Model,
475
+ parameters: pd.DataFrame,
476
+ time_points: Array,
477
+ y0: dict[str, float] | None = None,
478
+ *,
479
+ parallel: bool = True,
480
+ cache: Cache | None = None,
481
+ worker: TimeCourseWorker = _time_course_worker,
482
+ ) -> TimeCourseByPars:
483
+ """Get time course for each supplied parameter.
484
+
485
+ Examples:
486
+ >>> time_course(
487
+ ...     model,
488
+ ...     parameters=pd.DataFrame({"k1": [1, 1.5, 2]}),
489
+ ...     time_points=np.linspace(0, 1, 3)
490
+ ... ).concs
491
+
492
+ | (n, time) | x | y |
493
+ |:----------|---------:|--------:|
494
+ | (0, 0.0) | 1 | 1 |
495
+ | (0, 0.5) | 0.68394 | 1.23865 |
496
+ | (0, 1.0) | 0.567668 | 1.23254 |
497
+ | (1, 0.0) | 1 | 1 |
498
+ | (1, 0.5) | 0.84197 | 1.31606 |
499
+ | (1, 1.0) | 0.783834 | 1.43233 |
500
+ | (2, 0.0) | 1 | 1 |
501
+ | (2, 0.5) | 1 | 1.39347 |
502
+ | (2, 1.0) | 1 | 1.63212 |
503
+
504
+ >>> time_course(
505
+ ...     model,
506
+ ...     parameters=cartesian_product({"k1": [1, 2], "k2": [3, 4]}),
507
+ ...     time_points=[0.0, 0.5, 1.0],
508
+ ... ).concs
509
+
510
+ | (n, time) | x | y |
511
+ |:----------|---------:|-------:|
512
+ | (0, 0.0) | 1 | 1 |
513
+ | (0, 0.5) | 0.482087 | 1.3834 |
514
+ | (1, 0.0) | 1 | 1 |
515
+ | (1, 0.5) | 0.351501 | 1.4712 |
516
+ | (2, 0.0) | 1 | 1 |
517
+
518
+ Args:
519
+ model: Model instance to simulate.
520
+ parameters: DataFrame containing parameter values to scan.
521
+ time_points: Array of time points for the simulation.
522
+ y0: Initial conditions as a dictionary {variable: value}.
523
+ cache: Optional cache to store and retrieve results.
524
+ parallel: Whether to execute in parallel (default: True).
525
+ worker: Worker function to use for the simulation.
526
+
527
+ Returns:
528
+ TimeCourseByPars: Time series results for each parameter set.
529
+
530
+
531
+ """
532
+ res = parallelise(
533
+ partial(
534
+ _update_parameters_and,
535
+ fn=partial(
536
+ worker,
537
+ time_points=time_points,
538
+ y0=y0,
539
+ ),
540
+ model=model,
541
+ ),
542
+ inputs=list(parameters.iterrows()),
543
+ cache=cache,
544
+ parallel=parallel,
545
+ )
546
+ concs = cast(dict, {k: v.concs for k, v in res.items()})
547
+ fluxes = cast(dict, {k: v.fluxes for k, v in res.items()})
548
+ return TimeCourseByPars(
549
+ parameters=parameters,
550
+ concs=pd.concat(concs, names=["n", "time"]),
551
+ fluxes=pd.concat(fluxes, names=["n", "time"]),
552
+ )
553
+
554
+
555
+ def time_course_over_protocol(
556
+ model: Model,
557
+ parameters: pd.DataFrame,
558
+ protocol: pd.DataFrame,
559
+ time_points_per_step: int = 10,
560
+ y0: dict[str, float] | None = None,
561
+ *,
562
+ parallel: bool = True,
563
+ cache: Cache | None = None,
564
+ worker: ProtocolWorker = _protocol_worker,
565
+ ) -> ProtocolByPars:
566
+ """Get protocol series for each supplied parameter.
567
+
568
+ Examples:
569
+ >>> scan.time_course_over_protocol(
570
+ ... model,
571
+ ... parameters=pd.DataFrame({"k2": np.linspace(1, 2, 11)}),
572
+ ... protocol=make_protocol(
573
+ ... {
574
+ ... 1: {"k1": 1},
575
+ ... 2: {"k1": 2},
576
+ ... }
577
+ ... ),
578
+ ... )
579
+
580
+ Args:
581
+ model: Model instance to simulate.
582
+ parameters: DataFrame containing parameter values to scan.
583
+ protocol: Protocol to follow for the simulation.
584
+ time_points_per_step: Number of time points per protocol step (default: 10).
585
+ y0: Initial conditions as a dictionary {variable: value}.
586
+ parallel: Whether to execute in parallel (default: True).
587
+ cache: Optional cache to store and retrieve results.
588
+ worker: Worker function to use for the simulation.
589
+
590
+ Returns:
591
+ ProtocolByPars: Protocol series results for each parameter set.
592
+
593
+ """
594
+ res = parallelise(
595
+ partial(
596
+ _update_parameters_and,
597
+ fn=partial(
598
+ worker,
599
+ protocol=protocol,
600
+ y0=y0,
601
+ time_points_per_step=time_points_per_step,
602
+ ),
603
+ model=model,
604
+ ),
605
+ inputs=list(parameters.iterrows()),
606
+ cache=cache,
607
+ parallel=parallel,
608
+ )
609
+ concs = cast(dict, {k: v.concs for k, v in res.items()})
610
+ fluxes = cast(dict, {k: v.fluxes for k, v in res.items()})
611
+ return ProtocolByPars(
612
+ parameters=parameters,
613
+ protocol=protocol,
614
+ concs=pd.concat(concs, names=["n", "time"]),
615
+ fluxes=pd.concat(fluxes, names=["n", "time"]),
616
+ )
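For orientation, a minimal usage sketch of the scan API added above, assembled from the docstring examples; `model` is assumed to be an already-constructed `modelbase2.model.Model` with parameters named "k1", and is not defined here.

import numpy as np
import pandas as pd

from modelbase2.scan import steady_state, time_course

# Steady state for three values of "k1"; one result row per parameter value
ss = steady_state(model, parameters=pd.DataFrame({"k1": np.linspace(1, 2, 3)}))
print(ss.concs)

# Time course for the same parameter values; result is indexed by (n, time)
tc = time_course(
    model,
    parameters=pd.DataFrame({"k1": [1.0, 1.5, 2.0]}),
    time_points=np.linspace(0, 1, 3),
)
print(tc.concs)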
modelbase2/scope.py ADDED
@@ -0,0 +1,96 @@
1
+ """Label Scope Module.
2
+
3
+ This module provides functions for creating and managing label scopes in metabolic models.
4
+ It includes functionality for initializing label scopes and retrieving reachable label positions.
5
+
6
+ Functions:
7
+ get_label_scope: Return all label positions that can be reached step by step.
8
+ """
9
+
10
+ from __future__ import annotations
11
+
12
+ # def _create_label_scope_seed(
13
+ # self, *, initial_labels: dict[str, int] | dict[str, list[int]]
14
+ # ) -> dict[str, int]:
15
+ # """Create initial label scope seed."""
16
+ # # initialise all compounds with 0 (no label)
17
+ # labelled_compounds = {compound: 0 for compound in self.get_compounds()}
18
+ # # Set all unlabelled compounds to 1
19
+ # for name, compound in self.label_compounds.items():
20
+ # num_labels = compound["num_labels"]
21
+ # labelled_compounds[f"{name}__{'0' * num_labels}"] = 1
22
+ # # Also set all non-label compounds to 1
23
+ # for name in self.nonlabel_compounds:
24
+ # labelled_compounds[name] = 1
25
+ # # Set initial label
26
+ # for i in [
27
+ # self.get_compound_isotopomer_with_label_position(
28
+ # base_compound=base_compound, label_position=label_position
29
+ # )
30
+ # for base_compound, label_position in initial_labels.items()
31
+ # ]:
32
+ # labelled_compounds[i] = 1
33
+ # return labelled_compounds
34
+
35
+
36
+ # def get_label_scope(
37
+ # self,
38
+ # initial_labels: dict[str, int] | dict[str, list[int]],
39
+ # ) -> dict[int, set[str]]:
40
+ # """Return all label positions that can be reached step by step.
41
+
42
+ # Parameters:
43
+ # initial_labels : dict(str: num)
44
+
45
+ # Returns:
46
+ # label_scope : dict{step : set of new positions}
47
+
48
+ # Examples:
49
+ # >>> l.get_label_scope({"x": 0})
50
+ # >>> l.get_label_scope({"x": [0, 1], "y": 0})
51
+
52
+ # """
53
+ # labelled_compounds = self._create_label_scope_seed(initial_labels=initial_labels)
54
+ # new_labels = set("non empty entry to not fulfill while condition")
55
+ # # Loop until no new labels are inserted
56
+ # loop_count = 0
57
+ # result = {}
58
+ # while new_labels != set():
59
+ # new_cpds = labelled_compounds.copy()
60
+ # for rec, cpd_dict in self.get_stoichiometries().items():
61
+ # # Isolate substrates
62
+ # cpds = [i for i, j in cpd_dict.items() if j < 0]
63
+ # # Count how many of the substrates are 1
64
+ # i = 0
65
+ # for j in cpds:
66
+ # i += labelled_compounds[j]
67
+ # # If all substrates are 1, set all products to 1
68
+ # if i == len(cpds):
69
+ # for cpd in self.get_stoichiometries()[rec]:
70
+ # new_cpds[cpd] = 1
71
+ # if self.rates[rec]["reversible"]:
72
+ # # Isolate substrates
73
+ # cpds = [i for i, j in cpd_dict.items() if j > 0]
74
+ # # Count how many of the substrates are 1
75
+ # i = 0
76
+ # for j in cpds:
77
+ # i += labelled_compounds[j]
78
+ # # If all substrates are 1, set all products to 1
79
+ # if i == len(cpds):
80
+ # for cpd in self.get_stoichiometries()[rec]:
81
+ # new_cpds[cpd] = 1
82
+ # # Isolate "old" labels
83
+ # s1 = pd.Series(labelled_compounds)
84
+ # s1 = cast(pd.Series, s1[s1 == 1])
85
+ # # Isolate new labels
86
+ # s2 = pd.Series(new_cpds)
87
+ # s2 = cast(pd.Series, s2[s2 == 1])
88
+ # # Find new labels
89
+ # new_labels = cast(set[str], set(s2.index).difference(set(s1.index)))
90
+ # # Break the loop once no new labels can be found
91
+ # if new_labels == set():
92
+ # break
93
+ # labelled_compounds = new_cpds
94
+ # result[loop_count] = new_labels
95
+ # loop_count += 1
96
+ # return result
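The commented-out code above outlines a reachability loop over the model stoichiometries. The following self-contained sketch illustrates the same idea on a hypothetical stoichiometry mapping; the names and data structure are assumptions, not the modelbase2 API, and reversible reactions are ignored for brevity.

# Self-contained sketch of the label-scope reachability loop outlined above.
# The stoichiometry dict and seed set are hypothetical stand-ins for the
# Model data structures; reversibility handling is omitted.
def label_scope(
    stoichiometries: dict[str, dict[str, float]],
    labelled: set[str],
) -> dict[int, set[str]]:
    """Return, per iteration, the compounds that newly become labelled."""
    result: dict[int, set[str]] = {}
    step = 0
    while True:
        new_labels: set[str] = set()
        for cpd_dict in stoichiometries.values():
            substrates = [c for c, coef in cpd_dict.items() if coef < 0]
            # A reaction propagates the label only if all substrates carry it
            if all(s in labelled for s in substrates):
                new_labels |= set(cpd_dict) - labelled
        if not new_labels:
            return result
        labelled |= new_labels
        result[step] = new_labels
        step += 1


# Example: a label on "a" reaches "b" in step 0 and "c" in step 1
print(label_scope(
    {"v1": {"a": -1, "b": 1}, "v2": {"b": -1, "c": 1}},
    labelled={"a"},
))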