mxlpy 0.17.0__py3-none-any.whl → 0.19.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mxlpy/__init__.py CHANGED
@@ -48,18 +48,15 @@ from . import (
     fns,
     mc,
     mca,
-    npe,
     plot,
     report,
     sbml,
-    surrogates,
 )
 from .integrators import DefaultIntegrator, Scipy
 from .label_map import LabelMapper
 from .linear_label_map import LinearLabelMapper
 from .mc import Cache
 from .model import Model
-from .nn import _torch
 from .scan import (
     steady_state,
     time_course,
@@ -75,6 +72,19 @@ with contextlib.suppress(ImportError):
 if TYPE_CHECKING:
     from mxlpy.types import ArrayLike

+    from . import (
+        nn,
+        npe,
+        surrogates,
+    )
+else:
+    from lazy_import import lazy_module
+
+    nn = lazy_module("mxlpy.nn")
+    npe = lazy_module("mxlpy.npe")
+    surrogates = lazy_module("mxlpy.surrogates")
+
+
 __all__ = [
     "Assimulo",
     "Cache",
@@ -87,7 +97,6 @@ __all__ = [
     "Scipy",
     "Simulator",
     "SymbolicModel",
-    "_torch",
     "cartesian_product",
     "distributions",
     "experimental",
@@ -96,6 +105,7 @@ __all__ = [
     "make_protocol",
     "mc",
     "mca",
+    "nn",
     "npe",
     "plot",
     "report",
@@ -181,7 +181,7 @@ def model_diff(m1: Model, m2: Model) -> ModelDiff:
         if (v2 := m2._variables.get(k)) is None:  # noqa: SLF001
             diff.missing_variables.add(k)
         elif v1 != v2:
-            diff.different_variables[k] = (v1, v2)
+            diff.different_variables[k] = (v1, v2)  # type: ignore

     for k, v1 in m1._readouts.items():  # noqa: SLF001
         if (v2 := m2._readouts.get(k)) is None:  # noqa: SLF001
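The hunks above replace the eager imports of the torch-backed submodules with proxies built by lazy_import.lazy_module, so mxlpy.nn, mxlpy.npe, and mxlpy.surrogates are only materialised when actually used. A minimal sketch of what this looks like from the consumer side (nothing below is part of the package itself):

import mxlpy

# With lazy_import, these names are module proxies in the mxlpy namespace;
# the heavy (torch-backed) submodules are only loaded once their attributes
# are first accessed.
nn_proxy = mxlpy.nn
npe_proxy = mxlpy.npe
surrogates_proxy = mxlpy.surrogates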
mxlpy/fit.py CHANGED
@@ -28,12 +28,16 @@ from mxlpy.types import (

 __all__ = [
     "InitialGuess",
+    "LossFn",
     "MinimizeFn",
+    "ProtocolResidualFn",
     "ResidualFn",
     "SteadyStateResidualFn",
     "TimeSeriesResidualFn",
+    "rmse",
     "steady_state",
     "time_course",
+    "time_course_over_protocol",
 ]

 if TYPE_CHECKING:
@@ -44,6 +48,21 @@ if TYPE_CHECKING:
 type InitialGuess = dict[str, float]
 type ResidualFn = Callable[[Array], float]
 type MinimizeFn = Callable[[ResidualFn, InitialGuess], dict[str, float]]
+type LossFn = Callable[
+    [
+        pd.DataFrame | pd.Series,
+        pd.DataFrame | pd.Series,
+    ],
+    float,
+]
+
+
+def rmse(
+    y_pred: pd.DataFrame | pd.Series,
+    y_true: pd.DataFrame | pd.Series,
+) -> float:
+    """Calculate root mean square error between model and data."""
+    return cast(float, np.sqrt(np.mean(np.square(y_pred - y_true))))


 class SteadyStateResidualFn(Protocol):
@@ -58,6 +77,7 @@ class SteadyStateResidualFn(Protocol):
         model: Model,
         y0: dict[str, float],
         integrator: IntegratorType,
+        loss_fn: LossFn,
     ) -> float:
         """Calculate residual error between model steady state and experimental data."""
         ...
@@ -75,6 +95,27 @@ class TimeSeriesResidualFn(Protocol):
         model: Model,
         y0: dict[str, float],
         integrator: IntegratorType,
+        loss_fn: LossFn,
+    ) -> float:
+        """Calculate residual error between model time course and experimental data."""
+        ...
+
+
+class ProtocolResidualFn(Protocol):
+    """Protocol for time series residual functions."""
+
+    def __call__(
+        self,
+        par_values: Array,
+        # This will be filled out by partial
+        par_names: list[str],
+        data: pd.DataFrame,
+        model: Model,
+        y0: dict[str, float],
+        integrator: IntegratorType,
+        loss_fn: LossFn,
+        protocol: pd.DataFrame,
+        time_points_per_step: int = 10,
     ) -> float:
         """Calculate residual error between model time course and experimental data."""
         ...
@@ -109,6 +150,7 @@ def _steady_state_residual(
     model: Model,
     y0: dict[str, float] | None,
     integrator: IntegratorType,
+    loss_fn: LossFn,
 ) -> float:
     """Calculate residual error between model steady state and experimental data.

@@ -119,6 +161,7 @@ def _steady_state_residual(
         y0: Initial conditions
         par_names: Names of parameters being fit
         integrator: ODE integrator class to use
+        loss_fn: Loss function to use for residual calculation

     Returns:
         float: Root mean square error between model and data
@@ -143,9 +186,11 @@ def _steady_state_residual(
     )
     if res is None:
         return cast(float, np.inf)
-    results_ss = res.get_combined()
-    diff = data - results_ss.loc[:, data.index]  # type: ignore
-    return cast(float, np.sqrt(np.mean(np.square(diff))))
+
+    return loss_fn(
+        res.get_combined().loc[:, cast(list, data.index)],
+        data,
+    )


 def _time_course_residual(
@@ -156,6 +201,53 @@ def _time_course_residual(
     model: Model,
     y0: dict[str, float] | None,
     integrator: IntegratorType,
+    loss_fn: LossFn,
+) -> float:
+    """Calculate residual error between model time course and experimental data.
+
+    Args:
+        par_values: Parameter values to test
+        data: Experimental time course data
+        model: Model instance to simulate
+        y0: Initial conditions
+        par_names: Names of parameters being fit
+        integrator: ODE integrator class to use
+        loss_fn: Loss function to use for residual calculation
+
+    Returns:
+        float: Root mean square error between model and data
+
+    """
+    res = (
+        Simulator(
+            model.update_parameters(dict(zip(par_names, par_values, strict=True))),
+            y0=y0,
+            integrator=integrator,
+        )
+        .simulate_time_course(cast(list, data.index))
+        .get_result()
+    )
+    if res is None:
+        return cast(float, np.inf)
+    results_ss = res.get_combined()
+
+    return loss_fn(
+        results_ss.loc[:, cast(list, data.columns)],
+        data,
+    )
+
+
+def _protocol_residual(
+    par_values: ArrayLike,
+    # This will be filled out by partial
+    par_names: list[str],
+    data: pd.DataFrame,
+    model: Model,
+    y0: dict[str, float] | None,
+    integrator: IntegratorType,
+    loss_fn: LossFn,
+    protocol: pd.DataFrame,
+    time_points_per_step: int = 10,
 ) -> float:
     """Calculate residual error between model time course and experimental data.

@@ -166,6 +258,9 @@ def _time_course_residual(
         y0: Initial conditions
         par_names: Names of parameters being fit
         integrator: ODE integrator class to use
+        loss_fn: Loss function to use for residual calculation
+        protocol: Experimental protocol
+        time_points_per_step: Number of time points per step in the protocol

     Returns:
         float: Root mean square error between model and data
@@ -177,14 +272,20 @@ def _time_course_residual(
             y0=y0,
             integrator=integrator,
         )
-        .simulate_time_course(data.index)  # type: ignore
+        .simulate_over_protocol(
+            protocol=protocol,
+            time_points_per_step=time_points_per_step,
+        )
         .get_result()
     )
     if res is None:
         return cast(float, np.inf)
     results_ss = res.get_combined()
-    diff = data - results_ss.loc[:, data.columns]  # type: ignore
-    return cast(float, np.sqrt(np.mean(np.square(diff))))
+
+    return loss_fn(
+        results_ss.loc[:, cast(list, data.columns)],
+        data,
+    )


 def steady_state(
@@ -195,6 +296,7 @@ def steady_state(
     minimize_fn: MinimizeFn = _default_minimize_fn,
     residual_fn: SteadyStateResidualFn = _steady_state_residual,
     integrator: IntegratorType = DefaultIntegrator,
+    loss_fn: LossFn = rmse,
 ) -> dict[str, float]:
     """Fit model parameters to steady-state experimental data.

@@ -210,6 +312,7 @@ def steady_state(
         minimize_fn: Function to minimize fitting error
         residual_fn: Function to calculate fitting error
         integrator: ODE integrator class
+        loss_fn: Loss function to use for residual calculation

     Returns:
         dict[str, float]: Fitted parameters as {parameter_name: fitted_value}
@@ -232,6 +335,7 @@ def steady_state(
             y0=y0,
             par_names=par_names,
             integrator=integrator,
+            loss_fn=loss_fn,
         ),
     )
     res = minimize_fn(fn, p0)
@@ -249,6 +353,62 @@ def time_course(
     minimize_fn: MinimizeFn = _default_minimize_fn,
     residual_fn: TimeSeriesResidualFn = _time_course_residual,
     integrator: IntegratorType = DefaultIntegrator,
+    loss_fn: LossFn = rmse,
+) -> dict[str, float]:
+    """Fit model parameters to time course of experimental data.
+
+    Examples:
+        >>> time_course(model, p0, data)
+        {'k1': 0.1, 'k2': 0.2}
+
+    Args:
+        model: Model instance to fit
+        data: Experimental time course data
+        p0: Initial parameter guesses as {parameter_name: value}
+        y0: Initial conditions as {species_name: value}
+        minimize_fn: Function to minimize fitting error
+        residual_fn: Function to calculate fitting error
+        integrator: ODE integrator class
+        loss_fn: Loss function to use for residual calculation
+
+    Returns:
+        dict[str, float]: Fitted parameters as {parameter_name: fitted_value}
+
+    Note:
+        Uses L-BFGS-B optimization with bounds [1e-12, 1e6] for all parameters
+
+    """
+    par_names = list(p0.keys())
+    p_orig = model.parameters
+
+    fn = cast(
+        ResidualFn,
+        partial(
+            residual_fn,
+            data=data,
+            model=model,
+            y0=y0,
+            par_names=par_names,
+            integrator=integrator,
+            loss_fn=loss_fn,
+        ),
+    )
+    res = minimize_fn(fn, p0)
+    model.update_parameters(p_orig)
+    return res
+
+
+def time_course_over_protocol(
+    model: Model,
+    p0: dict[str, float],
+    data: pd.DataFrame,
+    protocol: pd.DataFrame,
+    y0: dict[str, float] | None = None,
+    minimize_fn: MinimizeFn = _default_minimize_fn,
+    residual_fn: ProtocolResidualFn = _protocol_residual,
+    integrator: IntegratorType = DefaultIntegrator,
+    loss_fn: LossFn = rmse,
+    time_points_per_step: int = 10,
 ) -> dict[str, float]:
     """Fit model parameters to time course of experimental data.

@@ -258,12 +418,15 @@ def time_course(

     Args:
         model: Model instance to fit
-        data: Experimental time course data as pandas DataFrame
         p0: Initial parameter guesses as {parameter_name: value}
+        data: Experimental time course data
+        protocol: Experimental protocol
         y0: Initial conditions as {species_name: value}
         minimize_fn: Function to minimize fitting error
         residual_fn: Function to calculate fitting error
         integrator: ODE integrator class
+        loss_fn: Loss function to use for residual calculation
+        time_points_per_step: Number of time points per step in the protocol

     Returns:
         dict[str, float]: Fitted parameters as {parameter_name: fitted_value}
@@ -284,6 +447,9 @@ def time_course(
             y0=y0,
             par_names=par_names,
             integrator=integrator,
+            loss_fn=loss_fn,
+            protocol=protocol,
+            time_points_per_step=time_points_per_step,
         ),
     )
     res = minimize_fn(fn, p0)
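The fit.py changes thread a pluggable loss_fn (defaulting to the new rmse) through every residual function and add time_course_over_protocol for fitting against a stepwise protocol. A minimal usage sketch, assuming a model, a data DataFrame, and a protocol DataFrame already exist (they are placeholders, not provided by this diff):

import numpy as np
import pandas as pd

from mxlpy import fit


def mae(y_pred: pd.DataFrame | pd.Series, y_true: pd.DataFrame | pd.Series) -> float:
    """Mean absolute error; any callable matching the LossFn signature can be passed."""
    return float(np.mean(np.abs(y_pred - y_true)))


# Custom losses plug into the existing entry points via the new keyword:
# fitted = fit.time_course(model, p0={"k1": 1.0}, data=data, loss_fn=mae)

# Fitting against a stepwise protocol uses the new entry point; the protocol
# DataFrame is forwarded to Simulator.simulate_over_protocol:
# fitted = fit.time_course_over_protocol(
#     model,
#     p0={"k1": 1.0},
#     data=data,
#     protocol=protocol,
#     time_points_per_step=10,
# )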
mxlpy/identify.py CHANGED
@@ -19,6 +19,7 @@ def _mc_fit_time_course_worker(
     p0: pd.Series,
     model: Model,
     data: pd.DataFrame,
+    loss_fn: fit.LossFn,
 ) -> float:
     p_fit = fit.time_course(model=model, p0=p0.to_dict(), data=data)
     return fit._time_course_residual(  # noqa: SLF001
@@ -28,6 +29,7 @@ def _mc_fit_time_course_worker(
         model=model,
         y0=None,
         integrator=fit.DefaultIntegrator,
+        loss_fn=loss_fn,
     )


@@ -37,6 +39,7 @@ def profile_likelihood(
     parameter_name: str,
     parameter_values: Array,
     n_random: int = 10,
+    loss_fn: fit.LossFn = fit.rmse,
 ) -> pd.Series:
     """Estimate the profile likelihood of model parameters given data.

@@ -46,6 +49,7 @@ def profile_likelihood(
         parameter_name: The name of the parameter to profile.
         parameter_values: The values of the parameter to profile.
         n_random: Number of Monte Carlo samples.
+        loss_fn: Loss function to use for fitting.

     """
     parameter_distributions = sample(
@@ -57,7 +61,9 @@ def profile_likelihood(
     for value in tqdm(parameter_values, desc=parameter_name):
         model.update_parameter(parameter_name, value)
         res[value] = parallelise(
-            partial(_mc_fit_time_course_worker, model=model, data=data),
+            partial(
+                _mc_fit_time_course_worker, model=model, data=data, loss_fn=loss_fn
+            ),
             inputs=list(
                 parameter_distributions.drop(columns=parameter_name).iterrows()
             ),
@@ -97,7 +97,11 @@ class Assimulo:
         if steps is None:
             steps = 0
         try:
-            return self.integrator.simulate(t_end, steps)  # type: ignore
+            t, y = self.integrator.simulate(t_end, steps)
+            return (
+                np.atleast_1d(np.array(t, dtype=float)),
+                np.atleast_2d(np.array(y, dtype=float)),
+            )
         except CVodeError:
             return None, None

@@ -116,8 +120,11 @@

         """
         try:
-            t, y = self.integrator.simulate(time_points[-1], 0, time_points)  # type: ignore
-            return np.array(t, dtype=float), np.array(y, dtype=float)
+            t, y = self.integrator.simulate(time_points[-1], 0, time_points)
+            return (
+                np.atleast_1d(np.array(t, dtype=float)),
+                np.atleast_2d(np.array(y, dtype=float)),
+            )
         except CVodeError:
             return None, None

@@ -108,10 +108,14 @@ class Scipy:
             rtol=self.rtol,
             method="LSODA",
         )
+
         if res.success:
-            self.t0 = time_points[-1]
-            self.y0 = res.y[:, -1]
-            return np.array(time_points, dtype=float), res.y.T
+            t = np.atleast_1d(np.array(res.t, dtype=float))
+            y = np.atleast_2d(np.array(res.y, dtype=float).T)
+
+            self.t0 = t[-1]
+            self.y0 = y[-1]
+            return t, y
         return None, None

     def integrate_to_steady_state(
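Both integrator backends now normalise their return values with np.atleast_1d/np.atleast_2d, which presumably guards downstream code against results collapsing to scalars or 1-D arrays when only a single time point is produced. A small, self-contained illustration of the shape guarantee (not an mxlpy API call):

import numpy as np

# A single time point would otherwise come back 0-D / 1-D:
t = np.atleast_1d(np.array(2.5, dtype=float))          # shape (1,)
y = np.atleast_2d(np.array([0.1, 0.9], dtype=float))   # shape (1, 2)
assert t.shape == (1,) and y.shape == (1, 2)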
mxlpy/label_map.py CHANGED
@@ -29,6 +29,8 @@ __all__ = ["LabelMapper"]
 if TYPE_CHECKING:
     from collections.abc import Callable, Mapping

+    from mxlpy.types import Derived
+

 def _total_concentration(*args: float) -> float:
     """Calculate sum of isotopomer concentrations.
@@ -552,7 +554,7 @@ class LabelMapper:
         for name, dp in self.model.derived_parameters.items():
             m.add_derived(name, fn=dp.fn, args=dp.args)

-        variables: dict[str, float] = {}
+        variables: dict[str, float | Derived] = {}
         for k, v in self.model.variables.items():
             if (isos := isotopomers.get(k)) is None:
                 variables[k] = v
@@ -694,7 +694,7 @@ class TexExport:
 def _to_tex_export(self: Model) -> TexExport:
     return TexExport(
         parameters=self.parameters,
-        variables=self.variables,
+        variables=self.get_initial_conditions(),  # FIXME: think about this later
         derived=self.derived,
         reactions={k: TexReaction(v.fn, v.args) for k, v in self.reactions.items()},
         stoichiometries={k: v.stoichiometry for k, v in self.reactions.items()},
@@ -434,5 +434,5 @@ def _handle_call(node: ast.Call, ctx: Context) -> sympy.Expr:
             ctx=ctx.updated(parent_module=imports[module_name.id]),
         )

-    msg = f"Onsupported function type {node.func}"
+    msg = f"Unsupported function type {node.func}"
     raise NotImplementedError(msg)