mxlpy 0.16.0__py3-none-any.whl → 0.17.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mxlpy/model.py CHANGED
@@ -18,6 +18,7 @@ import pandas as pd
 
 from mxlpy import fns
 from mxlpy.types import (
+    AbstractSurrogate,
     Array,
     Derived,
     Reaction,
@@ -27,6 +28,7 @@ from mxlpy.types import (
 __all__ = [
     "ArityMismatchError",
     "CircularDependencyError",
+    "Dependency",
     "MissingDependenciesError",
     "Model",
     "ModelCache",
@@ -36,7 +38,16 @@ if TYPE_CHECKING:
     from collections.abc import Iterable, Mapping
     from inspect import FullArgSpec
 
-    from mxlpy.types import AbstractSurrogate, Callable, Param, RateFn, RetType
+    from mxlpy.types import Callable, Param, RateFn, RetType
+
+
+@dataclass
+class Dependency:
+    """Container class for building dependency tree."""
+
+    name: str
+    required: set[str]
+    provided: set[str]
 
 
 class MissingDependenciesError(Exception):
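The new Dependency container generalizes the previous (name, args) tuples: an element now declares both the names it requires and the names it provides, so a surrogate that produces several outputs can make all of them available to downstream elements. A minimal sketch (illustrative names only):

    # A reaction provides exactly its own name; a surrogate can provide
    # several output names at once.
    reaction_dep = Dependency(name="v1", required={"S", "k1"}, provided={"v1"})
    surrogate_dep = Dependency(
        name="surrogate_1",
        required={"S", "time"},
        provided={"flux_a", "flux_b"},
    )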
@@ -145,30 +156,33 @@ def _invalidate_cache(method: Callable[Param, RetType]) -> Callable[Param, RetTy
 
 def _check_if_is_sortable(
     available: set[str],
-    elements: list[tuple[str, set[str]]],
+    elements: list[Dependency],
 ) -> None:
     all_available = available.copy()
-    for name, _ in elements:
-        all_available.add(name)
+    for dependency in elements:
+        all_available.update(dependency.provided)
 
     # Check if it can be sorted in the first place
     not_solvable = {}
-    for name, args in elements:
-        if not args.issubset(all_available):
-            not_solvable[name] = sorted(args.difference(all_available))
+    for dependency in elements:
+        if not dependency.required.issubset(all_available):
+            not_solvable[dependency.name] = sorted(
+                dependency.required.difference(all_available)
+            )
 
     if not_solvable:
         raise MissingDependenciesError(not_solvable=not_solvable)
 
 
 def _sort_dependencies(
-    available: set[str], elements: list[tuple[str, set[str]]]
+    available: set[str],
+    elements: list[Dependency],
 ) -> list[str]:
     """Sort model elements topologically based on their dependencies.
 
     Args:
         available: Set of available component names
-        elements: List of (name, dependencies) tuples to sort
+        elements: List of Dependency objects to sort
 
     Returns:
         List of element names in dependency order
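_check_if_is_sortable now collects every provided name before testing solvability, so an element is only reported as unsolvable when one of its required names can never appear. A small sketch (hypothetical names, using the definitions above):

    # "v1" needs "k2", but only "S" is available and no element provides "k2".
    deps = [Dependency(name="v1", required={"S", "k2"}, provided={"v1"})]
    try:
        _check_if_is_sortable(available={"S"}, elements=deps)
    except MissingDependenciesError:
        pass  # raised with not_solvable == {"v1": ["k2"]}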
@@ -184,26 +198,27 @@ def _sort_dependencies(
     order = []
     # FIXME: what is the worst case here?
     max_iterations = len(elements) ** 2
-    queue: SimpleQueue[tuple[str, set[str]]] = SimpleQueue()
-    for k, v in elements:
-        queue.put((k, v))
+    queue: SimpleQueue[Dependency] = SimpleQueue()
+    for dependency in elements:
+        queue.put(dependency)
 
     last_name = None
     i = 0
     while True:
         try:
-            new, args = queue.get_nowait()
+            dependency = queue.get_nowait()
         except Empty:
             break
-        if args.issubset(available):
-            available.add(new)
-            order.append(new)
+        if dependency.required.issubset(available):
+            available.update(dependency.provided)
+            order.append(dependency.name)
+
         else:
-            if last_name == new:
-                order.append(new)
+            if last_name == dependency.name:
+                order.append(last_name)
                 break
-            queue.put((new, args))
-            last_name = new
+            queue.put(dependency)
+            last_name = dependency.name
         i += 1
 
     # Failure case
@@ -211,11 +226,13 @@ def _sort_dependencies(
         unsorted = []
         while True:
             try:
-                unsorted.append(queue.get_nowait()[0])
+                unsorted.append(queue.get_nowait().name)
             except Empty:
                 break
 
-        mod_to_args: dict[str, set[str]] = dict(elements)
+        mod_to_args: dict[str, set[str]] = {
+            dependency.name: dependency.required for dependency in elements
+        }
         missing = {k: mod_to_args[k].difference(available) for k in unsorted}
         raise CircularDependencyError(missing=missing)
     return order
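The sort itself is a queue-based topological sort: a Dependency whose requirements are already available is appended to the order and its provided names become available; anything else is re-queued, with the last_name and max_iterations guards detecting cycles. A sketch of the expected behaviour (hypothetical names):

    # "v1" depends on "d1", so "d1" is emitted first even though it is queued second.
    order = _sort_dependencies(
        available={"S", "time"},
        elements=[
            Dependency(name="v1", required={"d1"}, provided={"v1"}),
            Dependency(name="d1", required={"S"}, provided={"d1"}),
        ],
    )
    assert order == ["d1", "v1"]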
@@ -303,7 +320,12 @@ class Model:
         to_sort = self._derived | self._reactions | self._surrogates
         order = _sort_dependencies(
             available=set(self._parameters) | set(self._variables) | {"time"},
-            elements=[(k, set(v.args)) for k, v in to_sort.items()],
+            elements=[
+                Dependency(name=k, required=set(v.args), provided={k})
+                if not isinstance(v, AbstractSurrogate)
+                else Dependency(name=k, required=set(v.args), provided=set(v.outputs))
+                for k, v in to_sort.items()
+            ],
         )
 
         # Split derived into parameters and variables
@@ -1227,6 +1249,7 @@ class Model:
         name: str,
         surrogate: AbstractSurrogate,
         args: list[str] | None = None,
+        outputs: list[str] | None = None,
         stoichiometries: dict[str, dict[str, float]] | None = None,
     ) -> Self:
         """Adds a surrogate model to the current instance.
@@ -1237,7 +1260,8 @@ class Model:
         Args:
             name (str): The name of the surrogate model.
             surrogate (AbstractSurrogate): The surrogate model instance to be added.
-            args: A list of arguments for the surrogate model.
+            args: Names of the values passed to the surrogate model.
+            outputs: Names of the values produced by the surrogate model.
             stoichiometries: A dictionary mapping reaction names to stoichiometries.
 
         Returns:
@@ -1248,6 +1272,8 @@ class Model:
 
         if args is not None:
             surrogate.args = args
+        if outputs is not None:
+            surrogate.outputs = outputs
         if stoichiometries is not None:
            surrogate.stoichiometries = stoichiometries
 
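Taken together, callers can now name both the values a surrogate consumes and the values it produces; the outputs feed the provided sets used for dependency sorting above. A hypothetical call (my_surrogate stands in for any AbstractSurrogate instance):

    model.add_surrogate(
        "surrogate_1",
        surrogate=my_surrogate,
        args=["S", "time"],            # values passed to the surrogate
        outputs=["flux_a", "flux_b"],  # values the surrogate provides
    )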
mxlpy/npe/__init__.py ADDED
@@ -0,0 +1,38 @@
+"""Neural Process Estimation (NPE) module.
+
+This module provides classes and functions for estimating metabolic processes using
+neural networks. It includes functionality for both steady-state and time-course data.
+
+Classes:
+    TorchSteadyState: Class for steady-state neural network estimation.
+    TorchSteadyStateTrainer: Class for training steady-state neural networks.
+    TorchTimeCourse: Class for time-course neural network estimation.
+    TorchTimeCourseTrainer: Class for training time-course neural networks.
+
+Functions:
+    train_torch_steady_state: Train a PyTorch steady-state neural network.
+    train_torch_time_course: Train a PyTorch time-course neural network.
+"""
+
+from __future__ import annotations
+
+import contextlib
+
+with contextlib.suppress(ImportError):
+    from ._torch import (
+        TorchSteadyState,
+        TorchSteadyStateTrainer,
+        TorchTimeCourse,
+        TorchTimeCourseTrainer,
+        train_torch_steady_state,
+        train_torch_time_course,
+    )
+
+__all__ = [
+    "TorchSteadyState",
+    "TorchSteadyStateTrainer",
+    "TorchTimeCourse",
+    "TorchTimeCourseTrainer",
+    "train_torch_steady_state",
+    "train_torch_time_course",
+]
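Because the torch-backed imports are wrapped in contextlib.suppress(ImportError), mxlpy.npe imports cleanly without PyTorch but only exposes these names when torch is installed. A usage sketch:

    # Fails with ImportError only if PyTorch is missing; the mxlpy.npe
    # package itself always imports.
    from mxlpy.npe import TorchSteadyStateTrainer, train_torch_steady_state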
mxlpy/npe/_torch.py ADDED
@@ -0,0 +1,436 @@
+"""Neural Network Parameter Estimation (NPE) Module.
+
+This module provides classes and functions for training neural network models to estimate
+parameters in metabolic models. It includes functionality for both steady-state and
+time-series data.
+
+Functions:
+    train_torch_steady_state: Train a PyTorch steady-state estimator
+    train_torch_time_course: Train a PyTorch time-course estimator
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from pathlib import Path
+from typing import TYPE_CHECKING, Self, cast
+
+import numpy as np
+import pandas as pd
+import torch
+import tqdm
+from torch import nn
+from torch.optim.adam import Adam
+
+from mxlpy.nn._torch import LSTM, MLP, DefaultDevice
+from mxlpy.parallel import Cache
+from mxlpy.types import AbstractEstimator
+
+if TYPE_CHECKING:
+    from collections.abc import Callable
+
+    from torch.optim.optimizer import ParamsT
+
+DefaultCache = Cache(Path(".cache"))
+
+type LossFn = Callable[[torch.Tensor, torch.Tensor], torch.Tensor]
+
+__all__ = [
+    "DefaultCache",
+    "LossFn",
+    "TorchSteadyState",
+    "TorchSteadyStateTrainer",
+    "TorchTimeCourse",
+    "TorchTimeCourseTrainer",
+    "train_torch_steady_state",
+    "train_torch_time_course",
+]
+
+
+def _mean_abs(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
+    """Standard loss for surrogates.
+
+    Args:
+        x: Predictions of a model.
+        y: Targets.
+
+    Returns:
+        torch.Tensor: loss.
+
+    """
+    return torch.mean(torch.abs(x - y))
+
+
+@dataclass(kw_only=True)
+class TorchSteadyState(AbstractEstimator):
+    """Estimator for steady state data using PyTorch models."""
+
+    model: torch.nn.Module
+
+    def predict(self, features: pd.Series | pd.DataFrame) -> pd.DataFrame:
+        """Predict the target values for the given features."""
+        with torch.no_grad():
+            pred = self.model(torch.tensor(features.to_numpy(), dtype=torch.float32))
+        return pd.DataFrame(pred, columns=self.parameter_names)
+
+
+@dataclass(kw_only=True)
+class TorchTimeCourse(AbstractEstimator):
+    """Estimator for time course data using PyTorch models."""
+
+    model: torch.nn.Module
+
+    def predict(self, features: pd.Series | pd.DataFrame) -> pd.DataFrame:
+        """Predict the target values for the given features."""
+        idx = cast(pd.MultiIndex, features.index)
+        features_ = torch.Tensor(
+            np.swapaxes(
+                features.to_numpy().reshape(
+                    (
+                        len(idx.levels[0]),
+                        len(idx.levels[1]),
+                        len(features.columns),
+                    )
+                ),
+                axis1=0,
+                axis2=1,
+            ),
+        )
+        with torch.no_grad():
+            pred = self.model(features_)
+        return pd.DataFrame(pred, columns=self.parameter_names)
+
+
+@dataclass
+class TorchSteadyStateTrainer:
+    """Trainer for steady state data using PyTorch models."""
+
+    features: pd.DataFrame
+    targets: pd.DataFrame
+    approximator: nn.Module
+    optimimzer: Adam
+    device: torch.device
+    losses: list[pd.Series]
+    loss_fn: LossFn
+
+    def __init__(
+        self,
+        features: pd.DataFrame,
+        targets: pd.DataFrame,
+        approximator: nn.Module | None = None,
+        optimimzer_cls: Callable[[ParamsT], Adam] = Adam,
+        device: torch.device = DefaultDevice,
+        loss_fn: LossFn = _mean_abs,
+    ) -> None:
+        """Initialize the trainer with features, targets, and model.
+
+        Args:
+            features: DataFrame containing the input features for training
+            targets: DataFrame containing the target values for training
+            approximator: Predefined neural network model (None to use default MLP)
+            optimimzer_cls: Optimizer class to use for training (default: Adam)
+            device: Device to run the training on (default: DefaultDevice)
+            loss_fn: Loss function
+
+        """
+        self.features = features
+        self.targets = targets
+
+        if approximator is None:
+            n_hidden = max(2 * len(features.columns) * len(targets.columns), 10)
+            n_outputs = len(targets.columns)
+            approximator = MLP(
+                n_inputs=len(features.columns),
+                neurons_per_layer=[n_hidden, n_hidden, n_outputs],
+            )
+        self.approximator = approximator.to(device)
+        self.optimizer = optimimzer_cls(approximator.parameters())
+        self.device = device
+        self.loss_fn = loss_fn
+        self.losses = []
+
+    def train(
+        self,
+        epochs: int,
+        batch_size: int | None = None,
+    ) -> Self:
+        """Train the model using the provided features and targets.
+
+        Args:
+            epochs: Number of training epochs
+            batch_size: Size of mini-batches for training (None for full-batch)
+
+        """
+        features = torch.Tensor(self.features.to_numpy(), device=self.device)
+        targets = torch.Tensor(self.targets.to_numpy(), device=self.device)
+
+        if batch_size is None:
+            losses = _train_full(
+                approximator=self.approximator,
+                features=features,
+                targets=targets,
+                epochs=epochs,
+                optimizer=self.optimizer,
+                loss_fn=self.loss_fn,
+            )
+        else:
+            losses = _train_batched(
+                approximator=self.approximator,
+                features=features,
+                targets=targets,
+                epochs=epochs,
+                optimizer=self.optimizer,
+                batch_size=batch_size,
+                loss_fn=self.loss_fn,
+            )
+
+        if len(self.losses) > 0:
+            losses.index += self.losses[-1].index[-1]
+        self.losses.append(losses)
+        return self
+
+    def get_loss(self) -> pd.Series:
+        """Get the loss history of the training process."""
+        return pd.concat(self.losses)
+
+    def get_estimator(self) -> TorchSteadyState:
+        """Get the trained estimator."""
+        return TorchSteadyState(
+            model=self.approximator,
+            parameter_names=list(self.targets.columns),
+        )
+
+
+@dataclass
+class TorchTimeCourseTrainer:
+    """Trainer for time course data using PyTorch models."""
+
+    features: pd.DataFrame
+    targets: pd.DataFrame
+    approximator: nn.Module
+    optimimzer: Adam
+    device: torch.device
+    losses: list[pd.Series]
+    loss_fn: LossFn
+
+    def __init__(
+        self,
+        features: pd.DataFrame,
+        targets: pd.DataFrame,
+        approximator: nn.Module | None = None,
+        optimimzer_cls: Callable[[ParamsT], Adam] = Adam,
+        device: torch.device = DefaultDevice,
+        loss_fn: LossFn = _mean_abs,
+    ) -> None:
+        """Initialize the trainer with features, targets, and model.
+
+        Args:
+            features: DataFrame containing the input features for training
+            targets: DataFrame containing the target values for training
+            approximator: Predefined neural network model (None to use default LSTM)
+            optimimzer_cls: Optimizer class to use for training (default: Adam)
+            device: Device to run the training on (default: DefaultDevice)
+            loss_fn: Loss function
+
+        """
+        self.features = features
+        self.targets = targets
+
+        if approximator is None:
+            approximator = LSTM(
+                n_inputs=len(features.columns),
+                n_outputs=len(targets.columns),
+                n_hidden=1,
+            ).to(device)
+        self.approximator = approximator.to(device)
+        self.optimizer = optimimzer_cls(approximator.parameters())
+        self.device = device
+        self.loss_fn = loss_fn
+        self.losses = []
+
+    def train(
+        self,
+        epochs: int,
+        batch_size: int | None = None,
+    ) -> Self:
+        """Train the model using the provided features and targets.
+
+        Args:
+            epochs: Number of training epochs
+            batch_size: Size of mini-batches for training (None for full-batch)
+
+        """
+        features = torch.Tensor(
+            np.swapaxes(
+                self.features.to_numpy().reshape(
+                    (len(self.targets), -1, len(self.features.columns))
+                ),
+                axis1=0,
+                axis2=1,
+            ),
+            device=self.device,
+        )
+        targets = torch.Tensor(self.targets.to_numpy(), device=self.device)
+
+        if batch_size is None:
+            losses = _train_full(
+                approximator=self.approximator,
+                features=features,
+                targets=targets,
+                epochs=epochs,
+                optimizer=self.optimizer,
+                loss_fn=self.loss_fn,
+            )
+        else:
+            losses = _train_batched(
+                approximator=self.approximator,
+                features=features,
+                targets=targets,
+                epochs=epochs,
+                optimizer=self.optimizer,
+                batch_size=batch_size,
+                loss_fn=self.loss_fn,
+            )
+
+        if len(self.losses) > 0:
+            losses.index += self.losses[-1].index[-1]
+        self.losses.append(losses)
+        return self
+
+    def get_loss(self) -> pd.Series:
+        """Get the loss history of the training process."""
+        return pd.concat(self.losses)
+
+    def get_estimator(self) -> TorchTimeCourse:
+        """Get the trained estimator."""
+        return TorchTimeCourse(
+            model=self.approximator,
+            parameter_names=list(self.targets.columns),
+        )
+
+
+def _train_batched(
+    approximator: nn.Module,
+    features: torch.Tensor,
+    targets: torch.Tensor,
+    epochs: int,
+    optimizer: Adam,
+    batch_size: int,
+    loss_fn: LossFn,
+) -> pd.Series:
+    losses = {}
+    for epoch in tqdm.trange(epochs):
+        permutation = torch.randperm(features.size()[0])
+        epoch_loss = 0
+        for i in range(0, features.size()[0], batch_size):
+            optimizer.zero_grad()
+            indices = permutation[i : i + batch_size]
+            loss = loss_fn(approximator(features[indices]), targets[indices])
+            loss.backward()
+            optimizer.step()
+            epoch_loss += loss.detach().numpy()
+
+        losses[epoch] = epoch_loss / (features.size()[0] / batch_size)
+    return pd.Series(losses, dtype=float)
+
+
+def _train_full(
+    approximator: nn.Module,
+    features: torch.Tensor,
+    targets: torch.Tensor,
+    epochs: int,
+    optimizer: Adam,
+    loss_fn: LossFn,
+) -> pd.Series:
+    losses = {}
+    for i in tqdm.trange(epochs):
+        optimizer.zero_grad()
+        loss = loss_fn(approximator(features), targets)
+        loss.backward()
+        optimizer.step()
+        losses[i] = loss.detach().numpy()
+    return pd.Series(losses, dtype=float)
+
+
+def train_torch_steady_state(
+    features: pd.DataFrame,
+    targets: pd.DataFrame,
+    epochs: int,
+    batch_size: int | None = None,
+    approximator: nn.Module | None = None,
+    optimimzer_cls: Callable[[ParamsT], Adam] = Adam,
+    device: torch.device = DefaultDevice,
+) -> tuple[TorchSteadyState, pd.Series]:
+    """Train a PyTorch steady state estimator.
+
+    This function trains a neural network model to estimate steady state data
+    using the provided features and targets. It supports both full-batch and
+    mini-batch training.
+
+    Examples:
+        >>> train_torch_steady_state(features, targets, epochs=100)
+
+    Args:
+        features: DataFrame containing the input features for training
+        targets: DataFrame containing the target values for training
+        epochs: Number of training epochs
+        batch_size: Size of mini-batches for training (None for full-batch)
+        approximator: Predefined neural network model (None to use default MLP)
+        optimimzer_cls: Optimizer class to use for training (default: Adam)
+        device: Device to run the training on (default: DefaultDevice)
+
+    Returns:
+        tuple[TorchSteadyState, pd.Series]: Trained estimator and loss history
+
+    """
+    trainer = TorchSteadyStateTrainer(
+        features=features,
+        targets=targets,
+        approximator=approximator,
+        optimimzer_cls=optimimzer_cls,
+        device=device,
+    ).train(epochs=epochs, batch_size=batch_size)
+
+    return trainer.get_estimator(), trainer.get_loss()
+
+
+def train_torch_time_course(
+    features: pd.DataFrame,
+    targets: pd.DataFrame,
+    epochs: int,
+    batch_size: int | None = None,
+    approximator: nn.Module | None = None,
+    optimimzer_cls: Callable[[ParamsT], Adam] = Adam,
+    device: torch.device = DefaultDevice,
+) -> tuple[TorchTimeCourse, pd.Series]:
+    """Train a PyTorch time course estimator.
+
+    This function trains a neural network model to estimate time course data
+    using the provided features and targets. It supports both full-batch and
+    mini-batch training.
+
+    Examples:
+        >>> train_torch_time_course(features, targets, epochs=100)
+
+    Args:
+        features: DataFrame containing the input features for training
+        targets: DataFrame containing the target values for training
+        epochs: Number of training epochs
+        batch_size: Size of mini-batches for training (None for full-batch)
+        approximator: Predefined neural network model (None to use default LSTM)
+        optimimzer_cls: Optimizer class to use for training (default: Adam)
+        device: Device to run the training on (default: DefaultDevice)
+
+    Returns:
+        tuple[TorchTimeCourse, pd.Series]: Trained estimator and loss history
+
+    """
+    trainer = TorchTimeCourseTrainer(
+        features=features,
+        targets=targets,
+        approximator=approximator,
+        optimimzer_cls=optimimzer_cls,
+        device=device,
+    ).train(epochs=epochs, batch_size=batch_size)
+
+    return trainer.get_estimator(), trainer.get_loss()
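End to end, the convenience functions wrap the trainers: build or accept an approximator, run full-batch or mini-batch training, and return the estimator together with its loss history. A sketch with made-up data (column names are illustrative only):

    import pandas as pd

    from mxlpy.npe import train_torch_steady_state

    # Toy data: two steady-state features, two target parameters.
    features = pd.DataFrame({"S": [1.0, 2.0, 3.0], "P": [0.5, 0.4, 0.3]})
    targets = pd.DataFrame({"k1": [0.1, 0.2, 0.3], "k2": [1.0, 0.9, 0.8]})

    estimator, losses = train_torch_steady_state(features, targets, epochs=100)
    predictions = estimator.predict(features)  # DataFrame with columns "k1", "k2"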
mxlpy/report.py CHANGED
@@ -48,12 +48,39 @@ def markdown(
 ) -> str:
     """Generate a markdown report comparing two models.
 
-    Args:
-        m1: The first model to compare.
-        m2: The second model to compare.
-        analyses: A list of functions that take a Path and return a tuple of a string and a Path. Defaults to None.
-        rel_change: The relative change threshold for numerical differences. Defaults to 1e-2.
-        img_path: The path to save images. Defaults to Path().
+    Parameters
+    ----------
+    m1
+        The first model to compare
+    m2
+        The second model to compare
+    analyses
+        A list of functions that analyze both models and return a report section and an image path
+    rel_change
+        The relative change threshold for numerical differences
+    img_path
+        The path to save images
+
+    Returns
+    -------
+    str
+        Markdown formatted report comparing the two models
+
+    Examples
+    --------
+    >>> from mxlpy import Model
+    >>> m1 = Model().add_parameter("k1", 0.1).add_variable("S", 1.0)
+    >>> m2 = Model().add_parameter("k1", 0.2).add_variable("S", 1.0)
+    >>> report = markdown(m1, m2)
+    >>> "Parameters" in report and "k1" in report
+    True
+
+    >>> # With custom analysis function
+    >>> def custom_analysis(m1, m2, path):
+    ...     return "## Custom analysis", path / "image.png"
+    >>> report = markdown(m1, m2, analyses=[custom_analysis])
+    >>> "Custom analysis" in report
+    True
 
     """
     content: list[str] = [