sqil-core 0.0.2__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. sqil_core/__init__.py +6 -2
  2. sqil_core/config.py +13 -0
  3. sqil_core/config_log.py +42 -0
  4. sqil_core/experiment/__init__.py +11 -0
  5. sqil_core/experiment/_analysis.py +95 -0
  6. sqil_core/experiment/_events.py +25 -0
  7. sqil_core/experiment/_experiment.py +553 -0
  8. sqil_core/experiment/data/plottr.py +778 -0
  9. sqil_core/experiment/helpers/_function_override_handler.py +111 -0
  10. sqil_core/experiment/helpers/_labone_wrappers.py +12 -0
  11. sqil_core/experiment/instruments/__init__.py +2 -0
  12. sqil_core/experiment/instruments/_instrument.py +190 -0
  13. sqil_core/experiment/instruments/drivers/SignalCore_SC5511A.py +515 -0
  14. sqil_core/experiment/instruments/local_oscillator.py +205 -0
  15. sqil_core/experiment/instruments/server.py +175 -0
  16. sqil_core/experiment/instruments/setup.yaml +21 -0
  17. sqil_core/experiment/instruments/zurich_instruments.py +55 -0
  18. sqil_core/fit/__init__.py +38 -0
  19. sqil_core/fit/_core.py +1084 -0
  20. sqil_core/fit/_fit.py +1191 -0
  21. sqil_core/fit/_guess.py +232 -0
  22. sqil_core/fit/_models.py +127 -0
  23. sqil_core/fit/_quality.py +266 -0
  24. sqil_core/resonator/__init__.py +13 -0
  25. sqil_core/resonator/_resonator.py +989 -0
  26. sqil_core/utils/__init__.py +85 -5
  27. sqil_core/utils/_analysis.py +415 -0
  28. sqil_core/utils/_const.py +105 -0
  29. sqil_core/utils/_formatter.py +259 -0
  30. sqil_core/utils/_plot.py +373 -0
  31. sqil_core/utils/_read.py +262 -0
  32. sqil_core/utils/_utils.py +164 -0
  33. {sqil_core-0.0.2.dist-info → sqil_core-1.0.0.dist-info}/METADATA +40 -7
  34. sqil_core-1.0.0.dist-info/RECORD +36 -0
  35. {sqil_core-0.0.2.dist-info → sqil_core-1.0.0.dist-info}/WHEEL +1 -1
  36. {sqil_core-0.0.2.dist-info → sqil_core-1.0.0.dist-info}/entry_points.txt +1 -1
  37. sqil_core/utils/analysis.py +0 -68
  38. sqil_core/utils/const.py +0 -38
  39. sqil_core/utils/formatter.py +0 -134
  40. sqil_core/utils/read.py +0 -156
  41. sqil_core-0.0.2.dist-info/RECORD +0 -10
sqil_core/fit/_core.py ADDED
@@ -0,0 +1,1084 @@
1
+ import inspect
2
+ import warnings
3
+ from functools import wraps
4
+
5
+ import numpy as np
6
+ import scipy.optimize as spopt
7
+ from lmfit.model import ModelResult
8
+
9
+ from sqil_core.fit._quality import FitQuality, evaluate_fit_quality, format_fit_metrics
10
+ from sqil_core.utils._formatter import format_fit_params
11
+ from sqil_core.utils._utils import _count_function_parameters
12
+
13
+
14
+ class FitResult:
15
+ """
16
+ Stores the result of a fitting procedure.
17
+
18
+ This class encapsulates the fitted parameters, their standard errors, optimizer output,
19
+ and fit quality metrics. It also provides functionality for summarizing the results and
20
+ making predictions using the fitted model.
21
+
22
+ Parameters
23
+ ----------
24
+ params : array-like
25
+ Array of fitted parameters.
26
+ std_err : array-like
27
+ Array of standard errors of the fitted parameters.
28
+ fit_output : any
29
+ Raw output from the optimization routine.
30
+ metrics : dict, optional
31
+ Dictionary of fit quality metrics (e.g., R-squared, reduced chi-squared).
32
+ predict : callable, optional
33
+ Function of x that returns predictions based on the fitted parameters.
34
+ If not provided, an exception will be raised when calling it.
35
+ param_names : list, optional
36
+ List of parameter names, defaulting to a range based on the number of parameters.
37
+ model_name : str, optional
38
+ Name of the model used to fit the data.
39
+ metadata : dict, optional
40
+ Additional information that can be passed in the fit result.
41
+
42
+ Methods
43
+ -------
44
+ summary()
45
+ Prints a detailed summary of the fit results, including parameter values,
46
+ standard errors, and fit quality metrics.
47
+ _no_prediction()
48
+ Raises an exception when no prediction function is available.
49
+ """
50
+
51
+ def __init__(
52
+ self,
53
+ params,
54
+ std_err,
55
+ fit_output,
56
+ metrics={},
57
+ predict=None,
58
+ param_names=None,
59
+ model_name=None,
60
+ metadata={},
61
+ ):
62
+ self.params = params
63
+ self.std_err = std_err
64
+ self.output = fit_output
65
+ self.metrics = metrics
66
+ self.predict = predict or self._no_prediction
67
+ self.param_names = param_names or list(range(len(params)))
68
+ self.model_name = model_name
69
+ self.metadata = metadata
70
+
71
+ self.params_by_name = dict(zip(self.param_names, self.params))
72
+
73
+ def __repr__(self):
74
+ return (
75
+ f"FitResult(\n"
76
+ f" params={self.params},\n"
77
+ f" std_err={self.std_err},\n"
78
+ f" metrics={self.metrics}\n)"
79
+ )
80
+
81
+ def summary(self, no_print=False):
82
+ """Prints a detailed summary of the fit results."""
83
+ s = format_fit_metrics(self.metrics) + "\n"
84
+ s += format_fit_params(
85
+ self.param_names,
86
+ self.params,
87
+ self.std_err,
88
+ np.array(self.std_err) / self.params * 100,
89
+ )
90
+ if not no_print:
91
+ print(s)
92
+ return s
93
+
94
+ def quality(self, recipe="nrmse"):
95
+ return evaluate_fit_quality(self.metrics, recipe)
96
+
97
+ def is_acceptable(self, recipe="nrmse", threshold=FitQuality.ACCEPTABLE):
98
+ return self.quality(recipe) >= threshold
99
+
100
+ def _no_prediction(self):
101
+ raise Exception("No predition function available")
102
+
103
+
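As a minimal sketch of how a FitResult might be consumed (the numbers below are made up, and the class is assumed to be importable from sqil_core.fit._core):

    import numpy as np

    from sqil_core.fit._core import FitResult

    # Hypothetical result of fitting a linear model y = a * x + b
    params = np.array([2.0, 0.5])
    result = FitResult(
        params=params,
        std_err=np.array([0.05, 0.02]),
        fit_output=None,  # the raw optimizer output would normally go here
        metrics={"nrmse": 0.01, "red_chi2": 1.1, "aic": -50.0},
        predict=lambda x: params[0] * x + params[1],
        param_names=["a", "b"],
        model_name="linear",
    )

    print(result.params_by_name)                  # {'a': 2.0, 'b': 0.5}
    print(result.predict(np.array([0.0, 1.0])))   # [0.5 2.5]
    result.summary()                              # metrics followed by a parameter table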
104
+ def fit_output(fit_func):
105
+ """
106
+ Decorator to standardize the output of fitting functions.
107
+
108
+ This decorator processes the raw output of various fitting libraries
109
+ (such as SciPy's curve_fit, leastsq, least_squares, and minimize, as well as lmfit)
110
+ and converts it into a unified `FitResult` object. It extracts
111
+ optimized parameters, their standard errors, fit quality metrics,
112
+ and a prediction function.
113
+
114
+ Parameters
115
+ ----------
116
+ fit_func : Callable
117
+ A function that performs fitting and returns raw fit output,
118
+ possibly along with metadata.
119
+
120
+ Returns
121
+ -------
122
+ Callable
123
+ A wrapped function that returns a `FitResult` object containing:
124
+ - `params` : list
125
+ Optimized parameter values.
126
+ - `std_err` : list or None
127
+ Standard errors of the fitted parameters.
128
+ - `metrics` : dict or None
129
+ Dictionary of fit quality metrics (e.g., reduced chi-squared).
130
+ - `predict` : Callable or None
131
+ A function that predicts values using the optimized parameters.
132
+ - `output` : object
133
+ The raw optimizer output from the fitting process.
134
+ - `param_names` : list or None
135
+ Names of the fitted parameters.
136
+ - `metadata` : dict
137
+ A dictionary containing extra information. Advanced uses include passing
138
+ functions that get evaluated after fit result has been processed.
139
+ See the documentation, Notebooks/The fit_output decorator
140
+
141
+ Raises
142
+ ------
143
+ TypeError
144
+ If the fitting function's output format is not recognized.
145
+
146
+ Notes
147
+ -----
148
+ - If the fit function returns a tuple `(raw_output, metadata)`,
149
+ the metadata is extracted and applied to enhance the fit results.
150
+ In case of any conflicts, the metadata overrides the computed values.
151
+
152
+ Examples
153
+ --------
154
+ >>> @fit_output
155
+ ... def my_fitting_function(x, y):
156
+ ... return some_raw_fit_output
157
+ ...
158
+ >>> fit_result = my_fitting_function(x_data, y_data)
159
+ >>> print(fit_result.params)
160
+ """
161
+
162
+ @wraps(fit_func)
163
+ def wrapper(*args, **kwargs):
164
+ # Perform the fit
165
+ fit_result = fit_func(*args, **kwargs)
166
+
167
+ # Extract information from function arguments
168
+ x_data, y_data = _get_xy_data_from_fit_args(*args, **kwargs)
169
+ sigma = kwargs.get("sigma", None)
170
+ has_sigma = isinstance(sigma, (list, np.ndarray))
171
+
172
+ # Initialize variables
173
+ sqil_keys = ["params", "std_err", "metrics", "predict", "output", "param_names"]
174
+ sqil_dict = {key: None for key in sqil_keys}
175
+ metadata = {}
176
+ formatted = None
177
+ # Set the default parameters to an empty array instead of None
178
+ sqil_dict["params"] = []
179
+
180
+ # Check if the fit output is a tuple and separate it into raw_fit_output and metadata
181
+ if (
182
+ isinstance(fit_result, tuple)
183
+ and (len(fit_result) == 2)
184
+ and isinstance(fit_result[1], dict)
185
+ ):
186
+ raw_fit_output, metadata = fit_result
187
+ else:
188
+ raw_fit_output = fit_result
189
+ sqil_dict["output"] = raw_fit_output
190
+
191
+ # Check if there are variables to override in metadata before continuing
192
+ if "fit_output_vars" in metadata:
193
+ overrides = metadata["fit_output_vars"]
194
+ x_data = overrides.get("x_data", x_data)
195
+ y_data = overrides.get("y_data", y_data)
196
+ del metadata["fit_output_vars"]
197
+
198
+ # Format the raw_fit_output into a standardized dict
199
+ if raw_fit_output is None:
200
+ raise TypeError("Fit didn't coverge, result is None")
201
+ # Scipy tuple (curve_fit, leastsq)
202
+ elif _is_scipy_tuple(raw_fit_output):
203
+ formatted = _format_scipy_tuple(raw_fit_output, y_data, has_sigma=has_sigma)
204
+
205
+ # Scipy least squares
206
+ elif _is_scipy_least_squares(raw_fit_output):
207
+ formatted = _format_scipy_least_squares(
208
+ raw_fit_output, y_data, has_sigma=has_sigma
209
+ )
210
+
211
+ # Scipy minimize
212
+ elif _is_scipy_minimize(raw_fit_output):
213
+ residuals = None
214
+ predict = metadata.get("predict", None)
215
+ if (x_data is not None) and (predict is not None) and callable(predict):
216
+ residuals = y_data - metadata["predict"](x_data, *raw_fit_output.x)
217
+ formatted = _format_scipy_minimize(
218
+ raw_fit_output, residuals=residuals, y_data=y_data, has_sigma=has_sigma
219
+ )
220
+
221
+ # lmfit
222
+ elif _is_lmfit(raw_fit_output):
223
+ formatted = _format_lmfit(raw_fit_output)
224
+
225
+ # Custom fit output
226
+ elif isinstance(raw_fit_output, dict):
227
+ formatted = raw_fit_output
228
+
229
+ else:
230
+ raise TypeError(
231
+ "Couldn't recognize the output.\n"
232
+ + "Are you using scipy? Did you forget to set `full_output=True` in your fit method?"
233
+ )
234
+
235
+ # Update sqil_dict with the formatted fit_output
236
+ if formatted is not None:
237
+ sqil_dict.update(formatted)
238
+
239
+ # Add/override fields using metadata
240
+ sqil_dict.update(metadata)
241
+
242
+ # Process metadata
243
+ metadata = _process_metadata(metadata, sqil_dict)
244
+ # Remove fields already present in sqil_dict from metadata
245
+ filtered_metadata = {k: v for k, v in metadata.items() if k not in sqil_keys}
246
+
247
+ # Assign the optimized parameters to the prediction function
248
+ model_name = metadata.get("model_name", None)
249
+ if sqil_dict["predict"] is not None:
250
+ if model_name is None:
251
+ model_name = sqil_dict["predict"].__name__
252
+ params = sqil_dict["params"]
253
+ predict = sqil_dict["predict"]
254
+ n_inputs = _count_function_parameters(predict)
255
+ if n_inputs == 1 + len(params):
256
+ sqil_dict["predict"] = lambda x: predict(x, *params)
257
+
258
+ return FitResult(
259
+ params=sqil_dict.get("params", []),
260
+ std_err=sqil_dict.get("std_err", None),
261
+ fit_output=raw_fit_output,
262
+ metrics=sqil_dict.get("metrics", {}),
263
+ predict=sqil_dict.get("predict", None),
264
+ param_names=sqil_dict.get("param_names", None),
265
+ model_name=model_name,
266
+ metadata=filtered_metadata,
267
+ )
268
+
269
+ return wrapper
270
+
271
+
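A minimal sketch of a fitting routine wrapped with fit_output, assuming SciPy >= 1.9 so that curve_fit accepts full_output=True (the scipy-tuple branch handled above):

    import numpy as np
    import scipy.optimize as spopt

    from sqil_core.fit._core import fit_output


    def linear_model(x, a, b):
        return a * x + b


    @fit_output
    def fit_linear(x_data, y_data, guess=(1.0, 0.0)):
        # Returning the full curve_fit tuple lets the decorator extract the
        # parameters, the covariance matrix and the residuals (infodict["fvec"]).
        return spopt.curve_fit(linear_model, x_data, y_data, p0=guess, full_output=True)


    x = np.linspace(0, 10, 50)
    y = 3.0 * x + 1.0 + np.random.default_rng(0).normal(0, 0.1, 50)

    res = fit_linear(x, y)
    print(res.params)    # close to [3.0, 1.0]
    print(res.metrics)   # nrmse, red_chi2 and aic computed by the decorator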
272
+ # TODO: make a function that handles the bounds for lmfit.
273
+ # Such a function will take the bounds as they are returned by @fit_input, i.e. lower and upper bounds,
274
+ # and it will iterate through the lmfit parameters to apply the bounds.
275
+ def fit_input(fit_func):
276
+ """
277
+ Decorator to handle optional fitting inputs like initial guesses, bounds, and fixed parameters
278
+ for a fitting function.
279
+
280
+ - `guess` : list or np.ndarray, optional, default=None
281
+ The initial guess for the fit. If None, it is not passed to the fit function.
282
+ - `bounds` : list or np.ndarray, optional, default=(-np.inf, np.inf)
283
+ The bounds on the fit parameters in the form [(min, max), (min, max), ...].
284
+ - `fixed_params` : list or np.ndarray, optional, default=None
285
+ Indices of the parameters that must remain fixed during the optimization.
286
+ For example, when fitting `f(x, a, b)`, to fix the value of `a` we would pass
287
+ `fit_f(guess=[a_guess, b_guess], fixed_params=[0])`
288
+ - `fixed_bound_factor` : float, optional, default=1e-6
289
+ The relative tolerance allowed for parameters that must remain fixed (`fixed_params`).
290
+
291
+ IMPORTANT: This decorator requires the x and y input vectors to be named `x_data` and `y_data`.
292
+ The initial guess must be called `guess` and the bounds `bounds`.
293
+
294
+ Parameters
295
+ ----------
296
+ fit_func : callable
297
+ The fitting function to be decorated. This function should accept `x_data` and `y_data` as
298
+ mandatory parameters and may optionally accept `guess` and `bounds` (plus any other additional
299
+ parameter).
300
+
301
+ Returns
302
+ -------
303
+ callable
304
+ A wrapper function that processes the input arguments and then calls the original fitting
305
+ function with the preprocessed inputs. This function also handles warnings if unsupported
306
+ parameters are passed to the fit function.
307
+
308
+ Notes
309
+ -----
310
+ - The parameters in `guess`, `bounds` and `fixed_params` must be in the same order as in the
311
+ modeled function definition.
312
+ - The decorator can fix certain parameters by narrowing their bounds based on an initial guess
313
+ and a specified `fixed_bound_factor`.
314
+ - The decorator processes bounds by setting them as `(-np.inf, np.inf)` if they are not specified (`None`).
315
+
316
+ Examples
317
+ -------
318
+ >>> @fit_input
319
+ ... def my_fit_func(x_data, y_data, guess=None, bounds=None, fixed_params=None):
320
+ ... # Perform fitting...
321
+ ... return fit_result
322
+ >>> x_data = np.linspace(0, 10, 100)
323
+ >>> y_data = np.sin(x_data) + np.random.normal(0, 0.1, 100)
324
+ >>> result = my_fit_func(x_data, y_data, guess=[1, 1], bounds=[(0, 5), (-np.inf, np.inf)])
325
+ """
326
+
327
+ @wraps(fit_func)
328
+ def wrapper(
329
+ *params,
330
+ guess=None,
331
+ bounds=None,
332
+ fixed_params=None,
333
+ fixed_bound_factor=1e-6,
334
+ sigma=None,
335
+ **kwargs,
336
+ ):
337
+ # Inspect function to check if it requires guess and bounds
338
+ func_params = inspect.signature(fit_func).parameters
339
+
340
+ # Check if the user passed parameters that are not supported by the fit function
341
+ if (guess is not None) and ("guess" not in func_params):
342
+ warnings.warn("The fit function doesn't allow any initial guess.")
343
+ if (bounds is not None) and ("bounds" not in func_params):
344
+ warnings.warn("The fit function doesn't allow any bounds.")
345
+ if (fixed_params is not None) and (guess is None):
346
+ raise ValueError("Using fixed_params requires an initial guess.")
347
+
348
+ # Process bounds if the function accepts it
349
+ if (bounds is not None) and ("bounds" in func_params):
350
+ processed_bounds = np.array(
351
+ [(-np.inf, np.inf) if b is None else b for b in bounds],
352
+ dtype=np.float64,
353
+ )
354
+ lower_bounds, upper_bounds = (
355
+ processed_bounds[:, 0],
356
+ processed_bounds[:, 1],
357
+ )
358
+ else:
359
+ lower_bounds, upper_bounds = None, None
360
+
361
+ # Fix parameters by setting a very tight bound
362
+ if (fixed_params is not None) and (guess is not None):
363
+ if bounds is None:
364
+ lower_bounds = -np.inf * np.ones(len(guess))
365
+ upper_bounds = np.inf * np.ones(len(guess))
366
+ for idx in fixed_params:
367
+ tolerance = (
368
+ abs(guess[idx]) * fixed_bound_factor
369
+ if guess[idx] != 0
370
+ else fixed_bound_factor
371
+ )
372
+ lower_bounds[idx] = guess[idx] - tolerance
373
+ upper_bounds[idx] = guess[idx] + tolerance
374
+
375
+ # Prepare arguments dynamically
376
+ fit_args = {**kwargs}
377
+
378
+ if guess is not None and "guess" in func_params:
379
+ fit_args["guess"] = guess
380
+ if (
381
+ (bounds is not None) or (fixed_params is not None)
382
+ ) and "bounds" in func_params:
383
+ fit_args["bounds"] = (lower_bounds, upper_bounds)
384
+
385
+ # Call the wrapped function with preprocessed inputs
386
+ fit_args = {**kwargs, **fit_args}
387
+ return fit_func(*params, **fit_args)
388
+
389
+ return wrapper
390
+
391
+
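And a sketch of how fit_input and fit_output might be stacked, again assuming a curve_fit backend with full_output=True (SciPy >= 1.9); passing fixed_params=[0] pins the first parameter to its guess by tightening its bounds:

    import numpy as np
    import scipy.optimize as spopt

    from sqil_core.fit._core import fit_input, fit_output


    def exp_decay(x, amp, tau):
        return amp * np.exp(-x / tau)


    @fit_input
    @fit_output
    def fit_exp_decay(x_data, y_data, guess=None, bounds=None):
        return spopt.curve_fit(
            exp_decay, x_data, y_data, p0=guess, bounds=bounds, full_output=True
        )


    x = np.linspace(0, 5, 100)
    y = 2.0 * np.exp(-x / 1.5) + np.random.default_rng(1).normal(0, 0.02, 100)

    # amp (index 0) stays pinned at 2.0; only tau is effectively optimized
    res = fit_exp_decay(x, y, guess=[2.0, 1.0], fixed_params=[0])
    print(res.params)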
392
+ def _process_metadata(metadata: dict, sqil_dict: dict):
393
+ """Process metadata by computing values that cannot be calculated before having
394
+ the sqil_dict. For example use the standard errors to compute a different metric.
395
+
396
+ Treats items whose key starts with @ as functions that take sqil_dict as input.
397
+ So it evaluates them and renames the key removing the @.
398
+ """
399
+ res = metadata.copy()
400
+ for key, value in metadata.items():
401
+ if key.startswith("@"):
402
+ res[key[1:]] = value(sqil_dict)
403
+ del res[key]
404
+ return res
405
+
406
+
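A small sketch of the '@'-key convention handled by _process_metadata: entries whose key starts with '@' are callables that receive the assembled sqil_dict and are replaced by their return value (the names f0, kappa and rel_err_f0 below are purely illustrative):

    from sqil_core.fit._core import _process_metadata

    metadata = {
        "param_names": ["f0", "kappa"],
        # Evaluated only after the fit has been formatted into sqil_dict
        "@rel_err_f0": lambda d: d["std_err"][0] / d["params"][0],
    }

    sqil_dict = {"params": [5e9, 1e6], "std_err": [1e3, 1e2]}
    print(_process_metadata(metadata, sqil_dict))
    # {'param_names': ['f0', 'kappa'], 'rel_err_f0': 2e-07}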
407
+ def compute_adjusted_standard_errors(
408
+ pcov: np.ndarray,
409
+ residuals: np.ndarray,
410
+ red_chi2=None,
411
+ cov_rescaled=True,
412
+ sigma=None,
413
+ ) -> np.ndarray:
414
+ """
415
+ Compute adjusted standard errors for fitted parameters.
416
+
417
+ This function adjusts the covariance matrix based on the reduced chi-squared
418
+ value and calculates the standard errors for each parameter. It accounts for
419
+ cases where the covariance matrix is not available or the fit is nearly perfect.
420
+
421
+ Parameters
422
+ ----------
423
+ pcov : np.ndarray
424
+ Covariance matrix of the fitted parameters, typically obtained from an
425
+ optimization routine.
426
+ residuals : np.ndarray
427
+ Residuals of the fit, defined as the difference between observed and
428
+ model-predicted values.
429
+ red_chi2 : float, optional
430
+ Precomputed reduced chi-squared value. If `None`, it is computed from
431
+ `residuals` and `sigma`.
432
+ cov_rescaled : bool, default=True
433
+ Whether the fitting process already rescales the covariance matrix with
434
+ the reduced chi-squared.
435
+ sigma : np.ndarray, optional
436
+ Experimental uncertainties. Only used if `cov_rescaled=False` AND
437
+ known experimental errors are available.
438
+
439
+ Returns
440
+ -------
441
+ np.ndarray
442
+ Standard errors for each fitted parameter. If the covariance matrix is
443
+ undefined, returns `None`.
444
+
445
+ Warnings
446
+ --------
447
+ - If the covariance matrix is not available (`pcov is None`), the function
448
+ issues a warning about possible numerical instability or a near-perfect fit.
449
+ - If the reduced chi-squared value is `NaN`, the function returns `NaN` for
450
+ all standard errors.
451
+
452
+ Notes
453
+ -----
454
+ - The covariance matrix is scaled by the reduced chi-squared value to adjust
455
+ for under- or overestimation of uncertainties.
456
+ - If `red_chi2` is not provided, it is computed internally using the residuals.
457
+ - If a near-perfect fit is detected (all residuals close to zero), the function
458
+ warns that standard errors may not be necessary.
459
+
460
+ Examples
461
+ --------
462
+ >>> pcov = np.array([[0.04, 0.01], [0.01, 0.09]])
463
+ >>> residuals = np.array([0.1, -0.2, 0.15])
464
+ >>> compute_adjusted_standard_errors(pcov, residuals)
465
+ array([0.0539, 0.0808])  # pcov diagonal rescaled by the reduced chi-squared
466
+ """
467
+ # Check for invalid covariance
468
+ if pcov is None:
469
+ if np.allclose(residuals, 0, atol=1e-10):
470
+ warnings.warn(
471
+ "Covariance matrix could not be estimated due to an almost perfect fit. "
472
+ "Standard errors are undefined but may not be necessary in this case."
473
+ )
474
+ else:
475
+ warnings.warn(
476
+ "Covariance matrix could not be estimated. This could be due to poor model fit "
477
+ "or numerical instability. Review the data or model configuration."
478
+ )
479
+ return None
480
+
481
+ # Calculate reduced chi-squared
482
+ n_params = len(np.diag(pcov))
483
+ if red_chi2 is None:
484
+ _, red_chi2 = compute_chi2(
485
+ residuals, n_params, cov_rescaled=cov_rescaled, sigma=sigma
486
+ )
487
+
488
+ # Rescale the covariance matrix
489
+ if np.isnan(red_chi2):
490
+ pcov_rescaled = np.nan
491
+ else:
492
+ pcov_rescaled = pcov * red_chi2
493
+
494
+ # Calculate standard errors for each parameter
495
+ if np.any(np.isnan(pcov_rescaled)):
496
+ standard_errors = np.full(n_params, np.nan, dtype=float)
497
+ else:
498
+ standard_errors = np.sqrt(np.diag(pcov_rescaled))
499
+
500
+ return standard_errors
501
+
502
+
503
+ def compute_chi2(residuals, n_params=None, cov_rescaled=True, sigma: np.ndarray = None):
504
+ """
505
+ Compute the chi-squared (χ²) and reduced chi-squared (χ²_red) statistics.
506
+
507
+ This function calculates the chi-squared value based on residuals and an
508
+ estimated or provided uncertainty (`sigma`). If the number of model parameters
509
+ (`n_params`) is specified, it also computes the reduced chi-squared.
510
+
511
+ Parameters
512
+ ----------
513
+ residuals : np.ndarray
514
+ The difference between observed and model-predicted values.
515
+ n_params : int, optional
516
+ Number of fitted parameters. If provided, the function also computes
517
+ the reduced chi-squared (χ²_red).
518
+ cov_rescaled : bool, default=True
519
+ Whether the covariance matrix has been already rescaled by the fit method.
520
+ If `True`, the function assumes proper uncertainty scaling. Otherwise,
521
+ it estimates uncertainty from the standard deviation of the residuals.
522
+ sigma : np.ndarray, optional
523
+ Experimental uncertainties. Should only be used when the fitting process
524
+ does not account for experimental errors AND known uncertainties are available.
525
+
526
+ Returns
527
+ -------
528
+ chi2 : float
529
+ The chi-squared statistic (χ²), which measures the goodness of fit.
530
+ red_chi2 : float (if `n_params` is provided)
531
+ The reduced chi-squared statistic (χ²_red), computed as χ² divided by
532
+ the degrees of freedom (N - p). If `n_params` is `None`, only χ² is returned.
533
+
534
+ Warnings
535
+ --------
536
+ - If the degrees of freedom (N - p) is non-positive, a warning is issued,
537
+ and χ²_red is set to NaN. This may indicate overfitting or an insufficient
538
+ number of data points.
539
+ - If any uncertainty value in `sigma` is zero, it is replaced with machine epsilon
540
+ to prevent division by zero.
541
+
542
+ Notes
543
+ -----
544
+ - If `sigma` is not provided and `cov_rescaled=False`, the function estimates
545
+ the uncertainty using the standard deviation of residuals.
546
+ - The reduced chi-squared value (χ²_red) should ideally be close to 1 for a good fit.
547
+ Values significantly greater than 1 indicate underfitting, while values much less
548
+ than 1 suggest overfitting.
549
+
550
+ Examples
551
+ --------
552
+ >>> residuals = np.array([0.1, -0.2, 0.15, -0.05])
553
+ >>> compute_chi2(residuals, n_params=2)
554
+ (0.075, 0.0375)
555
+ """
556
+ # If the optimization does not account for the experimental sigma,
557
+ # approximate it with the std of the residuals
558
+ S = 1 if cov_rescaled else np.std(residuals)
559
+ # If the experimental error is provided, use that instead
560
+ if sigma is not None:
561
+ S = sigma
562
+
563
+ # Replace 0 elements of S with the machine epsilon to avoid divisions by 0
564
+ if not np.isscalar(S):
565
+ S_safe = np.where(S == 0, np.finfo(float).eps, S)
566
+ else:
567
+ S_safe = np.finfo(float).eps if S == 0 else S
568
+
569
+ # Compute chi squared
570
+ chi2 = np.sum((residuals / S_safe) ** 2)
571
+ # If number of parameters is not provided return just chi2
572
+ if n_params is None:
573
+ return chi2
574
+
575
+ # Reduced chi squared
576
+ dof = len(residuals) - n_params # degrees of freedom (N - p)
577
+ if dof <= 0:
578
+ warnings.warn(
579
+ "Degrees of freedom (dof) is non-positive. This may indicate overfitting or insufficient data."
580
+ )
581
+ red_chi2 = np.nan
582
+ else:
583
+ red_chi2 = chi2 / dof
584
+
585
+ return chi2, red_chi2
586
+
587
+
588
+ def compute_aic(residuals: np.ndarray, n_params: int) -> float:
589
+ """
590
+ Computes the Akaike Information Criterion (AIC) for a given model fit.
591
+
592
+ The AIC is a metric used to compare the relative quality of statistical models
593
+ for a given dataset. It balances model fit with complexity, penalizing models
594
+ with more parameters to prevent overfitting.
595
+
596
+ Interpretation: The AIC has no meaning on its own; only the difference between
597
+ the AIC of model 1 and that of model 2 is informative.
598
+ ΔAIC = AIC_1 - AIC_2
599
+ If ΔAIC > 10 -> model 2 fits much better.
600
+
601
+ Parameters
602
+ ----------
603
+ residuals : np.ndarray
604
+ Array of residuals between the observed data and model predictions.
605
+ n_params : int
606
+ Number of free parameters in the fitted model.
607
+
608
+ Returns
609
+ -------
610
+ float
611
+ The Akaike Information Criterion value.
612
+ """
613
+
614
+ n = len(residuals)
615
+ rss = np.sum(residuals**2)
616
+ return 2 * n_params + n * np.log(rss / n)
617
+
618
+
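To illustrate the ΔAIC comparison described in the docstring, a short sketch with made-up residuals from a well-fitting and a poorly-fitting model:

    import numpy as np

    from sqil_core.fit._core import compute_aic

    rng = np.random.default_rng(0)
    y = np.sin(np.linspace(0, 6, 200))

    residuals_good = rng.normal(0, 0.05, 200)   # e.g. a sinusoidal model
    residuals_poor = y - y.mean()               # e.g. a constant model

    aic_good = compute_aic(residuals_good, n_params=3)
    aic_poor = compute_aic(residuals_poor, n_params=1)

    # A difference above ~10 means the lower-AIC model is strongly preferred
    print(aic_poor - aic_good)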
619
+ def compute_nrmse(residuals: np.ndarray, y_data: np.ndarray) -> float:
620
+ """
621
+ Computes the Normalized Root Mean Squared Error (NRMSE) of a model fit.
622
+
623
+ Lower is better.
624
+
625
+ The NRMSE is a scale-independent metric that quantifies the average magnitude
626
+ of residual errors normalized by the range of the observed data. It is useful
627
+ for comparing the fit quality across different datasets or models.
628
+
629
+ For complex data it's computed using the L2 norm and the span of the magnitude.
630
+
631
+ Parameters
632
+ ----------
633
+ residuals : np.ndarray
634
+ Array of residuals between the observed data and model predictions.
635
+ y_data : np.ndarray
636
+ The original observed data used in the model fitting.
637
+
638
+ Returns
639
+ -------
640
+ float
641
+ The normalized root mean squared error (NRMSE).
642
+ """
643
+ n = len(residuals)
644
+ if np.iscomplexobj(y_data):
645
+ y_abs_span = np.max(np.abs(y_data)) - np.min(np.abs(y_data))
646
+ if y_abs_span == 0:
647
+ warnings.warn(
648
+ "y_data has zero span in magnitude. NRMSE is undefined.", RuntimeWarning
649
+ )
650
+ return np.nan
651
+ rmse = np.linalg.norm(residuals) / np.sqrt(n)
652
+ nrmse = rmse / y_abs_span
653
+ else:
654
+ y_span = np.max(y_data) - np.min(y_data)
655
+ if y_span == 0:
656
+ warnings.warn("y_data has zero span. NRMSE is undefined.", RuntimeWarning)
657
+ return np.nan
658
+ rss = np.sum(residuals**2)
659
+ nrmse = np.sqrt(rss / n) / y_span
660
+
661
+ return nrmse
662
+
663
+
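A sketch of the real and complex branches of compute_nrmse, with synthetic residuals (for complex data, e.g. a resonator trace, the L2 norm of the residuals is normalized by the span of the magnitude):

    import numpy as np

    from sqil_core.fit._core import compute_nrmse

    rng = np.random.default_rng(2)

    # Real-valued data: RMSE normalized by the span of y
    y_real = np.linspace(0, 10, 100)
    print(compute_nrmse(rng.normal(0, 0.1, 100), y_real))   # ~0.01

    # Complex-valued data: norm of the complex residuals over the span of |y|
    y_complex = np.linspace(1, 2, 100) * np.exp(1j * np.linspace(0, 2 * np.pi, 100))
    res_complex = rng.normal(0, 0.1, 100) * (1 + 1j)
    print(compute_nrmse(res_complex, y_complex))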
664
+ def _is_scipy_tuple(result):
665
+ """
666
+ Check whether the given result follows the expected structure of a SciPy optimization tuple.
667
+ """
668
+ if isinstance(result, tuple):
669
+ if len(result) < 3:
670
+ raise TypeError(
671
+ "Fit result is a tuple, but couldn't recognize it.\n"
672
+ + "Are you using scipy? Did you forget to set `full_output=True` in your fit method?"
673
+ )
674
+ popt = result[0]
675
+ cov_ish = result[1]
676
+ infodict = result[2]
677
+ keys_to_check = ["fvec"]
678
+
679
+ if cov_ish is not None:
680
+ cov_check = isinstance(cov_ish, np.ndarray) and cov_ish.ndim == 2
681
+ else:
682
+ cov_check = True
683
+ return (
684
+ isinstance(popt, np.ndarray)
685
+ and cov_check
686
+ and (all(key in infodict for key in keys_to_check))
687
+ )
688
+ return False
689
+
690
+
691
+ def _is_scipy_minimize(result):
692
+ """
693
+ Check whether the given result follows the expected structure of a SciPy minimize.
694
+ """
695
+ return (
696
+ isinstance(result, spopt.OptimizeResult)
697
+ and hasattr(result, "fun")
698
+ and np.isscalar(result.fun)
699
+ and hasattr(result, "jac")
700
+ )
701
+
702
+
703
+ def _is_scipy_least_squares(result):
704
+ """
705
+ Check whether the given result follows the expected structure of a SciPy least_squares.
706
+ """
707
+ return (
708
+ isinstance(result, spopt.OptimizeResult)
709
+ and hasattr(result, "cost")
710
+ and hasattr(result, "fun")
711
+ and hasattr(result, "jac")
712
+ )
713
+
714
+
715
+ def _is_lmfit(result):
716
+ """
717
+ Check whether the given result follows the expected structure of a lmfit fit.
718
+ """
719
+ return isinstance(result, ModelResult)
720
+
721
+
722
+ def _format_scipy_tuple(result, y_data=None, has_sigma=False):
723
+ """
724
+ Formats the output of a SciPy fitting function into a standardized dictionary.
725
+
726
+ This function takes the tuple returned by SciPy optimization functions (e.g., `curve_fit`, `leastsq`)
727
+ and extracts relevant fitting parameters, standard errors, and reduced chi-squared values. It ensures
728
+ the result is structured consistently for further processing.
729
+
730
+ Parameters
731
+ ----------
732
+ result : tuple
733
+ A tuple containing the fitting results from a SciPy function. Expected structure:
734
+ - `result[0]`: `popt` (optimized parameters, NumPy array)
735
+ - `result[1]`: `pcov` (covariance matrix, NumPy array or None)
736
+ - `result[2]`: `infodict` (dictionary containing residuals, required for error computation)
737
+
738
+ y_data : np.ndarray, optional
739
+ The y data that has been fit. Used to compute some fit metrics.
740
+
741
+ has_sigma : bool, optional
742
+ Indicates whether the fitting procedure considered experimental errors (`sigma`).
743
+ If `True`, the covariance matrix (`pcov`) does not need rescaling.
744
+
745
+ Returns
746
+ -------
747
+ dict
748
+ A dictionary containing:
749
+ - `"params"`: The optimized parameters (`popt`).
750
+ - `"std_err"`: The standard errors computed from the covariance matrix (`pcov`).
751
+ - `"metrics"`: A dictionary containing the reduced chi-squared (`red_chi2`).
752
+ """
753
+ if not isinstance(result, tuple):
754
+ raise TypeError("Fit result must be a tuple")
755
+
756
+ popt, pcov, infodict = None, None, None
757
+ std_err = None
758
+ metrics = {}
759
+
760
+ # Extract output parameters
761
+ length = len(result)
762
+ popt = result[0]
763
+ pcov = result[1] if length > 1 else None
764
+ infodict = result[2] if length > 2 else None
765
+
766
+ if infodict is not None:
767
+ residuals = infodict["fvec"]
768
+ # Reduced chi squared
769
+ _, red_chi2 = compute_chi2(
770
+ residuals, n_params=len(popt), cov_rescaled=has_sigma
771
+ )
772
+ # AIC
773
+ aic = compute_aic(residuals, len(popt))
774
+ # NRMSE
775
+ if y_data is not None:
776
+ nrmse = compute_nrmse(residuals, y_data)
777
+ metrics.update({"nrmse": nrmse})
778
+ metrics.update({"red_chi2": red_chi2, "aic": aic})
779
+ # Standard error
780
+ if pcov is not None:
781
+ std_err = compute_adjusted_standard_errors(
782
+ pcov, residuals, cov_rescaled=has_sigma, red_chi2=red_chi2
783
+ )
784
+ return {
785
+ "params": popt,
786
+ "std_err": std_err,
787
+ "metrics": metrics,
788
+ }
789
+
790
+
791
+ def _format_scipy_least_squares(result, y_data=None, has_sigma=False):
792
+ """
793
+ Formats the output of a SciPy least-squares optimization into a standardized dictionary.
794
+
795
+ This function processes the result of a SciPy least-squares fitting function (e.g., `scipy.optimize.least_squares`)
796
+ and structures the fitting parameters, standard errors, and reduced chi-squared values for consistent downstream use.
797
+
798
+ Parameters
799
+ ----------
800
+ result : `scipy.optimize.OptimizeResult`
801
+ The result of a least-squares optimization (e.g., from `scipy.optimize.least_squares`).
802
+ It must contain the following fields:
803
+ - `result.x`: Optimized parameters (NumPy array)
804
+ - `result.fun`: Residuals (array of differences between the observed and fitted data)
805
+ - `result.jac`: Jacobian matrix (used to estimate covariance)
806
+
807
+ y_data : np.ndarray, optional
808
+ The y data that has been fit. Used to compute some fit metrics.
809
+
810
+ has_sigma : bool, optional
811
+ Indicates whether the fitting procedure considered experimental errors (`sigma`).
812
+ If `True`, the covariance matrix does not need rescaling.
813
+
814
+ Returns
815
+ -------
816
+ dict
817
+ A dictionary containing:
818
+ - `"params"`: Optimized parameters (`result.x`).
819
+ - `"std_err"`: Standard errors computed from the covariance matrix and residuals.
820
+ - `"metrics"`: A dictionary containing the reduced chi-squared (`red_chi2`).
821
+ """
822
+ metrics = {}
823
+
824
+ params = result.x
825
+ residuals = result.fun
826
+ cov = np.linalg.inv(result.jac.T @ result.jac)
827
+
828
+ _, red_chi2 = compute_chi2(residuals, n_params=len(params), cov_rescaled=has_sigma)
829
+ aic = compute_aic(residuals, len(params))
830
+ if y_data is not None:
831
+ nrmse = compute_nrmse(residuals, y_data)
832
+ metrics.update({"nrmse": nrmse})
833
+ metrics.update({"red_chi2": red_chi2, "aic": aic})
834
+
835
+ std_err = compute_adjusted_standard_errors(
836
+ cov, residuals, cov_rescaled=has_sigma, red_chi2=red_chi2
837
+ )
838
+
839
+ return {"params": params, "std_err": std_err, "metrics": metrics}
840
+
841
+
842
+ def _format_scipy_minimize(result, residuals=None, y_data=None, has_sigma=False):
843
+ """
844
+ Formats the output of a SciPy minimize optimization into a standardized dictionary.
845
+
846
+ This function processes the result of a SciPy minimization optimization (e.g., `scipy.optimize.minimize`)
847
+ and structures the fitting parameters, standard errors, and reduced chi-squared values for consistent downstream use.
848
+
849
+ Parameters
850
+ ----------
851
+ result : `scipy.optimize.OptimizeResult`
852
+ The result of a minimization optimization (e.g., from `scipy.optimize.minimize`).
853
+ It must contain the following fields:
854
+ - `result.x`: Optimized parameters (NumPy array).
855
+ - `result.hess_inv`: Inverse Hessian matrix used to estimate the covariance.
856
+
857
+ residuals : array-like, optional
858
+ The residuals (differences between observed data and fitted model).
859
+ If not provided, standard errors will be computed based on the inverse Hessian matrix.
860
+
861
+ y_data : np.ndarray, optional
862
+ The y data that has been fit. Used to compute some fit metrics.
863
+
864
+ has_sigma : bool, optional
865
+ Indicates whether the fitting procedure considered experimental errors (`sigma`).
866
+ If `True`, the covariance matrix does not need rescaling.
867
+
868
+ Returns
869
+ -------
870
+ dict
871
+ A dictionary containing:
872
+ - `"params"`: Optimized parameters (`result.x`).
873
+ - `"std_err"`: Standard errors computed either from the Hessian matrix or based on the residuals.
874
+ - `"metrics"`: A dictionary containing the reduced chi-squared (`red_chi2`), if residuals are provided.
875
+ """
876
+ params = result.x
877
+ cov = _get_covariance_from_scipy_optimize_result(result)
878
+ metrics = {}
879
+
880
+ if residuals is None:
881
+ std_err = np.sqrt(np.abs(result.hess_inv.diagonal()))
882
+ else:
883
+ std_err = compute_adjusted_standard_errors(
884
+ cov, residuals, cov_rescaled=has_sigma
885
+ )
886
+
887
+ _, red_chi2 = compute_chi2(
888
+ residuals, n_params=len(params), cov_rescaled=has_sigma
889
+ )
890
+ aic = compute_aic(residuals, len(params))
891
+ if y_data is not None:
892
+ nrmse = compute_nrmse(residuals, y_data)
893
+ metrics.update({"nrmse": nrmse})
894
+ metrics.update({"red_chi2": red_chi2, "aic": aic})
895
+
896
+ return {"params": params, "std_err": std_err, "metrics": metrics}
897
+
898
+
899
+ def _format_lmfit(result: ModelResult):
900
+ """
901
+ Formats the output of an lmfit model fitting result into a standardized dictionary.
902
+
903
+ This function processes the result of an lmfit model fitting (e.g., from `lmfit.Model.fit`) and
904
+ structures the fitting parameters, their standard errors, reduced chi-squared, and a prediction function.
905
+
906
+ Parameters
907
+ ----------
908
+ result : `lmfit.ModelResult`
909
+ The result of an lmfit model fitting procedure. It must contain the following fields:
910
+ - `result.params`: A dictionary of fitted parameters and their values.
911
+ - `result.redchi`: The reduced chi-squared value.
912
+ - `result.eval`: A method to evaluate the fitted model using independent variable values.
913
+ - `result.userkws`: Dictionary of user-supplied keywords that includes the independent variable.
914
+ - `result.model.independent_vars`: List of independent variable names in the model.
915
+
916
+ Returns
917
+ -------
918
+ dict
919
+ A dictionary containing:
920
+ - `"params"`: Optimized parameters (as a NumPy array).
921
+ - `"std_err"`: Standard errors of the parameters.
922
+ - `"metrics"`: A dictionary containing the reduced chi-squared (`red_chi2`).
923
+ - `"predict"`: A function that predicts the model's output given an input (using optimized parameters).
924
+ - `"param_names"`: List of parameter names.
925
+
926
+ Notes
927
+ -----
928
+ - lmfit already rescales standard errors by the reduced chi-squared, so no further adjustments are made.
929
+ - The independent variable name used in the fit is determined from `result.userkws` and `result.model.independent_vars`.
930
+ - The function creates a prediction function (`predict`) from the fitted model.
931
+ """
932
+ params = np.array([param.value for param in result.params.values()])
933
+ param_names = list(result.params.keys())
934
+ std_err = np.array(
935
+ [
936
+ param.stderr if param.stderr is not None else np.nan
937
+ for param in result.params.values()
938
+ ]
939
+ )
940
+
941
+ aic = compute_aic(result.residual, len(params))
942
+ nrmse = compute_nrmse(result.residual, result.data)
943
+ metrics = {"red_chi2": result.redchi, "aic": aic, "nrmse": nrmse}
944
+
945
+ # Determine the independent variable name used in the fit
946
+ independent_var = result.userkws.keys() & result.model.independent_vars
947
+ independent_var = (
948
+ independent_var.pop() if independent_var else result.model.independent_vars[0]
949
+ )
950
+ fit_function = lambda x: result.eval(**{independent_var: x})
951
+
952
+ return {
953
+ "params": params,
954
+ "std_err": std_err,
955
+ "metrics": metrics,
956
+ "predict": fit_function,
957
+ "param_names": param_names,
958
+ }
959
+
960
+
961
+ def _get_covariance_from_scipy_optimize_result(
962
+ result: spopt.OptimizeResult,
963
+ ) -> np.ndarray:
964
+ """
965
+ Extracts the covariance matrix (or an approximation) from a scipy optimization result.
966
+
967
+ This function attempts to retrieve the covariance matrix of the fitted parameters from the
968
+ result object returned by a scipy optimization method. It first checks for the presence of
969
+ the inverse Hessian (`hess_inv`), which is used to estimate the covariance. If it's not available,
970
+ the function attempts to compute the covariance using the Hessian matrix (`hess`).
971
+
972
+ Parameters
973
+ ----------
974
+ result : `scipy.optimize.OptimizeResult`
975
+ The result object returned by a scipy optimization function, such as `scipy.optimize.minimize` or `scipy.optimize.curve_fit`.
976
+ This object contains the optimization results, including the Hessian or its inverse.
977
+
978
+ Returns
979
+ -------
980
+ np.ndarray or None
981
+ The covariance matrix of the optimized parameters, or `None` if it cannot be computed.
982
+ If the inverse Hessian (`hess_inv`) is available, it will be returned directly.
983
+ If the Hessian matrix (`hess`) is available and not singular, its inverse will be computed and returned.
984
+ If neither is available, the function returns `None`.
985
+
986
+ Notes
987
+ -----
988
+ - If the Hessian matrix (`hess`) is singular or nearly singular, the covariance matrix cannot be computed.
989
+ - In some cases, the inverse Hessian (`hess_inv`) is directly available and provides the covariance without further computation.
990
+ """
991
+ if hasattr(result, "hess_inv"):
992
+ hess_inv = result.hess_inv
993
+
994
+ # Handle different types of hess_inv
995
+ if isinstance(hess_inv, np.ndarray):
996
+ return hess_inv
997
+ elif hasattr(hess_inv, "todense"):
998
+ return hess_inv.todense()
999
+
1000
+ if hasattr(result, "hess") and result.hess is not None:
1001
+ try:
1002
+ return np.linalg.inv(result.hess)
1003
+ except np.linalg.LinAlgError:
1004
+ pass # Hessian is singular, cannot compute covariance
1005
+
1006
+ return None
1007
+
1008
+
1009
+ def _get_xy_data_from_fit_args(*args, **kwargs):
1010
+ """
1011
+ Extracts x and y data from the given arguments and keyword arguments.
1012
+
1013
+ This helper function retrieves the x and y data (1D vectors) from the function's arguments or keyword arguments.
1014
+ The function checks for common keyword names like "x_data", "xdata", "x", "y_data", "ydata", and "y", and returns
1015
+ the corresponding data. If no keyword arguments are found, it attempts to extract the first two consecutive 1D
1016
+ vectors from the positional arguments.
1017
+
1018
+ Parameters
1019
+ ----------
1020
+ *args : variable length argument list
1021
+ The positional arguments passed to the function, potentially containing the x and y data.
1022
+
1023
+ **kwargs : keyword arguments
1024
+ The keyword arguments passed to the function, potentially containing keys such as "x_data", "x", "y_data", or "y".
1025
+
1026
+ Returns
1027
+ -------
1028
+ tuple of (np.ndarray, np.ndarray)
1029
+ A tuple containing the x data and y data as 1D numpy arrays or lists. If no valid data is found, returns (None, None).
1030
+
1035
+
1036
+ Notes
1037
+ -----
1038
+ - The function looks for the x and y data in the keyword arguments first, in the order of x_keys and y_keys.
1039
+ - If both x and y data are not found in keyword arguments, the function will look for the first two consecutive
1040
+ 1D vectors in the positional arguments.
1041
+ - If the data cannot be found, the function will return (None, None).
1042
+ - The function validates that the extracted x and y data are 1D vectors (either lists or numpy arrays).
1043
+ """
1044
+ # Possible keyword names for x and y data
1045
+ x_keys = ["x_data", "xdata", "x"]
1046
+ y_keys = ["y_data", "ydata", "y"]
1047
+
1048
+ # Validate if an object is a 1D vector
1049
+ def is_valid_vector(obj):
1050
+ return isinstance(obj, (list, np.ndarray)) and np.ndim(obj) == 1
1051
+
1052
+ x_data, y_data = None, None
1053
+
1054
+ # Look for x_data in keyword arguments
1055
+ for key in x_keys:
1056
+ if key in kwargs and is_valid_vector(kwargs[key]):
1057
+ x_data = kwargs[key]
1058
+ break
1059
+ # Look for y_data in keyword arguments
1060
+ for key in y_keys:
1061
+ if key in kwargs and is_valid_vector(kwargs[key]):
1062
+ y_data = kwargs[key]
1063
+ break
1064
+
1065
+ # If both parameters were found, return them
1066
+ if (x_data is not None) and (y_data is not None):
1067
+ return x_data, y_data
1068
+
1069
+ # If the args have only 1 entry
1070
+ if len(args) == 1 and is_valid_vector(args[0]):
1071
+ if y_data is not None:
1072
+ x_data = args[0]
1073
+ else:
1074
+ y_data = args[0]
1075
+
1076
+ # If x and y were not found, try finding the first two consecutive vectors in args
1077
+ if x_data is None or y_data is None:
1078
+ # Check pairs of consecutive elements
1079
+ for i in range(len(args) - 1):
1080
+ if is_valid_vector(args[i]) and is_valid_vector(args[i + 1]):
1081
+ x_data, y_data = args[i], args[i + 1]
1082
+ break
1083
+
1084
+ return x_data, y_data
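Finally, a sketch of how the argument extraction above behaves for a few call shapes (illustrative only):

    import numpy as np

    from sqil_core.fit._core import _get_xy_data_from_fit_args

    x = np.linspace(0, 1, 10)
    y = x**2

    # Keyword arguments are checked first
    print(_get_xy_data_from_fit_args(x_data=x, y_data=y))

    # Otherwise the first two consecutive 1D vectors among the positional args are used
    print(_get_xy_data_from_fit_args("some_model", x, y, 42))

    # A single positional vector is interpreted as y_data
    xd, yd = _get_xy_data_from_fit_args(y)
    print(xd is None, np.array_equal(yd, y))   # True True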