sqil-core 0.0.2__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. sqil_core/__init__.py +6 -2
  2. sqil_core/config.py +13 -0
  3. sqil_core/config_log.py +42 -0
  4. sqil_core/experiment/__init__.py +11 -0
  5. sqil_core/experiment/_analysis.py +95 -0
  6. sqil_core/experiment/_events.py +25 -0
  7. sqil_core/experiment/_experiment.py +553 -0
  8. sqil_core/experiment/data/plottr.py +778 -0
  9. sqil_core/experiment/helpers/_function_override_handler.py +111 -0
  10. sqil_core/experiment/helpers/_labone_wrappers.py +12 -0
  11. sqil_core/experiment/instruments/__init__.py +2 -0
  12. sqil_core/experiment/instruments/_instrument.py +190 -0
  13. sqil_core/experiment/instruments/drivers/SignalCore_SC5511A.py +515 -0
  14. sqil_core/experiment/instruments/local_oscillator.py +205 -0
  15. sqil_core/experiment/instruments/server.py +175 -0
  16. sqil_core/experiment/instruments/setup.yaml +21 -0
  17. sqil_core/experiment/instruments/zurich_instruments.py +55 -0
  18. sqil_core/fit/__init__.py +38 -0
  19. sqil_core/fit/_core.py +1084 -0
  20. sqil_core/fit/_fit.py +1191 -0
  21. sqil_core/fit/_guess.py +232 -0
  22. sqil_core/fit/_models.py +127 -0
  23. sqil_core/fit/_quality.py +266 -0
  24. sqil_core/resonator/__init__.py +13 -0
  25. sqil_core/resonator/_resonator.py +989 -0
  26. sqil_core/utils/__init__.py +85 -5
  27. sqil_core/utils/_analysis.py +415 -0
  28. sqil_core/utils/_const.py +105 -0
  29. sqil_core/utils/_formatter.py +259 -0
  30. sqil_core/utils/_plot.py +373 -0
  31. sqil_core/utils/_read.py +262 -0
  32. sqil_core/utils/_utils.py +164 -0
  33. {sqil_core-0.0.2.dist-info → sqil_core-1.0.0.dist-info}/METADATA +40 -7
  34. sqil_core-1.0.0.dist-info/RECORD +36 -0
  35. {sqil_core-0.0.2.dist-info → sqil_core-1.0.0.dist-info}/WHEEL +1 -1
  36. {sqil_core-0.0.2.dist-info → sqil_core-1.0.0.dist-info}/entry_points.txt +1 -1
  37. sqil_core/utils/analysis.py +0 -68
  38. sqil_core/utils/const.py +0 -38
  39. sqil_core/utils/formatter.py +0 -134
  40. sqil_core/utils/read.py +0 -156
  41. sqil_core-0.0.2.dist-info/RECORD +0 -10
sqil_core/fit/_fit.py ADDED
@@ -0,0 +1,1191 @@
1
+ from __future__ import annotations
2
+
3
+ import warnings
4
+ from typing import Callable
5
+
6
+ import numpy as np
7
+ from scipy.optimize import curve_fit, fsolve, least_squares, leastsq, minimize
8
+
9
+ import sqil_core.fit._models as _models
10
+ from sqil_core.utils._utils import fill_gaps, has_at_least_one, make_iterable
11
+
12
+ from ._core import FitResult, fit_input, fit_output
13
+ from ._guess import (
14
+ decaying_oscillations_bounds,
15
+ decaying_oscillations_guess,
16
+ gaussian_bounds,
17
+ gaussian_guess,
18
+ lorentzian_bounds,
19
+ lorentzian_guess,
20
+ oscillations_bounds,
21
+ oscillations_guess,
22
+ )
23
+
24
+
25
+ @fit_input
26
+ @fit_output
27
+ def fit_lorentzian(
28
+ x_data: np.ndarray,
29
+ y_data: np.ndarray,
30
+ guess: list = None,
31
+ bounds: list[tuple[float]] | tuple = None,
32
+ ) -> FitResult:
33
+ r"""
34
+ Fits a Lorentzian function to the provided data. The function estimates the
35
+ amplitude (A), center (x0), full width at half maximum (FWHM), and baseline (y0)
36
+ of the Lorentzian function.
37
+
38
+ L(x) = A * (|FWHM| / 2) / ((x - x0)^2 + (FWHM^2 / 4)) + y0
39
+
40
+ $$L(x) = A \frac{\left| \text{FWHM} \right|}{2} \frac{1}{(x - x_0)^2 + \frac{\text{FWHM}^2}{4}} + y_0$$
41
+
42
+ Parameters
43
+ ----------
44
+ x_data : np.ndarray
45
+ The independent variable (e.g., x values of the data).
46
+
47
+ y_data : np.ndarray
48
+ The dependent variable (e.g., y values of the data).
49
+
50
+ guess : list, optional
51
+ Initial guesses for the fit parameters [A, x0, fwhm, y0]. If not provided,
52
+ defaults are calculated based on the data.
53
+
54
+ bounds : list[tuple[float]], optional
55
+ The bounds for the fit parameters in the format [(min, max), ...].
56
+ If not provided, defaults are calculated.
57
+
58
+ fixed_params : list[int], optional, default: None
59
+ A list of indices representing parameters in the initial guess that should
60
+ remain unchanged during the fitting process.
61
+
62
+ Returns
63
+ -------
64
+ FitResult
65
+ A `FitResult` object containing:
66
+ - Fitted parameters (`params`).
67
+ - Standard errors (`std_err`).
68
+ - Goodness-of-fit metrics (`rmse`, root mean squared error).
69
+ - A callable `predict` function for generating fitted responses.
70
+ """
71
+
72
+ x, y = x_data, y_data
73
+
74
+ # Default initial guess if not provided
75
+ if has_at_least_one(guess, None):
76
+ guess = fill_gaps(guess, lorentzian_guess(x_data, y_data))
77
+
78
+ # Default bounds if not provided
79
+ if bounds is None:
80
+ bounds = ([None] * len(guess), [None] * len(guess))
81
+ if has_at_least_one(bounds[0], None) or has_at_least_one(bounds[1], None):
82
+ lower, upper = bounds
83
+ lower_guess, upper_guess = lorentzian_bounds(x_data, y_data, guess)
84
+ bounds = (fill_gaps(lower, lower_guess), fill_gaps(upper, upper_guess))
85
+
86
+ res = curve_fit(_models.lorentzian, x, y, p0=guess, bounds=bounds, full_output=True)
87
+
88
+ return res, {
89
+ "param_names": ["A", "x0", "fwhm", "y0"],
90
+ "predict": _models.lorentzian,
91
+ }
92
+
93
+
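# Usage sketch (illustrative, with the package installed): build noisy data
# from the documented Lorentzian form and recover its parameters.
import numpy as np
from sqil_core.fit._fit import fit_lorentzian

x = np.linspace(-5, 5, 401)
y = 2.0 * (1.2 / 2) / ((x - 0.3) ** 2 + 1.2**2 / 4) + 0.1
y = y + 0.01 * np.random.default_rng(0).normal(size=x.size)

res = fit_lorentzian(x, y)
print(res.params)  # [A, x0, fwhm, y0], expected near [2.0, 0.3, 1.2, 0.1]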
94
+ @fit_input
95
+ @fit_output
96
+ def fit_two_lorentzians_shared_x0(
97
+ x_data_1,
98
+ y_data_1,
99
+ x_data_2,
100
+ y_data_2,
101
+ guess: list = None,
102
+ bounds: list[tuple[float]] | tuple = None,
103
+ ):
104
+ y_all = np.concatenate([y_data_1, y_data_2])
105
+
106
+ if has_at_least_one(guess, None):
107
+ guess_1 = lorentzian_guess(x_data_1, y_data_1)
108
+ guess_2 = lorentzian_guess(x_data_2, y_data_2)
109
+ x01, x02 = guess_1[1], guess_2[1]
110
+ x0 = np.mean([x01, x02])
111
+ guess = fill_gaps(
112
+ guess, np.concatenate([np.delete(guess_1, 1), np.delete(guess_2, 1), [x0]])
113
+ )
114
+
115
+ if bounds is None:
116
+ bounds = [[None] * len(guess), [None] * len(guess)]
117
+ if has_at_least_one(bounds[0], None) or has_at_least_one(bounds[1], None):
118
+ lower, upper = bounds
119
+ lower_guess_1, upper_guess_1 = lorentzian_bounds(x_data_1, y_data_1, guess_1)
120
+ lower_guess_2, upper_guess_2 = lorentzian_bounds(x_data_2, y_data_2, guess_2)
121
+ # Combine bounds for 1 and 2
122
+ lower_guess = np.concatenate(
123
+ [
124
+ np.delete(lower_guess_1, 1),
125
+ np.delete(lower_guess_2, 1),
126
+ [np.min([lower_guess_1, lower_guess_2])],
127
+ ]
128
+ )
129
+ upper_guess = np.concatenate(
130
+ [
131
+ np.delete(upper_guess_1, 1),
132
+ np.delete(upper_guess_2, 1),
133
+ [np.max([upper_guess_1, upper_guess_2])],
134
+ ]
135
+ )
136
+ lower = fill_gaps(lower, lower_guess)
137
+ upper = fill_gaps(upper, upper_guess)
138
+ bounds = (lower, upper)
139
+
140
+ res = curve_fit(
141
+ lambda _, A1, fwhm1, y01, A2, fwhm2, y02, x0: _models.two_lorentzians_shared_x0(
142
+ x_data_1, x_data_2, A1, fwhm1, y01, A2, fwhm2, y02, x0
143
+ ),
144
+ xdata=np.zeros_like(y_all), # dummy x, since x1 and x2 are fixed via closure
145
+ ydata=y_all,
146
+ p0=guess,
147
+ # bounds=bounds,
148
+ full_output=True,
149
+ )
150
+
151
+ return res, {
152
+ "param_names": ["A1", "fwhm1", "y01", "A2", "fwhm2", "y02", "x0"],
153
+ "predict": _models.two_lorentzians_shared_x0,
154
+ "fit_output_vars": {
155
+ "x_data": np.concatenate([x_data_1, x_data_2]),
156
+ "y_data": y_all,
157
+ },
158
+ }
159
+
160
+
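# Usage sketch (illustrative): jointly fit two Lorentzian traces that share
# a common center x0 but have independent amplitudes, widths and offsets.
import numpy as np
from sqil_core.fit._fit import fit_two_lorentzians_shared_x0

x1 = np.linspace(4.9, 5.1, 201)
x2 = np.linspace(4.9, 5.1, 201)
y1 = 1.0 * (0.02 / 2) / ((x1 - 5.0) ** 2 + 0.02**2 / 4) + 0.05
y2 = 0.5 * (0.01 / 2) / ((x2 - 5.0) ** 2 + 0.01**2 / 4) + 0.02

res = fit_two_lorentzians_shared_x0(x1, y1, x2, y2)
print(res.params)  # [A1, fwhm1, y01, A2, fwhm2, y02, x0]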
161
+ @fit_input
162
+ @fit_output
163
+ def fit_gaussian(
164
+ x_data: np.ndarray,
165
+ y_data: np.ndarray,
166
+ guess: list = None,
167
+ bounds: list[tuple[float]] | tuple = None,
168
+ ) -> FitResult:
169
+ r"""
170
+ Fits a Gaussian function to the provided data. The function estimates the
171
+ amplitude, mean, standard deviation (sigma), and baseline of the Gaussian
172
+ function, and computes the full width at half maximum (FWHM).
173
+
174
+ G(x) = A / (|σ| * sqrt(2π)) * exp(- (x - x0)^2 / (2σ^2)) + y0
175
+
176
+ $$G(x) = A \frac{1}{\left| \sigma \right| \sqrt{2\pi}} \exp\left( -\frac{(x - x_0)^2}{2\sigma^2} \right) + y_0$$
177
+
178
+ Parameters
179
+ ----------
180
+ x_data : np.ndarray
181
+ The independent variable (e.g., x values of the data).
182
+
183
+ y_data : np.ndarray
184
+ The dependent variable (e.g., y values of the data).
185
+
186
+ guess : list, optional
187
+ Initial guesses for the fit parameters [A, x0, sigma, y0]. If not provided,
188
+ defaults are calculated based on the data.
189
+
190
+ bounds : list[tuple[float]], optional
191
+ The bounds for the fit parameters in the format [(min, max), ...].
192
+ If not provided, defaults are calculated.
193
+
194
+ fixed_params : list[int], optional, default: None
195
+ A list of indices representing parameters in the initial guess that should
196
+ remain unchanged during the fitting process.
197
+
198
+ Returns
199
+ -------
200
+ FitResult
201
+ A `FitResult` object containing:
202
+ - Fitted parameters (`params`).
203
+ - Standard errors (`std_err`).
204
+ - Goodness-of-fit metrics (`rmse`, root mean squared error).
205
+ - A callable `predict` function for generating fitted responses.
206
+ - A metadata dictionary containing the FWHM.
207
+ """
208
+
209
+ x, y = x_data, y_data
210
+
211
+ # Default initial guess if not provided
212
+ if has_at_least_one(guess, None):
213
+ guess = fill_gaps(guess, gaussian_guess(x_data, y_data))
214
+ # Default bounds if not provided
215
+ if bounds is None:
216
+ bounds = ([None] * len(guess), [None] * len(guess))
217
+ if has_at_least_one(bounds[0], None) or has_at_least_one(bounds[1], None):
218
+ lower, upper = bounds
219
+ lower_guess, upper_guess = gaussian_bounds(x_data, y_data, guess)
220
+ bounds = (fill_gaps(lower, lower_guess), fill_gaps(upper, upper_guess))
221
+
222
+ res = curve_fit(_models.gaussian, x, y, p0=guess, bounds=bounds, full_output=True)
223
+
224
+ # Compute FWHM from sigma
225
+ _, _, sigma, _ = res[0]
226
+ fwhm = 2 * np.sqrt(2 * np.log(2)) * sigma
227
+
228
+ return res, {
229
+ "param_names": ["A", "x0", "sigma", "y0"],
230
+ "predict": _models.gaussian,
231
+ "fwhm": fwhm,
232
+ }
233
+
234
+
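# Usage sketch (illustrative): fit a Gaussian generated from the documented
# form; the FWHM reported in the fit metadata equals 2*sqrt(2*ln 2)*sigma.
import numpy as np
from sqil_core.fit._fit import fit_gaussian

x = np.linspace(-4, 4, 301)
sigma = 0.8
y = 3.0 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-((x - 0.5) ** 2) / (2 * sigma**2)) + 0.2

res = fit_gaussian(x, y)
print(res.params)  # [A, x0, sigma, y0], expected near [3.0, 0.5, 0.8, 0.2]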
235
+ @fit_input
236
+ @fit_output
237
+ def fit_two_gaussians_shared_x0(
238
+ x_data_1,
239
+ y_data_1,
240
+ x_data_2,
241
+ y_data_2,
242
+ guess: list = None,
243
+ bounds: list[tuple[float]] | tuple = None,
244
+ ):
245
+ y_all = np.concatenate([y_data_1, y_data_2])
246
+
247
+ if has_at_least_one(guess, None):
248
+ guess_1 = gaussian_guess(x_data_1, y_data_1)
249
+ guess_2 = gaussian_guess(x_data_2, y_data_2)
250
+ x01, x02 = guess_1[1], guess_2[1]
251
+ x0 = np.mean([x01, x02])
252
+ guess = fill_gaps(
253
+ guess, np.concatenate([np.delete(guess_1, 1), np.delete(guess_2, 1), [x0]])
254
+ )
255
+
256
+ if bounds is None:
257
+ bounds = [[None] * len(guess), [None] * len(guess)]
258
+ if has_at_least_one(bounds[0], None) or has_at_least_one(bounds[1], None):
259
+ lower, upper = bounds
260
+ lower_guess_1, upper_guess_1 = gaussian_bounds(x_data_1, y_data_1, guess_1)
261
+ lower_guess_2, upper_guess_2 = gaussian_bounds(x_data_2, y_data_2, guess_2)
262
+ # Combine bounds for 1 and 2
263
+ lower_guess = np.concatenate(
264
+ [
265
+ np.delete(lower_guess_1, 1),
266
+ np.delete(lower_guess_2, 1),
267
+ [np.min([lower_guess_1, lower_guess_2])],
268
+ ]
269
+ )
270
+ upper_guess = np.concatenate(
271
+ [
272
+ np.delete(upper_guess_1, 1),
273
+ np.delete(upper_guess_2, 1),
274
+ [np.max([upper_guess_1, upper_guess_2])],
275
+ ]
276
+ )
277
+ lower = fill_gaps(lower, lower_guess)
278
+ upper = fill_gaps(upper, upper_guess)
279
+ bounds = (lower, upper)
280
+
281
+ res = curve_fit(
282
+ lambda _, A1, fwhm1, y01, A2, fwhm2, y02, x0: _models.two_gaussians_shared_x0(
283
+ x_data_1, x_data_2, A1, fwhm1, y01, A2, fwhm2, y02, x0
284
+ ),
285
+ xdata=np.zeros_like(y_all), # dummy x, since x1 and x2 are fixed via closure
286
+ ydata=y_all,
287
+ p0=guess,
288
+ # bounds=bounds,
289
+ full_output=True,
290
+ )
291
+
292
+ return res, {
293
+ "param_names": ["A1", "fwhm1", "y01", "A2", "fwhm2", "y02", "x0"],
294
+ "predict": _models.two_gaussians_shared_x0,
295
+ "fit_output_vars": {
296
+ "x_data": np.concatenate([x_data_1, x_data_2]),
297
+ "y_data": y_all,
298
+ },
299
+ }
300
+
301
+
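# Usage sketch (illustrative): same joint-fit pattern as the Lorentzian
# version above, applied to two Gaussian traces with a common center.
import numpy as np
from sqil_core.fit._fit import fit_two_gaussians_shared_x0


def gauss(x, A, sigma, y0):
    return A / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(x**2) / (2 * sigma**2)) + y0


x = np.linspace(-3, 3, 201)
res = fit_two_gaussians_shared_x0(x, gauss(x, 1.0, 0.5, 0.0), x, gauss(x, 2.0, 0.8, 0.1))
print(res.params)  # [A1, fwhm1, y01, A2, fwhm2, y02, x0]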
302
+ @fit_input
303
+ @fit_output
304
+ def fit_decaying_exp(
305
+ x_data: np.ndarray,
306
+ y_data: np.ndarray,
307
+ guess: list = None,
308
+ bounds: list[tuple[float]] | tuple = (-np.inf, np.inf),
309
+ ) -> FitResult:
310
+ r"""
311
+ Fits a decaying exponential function to the provided data. The function estimates
312
+ the amplitude (A), decay time constant (tau), and baseline (y0) of the decaying
313
+ exponential function.
314
+
315
+ f(x) = A * exp(-x / τ) + y0
316
+
317
+ $$f(x) = A \exp\left( -\frac{x}{\tau} \right) + y_0$$
318
+
319
+ Parameters
320
+ ----------
321
+ x_data : np.ndarray
322
+ The independent variable (e.g., x values of the data).
323
+
324
+ y_data : np.ndarray
325
+ The dependent variable (e.g., y values of the data).
326
+
327
+ guess : list, optional
328
+ Initial guesses for the fit parameters [A, tau, y0]. If not provided,
329
+ defaults are calculated based on the data.
330
+
331
+ bounds : list[tuple[float]], optional
332
+ The bounds for the fit parameters in the format [(min, max), ...].
333
+ If not provided, defaults are calculated.
334
+
335
+ fixed_params : list[int], optional, default: None
336
+ A list of indices representing parameters in the initial guess that should
337
+ remain unchanged during the fitting process.
338
+
339
+ Returns
340
+ -------
341
+ FitResult
342
+ A `FitResult` object containing:
343
+ - Fitted parameters (`params`).
344
+ - Standard errors (`std_err`).
345
+ - Goodness-of-fit metrics (`rmse`, root mean squared error).
346
+ - A callable `predict` function for generating fitted responses.
347
+ """
348
+ x, y = x_data, y_data
349
+
350
+ # Default initial guess if not provided
351
+ if guess is None:
352
+ max_y = np.max(y)
353
+ min_y = np.min(y)
354
+ half = 0.5 * (max_y + min_y)
355
+
356
+ if y[0] > y[-1]:
357
+ tau0_idx = np.argmax(y < half)
358
+ else:
359
+ tau0_idx = np.argmax(y > half)
360
+
361
+ b0 = x[tau0_idx] if tau0_idx != 0 else 0.5 * (x[0] + x[-1])
362
+ guess = [y[0] - y[-1], b0, y[-1]]
363
+
364
+ # Default bounds if not provided
365
+ if bounds is None:
366
+ span_y = np.max(y) - np.min(y)
367
+ c0_min = np.min(y) - 100.0 * span_y
368
+ c0_max = np.max(y) + 100.0 * span_y
369
+ bounds = (
370
+ [-100.0 * span_y, 0.0, c0_min],
371
+ [100.0 * span_y, 100.0 * (np.max(x) - np.min(x)), c0_max],
372
+ )
373
+
374
+ res = curve_fit(
375
+ _models.decaying_exp, x, y, p0=guess, bounds=bounds, full_output=True
376
+ )
377
+
378
+ return res, {
379
+ "param_names": ["A", "tau", "y0"],
380
+ "predict": _models.decaying_exp,
381
+ }
382
+
383
+
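# Usage sketch (illustrative): recover amplitude, decay constant and offset
# from a synthetic exponential decay (e.g. a T1-style relaxation trace).
import numpy as np
from sqil_core.fit._fit import fit_decaying_exp

t = np.linspace(0, 100e-6, 101)
y = 0.8 * np.exp(-t / 20e-6) + 0.1

res = fit_decaying_exp(t, y)
print(res.params)  # [A, tau, y0], expected near [0.8, 2e-5, 0.1]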
384
+ @fit_input
385
+ @fit_output
386
+ def fit_qubit_relaxation_qp(
387
+ x_data: np.ndarray,
388
+ y_data: np.ndarray,
389
+ guess: list[float] | None = None,
390
+ bounds: list[tuple[float]] | tuple = (-np.inf, np.inf),
391
+ maxfev: int = 10000,
392
+ ftol: float = 1e-11,
393
+ ) -> FitResult:
394
+ r"""
395
+ Fits a qubit relaxation model with quasiparticle (QP) effects using a
396
+ biexponential decay function. The fitting procedure starts with an initial
397
+ guess derived from a single exponential fit.
398
+
399
+ f(x) = A * exp(|nQP| * (exp(-x / T1QP) - 1)) * exp(-x / T1R) + y0
400
+
401
+ $$f(x) = A \exp\left( |\text{n}_{\text{QP}}| \left( \exp\left(-\frac{x}{T_{1QP}}\right)
402
+ - 1 \right) \right) \exp\left(-\frac{x}{T_{1R}}\right) + y_0$$
403
+
404
+ Parameters
405
+ ----------
406
+ x_data : np.ndarray
407
+ Time data points for the relaxation curve.
408
+
409
+ y_data : np.ndarray
410
+ Measured relaxation data.
411
+
412
+ guess : list[float], optional
413
+ Initial parameter guesses. If None, a default guess is computed
414
+ using a single exponential fit.
415
+
416
+ bounds : tuple[list[float], list[float]], optional
417
+ The bounds for the fit parameters in the format [(min, max), ...].
418
+ If None, reasonable bounds based on the initial guess are applied.
419
+
420
+ maxfev : int, optional, default=10000
421
+ Maximum number of function evaluations allowed for the curve fitting.
422
+
423
+ ftol : float, optional, default=1e-11
424
+ Relative tolerance for convergence in the least-squares optimization.
425
+
426
+ fixed_params : list[int], optional, default: None
427
+ A list of indices representing parameters in the initial guess that should
428
+ remain unchanged during the fitting process.
429
+
430
+ Returns
431
+ -------
432
+ FitResult
433
+ A `FitResult` object containing:
434
+ - Fitted parameters (`params`).
435
+ - Standard errors (`std_err`).
436
+ - Goodness-of-fit metrics (`rmse`, root mean squared error).
437
+ - A callable `predict` function for generating fitted responses.
438
+ """
439
+
440
+ # Use a single exponential fit for initial parameter guesses
441
+ from scipy.optimize import curve_fit
442
+
443
+ def single_exp(x, a, tau, c):
444
+ return a * np.exp(-x / tau) + c
445
+
446
+ single_guess = [y_data[0] - y_data[-1], np.mean(x_data), y_data[-1]]
447
+ single_popt, _ = curve_fit(single_exp, x_data, y_data, p0=single_guess)
448
+
449
+ a_guess, T1R_guess, c_guess = single_popt
450
+ T1QP_guess = 0.1 * T1R_guess
451
+ nQP_guess = 1.0
452
+
453
+ # Default initial guess
454
+ if guess is None:
455
+ guess = [a_guess * np.exp(1.0), 2.0 * T1R_guess, c_guess, T1QP_guess, nQP_guess]
456
+
457
+ # Default parameter bounds
458
+ if bounds is None:
459
+ bounds = (
460
+ [
461
+ -20.0 * np.abs(a_guess),
462
+ 1.0e-1 * T1R_guess,
463
+ -10.0 * np.abs(c_guess),
464
+ 1.0e-4 * T1R_guess,
465
+ 0.0,
466
+ ],
467
+ [
468
+ 20.0 * np.abs(a_guess),
469
+ 1.0e3 * T1R_guess,
470
+ 10.0 * np.abs(c_guess),
471
+ 10.0 * T1R_guess,
472
+ 1.0e3,
473
+ ],
474
+ )
475
+
476
+ res = curve_fit(
477
+ _models.qubit_relaxation_qp,
478
+ x_data,
479
+ y_data,
480
+ p0=guess,
481
+ bounds=bounds,
482
+ maxfev=maxfev,
483
+ ftol=ftol,
484
+ full_output=True,
485
+ )
486
+
487
+ return res, {
488
+ "param_names": ["A", "T1R", "y0", "T1QP", "nQP"],
489
+ "predict": _models.qubit_relaxation_qp,
490
+ }
491
+
492
+
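# Usage sketch (illustrative): evaluate the documented biexponential
# quasiparticle model on synthetic data and refit it.
import numpy as np
from sqil_core.fit._fit import fit_qubit_relaxation_qp

t = np.linspace(0, 500e-6, 201)
A, T1R, y0, T1QP, nQP = 1.0, 100e-6, 0.05, 10e-6, 1.5
y = A * np.exp(np.abs(nQP) * (np.exp(-t / T1QP) - 1)) * np.exp(-t / T1R) + y0

res = fit_qubit_relaxation_qp(t, y)
print(res.params)  # [A, T1R, y0, T1QP, nQP]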
493
+ @fit_input
494
+ @fit_output
495
+ def fit_decaying_oscillations(
496
+ x_data: np.ndarray,
497
+ y_data: np.ndarray,
498
+ guess: list[float] | None = None,
499
+ bounds: list[tuple[float]] | tuple = None,
500
+ num_init: int = 10,
501
+ ) -> FitResult:
502
+ r"""
503
+ Fits a decaying oscillation model to data. The function estimates key features
504
+ like the oscillation period and phase, and tries multiple initial guesses for
505
+ the optimization process.
506
+
507
+ f(x) = A * exp(-x / τ) * cos(2π * (x - φ) / T) + y0
508
+
509
+ $$f(x) = A \exp\left( -\frac{x}{\tau} \right) \cos\left( 2\pi \frac{x - \phi}{T} \right) + y_0$$
510
+
511
+ Parameters
512
+ ----------
513
+ x_data : np.ndarray
514
+ Independent variable array (e.g., time or frequency).
515
+ y_data : np.ndarray
516
+ Dependent variable array representing the measured signal.
517
+ guess : list[float] or None, optional
518
+ Initial parameter estimates [A, tau, y0, phi, T]. Missing values are automatically filled.
519
+ bounds : list[tuple[float]] or tuple, optional
520
+ Lower and upper bounds for parameters during fitting, by default no bounds.
521
+ num_init : int, optional
522
+ Number of phase values to try when guessing, by default 10.
523
+
524
+ Returns
525
+ -------
526
+ FitResult
527
+ A `FitResult` object containing:
528
+ - Fitted parameters (`params`).
529
+ - Standard errors (`std_err`).
530
+ - Goodness-of-fit metrics (`rmse`, root mean squared error).
531
+ - A callable `predict` function for generating fitted responses.
532
+ - A metadata dictionary containing the pi_time and its standard error.
533
+ """
534
+ # Default initial guess if not provided
535
+ if has_at_least_one(guess, None):
536
+ guess = fill_gaps(guess, decaying_oscillations_guess(x_data, y_data, num_init))
537
+
538
+ # Default bounds if not provided
539
+ if bounds is None:
540
+ bounds = ([None] * len(guess), [None] * len(guess))
541
+ if has_at_least_one(bounds[0], None) or has_at_least_one(bounds[1], None):
542
+ lower, upper = bounds
543
+ lower_guess, upper_guess = decaying_oscillations_bounds(x_data, y_data, guess)
544
+ bounds = (fill_gaps(lower, lower_guess), fill_gaps(upper, upper_guess))
545
+
546
+ A, tau, y0, phi, T = guess
547
+ phi = make_iterable(phi)
548
+ y0 = make_iterable(y0)
549
+
550
+ best_fit = None
551
+ best_popt = None
552
+ best_nrmse = np.inf
553
+
554
+ @fit_output
555
+ def _curve_fit_osc(x_data, y_data, p0, bounds):
556
+ return curve_fit(
557
+ _models.decaying_oscillations,
558
+ x_data,
559
+ y_data,
560
+ p0,
561
+ bounds=bounds,
562
+ full_output=True,
563
+ )
564
+
565
+ # Try multiple initializations
566
+ for phi_guess in phi:
567
+ for offset in y0:
568
+ p0 = [A, tau, offset, phi_guess, T]
569
+
570
+ try:
571
+ with warnings.catch_warnings():
572
+ warnings.simplefilter("ignore")
573
+ fit_res = _curve_fit_osc(x_data, y_data, p0, bounds)
574
+ if fit_res.metrics["nrmse"] < best_nrmse:
575
+ best_fit, best_popt = fit_res.output, fit_res.params
576
+ best_nrmse = fit_res.metrics["nrmse"]
577
+ except Exception:
578
+ if best_fit is None:
579
+
580
+ def _decaying_osc_res(p, x, y):
581
+ return _models.decaying_oscillations(x, *p) - y
582
+
583
+ result = least_squares(
584
+ _decaying_osc_res,
585
+ p0,
586
+ loss="soft_l1",
587
+ f_scale=0.1,
588
+ bounds=bounds,
589
+ args=(x_data, y_data),
590
+ )
591
+ best_fit, best_popt = result, result.x
592
+
593
+ if best_fit is None:
594
+ return None
595
+
596
+ # Compute pi-time (half-period + phase offset)
597
+ pi_time_raw = 0.5 * best_popt[4] + best_popt[3]
598
+ while pi_time_raw > 0.75 * np.abs(best_popt[4]):
599
+ pi_time_raw -= 0.5 * np.abs(best_popt[4])
600
+ while pi_time_raw < 0.25 * np.abs(best_popt[4]):
601
+ pi_time_raw += 0.5 * np.abs(best_popt[4])
602
+
603
+ def _get_pi_time_std_err(sqil_dict):
604
+ if sqil_dict["std_err"] is not None:
605
+ phi_err = sqil_dict["std_err"][3]
606
+ T_err = sqil_dict["std_err"][4]
607
+ if np.isfinite(T_err) and np.isfinite(phi_err):
608
+ return np.sqrt((T_err / 2) ** 2 + phi_err**2)
609
+ return np.nan
610
+
611
+ # Metadata dictionary
612
+ metadata = {
613
+ "param_names": ["A", "tau", "y0", "phi", "T"],
614
+ "predict": _models.decaying_oscillations,
615
+ "pi_time": pi_time_raw,
616
+ "@pi_time_std_err": _get_pi_time_std_err,
617
+ }
618
+
619
+ return best_fit, metadata
620
+
621
+
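# Usage sketch (illustrative): damped oscillations built from the documented
# model; the fit metadata also carries a pi-time derived from T and phi.
import numpy as np
from sqil_core.fit._fit import fit_decaying_oscillations

t = np.linspace(0, 2e-6, 201)
y = 0.5 * np.exp(-t / 1e-6) * np.cos(2 * np.pi * (t - 0.0) / 200e-9) + 0.5

res = fit_decaying_oscillations(t, y)
print(res.params)  # [A, tau, y0, phi, T]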
622
+ @fit_input
623
+ @fit_output
624
+ def fit_oscillations(
625
+ x_data: np.ndarray,
626
+ y_data: np.ndarray,
627
+ guess: list[float] | None = None,
628
+ bounds: list[tuple[float]] | tuple = None,
629
+ num_init: int = 10,
630
+ ) -> FitResult:
631
+ r"""
632
+ Fits an oscillation model to data. The function estimates key features
633
+ like the oscillation period and phase, and tries multiple initial guesses for
634
+ the optimization process.
635
+
636
+ f(x) = A * cos(2π * (x - φ) / T) + y0
637
+
638
+ $$f(x) = A \cos\left( 2\pi \frac{x - \phi}{T} \right) + y_0$$
639
+
640
+ Parameters
641
+ ----------
642
+ x_data : np.ndarray
643
+ Independent variable array (e.g., time or frequency).
644
+ y_data : np.ndarray
645
+ Dependent variable array representing the measured signal.
646
+ guess : list[float] or None, optional
647
+ Initial parameter estimates [A, y0, phi, T]. Missing values are automatically filled.
648
+ bounds : list[tuple[float]] or tuple, optional
649
+ Lower and upper bounds for parameters during fitting, by default no bounds.
650
+ num_init : int, optional
651
+ Number of phase values to try when guessing, by default 10.
652
+
653
+ Returns
654
+ -------
655
+ FitResult
656
+ A `FitResult` object containing:
657
+ - Fitted parameters (`params`).
658
+ - Standard errors (`std_err`).
659
+ - Goodness-of-fit metrics (`rmse`, root mean squared error).
660
+ - A callable `predict` function for generating fitted responses.
661
+ - A metadata dictionary containing the pi_time and its standard error.
662
+ """
663
+ # Default initial guess if not provided
664
+ if has_at_least_one(guess, None):
665
+ guess = fill_gaps(guess, oscillations_guess(x_data, y_data, num_init))
666
+
667
+ # Default bounds if not provided
668
+ if bounds is None:
669
+ bounds = ([None] * len(guess), [None] * len(guess))
670
+ if has_at_least_one(bounds[0], None) or has_at_least_one(bounds[1], None):
671
+ lower, upper = bounds
672
+ lower_guess, upper_guess = oscillations_bounds(x_data, y_data, guess)
673
+ bounds = (fill_gaps(lower, lower_guess), fill_gaps(upper, upper_guess))
674
+
675
+ A, y0, phi, T = guess
676
+ phi = make_iterable(phi)
677
+ y0 = make_iterable(y0)
678
+
679
+ best_fit = None
680
+ best_popt = None
681
+ best_nrmse = np.inf
682
+
683
+ @fit_output
684
+ def _curve_fit_osc(x_data, y_data, p0, bounds):
685
+ return curve_fit(
686
+ _models.oscillations,
687
+ x_data,
688
+ y_data,
689
+ p0,
690
+ bounds=bounds,
691
+ full_output=True,
692
+ )
693
+
694
+ # Try multiple initializations
695
+ for phi_guess in phi:
696
+ for offset in y0:
697
+ p0 = [A, offset, phi_guess, T]
698
+
699
+ try:
700
+ with warnings.catch_warnings():
701
+ warnings.simplefilter("ignore")
702
+ fit_res = _curve_fit_osc(x_data, y_data, p0, bounds)
703
+ if fit_res.metrics["nrmse"] < best_nrmse:
704
+ best_fit, best_popt = fit_res.output, fit_res.params
705
+ best_nrmse = fit_res.metrics["nrmse"]
706
+ except Exception:
707
+ if best_fit is None:
708
+
709
+ def _oscillations_res(p, x, y):
710
+ return _models.oscillations(x, *p) - y
711
+
712
+ result = least_squares(
713
+ _oscillations_res,
714
+ p0,
715
+ loss="soft_l1",
716
+ f_scale=0.1,
717
+ bounds=bounds,
718
+ args=(x_data, y_data),
719
+ )
720
+ best_fit, best_popt = result, result.x
721
+
722
+ if best_fit is None:
723
+ return None
724
+
725
+ # Compute pi-time (half-period + phase offset)
726
+ pi_time_raw = 0.5 * best_popt[3] + best_popt[2]
727
+ while pi_time_raw > 0.75 * np.abs(best_popt[3]):
728
+ pi_time_raw -= 0.5 * np.abs(best_popt[3])
729
+ while pi_time_raw < 0.25 * np.abs(best_popt[3]):
730
+ pi_time_raw += 0.5 * np.abs(best_popt[3])
731
+
732
+ def _get_pi_time_std_err(sqil_dict):
733
+ if sqil_dict["std_err"] is not None:
734
+ phi_err = sqil_dict["std_err"][2]
735
+ T_err = sqil_dict["std_err"][3]
736
+ if np.isfinite(T_err) and np.isfinite(phi_err):
737
+ return np.sqrt((T_err / 2) ** 2 + phi_err**2)
738
+ return np.nan
739
+
740
+ # Metadata dictionary
741
+ metadata = {
742
+ "param_names": ["A", "y0", "phi", "T"],
743
+ "predict": _models.oscillations,
744
+ "pi_time": pi_time_raw,
745
+ "@pi_time_std_err": _get_pi_time_std_err,
746
+ }
747
+
748
+ return best_fit, metadata
749
+
750
+
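# Usage sketch (illustrative): undamped version of the model above,
# f(x) = A*cos(2*pi*(x - phi)/T) + y0.
import numpy as np
from sqil_core.fit._fit import fit_oscillations

t = np.linspace(0, 1e-6, 201)
y = 0.4 * np.cos(2 * np.pi * (t - 50e-9) / 250e-9) + 0.5

res = fit_oscillations(t, y)
print(res.params)  # [A, y0, phi, T]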
751
+ @fit_output
752
+ def fit_circle_algebraic(x_data: np.ndarray, y_data: np.ndarray) -> FitResult:
753
+ """Fits a circle in the xy plane and returns the radius and the position of the center.
754
+
755
+ Reference: https://arxiv.org/abs/1410.3365
756
+ This function uses an algebraic method to fit a circle to the provided data points.
757
+ The algebraic approach is generally faster and more precise than iterative methods,
758
+ but it can be more sensitive to noise in the data.
759
+
760
+ Parameters
761
+ ----------
762
+ x_data : np.ndarray
763
+ Array of x-coordinates of the data points.
764
+ y_data : np.ndarray
765
+ Array of y-coordinates of the data points.
766
+
767
+ Returns
768
+ -------
769
+ FitResult
770
+ A `FitResult` object containing:
771
+ - Fitted parameters (`params`).
772
+ - Standard errors (`std_err`).
773
+ - Goodness-of-fit metrics (`rmse`, root mean squared error).
774
+ - A callable `predict` function for generating fitted responses.
775
+
776
+ Examples
777
+ --------
778
+ >>> fit_result = fit_circle_algebraic(x_data, y_data)
779
+ >>> fit_result.summary()
780
+ """
781
+ z_data = x_data + 1j * y_data
782
+
783
+ def calc_moments(z_data):
784
+ xi = z_data.real
785
+ xi_sqr = xi * xi
786
+ yi = z_data.imag
787
+ yi_sqr = yi * yi
788
+ zi = xi_sqr + yi_sqr
789
+ Nd = float(len(xi))
790
+ xi_sum = xi.sum()
791
+ yi_sum = yi.sum()
792
+ zi_sum = zi.sum()
793
+ xiyi_sum = (xi * yi).sum()
794
+ xizi_sum = (xi * zi).sum()
795
+ yizi_sum = (yi * zi).sum()
796
+ return np.array(
797
+ [
798
+ [(zi * zi).sum(), xizi_sum, yizi_sum, zi_sum],
799
+ [xizi_sum, xi_sqr.sum(), xiyi_sum, xi_sum],
800
+ [yizi_sum, xiyi_sum, yi_sqr.sum(), yi_sum],
801
+ [zi_sum, xi_sum, yi_sum, Nd],
802
+ ]
803
+ )
804
+
805
+ M = calc_moments(z_data)
806
+
807
+ a0 = (
808
+ (
809
+ (M[2][0] * M[3][2] - M[2][2] * M[3][0]) * M[1][1]
810
+ - M[1][2] * M[2][0] * M[3][1]
811
+ - M[1][0] * M[2][1] * M[3][2]
812
+ + M[1][0] * M[2][2] * M[3][1]
813
+ + M[1][2] * M[2][1] * M[3][0]
814
+ )
815
+ * M[0][3]
816
+ + (
817
+ M[0][2] * M[2][3] * M[3][0]
818
+ - M[0][2] * M[2][0] * M[3][3]
819
+ + M[0][0] * M[2][2] * M[3][3]
820
+ - M[0][0] * M[2][3] * M[3][2]
821
+ )
822
+ * M[1][1]
823
+ + (
824
+ M[0][1] * M[1][3] * M[3][0]
825
+ - M[0][1] * M[1][0] * M[3][3]
826
+ - M[0][0] * M[1][3] * M[3][1]
827
+ )
828
+ * M[2][2]
829
+ + (-M[0][1] * M[1][2] * M[2][3] - M[0][2] * M[1][3] * M[2][1]) * M[3][0]
830
+ + (
831
+ (M[2][3] * M[3][1] - M[2][1] * M[3][3]) * M[1][2]
832
+ + M[2][1] * M[3][2] * M[1][3]
833
+ )
834
+ * M[0][0]
835
+ + (
836
+ M[1][0] * M[2][3] * M[3][2]
837
+ + M[2][0] * (M[1][2] * M[3][3] - M[1][3] * M[3][2])
838
+ )
839
+ * M[0][1]
840
+ + (
841
+ (M[2][1] * M[3][3] - M[2][3] * M[3][1]) * M[1][0]
842
+ + M[1][3] * M[2][0] * M[3][1]
843
+ )
844
+ * M[0][2]
845
+ )
846
+ a1 = (
847
+ (
848
+ (M[3][0] - 2.0 * M[2][2]) * M[1][1]
849
+ - M[1][0] * M[3][1]
850
+ + M[2][2] * M[3][0]
851
+ + 2.0 * M[1][2] * M[2][1]
852
+ - M[2][0] * M[3][2]
853
+ )
854
+ * M[0][3]
855
+ + (
856
+ 2.0 * M[2][0] * M[3][2]
857
+ - M[0][0] * M[3][3]
858
+ - 2.0 * M[2][2] * M[3][0]
859
+ + 2.0 * M[0][2] * M[2][3]
860
+ )
861
+ * M[1][1]
862
+ + (-M[0][0] * M[3][3] + 2.0 * M[0][1] * M[1][3] + 2.0 * M[1][0] * M[3][1])
863
+ * M[2][2]
864
+ + (-M[0][1] * M[1][3] + 2.0 * M[1][2] * M[2][1] - M[0][2] * M[2][3]) * M[3][0]
865
+ + (M[1][3] * M[3][1] + M[2][3] * M[3][2]) * M[0][0]
866
+ + (M[1][0] * M[3][3] - 2.0 * M[1][2] * M[2][3]) * M[0][1]
867
+ + (M[2][0] * M[3][3] - 2.0 * M[1][3] * M[2][1]) * M[0][2]
868
+ - 2.0 * M[1][2] * M[2][0] * M[3][1]
869
+ - 2.0 * M[1][0] * M[2][1] * M[3][2]
870
+ )
871
+ a2 = (
872
+ (2.0 * M[1][1] - M[3][0] + 2.0 * M[2][2]) * M[0][3]
873
+ + (2.0 * M[3][0] - 4.0 * M[2][2]) * M[1][1]
874
+ - 2.0 * M[2][0] * M[3][2]
875
+ + 2.0 * M[2][2] * M[3][0]
876
+ + M[0][0] * M[3][3]
877
+ + 4.0 * M[1][2] * M[2][1]
878
+ - 2.0 * M[0][1] * M[1][3]
879
+ - 2.0 * M[1][0] * M[3][1]
880
+ - 2.0 * M[0][2] * M[2][3]
881
+ )
882
+ a3 = -2.0 * M[3][0] + 4.0 * M[1][1] + 4.0 * M[2][2] - 2.0 * M[0][3]
883
+ a4 = -4.0
884
+
885
+ def func(x):
886
+ return a0 + a1 * x + a2 * x * x + a3 * x * x * x + a4 * x * x * x * x
887
+
888
+ def d_func(x):
889
+ return a1 + 2 * a2 * x + 3 * a3 * x * x + 4 * a4 * x * x * x
890
+
891
+ x0 = fsolve(func, 0.0, fprime=d_func)
892
+
893
+ def solve_eq_sys(val, M):
894
+ # prepare
895
+ M[3][0] = M[3][0] + 2 * val
896
+ M[0][3] = M[0][3] + 2 * val
897
+ M[1][1] = M[1][1] - val
898
+ M[2][2] = M[2][2] - val
899
+ return np.linalg.svd(M)
900
+
901
+ U, s, Vt = solve_eq_sys(x0[0], M)
902
+
903
+ A_vec = Vt[np.argmin(s), :]
904
+
905
+ xc = -A_vec[1] / (2.0 * A_vec[0])
906
+ yc = -A_vec[2] / (2.0 * A_vec[0])
907
+ # the sqrt term corrects for the constraint, which may be violated by numerical inaccuracies in the calculation
908
+ r0 = (
909
+ 1.0
910
+ / (2.0 * np.absolute(A_vec[0]))
911
+ * np.sqrt(A_vec[1] * A_vec[1] + A_vec[2] * A_vec[2] - 4.0 * A_vec[0] * A_vec[3])
912
+ )
913
+
914
+ std_err = _compute_circle_fit_errors(x_data, y_data, xc, yc, r0)
915
+ return {
916
+ "params": [xc, yc, r0],
917
+ "std_err": std_err,
918
+ "metrics": _compute_circle_fit_metrics(x_data, y_data, xc, yc, r0),
919
+ "predict": lambda theta: (xc + r0 * np.cos(theta), yc + r0 * np.sin(theta)),
920
+ "output": {},
921
+ "param_names": ["xc", "yc", "r0"],
922
+ }
923
+
924
+
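# Usage sketch (illustrative): algebraic circle fit on noisy points from a
# circle of radius 2 centred at (1, -1); predict(theta) maps angles back
# onto the fitted circle.
import numpy as np
from sqil_core.fit._fit import fit_circle_algebraic

theta = np.linspace(0, 2 * np.pi, 100)
rng = np.random.default_rng(1)
x = 1.0 + 2.0 * np.cos(theta) + 0.01 * rng.normal(size=theta.size)
y = -1.0 + 2.0 * np.sin(theta) + 0.01 * rng.normal(size=theta.size)

res = fit_circle_algebraic(x, y)
print(res.params)  # [xc, yc, r0], expected near [1.0, -1.0, 2.0]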
925
+ def _compute_circle_fit_errors(x, y, xc, yc, r0):
926
+ """Compute the standard errors for the algebraic circle fit"""
927
+ # Residuals: distance from each point to the fitted circle
928
+ distances = np.sqrt((x - xc) ** 2 + (y - yc) ** 2)
929
+ residuals = distances - r0
930
+
931
+ # Estimate variance of the residuals
932
+ dof = len(x) - 3 # Degrees of freedom: N - number of parameters
933
+ variance = np.sum(residuals**2) / dof
934
+
935
+ # Jacobian matrix of residuals with respect to (xc, yc, r0)
936
+ J = np.zeros((len(x), 3))
937
+ J[:, 0] = (xc - x) / distances # ∂residual/∂xc
938
+ J[:, 1] = (yc - y) / distances # ∂residual/∂yc
939
+ J[:, 2] = -1 # ∂residual/∂r0
940
+
941
+ # Covariance matrix approximation: variance * (JᵗJ)⁻¹
942
+ JTJ_inv = np.linalg.inv(J.T @ J)
943
+ pcov = variance * JTJ_inv
944
+
945
+ # Standard errors are the square roots of the diagonal of the covariance matrix
946
+ standard_errors = np.sqrt(np.diag(pcov))
947
+
948
+ return standard_errors
949
+
950
+
951
+ def _compute_circle_fit_metrics(x_data, y_data, xc, yc, r0):
952
+ """Computed metrics for the algebraic circle fit"""
953
+ # Compute the distance of each data point to the fitted circle center
954
+ r_data = np.sqrt((x_data - xc) ** 2 + (y_data - yc) ** 2)
955
+
956
+ # Compute residuals
957
+ residuals = r_data - r0
958
+
959
+ # Calculate R-squared (R²)
960
+ ssr = np.sum(residuals**2)
961
+ sst = np.sum((r_data - np.mean(r_data)) ** 2)
962
+ r2 = 1 - (ssr / sst) if sst > 0 else 0
963
+
964
+ # Compute RMSE
965
+ rmse = np.sqrt(np.mean(residuals**2))
966
+
967
+ # Return results
968
+ return {"rmse": rmse}
969
+
970
+
971
+ @fit_output
972
+ def fit_skewed_lorentzian(x_data: np.ndarray, y_data: np.ndarray):
973
+ r"""
974
+ Fits a skewed Lorentzian model to the given data using least squares optimization.
975
+
976
+ This function performs a two-step fitting process to find the best-fitting parameters for a skewed Lorentzian model.
977
+ The first fitting step provides initial estimates for the parameters, and the second step refines those estimates
978
+ using a full model fit.
979
+
980
+ L(f) = A1 + A2 * (f - fr) + (A3 + A4 * (f - fr)) / [1 + (2 * Q_tot * ((f / fr) - 1))²]
981
+
982
+ $$L(f) = A_1 + A_2 \cdot (f - f_r)+ \frac{A_3 + A_4 \cdot (f - f_r)}{1
983
+ + 4 Q_{\text{tot}}^2 \left( \frac{f - f_r}{f_r} \right)^2}$$
984
+
985
+ Parameters
986
+ ----------
987
+ x_data : np.ndarray
988
+ A 1D numpy array containing the x data points for the fit.
989
+
990
+ y_data : np.ndarray
991
+ A 1D numpy array containing the y data points for the fit.
992
+
993
+ Returns
994
+ -------
995
+ FitResult
996
+ A `FitResult` object containing:
997
+ - Fitted parameters (`params`).
998
+ - Standard errors (`std_err`).
999
+ - Goodness-of-fit metrics (`red_chi2`).
1000
+ - A callable `predict` function for generating fitted responses.
1001
+
1002
+ Examples
1003
+ --------
1004
+ >>> fit_result = fit_skewed_lorentzian(x_data, y_data)
1005
+ >>> fit_result.summary()
1006
+ """
1007
+ A1a = np.minimum(y_data[0], y_data[-1])
1008
+ A3a = -np.max(y_data)
1009
+ fra = x_data[np.argmin(y_data)]
1010
+
1011
+ # First fit to get initial estimates for the more complex fit
1012
+ def residuals(p, x, y):
1013
+ A2, A4, Q_tot = p
1014
+ err = y - (
1015
+ A1a
1016
+ + A2 * (x - fra)
1017
+ + (A3a + A4 * (x - fra)) / (1.0 + 4.0 * Q_tot**2 * ((x - fra) / fra) ** 2)
1018
+ )
1019
+ return err
1020
+
1021
+ p0 = [0.0, 0.0, 1e3]
1022
+ p_final, _ = leastsq(residuals, p0, args=(np.array(x_data), np.array(y_data)))
1023
+ A2a, A4a, Q_tota = p_final
1024
+
1025
+ # Full parameter fit
1026
+ def residuals2(p, x, y):
1027
+ A1, A2, A3, A4, fr, Q_tot = p
1028
+ err = y - (
1029
+ A1
1030
+ + A2 * (x - fr)
1031
+ + (A3 + A4 * (x - fr)) / (1.0 + 4.0 * Q_tot**2 * ((x - fr) / fr) ** 2)
1032
+ )
1033
+ return err
1034
+
1035
+ p0 = [A1a, A2a, A3a, A4a, fra, Q_tota]
1036
+ popt, pcov, infodict, errmsg, ier = leastsq(
1037
+ residuals2, p0, args=(np.array(x_data), np.array(y_data)), full_output=True
1038
+ )
1039
+ # Q_tot appears only squared in the model, so the fit may return a negative value
1040
+ popt[-1] = np.abs(popt[-1])
1041
+
1042
+ return (
1043
+ (popt, pcov, infodict, errmsg, ier),
1044
+ {
1045
+ "predict": lambda x: _models.skewed_lorentzian(x, *popt),
1046
+ "param_names": ["A1", "A2", "A3", "A4", "fr", "Q_tot"],
1047
+ },
1048
+ )
1049
+
1050
+
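# Usage sketch (illustrative): skewed Lorentzian evaluated from the
# documented formula for a dip-like resonance trace, then refitted.
import numpy as np
from sqil_core.fit._fit import fit_skewed_lorentzian

f = np.linspace(4.99e9, 5.01e9, 401)
fr, Q_tot = 5.0e9, 1.0e4
y = 1.0 + (-0.8) / (1.0 + 4.0 * Q_tot**2 * ((f - fr) / fr) ** 2)

res = fit_skewed_lorentzian(f, y)
print(res.params)  # [A1, A2, A3, A4, fr, Q_tot]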
1051
+ def transform_data(
1052
+ data: np.ndarray,
1053
+ transform_type: str = "optm",
1054
+ params: list = None,
1055
+ deg: bool = True,
1056
+ inv_transform: bool = False,
1057
+ full_output: bool = False,
1058
+ ) -> (
1059
+ np.ndarray
1060
+ | tuple[np.ndarray, Callable]
1061
+ | tuple[np.ndarray, Callable, list, np.ndarray]
1062
+ ):
1063
+ """
1064
+ Transforms complex-valued data using various transformation methods, including
1065
+ optimization-based alignment, real/imaginary extraction, amplitude, and phase.
1066
+
1067
+ Parameters
1068
+ ----------
1069
+ data : np.ndarray
1070
+ The complex-valued data to be transformed.
1071
+
1072
+ transform_type : str, optional
1073
+ The type of transformation to apply. Options include:
1074
+ - 'optm' (default): Optimized translation and rotation.
1075
+ - 'trrt': Translation and rotation using provided params.
1076
+ - 'real': Extract the real part.
1077
+ - 'imag': Extract the imaginary part.
1078
+ - 'ampl': Compute the amplitude.
1079
+ - 'angl': Compute the phase (in degrees if `deg=True`).
1080
+
1081
+ params : list, optional
1082
+ Transformation parameters [x0, y0, phi]. If None and `transform_type='optm'`,
1083
+ parameters are estimated automatically.
1084
+
1085
+ deg : bool, optional
1086
+ If True, phase transformations return values in degrees (default: True).
1087
+
1088
+ inv_transform : bool, optional
1089
+ If true returns transformed data and the function to perform the inverse transform.
1090
+
1091
+ full_output : bool, optional
1092
+ If True, returns transformed data, the function to perform the inverse transform,
1093
+ transformation parameters, and residuals.
1094
+
1095
+ Returns
1096
+ -------
1097
+ np.ndarray
1098
+ The transformed data.
1099
+
1100
+ tuple[np.ndarray, Callable, list, np.ndarray] (if `full_output=True`)
1101
+ Transformed data, inverse transform function, transformation parameters, and residuals.
1102
+
1103
+ Notes
1104
+ -----
1105
+ - The function applies different transformations based on `transform_type`.
1106
+ - If `optm` is selected and `params` is not provided, an optimization routine
1107
+ is used to determine the best transformation parameters.
1108
+
1109
+ Example
1110
+ -------
1111
+ >>> data = np.array([1 + 1j, 2 + 2j, 3 + 3j])
1112
+ >>> transformed, inv_fun, params, residuals = transform_data(data, full_output=True)
1113
+ >>> print(transformed, params, residuals)
1114
+ """
1115
+
1116
+ def transform(data, x0, y0, phi):
1117
+ return (data - x0 - 1.0j * y0) * np.exp(1.0j * phi)
1118
+
1119
+ def _inv_transform(data, x0, y0, phi):
1120
+ return data * np.exp(-1.0j * phi) + x0 + 1.0j * y0
1121
+
1122
+ def opt_transform(data):
1123
+ """Finds optimal transformation parameters."""
1124
+
1125
+ def transform_err(x):
1126
+ return np.sum((transform(data, x[0], x[1], x[2]).imag) ** 2)
1127
+
1128
+ res = minimize(
1129
+ fun=transform_err,
1130
+ method="Nelder-Mead",
1131
+ x0=[
1132
+ np.mean(data.real),
1133
+ np.mean(data.imag),
1134
+ -np.arctan2(np.std(data.imag), np.std(data.real)),
1135
+ ],
1136
+ options={"maxiter": 1000},
1137
+ )
1138
+
1139
+ params = res.x
1140
+ transformed_data = transform(data, *params)
1141
+ if transformed_data[0] < transformed_data[-1]:
1142
+ params[2] += np.pi
1143
+ return params
1144
+
1145
+ # Normalize transform_type
1146
+ transform_type = str(transform_type).lower()
1147
+ if transform_type.startswith(("op", "pr")):
1148
+ transform_type = "optm"
1149
+ elif transform_type.startswith("translation+rotation"):
1150
+ transform_type = "trrt"
1151
+ elif transform_type.startswith(("re", "qu")):
1152
+ transform_type = "real"
1153
+ elif transform_type.startswith(("im", "in")):
1154
+ transform_type = "imag"
1155
+ elif transform_type.startswith("am"):
1156
+ transform_type = "ampl"
1157
+ elif transform_type.startswith(("ph", "an")):
1158
+ transform_type = "angl"
1159
+
1160
+ # Compute parameters if needed
1161
+ if transform_type == "optm" and params is None:
1162
+ params = opt_transform(data)
1163
+
1164
+ # Apply transformation
1165
+ if transform_type in ["optm", "trrt"]:
1166
+ transformed_data = transform(data, *params).real
1167
+ residual = transform(data, *params).imag
1168
+ elif transform_type == "real":
1169
+ transformed_data = data.real
1170
+ residual = data.imag
1171
+ elif transform_type == "imag":
1172
+ transformed_data = data.imag
1173
+ residual = data.real
1174
+ elif transform_type == "ampl":
1175
+ transformed_data = np.abs(data)
1176
+ residual = np.unwrap(np.angle(data))
1177
+ if deg:
1178
+ residual = np.degrees(residual)
1179
+ elif transform_type == "angl":
1180
+ transformed_data = np.unwrap(np.angle(data))
1181
+ residual = np.abs(data)
1182
+ if deg:
1183
+ transformed_data = np.degrees(transformed_data)
1184
+
1185
+ inv_transform_fun = lambda data: _inv_transform(data, *params)
1186
+
1187
+ if full_output:
1188
+ return np.array(transformed_data), inv_transform_fun, params, residual
1189
+ if inv_transform:
1190
+ return np.array(transformed_data), inv_transform_fun
1191
+ return np.array(transformed_data)
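# Usage sketch (illustrative): project complex IQ-like data onto its
# principal axis with the default 'optm' transform, then invert it.
import numpy as np
from sqil_core.fit._fit import transform_data

z = (0.3 + 0.1j) + 0.5 * np.exp(1j * 0.7) * np.linspace(0, 1, 50)

proj, inv_fun, params, residual = transform_data(z, full_output=True)
print(params)                           # optimized [x0, y0, phi]
z_back = inv_fun(proj + 1j * residual)  # recovers z up to float round-off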