sqil-core 0.1.0__py3-none-any.whl → 1.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sqil_core/__init__.py +1 -0
- sqil_core/config_log.py +42 -0
- sqil_core/experiment/__init__.py +11 -0
- sqil_core/experiment/_analysis.py +125 -0
- sqil_core/experiment/_events.py +25 -0
- sqil_core/experiment/_experiment.py +553 -0
- sqil_core/experiment/data/plottr.py +778 -0
- sqil_core/experiment/helpers/_function_override_handler.py +111 -0
- sqil_core/experiment/helpers/_labone_wrappers.py +12 -0
- sqil_core/experiment/instruments/__init__.py +2 -0
- sqil_core/experiment/instruments/_instrument.py +190 -0
- sqil_core/experiment/instruments/drivers/SignalCore_SC5511A.py +515 -0
- sqil_core/experiment/instruments/local_oscillator.py +205 -0
- sqil_core/experiment/instruments/server.py +175 -0
- sqil_core/experiment/instruments/setup.yaml +21 -0
- sqil_core/experiment/instruments/zurich_instruments.py +55 -0
- sqil_core/fit/__init__.py +23 -0
- sqil_core/fit/_core.py +179 -31
- sqil_core/fit/_fit.py +544 -94
- sqil_core/fit/_guess.py +304 -0
- sqil_core/fit/_models.py +50 -1
- sqil_core/fit/_quality.py +266 -0
- sqil_core/resonator/__init__.py +2 -0
- sqil_core/resonator/_resonator.py +256 -74
- sqil_core/utils/__init__.py +40 -13
- sqil_core/utils/_analysis.py +226 -0
- sqil_core/utils/_const.py +83 -18
- sqil_core/utils/_formatter.py +127 -55
- sqil_core/utils/_plot.py +272 -6
- sqil_core/utils/_read.py +178 -95
- sqil_core/utils/_utils.py +147 -0
- {sqil_core-0.1.0.dist-info → sqil_core-1.1.0.dist-info}/METADATA +9 -1
- sqil_core-1.1.0.dist-info/RECORD +36 -0
- {sqil_core-0.1.0.dist-info → sqil_core-1.1.0.dist-info}/WHEEL +1 -1
- sqil_core-0.1.0.dist-info/RECORD +0 -19
- {sqil_core-0.1.0.dist-info → sqil_core-1.1.0.dist-info}/entry_points.txt +0 -0
sqil_core/fit/_fit.py
CHANGED
@@ -1,11 +1,27 @@
+from __future__ import annotations
+
 import warnings
+from typing import Callable
 
 import numpy as np
-from scipy.optimize import curve_fit, fsolve, least_squares, leastsq
+from scipy.optimize import curve_fit, fsolve, least_squares, leastsq, minimize
 
 import sqil_core.fit._models as _models
+from sqil_core.utils._utils import fill_gaps, has_at_least_one, make_iterable
 
 from ._core import FitResult, fit_input, fit_output
+from ._guess import (
+    decaying_exp_guess,
+    decaying_oscillations_bounds,
+    decaying_oscillations_guess,
+    gaussian_bounds,
+    gaussian_guess,
+    lorentzian_bounds,
+    lorentzian_guess,
+    many_decaying_oscillations_guess,
+    oscillations_bounds,
+    oscillations_guess,
+)
 
 
 @fit_input
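The fitters below lean on three helpers imported from `sqil_core/utils/_utils.py`: `has_at_least_one`, `fill_gaps`, and `make_iterable`. Their implementations are not part of this file's diff, so the sketch below only mirrors the behavior the call sites appear to assume (a user-supplied `guess` or `bounds` list may be `None` or contain `None` entries, and the gaps are filled from an auto-generated estimate); treat it as an illustration, not the package's actual code.

```python
# Hypothetical stand-ins inferred from the call sites in this diff; the real
# helpers live in sqil_core/utils/_utils.py and may differ.
def has_at_least_one(seq, value=None) -> bool:
    """True if seq is None or contains at least one entry equal to `value`."""
    return seq is None or any(item is value for item in seq)


def fill_gaps(partial, defaults):
    """Replace None entries of `partial` with the matching entries of `defaults`."""
    if partial is None:
        return list(defaults)
    return [d if p is None else p for p, d in zip(partial, defaults)]


def make_iterable(x):
    """Wrap a scalar in a list so the multi-initialization loops can iterate over it."""
    return x if hasattr(x, "__iter__") else [x]
```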
@@ -14,7 +30,7 @@ def fit_lorentzian(
     x_data: np.ndarray,
     y_data: np.ndarray,
     guess: list = None,
-    bounds: list[tuple[float]] | tuple =
+    bounds: list[tuple[float]] | tuple = None,
 ) -> FitResult:
     r"""
     Fits a Lorentzian function to the provided data. The function estimates the
@@ -58,35 +74,16 @@ def fit_lorentzian(
     x, y = x_data, y_data
 
     # Default intial guess if not provided
-    if guess
-
-        max_y, min_y = np.max(y), np.min(y)
-
-        # Determine A, x0, y0 based on peak prominence
-        if max_y - median_y >= median_y - min_y:
-            y0 = min_y
-            idx = np.argmax(y)
-            A = 1 / (max_y - median_y)
-        else:
-            y0 = max_y
-            idx = np.argmin(y)
-            A = 1 / (min_y - median_y)
-
-        x0 = x[idx]
-        half = y0 + A / 2.0
-        dx = np.abs(np.diff(x[np.argsort(np.abs(y - half))]))
-        dx_min = np.abs(np.diff(x))
-        dx = dx[dx >= 2.0 * dx_min]
-
-        fwhm = dx[0] / 2.0 if dx.size else dx_min
-        guess = [A, x0, fwhm, y0]
+    if has_at_least_one(guess, None):
+        guess = fill_gaps(guess, lorentzian_guess(x_data, y_data))
 
     # Default bounds if not provided
     if bounds is None:
-        bounds = (
-
-
-        )
+        bounds = ([None] * len(guess), [None] * len(guess))
+    if has_at_least_one(bounds[0], None) or has_at_least_one(bounds[1], None):
+        lower, upper = bounds
+        lower_guess, upper_guess = lorentzian_bounds(x_data, y_data, guess)
+        bounds = (fill_gaps(lower, lower_guess), fill_gaps(upper, upper_guess))
 
     res = curve_fit(_models.lorentzian, x, y, p0=guess, bounds=bounds, full_output=True)
 
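In 1.1.0 the inline peak-detection heuristic moves into `lorentzian_guess`/`lorentzian_bounds` from the new `_guess` module, and both `guess` and `bounds` may now be supplied only partially: any `None` entry is filled from the automatic estimate. An illustrative call is sketched below, assuming the package is installed and `fit_lorentzian` is exported from `sqil_core.fit`; the parameter order `[A, x0, fwhm, y0]` is taken from the removed 0.1.0 guess code.

```python
import numpy as np
from sqil_core.fit import fit_lorentzian  # assumed export path

# Synthetic resonance-like trace, for illustration only
x = np.linspace(4.99e9, 5.01e9, 801)
y = 0.05 + 1.0 / (1.0 + ((x - 5.0e9) / 1.0e6) ** 2) + 0.02 * np.random.randn(x.size)

# Pin the center frequency, let A, fwhm and y0 come from lorentzian_guess()
res = fit_lorentzian(x, y, guess=[None, 5.0e9, None, None])
print(res.params)  # the FitResult also carries std_err, fit metrics and predict()
```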
@@ -96,13 +93,80 @@ def fit_lorentzian(
     }
 
 
+@fit_input
+@fit_output
+def fit_two_lorentzians_shared_x0(
+    x_data_1,
+    y_data_1,
+    x_data_2,
+    y_data_2,
+    guess: list = None,
+    bounds: list[tuple[float]] | tuple = None,
+):
+    y_all = np.concatenate([y_data_1, y_data_2])
+
+    if has_at_least_one(guess, None):
+        guess_1 = lorentzian_guess(x_data_1, y_data_1)
+        guess_2 = lorentzian_guess(x_data_2, y_data_2)
+        x01, x02 = guess_1[1], guess_2[1]
+        x0 = np.mean([x01, x02])
+        guess = fill_gaps(
+            guess, np.concatenate([np.delete(guess_1, 1), np.delete(guess_2, 1), [x0]])
+        )
+
+    if bounds == None:
+        bounds = [[None] * len(guess), [None] * len(guess)]
+    if has_at_least_one(bounds[0], None) or has_at_least_one(bounds[1], None):
+        lower, upper = bounds
+        lower_guess_1, upper_guess_1 = lorentzian_bounds(x_data_1, y_data_1, guess_1)
+        lower_guess_2, upper_guess_2 = lorentzian_bounds(x_data_2, y_data_2, guess_2)
+        # Combine bounds for 1 and 2
+        lower_guess = np.concatenate(
+            [
+                np.delete(lower_guess_1, 1),
+                np.delete(lower_guess_2, 1),
+                [np.min([lower_guess_1, lower_guess_2])],
+            ]
+        )
+        upper_guess = np.concatenate(
+            [
+                np.delete(upper_guess_1, 1),
+                np.delete(upper_guess_2, 1),
+                [np.max([upper_guess_1, upper_guess_2])],
+            ]
+        )
+        lower = fill_gaps(lower, lower_guess)
+        upper = fill_gaps(upper, upper_guess)
+        bounds = (lower, upper)
+
+    res = curve_fit(
+        lambda _, A1, fwhm1, y01, A2, fwhm2, y02, x0: _models.two_lorentzians_shared_x0(
+            x_data_1, x_data_2, A1, fwhm1, y01, A2, fwhm2, y02, x0
+        ),
+        xdata=np.zeros_like(y_all),  # dummy x, since x1 and x2 are fixed via closure
+        ydata=y_all,
+        p0=guess,
+        # bounds=bounds,
+        full_output=True,
+    )
+
+    return res, {
+        "param_names": ["A1", "fwhm1", "y01", "A2", "fwhm2", "y02", "x0"],
+        "predict": _models.two_lorentzians_shared_x0,
+        "fit_output_vars": {
+            "x_data": np.concatenate([x_data_1, x_data_2]),
+            "y_data": y_all,
+        },
+    }
+
+
 @fit_input
 @fit_output
 def fit_gaussian(
     x_data: np.ndarray,
     y_data: np.ndarray,
     guess: list = None,
-    bounds: list[tuple[float]] | tuple =
+    bounds: list[tuple[float]] | tuple = None,
 ) -> FitResult:
     r"""
     Fits a Gaussian function to the provided data. The function estimates the
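`fit_two_lorentzians_shared_x0` fits both traces jointly by concatenating them and handing `curve_fit` a dummy `xdata`, while the real x axes enter through the closure. The model, `_models.two_lorentzians_shared_x0`, is defined in `sqil_core/fit/_models.py` and is not shown in this hunk; the sketch below is only what it presumably evaluates, namely two Lorentzians constrained to a common center `x0`, concatenated in the same order as `y_all`.

```python
import numpy as np

# Sketch only: the exact Lorentzian parametrization used by _models is an assumption.
def two_lorentzians_shared_x0_sketch(x1, x2, A1, fwhm1, y01, A2, fwhm2, y02, x0):
    lor1 = y01 + A1 * (fwhm1 / 2) ** 2 / ((x1 - x0) ** 2 + (fwhm1 / 2) ** 2)
    lor2 = y02 + A2 * (fwhm2 / 2) ** 2 / ((x2 - x0) ** 2 + (fwhm2 / 2) ** 2)
    return np.concatenate([lor1, lor2])
```

Note that the `bounds=bounds` argument of the joint `curve_fit` call is commented out in the released code, so the carefully assembled bounds are not actually applied to this fit.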
@@ -147,36 +211,15 @@ def fit_gaussian(
     x, y = x_data, y_data
 
     # Default initial guess if not provided
-    if guess
-
-        max_x, min_x = np.max(x), np.min(x)
-        max_y, min_y = np.max(y), np.min(y)
-
-        # Determine A, x0, y0 based on peak prominence
-        if max_y - median_y >= median_y - min_y:
-            y0 = min_y
-            idx = np.argmax(y)
-            A = max_y - median_y
-        else:
-            y0 = max_y
-            idx = np.argmin(y)
-            A = min_y - median_y
-
-        x0 = x[idx]
-        half = y0 + A / 2.0
-        dx = np.abs(np.diff(x[np.argsort(np.abs(y - half))]))
-        dx_min = np.abs(np.diff(x))
-        dx = dx[dx >= 2.0 * dx_min]
-
-        sigma = dx[0] / 2.0 if dx.size else dx_min
-        guess = [A, x0, sigma, y0]
-
+    if has_at_least_one(guess, None):
+        guess = fill_gaps(guess, gaussian_guess(x_data, y_data))
     # Default bounds if not provided
     if bounds is None:
-        bounds = (
-
-
-        )
+        bounds = ([None] * len(guess), [None] * len(guess))
+    if has_at_least_one(bounds[0], None) or has_at_least_one(bounds[1], None):
+        lower, upper = bounds
+        lower_guess, upper_guess = gaussian_bounds(x_data, y_data, guess)
+        bounds = (fill_gaps(lower, lower_guess), fill_gaps(upper, upper_guess))
 
     res = curve_fit(_models.gaussian, x, y, p0=guess, bounds=bounds, full_output=True)
 
@@ -191,6 +234,73 @@ def fit_gaussian(
     }
 
 
+@fit_input
+@fit_output
+def fit_two_gaussians_shared_x0(
+    x_data_1,
+    y_data_1,
+    x_data_2,
+    y_data_2,
+    guess: list = None,
+    bounds: list[tuple[float]] | tuple = None,
+):
+    y_all = np.concatenate([y_data_1, y_data_2])
+
+    if has_at_least_one(guess, None):
+        guess_1 = gaussian_guess(x_data_1, y_data_1)
+        guess_2 = gaussian_guess(x_data_2, y_data_2)
+        x01, x02 = guess_1[1], guess_2[1]
+        x0 = np.mean([x01, x02])
+        guess = fill_gaps(
+            guess, np.concatenate([np.delete(guess_1, 1), np.delete(guess_2, 1), [x0]])
+        )
+
+    if bounds == None:
+        bounds = [[None] * len(guess), [None] * len(guess)]
+    if has_at_least_one(bounds[0], None) or has_at_least_one(bounds[1], None):
+        lower, upper = bounds
+        lower_guess_1, upper_guess_1 = gaussian_bounds(x_data_1, y_data_1, guess_1)
+        lower_guess_2, upper_guess_2 = gaussian_bounds(x_data_2, y_data_2, guess_2)
+        # Combine bounds for 1 and 2
+        lower_guess = np.concatenate(
+            [
+                np.delete(lower_guess_1, 1),
+                np.delete(lower_guess_2, 1),
+                [np.min([lower_guess_1, lower_guess_2])],
+            ]
+        )
+        upper_guess = np.concatenate(
+            [
+                np.delete(upper_guess_1, 1),
+                np.delete(upper_guess_2, 1),
+                [np.max([upper_guess_1, upper_guess_2])],
+            ]
+        )
+        lower = fill_gaps(lower, lower_guess)
+        upper = fill_gaps(upper, upper_guess)
+        bounds = (lower, upper)
+
+    res = curve_fit(
+        lambda _, A1, fwhm1, y01, A2, fwhm2, y02, x0: _models.two_gaussians_shared_x0(
+            x_data_1, x_data_2, A1, fwhm1, y01, A2, fwhm2, y02, x0
+        ),
+        xdata=np.zeros_like(y_all),  # dummy x, since x1 and x2 are fixed via closure
+        ydata=y_all,
+        p0=guess,
+        # bounds=bounds,
+        full_output=True,
+    )
+
+    return res, {
+        "param_names": ["A1", "fwhm1", "y01", "A2", "fwhm2", "y02", "x0"],
+        "predict": _models.two_gaussians_shared_x0,
+        "fit_output_vars": {
+            "x_data": np.concatenate([x_data_1, x_data_2]),
+            "y_data": y_all,
+        },
+    }
+
+
 @fit_input
 @fit_output
 def fit_decaying_exp(
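The joint Gaussian fit packs its parameters the same way as the Lorentzian version above: each single-peak guess `[A, x0, width, y0]` loses its center (index 1), and one shared `x0` (the mean of the two centers) is appended at the end, matching the `param_names` list `["A1", "fwhm1", "y01", "A2", "fwhm2", "y02", "x0"]`. A small worked illustration of that packing, with hypothetical numbers:

```python
import numpy as np

guess_1 = np.array([1.2, 5.001e9, 2.0e6, 0.10])  # [A1, x01, width1, y01] (made up)
guess_2 = np.array([0.8, 4.999e9, 1.5e6, 0.05])  # [A2, x02, width2, y02] (made up)

x0 = np.mean([guess_1[1], guess_2[1]])           # shared center: 5.0e9
packed = np.concatenate([np.delete(guess_1, 1), np.delete(guess_2, 1), [x0]])
# packed -> [A1, width1, y01, A2, width2, y02, x0]
```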
@@ -239,19 +349,9 @@ def fit_decaying_exp(
     """
     x, y = x_data, y_data
 
-    # Default
-    if guess
-
-        min_y = np.min(y)
-        half = 0.5 * (max_y + min_y)
-
-        if y[0] > y[-1]:
-            tau0_idx = np.argmax(y < half)
-        else:
-            tau0_idx = np.argmax(y > half)
-
-        b0 = x[tau0_idx] if tau0_idx != 0 else 0.5 * (x[0] + x[-1])
-        guess = [y[0] - y[-1], b0, y[-1]]
+    # Default intial guess if not provided
+    if has_at_least_one(guess, None):
+        guess = fill_gaps(guess, decaying_exp_guess(x_data, y_data))
 
     # Default bounds if not provided
     if bounds is None:
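The removed block above is the 0.1.0 heuristic for the initial decay-time guess: find where the trace first crosses the midpoint between its extrema and use that x value as the characteristic time, with `y[0] - y[-1]` as the amplitude and `y[-1]` as the offset. The 1.1.0 replacement, `decaying_exp_guess`, lives in the new `_guess.py` and is not shown in this diff; reassembling the removed logic gives a sketch of what it presumably still does.

```python
import numpy as np

# Reconstruction of the removed 0.1.0 heuristic (the 1.1.0 helper may differ).
def decaying_exp_guess_sketch(x: np.ndarray, y: np.ndarray) -> list:
    max_y, min_y = np.max(y), np.min(y)
    half = 0.5 * (max_y + min_y)
    # First index where the trace crosses the midpoint, decaying or rising
    tau0_idx = np.argmax(y < half) if y[0] > y[-1] else np.argmax(y > half)
    b0 = x[tau0_idx] if tau0_idx != 0 else 0.5 * (x[0] + x[-1])
    return [y[0] - y[-1], b0, y[-1]]  # [amplitude, decay time, offset]
```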
@@ -382,9 +482,14 @@ def fit_qubit_relaxation_qp(
     }
 
 
+@fit_input
 @fit_output
 def fit_decaying_oscillations(
-    x_data: np.ndarray,
+    x_data: np.ndarray,
+    y_data: np.ndarray,
+    guess: list[float] | None = None,
+    bounds: list[tuple[float]] | tuple = None,
+    num_init: int = 10,
 ) -> FitResult:
     r"""
     Fits a decaying oscillation model to data. The function estimates key features
|
|
398
503
|
Parameters
|
399
504
|
----------
|
400
505
|
x_data : np.ndarray
|
401
|
-
|
402
|
-
|
506
|
+
Independent variable array (e.g., time or frequency).
|
403
507
|
y_data : np.ndarray
|
404
|
-
|
405
|
-
|
406
|
-
|
407
|
-
|
508
|
+
Dependent variable array representing the measured signal.
|
509
|
+
guess : list[float] or None, optional
|
510
|
+
Initial parameter estimates [A, tau, y0, phi, T]. Missing values are automatically filled.
|
511
|
+
bounds : list[tuple[float]] or tuple, optional
|
512
|
+
Lower and upper bounds for parameters during fitting, by default no bounds.
|
513
|
+
num_init : int, optional
|
514
|
+
Number of phase values to try when guessing, by default 10.
|
408
515
|
|
409
516
|
Returns
|
410
517
|
-------
|
@@ -416,31 +523,49 @@ def fit_decaying_oscillations(
         - A callable `predict` function for generating fitted responses.
         - A metadata dictionary containing the pi_time and its standard error.
     """
-    #
-
-
-
+    # Default intial guess if not provided
+    if has_at_least_one(guess, None):
+        guess = fill_gaps(guess, decaying_oscillations_guess(x_data, y_data, num_init))
+
+    # Default bounds if not provided
+    if bounds is None:
+        bounds = ([None] * len(guess), [None] * len(guess))
+    if has_at_least_one(bounds[0], None) or has_at_least_one(bounds[1], None):
+        lower, upper = bounds
+        lower_guess, upper_guess = decaying_oscillations_bounds(x_data, y_data, guess)
+        bounds = (fill_gaps(lower, lower_guess), fill_gaps(upper, upper_guess))
+
+    A, tau, y0, phi, T = guess
+    phi = make_iterable(phi)
+    y0 = make_iterable(y0)
 
     best_fit = None
     best_popt = None
+    best_nrmse = np.inf
+
+    @fit_output
+    def _curve_fit_osc(x_data, y_data, p0, bounds):
+        return curve_fit(
+            _models.decaying_oscillations,
+            x_data,
+            y_data,
+            p0,
+            bounds=bounds,
+            full_output=True,
+        )
 
     # Try multiple initializations
-    for phi_guess in
-        for
-            p0 = [
+    for phi_guess in phi:
+        for offset in y0:
+            p0 = [A, tau, offset, phi_guess, T]
 
             try:
                 with warnings.catch_warnings():
                     warnings.simplefilter("ignore")
-
-
-
-
-                    p0,
-                    full_output=True,
-                )
-                popt = fit_output[0]
-                best_fit, best_popt = fit_output, popt
+                    fit_res = _curve_fit_osc(x_data, y_data, p0, bounds)
+                    if fit_res.metrics["nrmse"] < best_nrmse:
+                        best_fit, best_popt = fit_res.output, fit_res.params
+                        best_nrmse = fit_res.metrics["nrmse"]
             except:
                 if best_fit is None:
 
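The retry loop now ranks candidate fits by the normalized RMSE that the `fit_output` decorator attaches to each `FitResult` (`fit_res.metrics["nrmse"]`) instead of keeping the last successful `curve_fit` result. The metric itself is computed by the new `sqil_core/fit/_quality.py` (added in this release but not shown here); the snippet below uses a common normalized-RMSE definition purely to illustrate the selection criterion, not the package's exact formula.

```python
import numpy as np

def nrmse_sketch(y_data: np.ndarray, y_pred: np.ndarray) -> float:
    """RMSE normalized by the data range; lower is better (illustrative only)."""
    rmse = np.sqrt(np.mean((y_data - y_pred) ** 2))
    span = np.max(y_data) - np.min(y_data)
    return rmse / span if span != 0 else np.inf
```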
@@ -452,11 +577,15 @@ def fit_decaying_oscillations(
                         p0,
                         loss="soft_l1",
                         f_scale=0.1,
+                        bounds=bounds,
                         args=(x_data, y_data),
                     )
                     best_fit, best_popt = result, result.x
 
-
+    if best_fit is None:
+        return None
+
+    # Compute pi-time (half-period + phase offset)
     pi_time_raw = 0.5 * best_popt[4] + best_popt[3]
     while pi_time_raw > 0.75 * np.abs(best_popt[4]):
         pi_time_raw -= 0.5 * np.abs(best_popt[4])
@@ -482,6 +611,184 @@ def fit_decaying_oscillations(
     return best_fit, metadata
 
 
+@fit_output
+def fit_many_decaying_oscillations(
+    x_data: np.ndarray, y_data: np.ndarray, n: int, guess=None
+):
+    """
+    Fits a sum of `n` exponentially decaying oscillations to the given data.
+
+    Each component of the model is of the form:
+        A_i * exp(-x / tau_i) * cos(2π * T_i * x + phi_i)
+
+    Parameters
+    ----------
+    x_data : np.ndarray
+        1D array of x-values (e.g., time).
+    y_data : np.ndarray
+        1D array of y-values (e.g., signal amplitude).
+    n : int
+        Number of decaying oscillation components to fit.
+    guess : list or None, optional
+        Optional initial parameter guess. If None, a guess is automatically generated
+        using `many_decaying_oscillations_guess`.
+
+    Returns
+    -------
+    FitResult
+    """
+
+    if has_at_least_one(guess, None):
+        guess = fill_gaps(guess, many_decaying_oscillations_guess(x_data, y_data, n))
+
+    res = curve_fit(
+        _models.many_decaying_oscillations,
+        x_data,
+        y_data,
+        p0=guess,
+        # maxfev=10000,
+        full_output=True,
+    )
+
+    metadata = {
+        "param_names": [f"{p}{i}" for i in range(n) for p in ("A", "tau", "phi", "T")]
+        + ["y0"],
+        "predict": lambda x: _models.many_decaying_oscillations(x, *res[0]),
+        "model_name": f"many_decaying_oscillations({n})",
+    }
+
+    return res, metadata
+
+
+@fit_input
+@fit_output
+def fit_oscillations(
+    x_data: np.ndarray,
+    y_data: np.ndarray,
+    guess: list[float] | None = None,
+    bounds: list[tuple[float]] | tuple = None,
+    num_init: int = 10,
+) -> FitResult:
+    r"""
+    Fits an oscillation model to data. The function estimates key features
+    like the oscillation period and phase, and tries multiple initial guesses for
+    the optimization process.
+
+    f(x) = A * cos(2π * (x - φ) / T) + y0
+
+    $$f(x) = A \cos\left( 2\pi \frac{x - \phi}{T} \right) + y_0$$
+
+    Parameters
+    ----------
+    x_data : np.ndarray
+        Independent variable array (e.g., time or frequency).
+    y_data : np.ndarray
+        Dependent variable array representing the measured signal.
+    guess : list[float] or None, optional
+        Initial parameter estimates [A, y0, phi, T]. Missing values are automatically filled.
+    bounds : list[tuple[float]] or tuple, optional
+        Lower and upper bounds for parameters during fitting, by default no bounds.
+    num_init : int, optional
+        Number of phase values to try when guessing, by default 10.
+
+    Returns
+    -------
+    FitResult
+        A `FitResult` object containing:
+        - Fitted parameters (`params`).
+        - Standard errors (`std_err`).
+        - Goodness-of-fit metrics (`rmse`, root mean squared error).
+        - A callable `predict` function for generating fitted responses.
+        - A metadata dictionary containing the pi_time and its standard error.
+    """
+    # Default intial guess if not provided
+    if has_at_least_one(guess, None):
+        guess = fill_gaps(guess, oscillations_guess(x_data, y_data, num_init))
+
+    # Default bounds if not provided
+    if bounds is None:
+        bounds = ([None] * len(guess), [None] * len(guess))
+    if has_at_least_one(bounds[0], None) or has_at_least_one(bounds[1], None):
+        lower, upper = bounds
+        lower_guess, upper_guess = oscillations_bounds(x_data, y_data, guess)
+        bounds = (fill_gaps(lower, lower_guess), fill_gaps(upper, upper_guess))
+
+    A, y0, phi, T = guess
+    phi = make_iterable(phi)
+    y0 = make_iterable(y0)
+
+    best_fit = None
+    best_popt = None
+    best_nrmse = np.inf
+
+    @fit_output
+    def _curve_fit_osc(x_data, y_data, p0, bounds):
+        return curve_fit(
+            _models.oscillations,
+            x_data,
+            y_data,
+            p0,
+            bounds=bounds,
+            full_output=True,
+        )
+
+    # Try multiple initializations
+    for phi_guess in phi:
+        for offset in y0:
+            p0 = [A, offset, phi_guess, T]
+
+            try:
+                with warnings.catch_warnings():
+                    warnings.simplefilter("ignore")
+                    fit_res = _curve_fit_osc(x_data, y_data, p0, bounds)
+                    if fit_res.metrics["nrmse"] < best_nrmse:
+                        best_fit, best_popt = fit_res.output, fit_res.params
+                        best_nrmse = fit_res.metrics["nrmse"]
+            except:
+                if best_fit is None:
+
+                    def _oscillations_res(p, x, y):
+                        return _models.oscillations(x, *p) - y
+
+                    result = least_squares(
+                        _oscillations_res,
+                        p0,
+                        loss="soft_l1",
+                        f_scale=0.1,
+                        bounds=bounds,
+                        args=(x_data, y_data),
+                    )
+                    best_fit, best_popt = result, result.x
+
+    if best_fit is None:
+        return None
+
+    # Compute pi-time (half-period + phase offset)
+    pi_time_raw = 0.5 * best_popt[3] + best_popt[2]
+    while pi_time_raw > 0.75 * np.abs(best_popt[3]):
+        pi_time_raw -= 0.5 * np.abs(best_popt[3])
+    while pi_time_raw < 0.25 * np.abs(best_popt[3]):
+        pi_time_raw += 0.5 * np.abs(best_popt[3])
+
+    def _get_pi_time_std_err(sqil_dict):
+        if sqil_dict["std_err"] is not None:
+            phi_err = sqil_dict["std_err"][2]
+            T_err = sqil_dict["std_err"][3]
+            if np.isfinite(T_err) and np.isfinite(phi_err):
+                return np.sqrt((T_err / 2) ** 2 + phi_err**2)
+        return np.nan
+
+    # Metadata dictionary
+    metadata = {
+        "param_names": ["A", "y0", "phi", "T"],
+        "predict": _models.oscillations,
+        "pi_time": pi_time_raw,
+        "@pi_time_std_err": _get_pi_time_std_err,
+    }
+
+    return best_fit, metadata
+
+
 @fit_output
 def fit_circle_algebraic(x_data: np.ndarray, y_data: np.ndarray) -> FitResult:
     """Fits a circle in the xy plane and returns the radius and the position of the center.
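For `fit_many_decaying_oscillations`, the docstring and the generated `param_names` (`A0, tau0, phi0, T0, A1, ..., y0`) imply a model that sums `n` damped cosines on top of a common offset. The actual implementation is `_models.many_decaying_oscillations` in `sqil_core/fit/_models.py`, which is not part of this hunk; the sketch below only mirrors the form stated in the docstring.

```python
import numpy as np

# Sketch consistent with the docstring:
#   sum_i A_i * exp(-x / tau_i) * cos(2π * T_i * x + phi_i) + y0
def many_decaying_oscillations_sketch(x, *params):
    x = np.asarray(x, dtype=float)
    *components, y0 = params                 # trailing parameter is the shared offset
    y = np.full_like(x, y0)
    for i in range(0, len(components), 4):   # packed as [A, tau, phi, T] per component
        A, tau, phi, T = components[i : i + 4]
        y += A * np.exp(-x / tau) * np.cos(2 * np.pi * T * x + phi)
    return y
```

In `fit_oscillations`, the returned `pi_time` is folded into the interval [0.25 T, 0.75 T] by the two `while` loops, and its standard error combines the period and phase uncertainties in quadrature, `sqrt((T_err / 2)**2 + phi_err**2)`.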
@@ -780,3 +1087,146 @@ def fit_skewed_lorentzian(x_data: np.ndarray, y_data: np.ndarray):
             "param_names": ["A1", "A2", "A3", "A4", "fr", "Q_tot"],
         },
     )
+
+
+def transform_data(
+    data: np.ndarray,
+    transform_type: str = "optm",
+    params: list = None,
+    deg: bool = True,
+    inv_transform: bool = False,
+    full_output: bool = False,
+) -> (
+    np.ndarray
+    | tuple[np.ndarray, Callable]
+    | tuple[np.ndarray, Callable, list, np.ndarray]
+):
+    """
+    Transforms complex-valued data using various transformation methods, including
+    optimization-based alignment, real/imaginary extraction, amplitude, and phase.
+
+    Parameters
+    ----------
+    data : np.ndarray
+        The complex-valued data to be transformed.
+
+    transform_type : str, optional
+        The type of transformation to apply. Options include:
+        - 'optm' (default): Optimized translation and rotation.
+        - 'trrt': Translation and rotation using provided params.
+        - 'real': Extract the real part.
+        - 'imag': Extract the imaginary part.
+        - 'ampl': Compute the amplitude.
+        - 'angl': Compute the phase (in degrees if `deg=True`).
+
+    params : list, optional
+        Transformation parameters [x0, y0, phi]. If None and `transform_type='optm'`,
+        parameters are estimated automatically.
+
+    deg : bool, optional
+        If True, phase transformations return values in degrees (default: True).
+
+    inv_transform : bool, optional
+        If true returns transformed data and the function to perform the inverse transform.
+
+    full_output : bool, optional
+        If True, returns transformed data, the function to perform the inverse transform,
+        transformation parameters, and residuals.
+
+    Returns
+    -------
+    np.ndarray
+        The transformed data.
+
+    tuple[np.ndarray, list, np.ndarray] (if `full_output=True`)
+        Transformed data, transformation parameters, and residuals.
+
+    Notes
+    -----
+    - The function applies different transformations based on `transform_type`.
+    - If `optm` is selected and `params` is not provided, an optimization routine
+      is used to determine the best transformation parameters.
+
+    Example
+    -------
+    >>> data = np.array([1 + 1j, 2 + 2j, 3 + 3j])
+    >>> transformed, params, residuals = transform_data(data, full_output=True)
+    >>> print(transformed, params, residuals)
+    """
+
+    def transform(data, x0, y0, phi):
+        return (data - x0 - 1.0j * y0) * np.exp(1.0j * phi)
+
+    def _inv_transform(data, x0, y0, phi):
+        return data * np.exp(-1.0j * phi) + x0 + 1.0j * y0
+
+    def opt_transform(data):
+        """Finds optimal transformation parameters."""
+
+        def transform_err(x):
+            return np.sum((transform(data, x[0], x[1], x[2]).imag) ** 2)
+
+        res = minimize(
+            fun=transform_err,
+            method="Nelder-Mead",
+            x0=[
+                np.mean(data.real),
+                np.mean(data.imag),
+                -np.arctan2(np.std(data.imag), np.std(data.real)),
+            ],
+            options={"maxiter": 1000},
+        )
+
+        params = res.x
+        transformed_data = transform(data, *params)
+        if transformed_data[0] < transformed_data[-1]:
+            params[2] += np.pi
+        return params
+
+    # Normalize transform_type
+    transform_type = str(transform_type).lower()
+    if transform_type.startswith(("op", "pr")):
+        transform_type = "optm"
+    elif transform_type.startswith("translation+rotation"):
+        transform_type = "trrt"
+    elif transform_type.startswith(("re", "qu")):
+        transform_type = "real"
+    elif transform_type.startswith(("im", "in")):
+        transform_type = "imag"
+    elif transform_type.startswith("am"):
+        transform_type = "ampl"
+    elif transform_type.startswith(("ph", "an")):
+        transform_type = "angl"
+
+    # Compute parameters if needed
+    if transform_type == "optm" and params is None:
+        params = opt_transform(data)
+
+    # Apply transformation
+    if transform_type in ["optm", "trrt"]:
+        transformed_data = transform(data, *params).real
+        residual = transform(data, *params).imag
+    elif transform_type == "real":
+        transformed_data = data.real
+        residual = data.imag
+    elif transform_type == "imag":
+        transformed_data = data.imag
+        residual = data.real
+    elif transform_type == "ampl":
+        transformed_data = np.abs(data)
+        residual = np.unwrap(np.angle(data))
+        if deg:
+            residual = np.degrees(residual)
+    elif transform_type == "angl":
+        transformed_data = np.unwrap(np.angle(data))
+        residual = np.abs(data)
+        if deg:
+            transformed_data = np.degrees(transformed_data)
+
+    inv_transform_fun = lambda data: _inv_transform(data, *params)
+
+    if full_output:
+        return np.array(transformed_data), inv_transform_fun, params, residual
+    if inv_transform:
+        return np.array(transformed_data), inv_transform_fun
+    return np.array(transformed_data)