sqil-core 1.0.0__tar.gz → 1.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. {sqil_core-1.0.0 → sqil_core-1.1.0}/PKG-INFO +1 -1
  2. {sqil_core-1.0.0 → sqil_core-1.1.0}/pyproject.toml +1 -1
  3. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/experiment/_analysis.py +30 -0
  4. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/fit/__init__.py +1 -0
  5. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/fit/_fit.py +54 -13
  6. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/fit/_guess.py +72 -0
  7. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/fit/_models.py +18 -0
  8. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/utils/__init__.py +4 -0
  9. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/utils/_analysis.py +104 -1
  10. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/utils/_const.py +9 -0
  11. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/utils/_formatter.py +5 -4
  12. {sqil_core-1.0.0 → sqil_core-1.1.0}/README.md +0 -0
  13. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/__init__.py +0 -0
  14. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/config.py +0 -0
  15. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/config_log.py +0 -0
  16. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/experiment/__init__.py +0 -0
  17. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/experiment/_events.py +0 -0
  18. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/experiment/_experiment.py +0 -0
  19. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/experiment/data/plottr.py +0 -0
  20. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/experiment/helpers/_function_override_handler.py +0 -0
  21. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/experiment/helpers/_labone_wrappers.py +0 -0
  22. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/experiment/instruments/__init__.py +0 -0
  23. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/experiment/instruments/_instrument.py +0 -0
  24. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/experiment/instruments/drivers/SignalCore_SC5511A.py +0 -0
  25. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/experiment/instruments/local_oscillator.py +0 -0
  26. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/experiment/instruments/server.py +0 -0
  27. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/experiment/instruments/setup.yaml +0 -0
  28. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/experiment/instruments/zurich_instruments.py +0 -0
  29. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/fit/_core.py +0 -0
  30. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/fit/_quality.py +0 -0
  31. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/resonator/__init__.py +0 -0
  32. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/resonator/_resonator.py +0 -0
  33. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/utils/_plot.py +0 -0
  34. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/utils/_read.py +0 -0
  35. {sqil_core-1.0.0 → sqil_core-1.1.0}/sqil_core/utils/_utils.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: sqil-core
-Version: 1.0.0
+Version: 1.1.0
 Summary: The codebase of the SQIL group in EPFL
 Author: Andrea Duina
 Requires-Python: >=3.10,<4.0
pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "sqil-core"
-version = "v1.0.0"
+version = "v1.1.0"
 description = "The codebase of the SQIL group in EPFL"
 authors = ["Andrea Duina"]
 readme = "README.md"
sqil_core/experiment/_analysis.py
@@ -17,6 +17,36 @@ if TYPE_CHECKING:
 
 
 class AnalysisResult:
+    """
+    Container for storing and managing results from a quantum measurement analysis.
+
+    Attributes
+    ----------
+    updated_params : dict[str, dict]
+        Dictionary containing the updated parameters for each qubit.
+    figures : dict[str, matplotlib.figure.Figure]
+        Dictionary of matplotlib figures.
+    fits : dict[str, FitResult]
+        Dictionary of fit results.
+    extra_data : dict[str, np.ndarray]
+        Dictionary of auxiliary computed arrays (e.g., processed IQ data, FFT results).
+
+    Methods
+    -------
+    add_exp_info_to_figures(dir_path)
+        Annotates each figure with experiment ID and cooldown name from directory path.
+    save_figures(dir_path)
+        Saves all figures as PNG and interactive HTML using mpld3.
+    aggregate_fit_summaries()
+        Aggregates human-readable summaries from all fit results.
+    save_fits(dir_path)
+        Saves aggregated fit summaries to a markdown file.
+    save_extra_data(dir_path)
+        Saves extra data arrays into an HDF5 file.
+    save_all(dir_path)
+        Runs all save methods and annotates figures with experimental metadata.
+    """
+
     updated_params: dict[str, dict] = {}
     figures: dict[str, Figure] = {}
     fits: dict[str, FitResult] = {}
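A brief usage sketch of the documented interface (not part of the diff; the analysis routine producing the result is hypothetical, and the import path is simply the module path listed above):

    from sqil_core.experiment._analysis import AnalysisResult  # module path per this diff

    result: AnalysisResult = run_t1_analysis(qubit="q0")          # hypothetical analysis routine
    result.add_exp_info_to_figures("/data/CD42/2025-01-15_T1")    # annotate with experiment ID and cooldown
    result.save_all("/data/CD42/2025-01-15_T1")                   # figures (PNG/HTML), fit summaries (markdown), extra data (HDF5)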
sqil_core/fit/__init__.py
@@ -11,6 +11,7 @@ from ._fit import (
     fit_decaying_oscillations,
     fit_gaussian,
     fit_lorentzian,
+    fit_many_decaying_oscillations,
     fit_oscillations,
     fit_qubit_relaxation_qp,
     fit_skewed_lorentzian,
sqil_core/fit/_fit.py
@@ -11,12 +11,14 @@ from sqil_core.utils._utils import fill_gaps, has_at_least_one, make_iterable
 
 from ._core import FitResult, fit_input, fit_output
 from ._guess import (
+    decaying_exp_guess,
     decaying_oscillations_bounds,
     decaying_oscillations_guess,
     gaussian_bounds,
     gaussian_guess,
     lorentzian_bounds,
     lorentzian_guess,
+    many_decaying_oscillations_guess,
     oscillations_bounds,
     oscillations_guess,
 )
@@ -347,19 +349,9 @@ def fit_decaying_exp(
     """
     x, y = x_data, y_data
 
-    # Default initial guess if not provided
-    if guess is None:
-        max_y = np.max(y)
-        min_y = np.min(y)
-        half = 0.5 * (max_y + min_y)
-
-        if y[0] > y[-1]:
-            tau0_idx = np.argmax(y < half)
-        else:
-            tau0_idx = np.argmax(y > half)
-
-        b0 = x[tau0_idx] if tau0_idx != 0 else 0.5 * (x[0] + x[-1])
-        guess = [y[0] - y[-1], b0, y[-1]]
+    # Default initial guess if not provided
+    if has_at_least_one(guess, None):
+        guess = fill_gaps(guess, decaying_exp_guess(x_data, y_data))
 
     # Default bounds if not provided
    if bounds is None:
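With the new guard, a caller can pass a partially filled guess and have only the missing entries auto-filled by decaying_exp_guess. A minimal sketch (not part of the diff; assumes fit_decaying_exp is exported from sqil_core.fit and uses the [A, tau, y0] parameter order returned by decaying_exp_guess):

    import numpy as np
    from sqil_core.fit import fit_decaying_exp  # assumed export

    x = np.linspace(0, 10e-6, 200)
    y = 0.8 * np.exp(-x / 2e-6) + 0.1                               # synthetic decay

    res_auto = fit_decaying_exp(x, y)                               # guess=None: fully automatic
    res_partial = fit_decaying_exp(x, y, guess=[None, 2e-6, None])  # pin tau, auto-fill A and y0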
@@ -619,6 +611,55 @@ def fit_decaying_oscillations(
     return best_fit, metadata
 
 
+@fit_output
+def fit_many_decaying_oscillations(
+    x_data: np.ndarray, y_data: np.ndarray, n: int, guess=None
+):
+    """
+    Fits a sum of `n` exponentially decaying oscillations to the given data.
+
+    Each component of the model is of the form:
+        A_i * exp(-x / tau_i) * cos(2π * T_i * x + phi_i)
+
+    Parameters
+    ----------
+    x_data : np.ndarray
+        1D array of x-values (e.g., time).
+    y_data : np.ndarray
+        1D array of y-values (e.g., signal amplitude).
+    n : int
+        Number of decaying oscillation components to fit.
+    guess : list or None, optional
+        Optional initial parameter guess. If None, a guess is automatically generated
+        using `many_decaying_oscillations_guess`.
+
+    Returns
+    -------
+    FitResult
+    """
+
+    if has_at_least_one(guess, None):
+        guess = fill_gaps(guess, many_decaying_oscillations_guess(x_data, y_data, n))
+
+    res = curve_fit(
+        _models.many_decaying_oscillations,
+        x_data,
+        y_data,
+        p0=guess,
+        # maxfev=10000,
+        full_output=True,
+    )
+
+    metadata = {
+        "param_names": [f"{p}{i}" for i in range(n) for p in ("A", "tau", "phi", "T")]
+        + ["y0"],
+        "predict": lambda x: _models.many_decaying_oscillations(x, *res[0]),
+        "model_name": f"many_decaying_oscillations({n})",
+    }
+
+    return res, metadata
+
+
 @fit_input
 @fit_output
 def fit_oscillations(
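A short calling sketch for the new fitter (not part of the diff; synthetic data, and the re-export from sqil_core.fit is taken from the __init__.py hunk above):

    import numpy as np
    from sqil_core.fit import fit_many_decaying_oscillations

    # Two decaying oscillations plus an offset, matching A_i * exp(-t/tau_i) * cos(2*pi*T_i*t + phi_i) + y0
    t = np.linspace(0, 5e-6, 500)
    y = (
        1.0 * np.exp(-t / 2e-6) * np.cos(2 * np.pi * 3e6 * t)
        + 0.4 * np.exp(-t / 1e-6) * np.cos(2 * np.pi * 7e6 * t + 0.5)
        + 0.2
    )

    fit_res = fit_many_decaying_oscillations(t, y, n=2)  # guess auto-generated from FFT peaks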
sqil_core/fit/_guess.py
@@ -2,6 +2,8 @@ import numpy as np
 from numpy.fft import rfft, rfftfreq
 from scipy.signal import hilbert
 
+from sqil_core.utils import compute_fft, get_peaks
+
 
 def estimate_peak(
     x_data: np.ndarray, y_data: np.ndarray
@@ -213,6 +215,12 @@ def decaying_oscillations_guess(x_data, y_data, num_init=10):
     except Exception:
         tau = np.ptp(x_data)
 
+    # Rough estimate of y0 with a local min or mean of last N points
+    N_tail = max(3, int(0.1 * len(y_data)))
+    tail_mean = np.mean(y_data[-N_tail:])
+    y0_decay = min(np.min(y_data), tail_mean)
+    y0_candidates.append(y0_decay)
+
     return [A, tau, y0_candidates, phi_candidates, T]
 
 
@@ -230,3 +238,67 @@ def decaying_oscillations_bounds(x_data, y_data, guess):
     lower.insert(1, tau_min)
     upper.insert(1, tau_max)
     return (lower, upper)
+
+
+def many_decaying_oscillations_guess(x_data, y_data, n):
+    offset = np.mean(y_data)
+    y_centered = y_data - offset
+
+    freqs, fft_mag = compute_fft(x_data, y_centered)
+    peak_freqs, peak_mags = get_peaks(freqs, fft_mag)
+
+    if len(peak_freqs) < n:
+        raise ValueError(
+            f"Not enough frequency peaks found to initialize {n} oscillations."
+        )
+
+    guess = []
+    signal_duration = x_data[-1] - x_data[0]
+
+    for i in range(n):
+        A = peak_mags[i]
+        tau = signal_duration / (2 + i)  # Shorter tau for each successive component
+        phi = 0.0  # Can be refined
+        T = peak_freqs[i]
+        guess.extend([A, tau, phi, T])
+
+    guess.append(offset)
+    return guess
+
+
+def decaying_exp_guess(x_data: np.ndarray, y_data: np.ndarray) -> list[float]:
+    """
+    Robust initial guess for a decaying exponential, even if the full decay isn't captured.
+    """
+    x = np.asarray(x_data)
+    y = np.asarray(y_data)
+
+    # Rough estimate of y0 with a local min or mean of last N points
+    N_tail = max(3, int(0.1 * len(y)))
+    tail_mean = np.mean(y[-N_tail:])
+    y0 = min(np.min(y), tail_mean)
+
+    # Amplitude
+    A = y[0] - y0
+    A = np.clip(A, 1e-12, None)
+
+    # Ensure sign consistency
+    if np.abs(np.max(y) - y0) > np.abs(A):
+        A = np.max(y) - y0
+
+    # Estimate tau using log-linear fit of the first ~30% of data
+    N_fit = max(5, int(0.3 * len(x)))
+    y_fit = y[:N_fit] - y0
+    mask = y_fit > 0  # log() only valid on positive values
+
+    if np.count_nonzero(mask) > 1:
+        x_fit = x[:N_fit][mask]
+        log_y = np.log(y_fit[mask])
+        slope, intercept = np.polyfit(x_fit, log_y, 1)
+        tau = -1 / slope if slope < 0 else (x[-1] - x[0]) / 3
+    else:
+        tau = (x[-1] - x[0]) / 3
+
+    tau = max(tau, np.finfo(float).eps)
+
+    return [A, tau, y0]
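To see what decaying_exp_guess produces on a truncated decay, a small sketch (not part of the diff; it imports the private module path shown above):

    import numpy as np
    from sqil_core.fit._guess import decaying_exp_guess

    x = np.linspace(0, 3e-6, 100)              # only ~1.5 time constants captured
    y = 0.5 * np.exp(-x / 2e-6) + 0.05

    A0, tau0, y00 = decaying_exp_guess(x, y)   # rough [A, tau, y0] starting values;
                                               # tau comes from a log-linear fit of the first ~30% of points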
sqil_core/fit/_models.py
@@ -74,6 +74,24 @@ def decaying_oscillations(x, A, tau, y0, phi, T):
     return A * np.exp(-x / tau) * np.cos(2.0 * np.pi * (x - phi) / T) + y0
 
 
+def many_decaying_oscillations(t, *params):
+    r"""
+    f(t) = SUM_i A_i * exp(-t / τ_i) * cos(2π * T_i * t + φ_i) + y0
+
+    $$f(t) = \sum_i A_i \cdot e^{-t/\tau_i} \cdot \cos\left(2\pi T_i t + \phi_i\right) + y_0$$
+    """
+    n = (len(params) - 1) // 4  # Each oscillation has 4 params: A, tau, phi, T
+    offset = params[-1]
+    result = np.zeros_like(t)
+    for i in range(n):
+        A = params[4 * i]
+        tau = params[4 * i + 1]
+        phi = params[4 * i + 2]
+        T = params[4 * i + 3]
+        result += A * np.exp(-t / tau) * np.cos(2 * np.pi * T * t + phi)
+    return result + offset
+
+
 def oscillations(x, A, y0, phi, T):
     r"""
     f(x) = A * cos(2π * (x - φ) / T) + y0
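The parameter packing expected by the model (and mirrored by the fitter's param_names) is (A_i, tau_i, phi_i, T_i) per component with the offset y0 last; an evaluation sketch (not part of the diff):

    import numpy as np
    from sqil_core.fit._models import many_decaying_oscillations

    t = np.linspace(0, 1e-6, 5)
    params = [
        1.0, 2e-6, 0.0, 3e6,   # component 0: A, tau, phi, T
        0.4, 1e-6, 0.5, 7e6,   # component 1
        0.2,                   # y0 offset
    ]
    y = many_decaying_oscillations(t, *params)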
sqil_core/utils/__init__.py
@@ -1,8 +1,10 @@
 from ._analysis import (
+    compute_fft,
     compute_snr_peaked,
     estimate_linear_background,
     find_closest_index,
     find_first_minima_idx,
+    get_peaks,
     line_between_2_points,
     linear_interpolation,
     remove_linear_background,
@@ -52,6 +54,8 @@ __all__ = [
     "find_closest_index",
     "compute_snr_peaked",
     "find_first_minima_idx",
+    "compute_fft",
+    "get_peaks",
     # Const
     "PARAM_METADATA",
     "ONE_TONE_PARAMS",
sqil_core/utils/_analysis.py
@@ -1,5 +1,5 @@
 import numpy as np
-from scipy.signal import argrelextrema
+from scipy.signal import argrelextrema, find_peaks
 
 
 def remove_offset(data: np.ndarray, avg: int = 3) -> np.ndarray:
@@ -413,3 +413,106 @@ def find_first_minima_idx(data):
         return minima_indices[0]
 
     return None
+
+
+def compute_fft(x_data, y_data):
+    """
+    Computes the Fast Fourier Transform (FFT) of a signal and returns the positive frequency spectrum.
+
+    Parameters
+    ----------
+    x_data : np.ndarray
+        Time or independent variable array, assumed to be uniformly spaced.
+    y_data : np.ndarray
+        Signal data corresponding to `x_data`. Can be real or complex.
+
+    Returns
+    -------
+    positive_freqs : np.ndarray
+        Array of positive frequency components corresponding to the FFT.
+    fft_magnitude : np.ndarray
+        Magnitude of the FFT at the positive frequencies.
+
+    Notes
+    -----
+    - The signal is centered by subtracting its mean before computing the FFT, which removes the DC component.
+    - Only the positive frequency half of the FFT spectrum is returned, assuming a real-valued input signal.
+    - If `y_data` is complex, returned FFT values still reflect magnitude only.
+    - The input `x_data` must be uniformly spaced for the frequency axis to be accurate.
+
+    Examples
+    --------
+    >>> t = np.linspace(0, 1, 1000)
+    >>> y = np.sin(2 * np.pi * 50 * t)
+    >>> freqs, spectrum = compute_fft(t, y)
+    """
+
+    # Subtract DC offset to focus on oscillations
+    y_data_centered = y_data - np.mean(y_data)
+
+    # Calculate time step (assumes uniform spacing)
+    dt = x_data[1] - x_data[0]
+    N = len(x_data)
+
+    # Compute FFT and frequency axis
+    fft_vals = np.fft.fft(y_data_centered)
+    freqs = np.fft.fftfreq(N, dt)
+
+    # Take only positive frequencies
+    positive_freqs = freqs[: N // 2]
+    fft_magnitude = np.abs(fft_vals[: N // 2])
+
+    return positive_freqs, fft_magnitude
+
+
+def get_peaks(x_data, y_data, prominence: float | None = None, sort=True):
+    """
+    Detects and returns peaks in a 1D signal based on prominence.
+
+    Parameters
+    ----------
+    x_data : np.ndarray
+        1D array of x-values corresponding to `y_data` (e.g., frequency or time axis).
+    y_data : np.ndarray
+        1D array of y-values representing the signal in which to find peaks.
+    prominence : float or None, optional
+        Minimum prominence of peaks to detect. If None, defaults to 5% of the maximum
+        value in `y_data`.
+    sort : bool, optional
+        If True, peaks are sorted in descending order of magnitude. Default is True.
+
+    Returns
+    -------
+    peak_freqs : np.ndarray
+        x-values at which peaks occur.
+    peak_magnitudes : np.ndarray
+        y-values (magnitudes) at the detected peaks.
+
+    Notes
+    -----
+    - Uses `scipy.signal.find_peaks` for detection.
+
+    Examples
+    --------
+    >>> x = np.linspace(0, 10, 1000)
+    >>> y = np.sin(2 * np.pi * x) + 0.1 * np.random.randn(1000)
+    >>> freqs, mags = get_peaks(x, np.abs(y))
+    >>> print(freqs[:3], mags[:3])  # Show top 3 peak locations and magnitudes
+    """
+
+    if prominence is None:
+        prominence = 0.05 * np.max(y_data)
+
+    # Find peaks
+    peaks, properties = find_peaks(y_data, prominence=prominence)
+
+    # Get the corresponding frequencies and magnitudes
+    peak_freqs = x_data[peaks]
+    peak_magnitudes = y_data[peaks]
+
+    if sort:
+        sorted_indices = np.argsort(peak_magnitudes)[::-1]
+        peak_freqs = peak_freqs[sorted_indices]
+        peak_magnitudes = peak_magnitudes[sorted_indices]
+
+    return peak_freqs, peak_magnitudes
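Taken together, the two helpers give a quick spectral peak search; for example (not part of the diff):

    import numpy as np
    from sqil_core.utils import compute_fft, get_peaks

    t = np.linspace(0, 1, 2000, endpoint=False)           # 1 s, uniformly sampled
    y = np.sin(2 * np.pi * 50 * t) + 0.5 * np.sin(2 * np.pi * 120 * t)

    freqs, mag = compute_fft(t, y)                        # positive-frequency magnitude spectrum
    peak_freqs, peak_mags = get_peaks(freqs, mag)         # sorted by magnitude, largest first
    print(peak_freqs[:2])                                 # approximately [50., 120.]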
sqil_core/utils/_const.py
@@ -91,6 +91,15 @@ PARAM_METADATA = {
         "unit": "s",
         "scale": 1e9,
     },
+    "ge_T1": {"name": "T1", "symbol": "T_1", "unit": "s", "scale": 1e6},
+    "ge_T2": {"name": "T2", "symbol": "T_2", "unit": "s", "scale": 1e6},
+    "ge_T2_star": {"name": "T2*", "symbol": "T_2^*", "unit": "s", "scale": 1e6},
+    "reset_delay_length": {
+        "name": "Reset delay",
+        "symbol": "T_{reset}",
+        "unit": "s",
+        "scale": 1e6,
+    },
 }
 
 ONE_TONE_PARAMS = np.array(
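The new entries follow the existing PARAM_METADATA record shape and can be looked up directly (not part of the diff; PARAM_METADATA is listed in the utils __all__ above):

    from sqil_core.utils import PARAM_METADATA

    meta = PARAM_METADATA["ge_T1"]
    print(meta["name"], meta["symbol"], meta["unit"], meta["scale"])  # T1 T_1 s 1000000.0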
sqil_core/utils/_formatter.py
@@ -184,10 +184,11 @@ class ParamInfo:
     }
 
     @property
-    def name_and_unit(self):
-        return self.name + (
-            f" [{self.rescaled_unit}]" if self.unit or self.scale != 1 else ""
-        )
+    def name_and_unit(self, latex=True):
+        unit = f"[{self.rescaled_unit}]" if self.unit or self.scale != 1 else ""
+        if unit == "":
+            return unit
+        return self.name + rf" ${unit}$" if latex else rf" {unit}"
 
     @property
     def rescaled_unit(self):