sqil-core 0.1.0__py3-none-any.whl → 1.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sqil_core/__init__.py +1 -0
- sqil_core/config_log.py +42 -0
- sqil_core/experiment/__init__.py +11 -0
- sqil_core/experiment/_analysis.py +125 -0
- sqil_core/experiment/_events.py +25 -0
- sqil_core/experiment/_experiment.py +553 -0
- sqil_core/experiment/data/plottr.py +778 -0
- sqil_core/experiment/helpers/_function_override_handler.py +111 -0
- sqil_core/experiment/helpers/_labone_wrappers.py +12 -0
- sqil_core/experiment/instruments/__init__.py +2 -0
- sqil_core/experiment/instruments/_instrument.py +190 -0
- sqil_core/experiment/instruments/drivers/SignalCore_SC5511A.py +515 -0
- sqil_core/experiment/instruments/local_oscillator.py +205 -0
- sqil_core/experiment/instruments/server.py +175 -0
- sqil_core/experiment/instruments/setup.yaml +21 -0
- sqil_core/experiment/instruments/zurich_instruments.py +55 -0
- sqil_core/fit/__init__.py +23 -0
- sqil_core/fit/_core.py +179 -31
- sqil_core/fit/_fit.py +544 -94
- sqil_core/fit/_guess.py +304 -0
- sqil_core/fit/_models.py +50 -1
- sqil_core/fit/_quality.py +266 -0
- sqil_core/resonator/__init__.py +2 -0
- sqil_core/resonator/_resonator.py +256 -74
- sqil_core/utils/__init__.py +40 -13
- sqil_core/utils/_analysis.py +226 -0
- sqil_core/utils/_const.py +83 -18
- sqil_core/utils/_formatter.py +127 -55
- sqil_core/utils/_plot.py +272 -6
- sqil_core/utils/_read.py +178 -95
- sqil_core/utils/_utils.py +147 -0
- {sqil_core-0.1.0.dist-info → sqil_core-1.1.0.dist-info}/METADATA +9 -1
- sqil_core-1.1.0.dist-info/RECORD +36 -0
- {sqil_core-0.1.0.dist-info → sqil_core-1.1.0.dist-info}/WHEEL +1 -1
- sqil_core-0.1.0.dist-info/RECORD +0 -19
- {sqil_core-0.1.0.dist-info → sqil_core-1.1.0.dist-info}/entry_points.txt +0 -0
sqil_core/experiment/_experiment.py
@@ -0,0 +1,553 @@
+from __future__ import annotations
+
+import copy
+import itertools
+import json
+import os
+from abc import ABC, abstractmethod
+from typing import TYPE_CHECKING, Callable, cast
+
+import attrs
+import matplotlib.pyplot as plt
+import numpy as np
+from laboneq import serializers, workflow
+from laboneq.dsl.quantum import TransmonParameters
+from laboneq.dsl.quantum.qpu import QPU
+from laboneq.dsl.quantum.quantum_element import QuantumElement
+from laboneq.dsl.session import Session
+from laboneq.simple import DeviceSetup
+from laboneq.simple import Experiment as LaboneQExperiment
+from laboneq.workflow.tasks import compile_experiment, run_experiment
+from laboneq_applications.analysis.resonator_spectroscopy import analysis_workflow
+from laboneq_applications.experiments.options import TuneUpWorkflowOptions
+from laboneq_applications.tasks.parameter_updating import (
+    temporary_modify,
+    update_qubits,
+)
+from numpy.typing import ArrayLike
+from qcodes import Instrument as QCodesInstrument
+
+from sqil_core.config_log import logger
+from sqil_core.experiment._analysis import AnalysisResult
+from sqil_core.experiment._events import (
+    after_experiment,
+    after_sequence,
+    before_experiment,
+    before_sequence,
+    clear_signal,
+)
+from sqil_core.experiment.data.plottr import DataDict, DDH5Writer
+from sqil_core.experiment.helpers._labone_wrappers import w_save
+from sqil_core.experiment.instruments.local_oscillator import LocalOscillator
+from sqil_core.experiment.instruments.server import (
+    connect_instruments,
+    link_instrument_server,
+)
+from sqil_core.experiment.instruments.zurich_instruments import ZI_Instrument
+
+# from sqil_core.experiment.setup_registry import setup_registry
+from sqil_core.utils._read import copy_folder, read_yaml
+from sqil_core.utils._utils import _extract_variables_from_module
+
+
+class Instruments:
+    def __init__(self, data):
+        self._instruments = data
+        for key, value in data.items():
+            setattr(self, key, value)
+
+    def __iter__(self):
+        """Allow iteration directly over instrument instances."""
+        return iter(self._instruments.values())
+
+
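The Instruments wrapper above exposes each connected instrument both as an attribute (named after its key in the setup dictionary) and through plain iteration. A minimal usage sketch, with zi and lo standing in for hypothetical instrument instances:

instruments = Instruments({"zi": zi, "lo": lo})
instruments.zi                     # attribute access by dictionary key
for instrument in instruments:     # iterate over the instrument instances
    print(instrument)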
+class ExperimentHandler(ABC):
+    setup: dict
+    instruments: Instruments | None = None
+
+    zi_setup: DeviceSetup
+    zi_session: Session
+    qpu: QPU
+
+    db_schema: dict = None
+
+    def __init__(
+        self,
+        params: dict = {},
+        param_dict_path: str = "",
+        setup_path: str = "",
+        server=False,
+    ):
+        # Read setup file
+        if not setup_path:
+            config = read_yaml("config.yaml")
+            setup_path = config.get("setup_path", "setup.py")
+        self.setup = _extract_variables_from_module("setup", setup_path)
+
+        # Get instruments through the server or connect locally
+        if server:
+            server, instrument_instances = link_instrument_server()
+        else:
+            instrument_dict = self.setup.get("instruments", None)
+            if not instrument_dict:
+                logger.warning(
+                    f"Unable to find any instruments in {setup_path}"
+                    + "Do you have an `instruments` entry in your setup file?"
+                )
+            # Reset event listeners
+            clear_signal(before_experiment)
+            clear_signal(before_sequence)
+            clear_signal(after_sequence)
+            clear_signal(after_experiment)
+            instrument_instances = connect_instruments(instrument_dict)
+
+        # Create Zurich Instruments session
+        zi = cast(ZI_Instrument, instrument_instances.get("zi", None))
+        if zi is not None:
+            self.zi_setup = zi.generate_setup()
+            # self.zi_setup = DeviceSetup.from_descriptor(zi.descriptor, zi.address)
+            self.zi_session = Session(self.zi_setup)
+            self.zi_session.connect()
+            self._load_qpu(zi.generate_qpu)
+
+        self.instruments = Instruments(instrument_instances)
+        self._setup_instruments()
+
+    def _load_qpu(self, generate_qpu: Callable):
+        qpu_filename = self.setup["storage"].get("qpu_filename", "qpu.json")
+        db_path_local = self.setup["storage"]["db_path_local"]
+        try:
+            self.qpu = serializers.load(os.path.join(db_path_local, qpu_filename))
+        except FileNotFoundError:
+            logger.warning(
+                f"Cannot find QPU file name {qpu_filename} in {db_path_local}"
+            )
+            logger.warning(f" -> Creating a new QPU file")
+            self.qpu = generate_qpu(self.zi_setup)
+            os.makedirs(db_path_local, exist_ok=True)
+            w_save(
+                self.qpu,
+                os.path.join(db_path_local, qpu_filename),
+            )
+
+    # Move to server
+    def _setup_instruments(self):
+        """Default setup for all instruments with support for custom setups"""
+        logger.info("Setting up instruments")
+        if not hasattr(self, "instruments"):
+            logger.warning("No instruments to set up")
+            return
+
+        for instrument in self.instruments:
+            if not hasattr(instrument, "setup"):
+                continue
+            instrument.setup()
+
+    @abstractmethod
+    def sequence(self, *args, **kwargs):
+        """Experimental sequence defined by the user"""
+        pass
+
+    @abstractmethod
+    def analyze(self, path, *args, **kwargs):
+        pass
+
+    def run(self, *args, **kwargs):
+        try:
+            db_type = self.setup.get("storage", {}).get("db_type", "")
+
+            if db_type == "plottr":
+                return self.run_with_plottr(*args, **kwargs)
+            else:
+                return self.run_raw(*args, **kwargs)
+
+        finally:
+            # Close and delete QCodes instances to avoid connection issues in following experiments
+            QCodesInstrument.close_all()
+            for instrument in self.instruments:
+                del instrument
+
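run() dispatches on the db_type entry of the storage section in the setup module loaded by __init__. A minimal sketch of such a setup.py, limited to the keys this file actually reads; the paths and the instruments entries are placeholders whose real format is defined by connect_instruments:

# setup.py (sketch)
storage = {
    "db_type": "plottr",                       # anything else falls through to run_raw()
    "db_path": "//server/measurements",        # hypothetical server path
    "db_path_local": "C:/data/measurements",   # hypothetical local path
    "qpu_filename": "qpu.json",
}

instruments = {
    "zi": {...},   # Zurich Instruments entry, consumed by connect_instruments()
    "lo": {...},   # e.g. a local oscillator driver entry
}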
+    def run_with_plottr(self, *args, **kwargs):
+        logger.info("Before exp")
+        before_experiment.send(sender=self)
+
+        # Map input parameters index to their name
+        params_map, _ = map_inputs(self.sequence)
+
+        # Get information on sweeps
+        sweeps: dict = kwargs.get("sweeps", None)
+        sweep_keys = []
+        sweep_grid = []
+        sweep_schema = {}
+        if sweeps is not None:
+            # Name of the parameters to sweep
+            sweep_keys = list(sweeps.keys())
+            # Create a mesh grid of all the sweep parameters
+            sweep_grid = list(itertools.product(*sweeps.values()))
+            # Add sweeps to the database schema
+            for i, key in enumerate(sweep_keys):
+                # TODO: dynamically add unit
+                sweep_schema[f"sweep{i}"] = {"role": "axis", "param_id": key}
+
+        # Create the plottr datadict (database) using the inferred schema
+        db_schema = {**self.db_schema, **sweep_schema}
+        datadict = build_plottr_dict(db_schema)
+        # Get local and server storage folders
+        db_path = self.setup["storage"]["db_path"]
+        db_path_local = self.setup["storage"]["db_path_local"]
+
+        # TODO: dynamically assign self.exp_name to class name if not provided
+        with DDH5Writer(datadict, db_path_local, name=self.exp_name) as writer:
+            # Get the path to the folder where the data will be stored
+            storage_path = get_plottr_path(writer, db_path)
+            storage_path_local = get_plottr_path(writer, db_path_local)
+            # Save helper files
+            writer.save_text("paths.md", f"{storage_path_local}\n{storage_path}")
+            # Save backup qpu
+            old_qubits = self.qpu.copy_quantum_elements()
+            serializers.save(self.qpu, os.path.join(storage_path_local, "qpu_old.json"))
+
+            # TODO: for index sweep don't recompile laboneq
+            for sweep_values in sweep_grid or [None]:
+                data_to_save = {}
+
+                # Run/create the experiment. Creates it for laboneq, runs it otherwise
+                seq = self.sequence(*args, **kwargs)
+                # Detect if the sequence created a laboneq experiment
+                is_laboneq_exp = type(seq) == LaboneQExperiment
+
+                if is_laboneq_exp:
+                    qu_indices = kwargs.get("qu_idx", [0])
+                    if type(qu_indices) == int:
+                        qu_indices = [qu_indices]
+                    used_qubits = [self.qpu.quantum_elements[i] for i in qu_indices]
+                    qu_idx_by_uid = [qubit.uid for qubit in self.qpu.quantum_elements]
+                    # TODO: save and re-apply old qubit params
+                    # Reset to the first value of every sweep,
+                    # then override current sweep value for all qubits
+                    for qubit in used_qubits:
+                        tmp = dict(zip(sweep_keys, sweep_values or []))
+                        qubit.update(**tmp)
+                    # Create the experiment (required to update params)
+                    seq = self.sequence(*args, **kwargs)
+                    compiled_exp = compile_experiment(self.zi_session, seq)
+                    # pulse_sheet(self.zi_setup, compiled_exp, self.exp_name)
+                    before_sequence.send(sender=self)
+                    result = run_experiment(self.zi_session, compiled_exp)
+                    after_sequence.send(sender=self)
+                    # TODO: handle multiple qubits. Maybe different datadicts?
+                    raw_data = result[qu_idx_by_uid[qu_indices[0]]].result.data
+                    data_to_save["data"] = raw_data
+                    result = raw_data
+                else:
+                    # TODO: handle results for different instruments
+                    data_to_save["data"] = seq
+
+                # Add parameters to the data to save
+                datadict_keys = datadict.keys()
+                for key, value in params_map.items():
+                    if key in datadict_keys:
+                        data_to_save[key] = args[value]
+                # Add sweeps to the data to save
+                if sweeps is not None:
+                    for i, key in enumerate(sweep_keys):
+                        data_to_save[f"sweep{i}"] = sweep_values[i]
+
+                # Save data using plottr
+                writer.add_data(**data_to_save)
+
+            after_experiment.send()
+
+            # Reset the qpu to its previous state
+            self.qpu.quantum_operations.detach_qpu()
+            self.qpu = QPU(old_qubits, self.qpu.quantum_operations)
+
+            # Run analysis script
+            try:
+                anal_res = self.analyze(storage_path_local, *args, **kwargs)
+                if type(anal_res) == AnalysisResult:
+                    anal_res = cast(AnalysisResult, anal_res)
+                    anal_res.save_all(storage_path_local)
+                    # Update QPU
+                    if is_laboneq_exp and not kwargs.get("no_update", False):
+                        for qu_id in anal_res.updated_params.keys():
+                            qubit = self.qpu.quantum_element_by_uid(qu_id)
+                            qubit.update(**anal_res.updated_params[qu_id])
+                    # writer.save_text("analysis.md", anal_res)
+                plt.show()
+            except Exception as e:
+                logger.error(f"Error while analyzing the data {e}")
+
+            w_save(self.qpu, os.path.join(storage_path_local, "qpu_new.json"))
+            qpu_filename = self.setup["storage"].get("qpu_filename", "qpu.json")
+            w_save(
+                self.qpu,
+                os.path.join(db_path_local, qpu_filename),
+            )
+
+        # Copy the local folder to the server
+        copy_folder(storage_path_local, storage_path)
+
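The sweeps keyword consumed by run_with_plottr is a dict mapping qubit parameter names to arrays of values; itertools.product turns them into a grid, and each grid point is applied to the selected qubits via qubit.update() before the sequence is rebuilt and recompiled. A sketch, assuming exp is an ExperimentHandler subclass instance and that the parameter names exist on the qubit (they are placeholders here):

sweeps = {
    "readout_range_out": [-30, -20, -10],                          # 3 values
    "readout_resonator_frequency": np.linspace(7.1e9, 7.2e9, 5),   # 5 values
}
# -> 15 grid points; each point is stored under the extra axes "sweep0" and "sweep1"
exp.run(qu_idx=0, sweeps=sweeps)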
+    def run_raw(self, *args, **kwargs):
+        before_experiment.send(sender=self)
+
+        seq = self.sequence(*args, **kwargs)
+        is_laboneq_exp = type(seq) == LaboneQExperiment
+        result = None
+
+        if is_laboneq_exp:
+            compiled_exp = compile_experiment(self.zi_session, seq)
+            result = run_experiment(self.zi_session, compiled_exp)
+        else:
+            result = seq
+
+        after_experiment.send(sender=self)
+
+        return result
+
+    def sweep_around(
+        self,
+        center: str | float,
+        span: float | tuple[float, float],
+        n_points: int = None,
+        step: float = None,
+        scale: str = "linear",
+        qu_uid="q0",
+    ):
+        """
+        Generates a sweep of values around a specified center, either numerically or by referencing
+        a qubit parameter.
+
+        Parameters
+        ----------
+        center : str or float
+            Center of the sweep. If a string, it's interpreted as the name of a qubit parameter
+            and resolved via `qubit_value`. If a float, used directly.
+        span : float or tuple of float
+            If a float, sweep will extend symmetrically by `span` on both sides of `center`.
+            If a tuple `(left, right)`, creates an asymmetric sweep: `center - left` to `center + right`.
+        n_points : int, optional
+            Number of points in the sweep. Specify exactly one of `n_points` or `step`.
+        step : float, optional
+            Step size in the sweep. Specify exactly one of `n_points` or `step`.
+        scale : {'linear', 'log'}, default 'linear'
+            Whether to generate the sweep on a linear or logarithmic scale.
+            For logarithmic sweeps, all generated values must be > 0.
+        qu_uid : str, default "q0"
+            Qubit identifier used to resolve `center` if it is a parameter name.
+
+        Returns
+        -------
+        np.ndarray
+            Array of sweep values.
+
+        Raises
+        ------
+        AttributeError
+            If `center` is a string and not found in the qubit's parameter set.
+        ValueError
+            If scale is not one of 'linear' or 'log'.
+            If a log-scale sweep is requested with non-positive start/stop values.
+            If both or neither of `n_points` and `step` are provided.
+
+        Notes
+        -----
+        - For log scale and `step`-based sweeps, the step is interpreted in multiplicative terms,
+          and an approximate number of points is derived.
+        - Sweep boundaries are inclusive when using `step`, thanks to the `+ step / 2` adjustment.
+        """
+
+        if isinstance(center, str):
+            value = self.qubit_value(param_id=center, qu_uid=qu_uid)
+            if value is None:
+                raise AttributeError(
+                    f"No attribute {center} in qubit {qu_uid} parameters."
+                )
+            center = value
+
+        # Handle symmetric or asymmetric span
+        if isinstance(span, tuple):
+            left, right = span
+        else:
+            left = right = span
+
+        start = center - left
+        stop = center + right
+
+        if scale not in ("linear", "log"):
+            raise ValueError("scale must be 'linear' or 'log'")
+
+        if start <= 0 or stop <= 0:
+            if scale == "log":
+                raise ValueError("Logarithmic sweep requires all values > 0")
+
+        if (n_points is None) == (step is None):
+            raise ValueError("Specify exactly one of 'n_points' or 'step'")
+
+        if scale == "linear":
+            if step is not None:
+                return np.arange(start, stop + step / 2, step)
+            else:
+                return np.linspace(start, stop, n_points)
+
+        else:  # scale == "log"
+            if step is not None:
+                # Compute approximate number of points from step in log space
+                log_start = np.log10(start)
+                log_stop = np.log10(stop)
+                num_steps = (
+                    int(np.floor((log_stop - log_start) / np.log10(1 + step / start)))
+                    + 1
+                )
+                return np.logspace(log_start, log_stop, num=num_steps)
+            else:
+                return np.logspace(np.log10(start), np.log10(stop), n_points)
+
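Example calls for sweep_around, assuming exp is an ExperimentHandler subclass instance and that "readout_resonator_frequency" is a valid parameter of q0 (the name is only illustrative):

# 201 points spanning +/- 5 MHz around the stored readout resonator frequency of q0
freqs = exp.sweep_around("readout_resonator_frequency", span=5e6, n_points=201)

# asymmetric numeric sweep from -35 to -20 in steps of 1 (endpoints included)
powers = exp.sweep_around(-30, span=(5, 10), step=1)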
+    def qubit_value(self, param_id, qu_uid="q0"):
+        """Get a qubit parameter value from the QPU."""
+        params = self.qpu.quantum_element_by_uid(qu_uid).parameters
+        return attrs.asdict(params).get(param_id)
+
+
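A sketch of how ExperimentHandler is meant to be subclassed, inferred from the attributes referenced above (exp_name, db_schema, sequence, analyze); the class name, parameters, and schema entries are placeholders:

class ResonatorSpectroscopy(ExperimentHandler):
    exp_name = "resonator_spectroscopy"      # dataset name passed to DDH5Writer
    db_schema = {
        "frequency": {"role": "axis", "unit": "Hz"},
        "data": {"role": "data", "unit": "V"},
    }

    def sequence(self, frequency, *args, **kwargs):
        # Return a LaboneQ Experiment (compiled and run by run_with_plottr/run_raw)
        # or, for other instruments, the measured data itself.
        ...

    def analyze(self, path, *args, **kwargs):
        # Optionally return an AnalysisResult so fitted parameters are
        # written back into the QPU after the run.
        ...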
+def build_plottr_dict(db_schema):
+    """Create a DataDict object from the given schema."""
+    axes = []
+    db = {}
+
+    data_key = "data"
+    data_unit = ""
+
+    for key, value in db_schema.items():
+        if value.get("role") in ("axis", "x-axis"):
+            unit = value.get("unit", "")
+            db[key] = dict(unit=unit)
+            axes.append(key)
+        elif value.get("role") == "data":
+            data_key = key
+            data_unit = value.get("unit", "")
+    db[data_key] = dict(axes=axes, unit=data_unit)
+    datadict = DataDict(**db)
+
+    datadict.add_meta("schema", json.dumps(db_schema))
+
+    return datadict
+
+
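For reference, a schema with one axis and one data entry maps onto the plottr DataDict as follows:

db_schema = {
    "frequency": {"role": "axis", "unit": "Hz"},
    "mag": {"role": "data", "unit": "V"},
}
datadict = build_plottr_dict(db_schema)
# equivalent to DataDict(frequency=dict(unit="Hz"), mag=dict(axes=["frequency"], unit="V")),
# with the full schema attached as JSON metadata under the key "schema"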
+import inspect
+
+
+def map_inputs(func):
+    """Extracts parameter names and keyword arguments from a function signature."""
+    sig = inspect.signature(func)
+    params = {}
+    kwargs = []
+
+    for index, (name, param) in enumerate(sig.parameters.items()):
+        if param.default == inspect.Parameter.empty:
+            # Positional or required argument
+            params[name] = index
+        else:
+            # Keyword argument
+            kwargs.append(name)
+
+    return params, kwargs
+
+
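map_inputs is what lets run_with_plottr match the positional arguments of a user's sequence to schema keys by name. For a signature like the one below it returns:

def sequence(frequency, power, n_avg=1024):
    ...

params_map, kw = map_inputs(sequence)
# params_map == {"frequency": 0, "power": 1}   (required arguments and their positions)
# kw == ["n_avg"]                              (arguments that have defaults)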
+def get_plottr_path(writer: DDH5Writer, root_path):
+    filepath_parent = writer.filepath.parent
+    path = str(filepath_parent)
+    last_two_parts = path.split(os.sep)[-2:]
+    return os.path.join(root_path, *last_two_parts)
+
+
+from laboneq.simple import OutputSimulator
+
+
+def pulse_sheet(device_setup, compiled_exp, name):
+    start = 0
+    end = 0.15e-6
+    colors = [
+        "tab:blue",
+        "tab:orange",
+        "tab:green",
+        "tab:red",
+        "tab:purple",
+        "tab:brown",
+    ]
+
+    # Get physical channel references via the logical signals
+    drive_iq_port = device_setup.logical_signal_by_uid("q0/drive").physical_channel
+    measure_iq_port = device_setup.logical_signal_by_uid("q0/measure").physical_channel
+    acquire_port = device_setup.logical_signal_by_uid("q0/acquire").physical_channel
+
+    # Get waveform snippets from the simulation
+    simulation = OutputSimulator(compiled_exp)
+
+    drive_snippet = simulation.get_snippet(
+        drive_iq_port, start=start, output_length=end
+    )
+
+    measure_snippet = simulation.get_snippet(
+        measure_iq_port, start=start, output_length=end
+    )
+
+    acquire_snippet = simulation.get_snippet(
+        acquire_port, start=start, output_length=end
+    )
+
+    fig = plt.figure(figsize=(15, 5))
+    plt.plot(
+        drive_snippet.time * 1e6,
+        drive_snippet.wave.real,
+        color=colors[0],
+        label="Qubit I",
+    )
+    plt.fill_between(
+        drive_snippet.time * 1e6, drive_snippet.wave.real, color=colors[0], alpha=0.6
+    )
+    plt.plot(
+        drive_snippet.time * 1e6,
+        drive_snippet.wave.imag,
+        color=colors[1],
+        label="Qubit Q",
+    )
+    plt.fill_between(
+        drive_snippet.time * 1e6, drive_snippet.wave.imag, color=colors[1], alpha=0.6
+    )
+
+    plt.plot(
+        measure_snippet.time * 1e6,
+        measure_snippet.wave.real,
+        color=colors[2],
+        label="Readout I",
+    )
+    plt.fill_between(
+        measure_snippet.time * 1e6,
+        measure_snippet.wave.real,
+        color=colors[2],
+        alpha=0.6,
+    )
+    plt.plot(
+        measure_snippet.time * 1e6,
+        measure_snippet.wave.imag,
+        color=colors[3],
+        label="Readout Q",
+    )
+    plt.fill_between(
+        measure_snippet.time * 1e6,
+        measure_snippet.wave.imag,
+        color=colors[3],
+        alpha=0.6,
+    )
+    plt.plot(
+        acquire_snippet.time * 1e6,
+        acquire_snippet.wave.real,
+        color=colors[4],
+        label="acquire start",
+    )
+
+    plt.legend()
+    plt.xlabel(r"Time($\mu s$)")
+    plt.ylabel("Amplitude")
+    plt.title(name)
+    plt.show()