iqm-benchmarks 1.7__py3-none-any.whl → 1.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

@@ -19,14 +19,18 @@ GHZ state benchmark
  from io import BytesIO
  from itertools import chain
  import json
- from time import strftime, time
- from typing import Dict, List, Optional, Tuple, Type, cast
+ from time import strftime
+ from typing import Any, Dict, List, Optional, Sequence, Tuple, Type, cast
 
+ from matplotlib.figure import Figure
+ import matplotlib.pyplot as plt
+ import networkx
  from networkx import Graph, all_pairs_shortest_path, is_connected, minimum_spanning_tree
  import numpy as np
  import pycurl
  from qiskit import QuantumCircuit, transpile
  from qiskit.quantum_info import random_clifford
+ from qiskit.transpiler import CouplingMap
  from qiskit_aer import Aer
  from scipy.spatial.distance import hamming
  import xarray as xr
@@ -45,7 +49,9 @@ from iqm.benchmarks.readout_mitigation import apply_readout_error_mitigation
  from iqm.benchmarks.utils import (
  perform_backend_transpilation,
  reduce_to_active_qubits,
+ retrieve_all_counts,
  set_coupling_map,
+ submit_execute,
  timeit,
  xrvariable_to_counts,
  )
@@ -98,7 +104,7 @@ def append_rms(
 
  def fidelity_ghz_randomized_measurements(
  dataset: xr.Dataset, qubit_layout, ideal_probabilities: List[Dict[str, int]], num_qubits: int
- ) -> Dict[str, float]:
+ ) -> tuple[dict[str, Any], dict[str, Any]]:
  """
  Estimates GHZ state fidelity through cross-correlations of RMs.
  Implementation of Eq. (34) in https://arxiv.org/abs/1812.02624
@@ -109,21 +115,24 @@ def fidelity_ghz_randomized_measurements(
  ideal_probabilities (List[Dict[str, int]]):
  num_qubits (int):
  Returns:
- Dict[str, float]
+ values: dict[str, Any]
+ The fidelities
+ uncertainties: dict[str, Any]
+ The uncertainties for the fidelities
  """
-
+ idx = BenchmarkObservationIdentifier(qubit_layout).string_identifier
  # List for each RM contribution to the fidelity
  fid_rm = []
 
  # Loop through RMs and add each contribution
- num_rms = len(dataset.attrs["transpiled_circuits"][f"{str(qubit_layout)}"][tuple(qubit_layout)])
+ num_rms = len(dataset.attrs["transpiled_circuits"][f"{idx}"])
  for u in range(num_rms):
  # Probability estimates for noisy measurements
  probabilities_sample = {}
- c_keys = dataset[f"{str(qubit_layout)}_state_{u}"].data # measurements[u].keys()
- num_shots_noisy = sum(dataset[f"{str(qubit_layout)}_counts_{u}"].data)
+ c_keys = dataset[f"{idx}_state_{u}"].data # measurements[u].keys()
+ num_shots_noisy = sum(dataset[f"{idx}_counts_{u}"].data)
  for k, key in enumerate(c_keys):
- probabilities_sample[key] = dataset[f"{str(qubit_layout)}_counts_{u}"].data[k] / num_shots_noisy
+ probabilities_sample[key] = dataset[f"{idx}_counts_{u}"].data[k] / num_shots_noisy
  # Keys for corresponding ideal probabilities
  c_id_keys = ideal_probabilities[u].keys()
 
@@ -133,17 +142,18 @@ def fidelity_ghz_randomized_measurements(
  exponent = hamming(list(sa), list(sb)) * num_qubits
  p_sum.append(np.power(-2, -exponent) * probabilities_sample[sa] * ideal_probabilities[u][sb])
  fid_rm.append((2**num_qubits) * sum(p_sum))
- fidelities = {"mean": np.mean(fid_rm), "std": np.std(fid_rm) / np.sqrt(num_rms)}
+ values = {"fidelity": np.mean(fid_rm)}
+ uncertainties = {"fidelity": np.std(fid_rm) / np.sqrt(num_rms)}
 
  if dataset.attrs["rem"]:
  fid_rm_rem = []
  for u in range(num_rms):
  # Probability estimates for noisy measurements
  probabilities_sample = {}
- c_keys = dataset[f"{str(qubit_layout)}_rem_state_{u}"].data # measurements[u].keys()
- num_shots_noisy = sum(dataset[f"{str(qubit_layout)}_rem_counts_{u}"].data)
+ c_keys = dataset[f"{idx}_rem_state_{u}"].data # measurements[u].keys()
+ num_shots_noisy = sum(dataset[f"{idx}_rem_counts_{u}"].data)
  for k, key in enumerate(c_keys):
- probabilities_sample[key] = dataset[f"{str(qubit_layout)}_rem_counts_{u}"].data[k] / num_shots_noisy
+ probabilities_sample[key] = dataset[f"{idx}_rem_counts_{u}"].data[k] / num_shots_noisy
  # Keys for corresponding ideal probabilities
  c_id_keys = ideal_probabilities[u].keys()
 
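For context, the estimator reworked in the hunks above implements the randomized-measurement fidelity of Eq. (34) in arXiv:1812.02624: each measurement setting contributes 2^N * sum over bitstring pairs of (-2)^(-D) * P_noisy(s) * P_ideal(s'), with D the Hamming distance, and the reported value and uncertainty are the mean and standard error over settings. A minimal sketch of one setting's contribution, with a hypothetical helper name (the package itself uses scipy's `hamming` scaled by the qubit count):

```python
import numpy as np


def rm_fidelity_contribution(p_noisy: dict, p_ideal: dict, num_qubits: int) -> float:
    """One random-measurement setting's contribution:
    2^N * sum_{s, s'} (-2)^(-D(s, s')) * P_noisy(s) * P_ideal(s'),
    where both distributions are taken in that setting's rotated basis.
    A single contribution is not bounded by 1; only the average over
    many random settings converges to the state fidelity."""
    total = 0.0
    for sa, pa in p_noisy.items():
        for sb, pb in p_ideal.items():
            d = sum(a != b for a, b in zip(sa, sb))  # Hamming distance between bitstrings
            total += (-2.0) ** (-d) * pa * pb
    return (2**num_qubits) * total


# Intended use, mirroring the loop above:
# fid_rm = [rm_fidelity_contribution(p_u, p_u_ideal, n) for p_u, p_u_ideal in settings]
# fidelity, error = np.mean(fid_rm), np.std(fid_rm) / np.sqrt(len(fid_rm))
```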
@@ -153,11 +163,12 @@ def fidelity_ghz_randomized_measurements(
  exponent = hamming(list(sa), list(sb)) * num_qubits
  p_sum.append(np.power(-2, -exponent) * probabilities_sample[sa] * ideal_probabilities[u][sb])
  fid_rm_rem.append((2**num_qubits) * sum(p_sum))
- fidelities = fidelities | {"mean_rem": np.mean(fid_rm_rem), "std_rem": np.std(fid_rm_rem) / np.sqrt(num_rms)}
- return fidelities
+ values = values | {"fidelity_rem": np.mean(fid_rm_rem)}
+ uncertainties = uncertainties | {"fidelity_rem": np.std(fid_rm_rem) / np.sqrt(num_rms)}
+ return values, uncertainties
 
 
- def fidelity_ghz_coherences(dataset: xr.Dataset, qubit_layout: List[int]) -> List[float]:
+ def fidelity_ghz_coherences(dataset: xr.Dataset, qubit_layout: List[int]) -> list[Any]:
  """
  Estimates the GHZ state fidelity based on the multiple quantum coherences method based on [Mooney, 2021]
 
@@ -168,12 +179,13 @@ def fidelity_ghz_coherences(dataset: xr.Dataset, qubit_layout: List[int]) -> Lis
  The subset of system-qubits used in the protocol
 
  Returns:
- List[int]: The ghz fidelity or, if rem=True, fidelity and readout error mitigated fidelity
+ dict[str, dict[str, Any]]: The ghz fidelity or, if rem=True, fidelity and readout error mitigated fidelity
  """
 
  num_qubits = len(qubit_layout)
  phases = [np.pi * i / (num_qubits + 1) for i in range(2 * num_qubits + 2)]
- transpiled_circuits = dataset.attrs["transpiled_circuits"][f"{str(qubit_layout)}"][tuple(qubit_layout)]
+ idx = BenchmarkObservationIdentifier(qubit_layout).string_identifier
+ transpiled_circuits = dataset.attrs["transpiled_circuits"][idx]
  num_shots = dataset.attrs["shots"]
  num_circuits = len(transpiled_circuits)
 
@@ -181,16 +193,7 @@ def fidelity_ghz_coherences(dataset: xr.Dataset, qubit_layout: List[int]) -> Lis
  complex_coefficients = np.exp(1j * num_qubits * np.array(phases))
 
  # Loading the counts from the dataset
- counts = xrvariable_to_counts(dataset, str(qubit_layout), num_circuits)
- # for u in range(num_circuits):
- # counts.append(
- # dict(
- # zip(
- # list(dataset[f"{str(qubit_layout)}_state_{u}"].data),
- # dataset[f"{str(qubit_layout)}_counts_{u}"].data,
- # )
- # )
- # )
+ counts = xrvariable_to_counts(dataset, f"{idx}", num_circuits)
  all_zero_probability_list = [] # An ordered list for storing the probabilities of returning to the |00..0> state
  for count in counts[1:]:
  if "0" * num_qubits in count.keys():
@@ -212,16 +215,7 @@ def fidelity_ghz_coherences(dataset: xr.Dataset, qubit_layout: List[int]) -> Lis
 
  # Same procedure for error mitigated data
  if dataset.attrs["rem"]:
- probs_mit = xrvariable_to_counts(dataset, f"{str(qubit_layout)}_rem", num_circuits)
- # for u in range(num_circuits):
- # probs_mit.append(
- # dict(
- # zip(
- # list(dataset[f"{str(qubit_layout)}_rem_state_{u}"].data),
- # dataset[f"{str(qubit_layout)}_rem_counts_{u}"].data,
- # )
- # )
- # )
+ probs_mit = xrvariable_to_counts(dataset, f"{idx}_rem", num_circuits)
  all_zero_probability_list_mit = []
  for prob in probs_mit[1:]:
  if "0" * num_qubits in prob.keys():
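For context, the coherences routine above records, for each phase phi_j = pi * j / (N + 1), the probability of returning to |0...0> after the GHZ preparation, a collective phase rotation, and the inverse preparation. The N-th order coherence amplitude I_N is then read off from a discrete Fourier transform of that signal (the `complex_coefficients` defined earlier) and combined with the measured GHZ populations to bound the fidelity. A minimal sketch of the Fourier step, under one common normalization convention (conventions vary):

```python
import numpy as np


def nth_order_coherence(return_probs, num_qubits: int) -> float:
    """Amplitude I_N of the N-th order coherence, estimated from the
    probabilities of returning to |0...0> at phases phi_j = pi*j/(N+1),
    j = 0, ..., 2N+1. Normalization: divide by the number of phases."""
    phases = np.pi * np.arange(2 * num_qubits + 2) / (num_qubits + 1)
    probs = np.asarray(return_probs, dtype=float)
    return float(np.abs(np.sum(probs * np.exp(1j * num_qubits * phases))) / len(phases))


# Sanity check: the ideal GHZ signal S(phi) = (1 + cos(N*phi)) / 2 gives I_N = 1/4
N = 3
phis = np.pi * np.arange(2 * N + 2) / (N + 1)
print(nth_order_coherence((1 + np.cos(N * phis)) / 2, N))  # ~0.25
```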
@@ -258,26 +252,28 @@ def fidelity_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
  for qubit_layout in qubit_layouts:
  if routine == "randomized_measurements":
  ideal_simulator = Aer.get_backend("statevector_simulator")
- for qubit_layout in qubit_layouts:
- ideal_probabilities = []
- all_circuits = run.dataset.attrs["transpiled_circuits"][str(qubit_layout)][tuple(qubit_layout)]
- for qc in all_circuits:
- qc_copy = qc.copy()
- qc_copy.remove_final_measurements()
- deflated_qc = reduce_to_active_qubits(qc_copy, backend_name)
- ideal_probabilities.append(
- dict(sorted(ideal_simulator.run(deflated_qc).result().get_counts().items()))
+ ideal_probabilities = []
+ idx = BenchmarkObservationIdentifier(qubit_layout).string_identifier
+ all_circuits = run.dataset.attrs["transpiled_circuits"][idx]
+ for qc in all_circuits:
+ qc_copy = qc.copy()
+ qc_copy.remove_final_measurements()
+ deflated_qc = reduce_to_active_qubits(qc_copy, backend_name)
+ ideal_probabilities.append(dict(sorted(ideal_simulator.run(deflated_qc).result().get_counts().items())))
+ values, uncertainties = fidelity_ghz_randomized_measurements(
+ dataset, qubit_layout, ideal_probabilities, len(qubit_layout)
+ )
+ observation_list.extend(
+ [
+ BenchmarkObservation(
+ name=key,
+ identifier=BenchmarkObservationIdentifier(qubit_layout),
+ value=value,
+ uncertainty=uncertainties[key],
  )
- observation_list.extend(
- [
- BenchmarkObservation(
- name=key, identifier=BenchmarkObservationIdentifier(qubit_layout), value=value
- )
- for key, value in fidelity_ghz_randomized_measurements(
- dataset, qubit_layout, ideal_probabilities, len(qubit_layout)
- ).items()
- ]
- )
+ for key, value in values.items()
+ ]
+ )
  else: # default routine == "coherences":
  fidelity = fidelity_ghz_coherences(dataset, qubit_layout)
  observation_list.extend(
@@ -288,13 +284,13 @@ def fidelity_analysis(run: BenchmarkRunResult) -> BenchmarkAnalysisResult:
  ]
  )
  if len(fidelity) > 1:
-
  observation_list.append(
  BenchmarkObservation(
  name="fidelity_rem", identifier=BenchmarkObservationIdentifier(qubit_layout), value=fidelity[1]
  )
  )
- return BenchmarkAnalysisResult(dataset=dataset, observations=observation_list)
+ plots = {"All layout fidelities": plot_fidelities(observation_list, qubit_layouts)}
+ return BenchmarkAnalysisResult(dataset=dataset, observations=observation_list, plots=plots)
 
 
  def generate_ghz_linear(num_qubits: int) -> QuantumCircuit:
@@ -443,18 +439,23 @@ def extract_fidelities(cal_url: str, qubit_layout: List[int]) -> Tuple[List[List
  return list_couplings, list_fids
 
 
- def get_edges(coupling_map, qubit_layout, edges_cal=None, fidelities_cal=None):
+ def get_edges(
+ coupling_map: CouplingMap,
+ qubit_layout: List[int],
+ edges_cal: Optional[List[List[int]]] = None,
+ fidelities_cal: Optional[List[float]] = None,
+ ):
  """Produces a networkx.Graph from coupling map fidelity information, with edges given by couplings
  and edge weights given by fidelities
 
  Args:
- coupling_map: List[int]
+ coupling_map (CouplingMap):
  The list pairs on which 2-qubit gates are natively supported
- qubit_layout: List[int]
+ qubit_layout (List[int]):
  The subset of system-qubits used in the protocol, indexed from 0
- edges_cal: List[int]
- Same as the coupling map, but only with connections that have CZ fidelities in the calibration data
- fidelities_cal: List[float]
+ edges_cal (Optional[List[List[int]]]):
+ A coupling map of qubit pairs that have CZ fidelities in the calibration data
+ fidelities_cal (Optional[List[float]]):
  A list of CZ fidelities ordered in the same way as edges_cal
 
  Returns:
@@ -467,16 +468,18 @@ def get_edges(coupling_map, qubit_layout, edges_cal=None, fidelities_cal=None):
  if edge[0] in qubit_layout and edge[1] in qubit_layout:
  edges_patch.append([edge[0], edge[1]])
 
- if fidelities_cal is None:
- weights = np.ones(len(edges_patch))
- else:
- fidelities_cal = np.minimum(np.array(fidelities_cal), np.ones(len(fidelities_cal))) # get rid of > 1 fidelities
+ if fidelities_cal is not None:
+ fidelities_cal = list(
+ np.minimum(np.array(fidelities_cal), np.ones(len(fidelities_cal)))
+ ) # get rid of > 1 fidelities
  fidelities_patch = []
  for edge in edges_patch:
- for idx, edge_2 in enumerate(edges_cal):
+ for idx, edge_2 in enumerate(cast(List[int], edges_cal)):
  if edge == edge_2:
  fidelities_patch.append(fidelities_cal[idx])
  weights = -np.log(np.array(fidelities_patch))
+ else:
+ weights = np.ones(len(edges_patch))
  graph = Graph()
  for idx, edge in enumerate(edges_patch):
  graph.add_edge(*edge, weight=weights[idx])
@@ -485,7 +488,7 @@ def get_edges(coupling_map, qubit_layout, edges_cal=None, fidelities_cal=None):
  return graph
 
 
- def get_cx_map(qubit_layout, graph) -> list[list[int]]:
+ def get_cx_map(qubit_layout: List[int], graph: networkx.Graph) -> list[list[int]]:
  """Calculate the cx_map based on participating qubits and the 2QB gate fidelities between them.
 
  Uses networkx graph algorithms to calculate the minimal spanning tree of the subgraph defined by qubit_layout.
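For context, `get_edges` above weights each usable coupling by -log(CZ fidelity), so a minimum spanning tree over those weights is the tree that maximizes the product of CZ fidelities connecting the layout; `get_cx_map` then derives the two-qubit gate fan-out order from that tree. A minimal sketch of the idea with plain networkx and toy fidelities (not calibration data):

```python
import networkx as nx
import numpy as np

# Toy coupling map and CZ fidelities (illustrative values only)
couplings = [(0, 1), (1, 2), (2, 3), (0, 3)]
fidelities = [0.995, 0.980, 0.990, 0.970]

graph = nx.Graph()
for (a, b), f in zip(couplings, fidelities):
    # -log maps "maximize the product of fidelities" onto "minimize the sum of weights"
    graph.add_edge(a, b, weight=-np.log(min(f, 1.0)))

tree = nx.minimum_spanning_tree(graph)  # best tree over which to spread entanglement
print(sorted(tree.edges()))  # [(0, 1), (1, 2), (2, 3)] for these toy numbers
```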
@@ -526,8 +529,56 @@ def get_cx_map(qubit_layout, graph) -> list[list[int]]:
  return cx_map
 
 
+ def plot_fidelities(observations: List[BenchmarkObservation], qubit_layouts: List[List[int]]) -> Figure:
+ """Plots all the fidelities stored in the observations into a single plot of fidelity vs. number of qubits
+
+ Parameters
+ ----------
+ observations: List[BenchmarkObservation]
+ A list of Observations, each assumed to be a fidelity
+ qubit_layouts
+ The list of qubit layouts as given by the user. This is used to name the layouts in order for identification
+ in the plot.
+ Returns
+ -------
+ fig :Figure
+ The figure object with the fidelity plot.
+ """
+ fig, ax = plt.subplots()
+ layout_short = {str(qubit_layout): f" L{i}" for i, qubit_layout in enumerate(qubit_layouts)}
+ recorded_labels = []
+ for i, obs in enumerate(observations):
+ label = "With REM" if "rem" in obs.name else "Unmitigated"
+ if label in recorded_labels:
+ label = "_nolegend_"
+ else:
+ recorded_labels.append(label)
+ x = sum(c.isdigit() for c in obs.identifier.string_identifier)
+ y = obs.value
+ ax.errorbar(
+ x,
+ y,
+ yerr=obs.uncertainty,
+ capsize=4,
+ color="orange" if "rem" in obs.name else "cornflowerblue",
+ label=label,
+ fmt="o",
+ alpha=1,
+ markersize=5,
+ )
+ ax.annotate(layout_short[obs.identifier.string_identifier], (x, y))
+ ax.axhline(0.5, linestyle="--", color="black", label="GME threshold")
+ # ax.set_ylim([0,1])
+ ax.set_title("GHZ fidelities of all qubit layouts")
+ ax.set_xlabel("Number of qubits")
+ ax.set_ylabel("Fidelity")
+ ax.legend(framealpha=0.5)
+ plt.close()
+ return fig
+
+
  class GHZBenchmark(Benchmark):
- """The GHZ Benchmark estimates the quality of generated Greenberger-Horne-Zeilinger states"""
+ """The GHZ Benchmark estimates the quality of generated Greenberger–Horne–Zeilinger states"""
 
  analysis_function = staticmethod(fidelity_analysis)
  name = "ghz"
@@ -542,28 +593,27 @@ class GHZBenchmark(Benchmark):
  super().__init__(backend, configuration)
 
  self.state_generation_routine = configuration.state_generation_routine
- self.choose_qubits_routine = configuration.choose_qubits_routine
+ # self.choose_qubits_routine = configuration.choose_qubits_routine
  if configuration.custom_qubits_array:
  self.custom_qubits_array = configuration.custom_qubits_array
  else:
- self.custom_qubits_array = list(set(chain(*backend.coupling_map)))
+ self.custom_qubits_array = [list(set(chain(*backend.coupling_map)))]
+ self.qubit_counts: Sequence[int] | List[int]
  if not configuration.qubit_counts:
  self.qubit_counts = [len(layout) for layout in self.custom_qubits_array]
  else:
  if any(np.max(configuration.qubit_counts) > [len(layout) for layout in self.custom_qubits_array]):
  raise ValueError("The maximum given qubit count is larger than the size of the smallest qubit layout.")
  self.qubit_counts = configuration.qubit_counts
+ # self.layout_idx_mapping = {str(qubit_layout): idx for idx, qubit_layout in enumerate(self.custom_qubits_array)}
 
  self.qiskit_optim_level = configuration.qiskit_optim_level
  self.optimize_sqg = configuration.optimize_sqg
-
  self.fidelity_routine = configuration.fidelity_routine
  self.num_RMs = configuration.num_RMs
-
  self.rem = configuration.rem
  self.mit_shots = configuration.mit_shots
  self.cal_url = configuration.cal_url
-
  self.timestamp = strftime("%Y%m%d-%H%M%S")
 
  # @staticmethod
@@ -588,10 +638,11 @@ class GHZBenchmark(Benchmark):
  """
  # num_qubits = len(qubit_layout)
  fixed_coupling_map = set_coupling_map(qubit_layout, self.backend, "fixed")
-
+ idx = BenchmarkObservationIdentifier(qubit_layout).string_identifier
+ ghz_native_transpiled: List[QuantumCircuit]
  if routine == "naive":
  ghz = generate_ghz_linear(qubit_count)
- self.untranspiled_circuits[str(qubit_layout)].update({qubit_count: ghz})
+ self.untranspiled_circuits[idx].update({qubit_count: ghz})
  ghz_native_transpiled, _ = perform_backend_transpilation(
  [ghz],
  self.backend,
@@ -608,7 +659,7 @@ class GHZBenchmark(Benchmark):
  else:
  graph = get_edges(self.backend.coupling_map, qubit_layout)
  ghz, _ = generate_ghz_spanning_tree(graph, qubit_layout, qubit_count)
- self.untranspiled_circuits[str(qubit_layout)].update({qubit_count: ghz})
+ self.untranspiled_circuits[idx].update({qubit_count: ghz})
  ghz_native_transpiled, _ = perform_backend_transpilation(
  [ghz],
  self.backend,
@@ -632,11 +683,11 @@ class GHZBenchmark(Benchmark):
  if ghz_native_transpiled[0].depth() == ghz_native_transpiled[1].depth():
  index_min_2q = np.argmin([c.count_ops()["cz"] for c in ghz_native_transpiled])
  final_ghz = ghz_native_transpiled[index_min_2q]
- self.untranspiled_circuits[str(qubit_layout)].update({qubit_count: ghz_log[index_min_2q]})
+ self.untranspiled_circuits[idx].update({qubit_count: ghz_log[index_min_2q]})
  else:
  index_min_depth = np.argmin([c.depth() for c in ghz_native_transpiled])
  final_ghz = ghz_native_transpiled[index_min_depth]
- self.untranspiled_circuits[str(qubit_layout)].update({qubit_count: ghz_log[index_min_depth]})
+ self.untranspiled_circuits[idx].update({qubit_count: ghz_log[index_min_depth]})
  return final_ghz[0]
 
  def generate_coherence_meas_circuits(self, qubit_layout: List[int], qubit_count: int) -> List[QuantumCircuit]:
@@ -654,7 +705,8 @@ class GHZBenchmark(Benchmark):
  A list of transpiled quantum circuits to be measured
  """
 
- qc = self.untranspiled_circuits[str(qubit_layout)][qubit_count]
+ idx = BenchmarkObservationIdentifier(qubit_layout).string_identifier
+ qc = self.untranspiled_circuits[idx][qubit_count]
  qc_list = [qc.copy()]
 
  qc.remove_final_measurements()
@@ -679,7 +731,7 @@ class GHZBenchmark(Benchmark):
  qiskit_optim_level=self.qiskit_optim_level,
  optimize_sqg=self.optimize_sqg,
  )
- self.untranspiled_circuits[str(qubit_layout)].update({qubit_count: qc_list})
+ self.untranspiled_circuits[idx].update({qubit_count: qc_list})
  return qc_list_transpiled
 
  def generate_readout_circuit(self, qubit_layout, qubit_count):
@@ -698,33 +750,31 @@ class GHZBenchmark(Benchmark):
  A list of transpiled quantum circuits to be measured
  """
  # Generate the list of circuits
- self.untranspiled_circuits[str(qubit_layout)] = {}
- self.transpiled_circuits[str(qubit_layout)] = {}
+ idx = BenchmarkObservationIdentifier(qubit_layout).string_identifier
+ self.untranspiled_circuits[idx] = {}
+ self.transpiled_circuits[idx] = {}
 
  qcvv_logger.info(f"Now generating a {len(qubit_layout)}-qubit GHZ state on qubits {qubit_layout}")
  transpiled_ghz = self.generate_native_ghz(qubit_layout, qubit_count, self.state_generation_routine)
 
  if self.fidelity_routine == "randomized_measurements":
  all_circuits_list, _ = append_rms(transpiled_ghz, cast(int, self.num_RMs), self.backend)
- all_circuits_dict = {tuple(qubit_layout): all_circuits_list}
  elif self.fidelity_routine == "coherences":
  all_circuits_list = self.generate_coherence_meas_circuits(qubit_layout, qubit_count)
- all_circuits_dict = {tuple(qubit_layout): all_circuits_list}
  else:
  all_circuits_list = transpiled_ghz
- all_circuits_dict = {tuple(qubit_layout): all_circuits_list}
 
- self.transpiled_circuits[str(qubit_layout)].update(all_circuits_dict)
+ self.transpiled_circuits.update({idx: all_circuits_list})
  return all_circuits_list
 
- def add_configuration_to_dataset(self, dataset): # CHECK
+ def add_configuration_to_dataset(self, dataset: xr.Dataset): # CHECK
  """
- Creates an xarray.Dataset and adds the circuits and configuration metadata to it
+ Creates a xarray.Dataset and adds the circuits and configuration metadata to it.
 
  Args:
- self: Source class
+ dataset (xr.Dataset):
  Returns:
- dataset: xarray.Dataset to be used for further data storage
+ xr.Dataset: dataset to be used for further data storage
  """
 
  for key, value in self.configuration:
@@ -743,28 +793,38 @@ class GHZBenchmark(Benchmark):
  aux_custom_qubits_array = cast(List[List[int]], self.custom_qubits_array).copy()
  dataset = xr.Dataset()
 
+ # Submit all
+ all_jobs: Dict = {}
  for qubit_layout in aux_custom_qubits_array:
+ Id = BenchmarkObservationIdentifier(qubit_layout)
+ idx = Id.string_identifier
+ # for qubit_count in self.qubit_counts[idx]:
  qubit_count = len(qubit_layout)
  circuits = self.generate_readout_circuit(qubit_layout, qubit_count)
-
- qcvv_logger.info(f"Retrieving results")
- t_start = time()
- job = backend.run(circuits, shots=self.shots)
- counts = job.result().get_counts()
- print(f"\t Getting counts took {time()-t_start:.2f} sec")
-
- # coordinates = [(f"qubit_layout", [str(qubit_layout)])]
- identifier = str(qubit_layout)
- qcvv_logger.info(f"Adding counts to dataset")
- dataset, _ = add_counts_to_dataset(counts, identifier, dataset)
+ transpiled_circuit_dict = {tuple(qubit_layout): circuits}
+ all_jobs[idx], _ = submit_execute(
+ transpiled_circuit_dict,
+ backend,
+ self.shots,
+ self.calset_id,
+ max_gates_per_batch=self.max_gates_per_batch,
+ )
+ # Retrieve all
+ qcvv_logger.info(f"Retrieving counts and adding counts to dataset...")
+ for qubit_layout in aux_custom_qubits_array:
+ # for qubit_count in self.qubit_counts[idx]:
+ Id = BenchmarkObservationIdentifier(qubit_layout)
+ idx = Id.string_identifier
+ qubit_count = len(qubit_layout)
+ counts, _ = retrieve_all_counts(all_jobs[idx])
+ dataset, _ = add_counts_to_dataset(counts, idx, dataset)
  if self.rem:
  qcvv_logger.info(f"Applying readout error mitigation")
- rem_results, _ = apply_readout_error_mitigation(
- backend, circuits, job.result().get_counts(), self.mit_shots
- )
+ circuits = self.transpiled_circuits[idx]
+ rem_results, _ = apply_readout_error_mitigation(backend, circuits, counts, self.mit_shots)
  rem_results_dist = [counts_mit.nearest_probability_distribution() for counts_mit in rem_results]
- qcvv_logger.info(f"Adding REM results to dataset")
- dataset, _ = add_counts_to_dataset(rem_results_dist, f"{identifier}_rem", dataset)
+ dataset, _ = add_counts_to_dataset(rem_results_dist, f"{idx}_rem", dataset)
+
  self.add_configuration_to_dataset(dataset)
  return dataset
 
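The execute changes above replace the per-layout run-and-wait loop with a submit-all-then-retrieve-all pattern built on the package's submit_execute and retrieve_all_counts helpers, so jobs for different qubit layouts can sit in the queue concurrently. A minimal sketch of the same pattern using a plain Qiskit Aer simulator as a stand-in backend (the IQM-specific helpers, calibration-set id, and batching options are not reproduced here):

```python
from qiskit import QuantumCircuit
from qiskit_aer import AerSimulator


def ghz_circuit(n: int) -> QuantumCircuit:
    """Linear GHZ preparation, used purely as a stand-in workload."""
    qc = QuantumCircuit(n)
    qc.h(0)
    for i in range(n - 1):
        qc.cx(i, i + 1)
    qc.measure_all()
    return qc


backend = AerSimulator()
layouts = {"0_1_2": 3, "0_1_2_3": 4}  # hypothetical layout identifiers

# Submit all jobs first so they can be queued/executed concurrently...
jobs = {idx: backend.run(ghz_circuit(n), shots=1024) for idx, n in layouts.items()}

# ...then retrieve the counts in a second pass.
all_counts = {idx: job.result().get_counts() for idx, job in jobs.items()}
print({idx: dict(sorted(c.items())) for idx, c in all_counts.items()})
```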
@@ -788,9 +848,10 @@ class GHZConfiguration(BenchmarkConfigurationBase):
  custom_qubits_array (Optional[Sequence[Sequence[int]]]): A sequence (e.g., Tuple or List) of sequences of
  physical qubit layouts, as specified by integer labels, where the benchmark is meant to be run.
  * If None, takes all qubits specified in the backend coupling map.
- qubit_counts (Optional[Sequence[int]]): A sequence (e.g., Tuple or List) of integers denoting number of qubits
+ qubit_counts (Optional[Sequence[int]]): CURRENTLY NOT SUPPORTED, A sequence (e.g., Tuple or List) of integers
+ denoting number of qubits
  for which the benchmark is meant to be run. The largest qubit count provided here has to be smaller than the
- smallest given qubit layout.
+ smalles given qubit layout.
  qiskit_optim_level (int): The optimization level used for transpilation to backend architecture.
  * Default: 3
  optimize_sqg (bool): Whether consecutive single qubit gates are optimized for reduced gate count via
@@ -815,13 +876,13 @@ class GHZConfiguration(BenchmarkConfigurationBase):
 
  benchmark: Type[Benchmark] = GHZBenchmark
  state_generation_routine: str = "tree"
- choose_qubits_routine: str = "custom"
- custom_qubits_array: Optional[List[List[int]]] = None
- qubit_counts: Optional[List[int]] = None
+ custom_qubits_array: Optional[Sequence[Sequence[int]]] = None
+ qubit_counts: Optional[Sequence[int]] = None
+ shots: int = 2**10
  qiskit_optim_level: int = 3
  optimize_sqg: bool = True
  fidelity_routine: str = "coherences"
- num_RMs: Optional[int] = 24
+ num_RMs: Optional[int] = 100
  rem: bool = True
  mit_shots: int = 1_000
  cal_url: Optional[str] = None
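The defaults above translate into a fairly small configuration object in practice. A hedged sketch of constructing one follows; the import path and keyword initialization are assumptions based on this diff, and fields inherited from BenchmarkConfigurationBase (such as the calibration-set id or batching limits seen in the execute hunk) are left at whatever the base class provides:

```python
# Assumed import path; adjust to wherever GHZConfiguration lives in the installed package.
from iqm.benchmarks.entanglement.ghz import GHZConfiguration

config = GHZConfiguration(
    state_generation_routine="tree",                 # default shown above
    custom_qubits_array=[[0, 1, 2], [0, 1, 2, 3]],   # two example layouts
    shots=2**10,
    fidelity_routine="coherences",                   # or "randomized_measurements" (uses num_RMs)
    rem=True,
    mit_shots=1_000,
)
# The configuration is then passed to the benchmark together with a backend object,
# e.g. GHZBenchmark(backend, config), matching the constructor shown earlier in this diff.
```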
iqm/benchmarks/utils.py CHANGED
@@ -138,37 +138,6 @@ def count_native_gates(
  return avg_native_operations
 
 
- # DD code to be adapted to Pulla version once released
- # @timeit
- # def execute_with_dd(
- # backend: IQMBackendBase, transpiled_circuits: List[QuantumCircuit], shots: int, dd_strategy: DDStrategy
- # ) -> List[Dict[str, int]]:
- # """Executes a list of transpiled quantum circuits with dynamical decoupling according to a specified strategy
- # Args:
- # backend (IQMBackendBase):
- # transpiled_circuits (List[QuantumCircuit]):
- # shots (int):
- # dd_strategy (DDStrategy):
- #
- # Returns:
- # List[Dict[str, int]]: The counts of the execution with dynamical decoupling
- # """
- # warnings.warn("Suppressing INFO messages from Pulla with logging.disable(sys.maxsize) - update if problematic!")
- # logging.disable(sys.maxsize)
- #
- # pulla_obj = Pulla(cocos_url=iqm_url)
- #
- # execution_results = dd.execute_with_dd(
- # pulla_obj,
- # backend=backend,
- # circuits=transpiled_circuits,
- # shots=shots,
- # dd_strategy=dd_strategy,
- # )
- #
- # return execution_results
-
-
  # pylint: disable=too-many-branches
  def get_iqm_backend(backend_label: str) -> IQMBackendBase:
  """Get the IQM backend object from a backend name (str).
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: iqm-benchmarks
- Version: 1.7
+ Version: 1.8
  Summary: A package for implementation of Quantum Characterization, Verification and Validation (QCVV) techniques on IQM's hardware at gate level abstraction
  Author-email: IQM Finland Oy <developers@meetiqm.com>, Aniket Rath <aniket.rath@meetiqm.com>, Jami Rönkkö <jami@meetiqm.com>, Pedro Figueroa Romero <pedro.romero@meetiqm.com>, Vicente Pina Canelles <vicente.pina@meetiqm.com>, Raphael Brieger <raphael.brieger@meetiqm.com>, Stefan Seegerer <stefan.seegerer@meetiqm.com>
  Project-URL: Homepage, https://github.com/iqm-finland/iqm-benchmarks
@@ -48,15 +48,22 @@ Requires-Dist: pytest-pylint==0.21.0; extra == "test"
 
  # IQM Benchmarks
 
- The IQM Benchmarks is a suite of quantum characterization, verification, and validation (QCVV) tools for quantum computing. It is designed to be a comprehensive tool for benchmarking quantum hardware. The suite is designed to be modular, allowing users to easily add new benchmarks and customize existing ones. The suite is designed to be easy to use, with a simple API that allows users to run benchmarks with a single command.
+ IQM Benchmarks is a suite of Quantum Characterization, Verification, and Validation (QCVV) tools for quantum computing. It is designed to be a comprehensive tool for benchmarking quantum hardware. The suite is designed to be modular, allowing users to easily add new benchmarks and customize existing ones. The suite is designed to be easy to use, with a simple API that allows users to run benchmarks with a single command.
 
 
  Below is a list of the benchmarks currently available in the suite:
- * Randomized Benchmarking: A suite of randomized benchmarking protocols for characterizing the performance of quantum gates (Clifford Randomized Benchmarking, Mirror Randomized Benchmarking, Interleaved Randomized Benchmarking).
- * Quantum Volume: A benchmark for characterizing the performance of quantum computers.
- * Q-Score: A benchmark that estimates the size of combinatorial optimization problems a given number of qubits can execute with meaningful results.
- * GHZ State Benchmarking: A benchmark for characterizing the performance of multi-qubit entangled states.
-
+ * Gates / Layers:
+ - Standard Clifford Randomized Benchmarking [[Phys. Rev. A 85, 042311](https://journals.aps.org/pra/abstract/10.1103/PhysRevA.85.042311) (2012)]
+ - Interleaved Randomized Benchmarking [[Phys. Rev. Lett. 109, 080505](https://doi.org/10.1103/PhysRevLett.109.080505) (2012)]
+ - Compressive Gate Set Tomography [[PRX Quantum 4, 010325](https://journals.aps.org/prxquantum/abstract/10.1103/PRXQuantum.4.010325) (2023)]
+ - Mirror Randomized Benchmarking [[Phys. Rev. Lett. 129, 150502](https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.129.150502) (2022)]
+ * Holistic:
+ - Quantum Volume [[Phys. Rev. A 100, 032328](https://doi.org/10.1103/PhysRevA.100.032328) (2019)]
+ - CLOPS [[arXiv:2110.14108 [quant-ph]](https://arxiv.org/abs/2110.14108) (2021)]
+ * Entanglement:
+ - GHZ State Fidelity [[arXiv:0712.0921 [quant-ph]](https://arxiv.org/abs/0712.0921) (2007)]
+ * Optimization:
+ - Q-Score [[IEEE Trans. Quantum Eng., 2](https://doi.org/10.1109/TQE.2021.3090207) (2021)]
 
  The project is split into different benchmarks, all sharing the `Benchmark` class or the legacy `BenchmarkBase` class. Each individual benchmark takes as an argument their own `BenchmarkConfigurationBase` class. All the (legacy) benchmarks executed at once are wrapped by the `BenchmarkExperiment` class, which handles dependencies among the benchmarks, storing the results, producing the plots...
62
69