iqm-benchmarks 2.47-py3-none-any.whl → 2.49-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- iqm/benchmarks/coherence/coherence.py +12 -5
- iqm/benchmarks/compressive_gst/compressive_gst.py +14 -5
- iqm/benchmarks/entanglement/ghz.py +8 -2
- iqm/benchmarks/entanglement/graph_states.py +6 -3
- iqm/benchmarks/optimization/qscore.py +11 -4
- iqm/benchmarks/quantum_volume/quantum_volume.py +8 -3
- iqm/benchmarks/randomized_benchmarking/clifford_rb/clifford_rb.py +10 -1
- iqm/benchmarks/randomized_benchmarking/direct_rb/direct_rb.py +10 -3
- iqm/benchmarks/randomized_benchmarking/interleaved_rb/interleaved_rb.py +11 -1
- iqm/benchmarks/randomized_benchmarking/mirror_rb/mirror_rb.py +8 -1
- {iqm_benchmarks-2.47.dist-info → iqm_benchmarks-2.49.dist-info}/METADATA +5 -4
- {iqm_benchmarks-2.47.dist-info → iqm_benchmarks-2.49.dist-info}/RECORD +16 -16
- mGST/algorithm.py +1 -1
- {iqm_benchmarks-2.47.dist-info → iqm_benchmarks-2.49.dist-info}/WHEEL +0 -0
- {iqm_benchmarks-2.47.dist-info → iqm_benchmarks-2.49.dist-info}/licenses/LICENSE +0 -0
- {iqm_benchmarks-2.47.dist-info → iqm_benchmarks-2.49.dist-info}/top_level.txt +0 -0
iqm/benchmarks/coherence/coherence.py CHANGED

@@ -422,6 +422,8 @@ class CoherenceBenchmark(Benchmark):
  ) -> xr.Dataset:
  """Executes the benchmark."""
  self.execution_timestamp = strftime("%Y%m%d-%H%M%S")
+ total_submit: float = 0
+ total_retrieve: float = 0

  dataset = xr.Dataset()
  self.add_all_meta_to_dataset(dataset)

@@ -457,7 +459,7 @@ class CoherenceBenchmark(Benchmark):
  qcvv_logger.debug(f"Executing on {self.coherence_exp}.")
  qcvv_logger.setLevel(logging.WARNING)

- jobs,
+ jobs, time_submit = submit_execute(
  sorted_transpiled_qc_list,
  self.backend,
  self.shots,

@@ -466,9 +468,11 @@ class CoherenceBenchmark(Benchmark):
  max_circuits_per_batch=self.configuration.max_circuits_per_batch,
  circuit_compilation_options=self.circuit_compilation_options,
  )
+ total_submit += time_submit

  qcvv_logger.setLevel(logging.INFO)
- execution_results = retrieve_all_counts(jobs)
+ execution_results, time_retrieve = retrieve_all_counts(jobs)
+ total_retrieve += time_retrieve
  identifier = BenchmarkObservationIdentifier(qubit_set)
  dataset, _ = add_counts_to_dataset(execution_results, identifier.string_identifier, dataset)
  dataset.attrs.update(

@@ -494,8 +498,7 @@ class CoherenceBenchmark(Benchmark):
  # Execute on the backend
  if self.configuration.use_dd is True:
  raise ValueError("Coherence benchmarks should not be run with dynamical decoupling.")
-
- jobs, _ = submit_execute(
+ jobs, time_submit = submit_execute(
  sorted_transpiled_qc_list,
  self.backend,
  self.shots,

@@ -504,8 +507,10 @@ class CoherenceBenchmark(Benchmark):
  max_circuits_per_batch=self.configuration.max_circuits_per_batch,
  circuit_compilation_options=self.circuit_compilation_options,
  )
+ total_submit += time_submit
  qcvv_logger.setLevel(logging.INFO)
- execution_results = retrieve_all_counts(jobs)
+ execution_results, time_retrieve = retrieve_all_counts(jobs)
+ total_retrieve += time_retrieve
  identifier = BenchmarkObservationIdentifier(group)
  dataset, _ = add_counts_to_dataset(execution_results, identifier.string_identifier, dataset)

@@ -524,6 +529,8 @@ class CoherenceBenchmark(Benchmark):
  self.transpiled_circuits.circuit_groups.append(
  CircuitGroup(name=self.coherence_exp, circuits=transpiled_qc_list)
  )
+ dataset.attrs["total_submit_time"] = total_submit
+ dataset.attrs["total_retrieve_time"] = total_retrieve

  return dataset
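Nearly every hunk above, and in the benchmark files that follow, applies the same 2.49 change: the run() methods accumulate the wall-clock time spent submitting circuits and retrieving counts, then store the totals as total_submit_time and total_retrieve_time attributes on the result dataset. A minimal, self-contained sketch of that pattern follows; the submit_execute and retrieve_all_counts functions here are hypothetical stubs standing in for the package's own helpers, which in 2.49 return a duration alongside their usual result.

    from time import time
    from typing import Dict, List, Tuple

    # Hypothetical stand-ins for the package's submit_execute and
    # retrieve_all_counts helpers; both now return their wall-clock duration.
    def submit_execute(circuits: Dict, backend: str, shots: int) -> Tuple[List[str], float]:
        t0 = time()
        jobs = [f"job-{layout}" for layout in circuits]  # pretend submission (backend/shots ignored)
        return jobs, time() - t0

    def retrieve_all_counts(jobs: List[str]) -> Tuple[List[dict], float]:
        t0 = time()
        counts = [{"00": 1024} for _ in jobs]  # pretend retrieval
        return counts, time() - t0

    total_submit: float = 0
    total_retrieve: float = 0

    jobs, time_submit = submit_execute({(0, 1): ["qc"]}, "fake-backend", shots=1024)
    total_submit += time_submit
    counts, time_retrieve = retrieve_all_counts(jobs)
    total_retrieve += time_retrieve

    # The accumulated totals end up as dataset attributes at the end of run():
    attrs = {"total_submit_time": total_submit, "total_retrieve_time": total_retrieve}
    print(attrs)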
iqm/benchmarks/compressive_gst/compressive_gst.py CHANGED

@@ -32,6 +32,7 @@ from typing import Any, Dict, List, Tuple, Type, Union
  import numpy as np
  from qiskit.circuit.library import CZGate, RGate
  import xarray as xr
+ from time import strftime

  from iqm.benchmarks.benchmark import BenchmarkConfigurationBase
  from iqm.benchmarks.benchmark_definition import Benchmark, BenchmarkObservationIdentifier, add_counts_to_dataset

@@ -242,7 +243,9 @@ class CompressiveGST(Benchmark):
  """
  The main GST execution routine
  """
-
+ self.execution_timestamp = strftime("%Y%m%d-%H%M%S")
+ total_submit: float = 0
+ total_retrieve: float = 0
  dataset = xr.Dataset()
  qcvv_logger.info(f"Generating {self.configuration.num_circuits} random GST circuits")

@@ -255,7 +258,7 @@ class CompressiveGST(Benchmark):
  transpiled_circuit_dict = {
  tuple(range(self.backend.num_qubits)): transpiled_circuits[str(self.qubit_layouts[0])].circuits
  }
- all_jobs_parallel,
+ all_jobs_parallel, time_submit = submit_execute(
  transpiled_circuit_dict,
  backend,
  self.configuration.shots,

@@ -264,15 +267,17 @@ class CompressiveGST(Benchmark):
  max_circuits_per_batch=self.configuration.max_circuits_per_batch,
  circuit_compilation_options=self.circuit_compilation_options,
  )
+ total_submit += time_submit
  # Retrieve
  qcvv_logger.info(f"Now executing the corresponding circuit batch")
- counts,
+ counts, time_retrieve = retrieve_all_counts(all_jobs_parallel)
+ total_retrieve += time_retrieve
  dataset, _ = add_counts_to_dataset(counts, f"parallel_results", dataset)
  else:
  all_jobs: Dict = {}
  for qubit_layout in self.qubit_layouts:
  transpiled_circuit_dict = {tuple(qubit_layout): transpiled_circuits[str(qubit_layout)].circuits}
- all_jobs[str(qubit_layout)],
+ all_jobs[str(qubit_layout)], time_submit = submit_execute(
  transpiled_circuit_dict,
  backend,
  self.configuration.shots,

@@ -280,14 +285,18 @@ class CompressiveGST(Benchmark):
  max_gates_per_batch=self.configuration.max_gates_per_batch,
  max_circuits_per_batch=self.configuration.max_circuits_per_batch,
  )
+ total_submit += time_submit
  # Retrieve all
  qcvv_logger.info(f"Now executing the corresponding circuit batch")
  for qubit_layout in self.qubit_layouts:
- counts,
+ counts, time_retrieve = retrieve_all_counts(all_jobs[str(qubit_layout)])
+ total_retrieve += time_retrieve
  dataset, _ = add_counts_to_dataset(counts, str(qubit_layout), dataset)

  self.circuits.benchmark_circuits = [transpiled_circuits, untranspiled_circuits]
  self.add_configuration_to_dataset(dataset)
+ dataset.attrs["total_submit_time"] = total_submit
+ dataset.attrs["total_retrieve_time"] = total_retrieve
  qcvv_logger.info(f"Run completed")
  return dataset
iqm/benchmarks/entanglement/ghz.py CHANGED

@@ -828,6 +828,8 @@ class GHZBenchmark(Benchmark):
  Executes the benchmark.
  """
  self.execution_timestamp = strftime("%Y%m%d-%H%M%S")
+ total_submit: float = 0
+ total_retrieve: float = 0
  aux_custom_qubits_array = cast(List[List[int]], self.custom_qubits_array).copy()
  dataset = xr.Dataset()

@@ -844,7 +846,7 @@ class GHZBenchmark(Benchmark):
  qubit_count = len(qubit_layout)
  circuit_group: CircuitGroup = self.generate_readout_circuit(qubit_layout, qubit_count)
  transpiled_circuit_dict = {tuple(qubit_layout): circuit_group.circuits}
- all_jobs[idx],
+ all_jobs[idx], time_submit = submit_execute(
  transpiled_circuit_dict,
  backend,
  self.shots,

@@ -853,6 +855,7 @@ class GHZBenchmark(Benchmark):
  max_circuits_per_batch=self.configuration.max_circuits_per_batch,
  circuit_compilation_options=self.circuit_compilation_options,
  )
+ total_submit += time_submit

  # Retrieve all
  for qubit_layout in aux_custom_qubits_array:

@@ -860,7 +863,8 @@ class GHZBenchmark(Benchmark):
  Id = BenchmarkObservationIdentifier(qubit_layout)
  idx = Id.string_identifier
  qubit_count = len(qubit_layout)
- counts,
+ counts, time_retrieve = retrieve_all_counts(all_jobs[idx])
+ total_retrieve += time_retrieve
  dataset, _ = add_counts_to_dataset(counts, idx, dataset)
  if self.rem:
  qcvv_logger.info(f"Applying readout error mitigation")

@@ -870,6 +874,8 @@ class GHZBenchmark(Benchmark):
  dataset, _ = add_counts_to_dataset(rem_results_dist, f"{idx}_rem", dataset)

  self.add_configuration_to_dataset(dataset)
+ dataset.attrs["total_submit_time"] = total_submit
+ dataset.attrs["total_retrieve_time"] = total_retrieve
  return dataset
iqm/benchmarks/entanglement/graph_states.py CHANGED

@@ -1151,6 +1151,8 @@ class GraphStateBenchmark(Benchmark):
  Executes the benchmark.
  """
  self.execution_timestamp = strftime("%Y%m%d-%H%M%S")
+ total_submit: float = 0
+ total_retrieve: float = 0

  dataset = xr.Dataset()
  self.add_all_meta_to_dataset(dataset)

@@ -1280,7 +1282,7 @@ class GraphStateBenchmark(Benchmark):
  graph_jobs, time_submit = submit_execute(
  sorted_transpiled_qc_list, backend, self.shots, self.calset_id, self.max_gates_per_batch
  )
-
+ total_submit += time_submit
  all_graph_submit_results.append(
  {
  "unprojected_qubits": unprojected_qubits[idx],

@@ -1295,7 +1297,7 @@ class GraphStateBenchmark(Benchmark):
  unprojected_qubits = job_dict["unprojected_qubits"]
  # Retrieve counts
  execution_results, time_retrieve = retrieve_all_counts(job_dict["jobs"], identifier=str(unprojected_qubits))
-
+ total_retrieve += time_retrieve
  # Retrieve all job meta data
  all_job_metadata = retrieve_all_job_metadata(job_dict["jobs"])

@@ -1320,7 +1322,8 @@ class GraphStateBenchmark(Benchmark):
  # if self.rem: TODO: add REM functionality

  qcvv_logger.info(f"Graph State benchmark experiment execution concluded !")
-
+ dataset.attrs["total_submit_time"] = total_submit
+ dataset.attrs["total_retrieve_time"] = total_retrieve
  return dataset
iqm/benchmarks/optimization/qscore.py CHANGED

@@ -741,6 +741,8 @@ class QScoreBenchmark(Benchmark):
  ) -> xr.Dataset:
  """Executes the benchmark."""
  self.execution_timestamp = strftime("%Y%m%d-%H%M%S")
+ total_submit: float = 0
+ total_retrieve: float = 0

  dataset = xr.Dataset()
  self.add_all_meta_to_dataset(dataset)

@@ -874,7 +876,7 @@ class QScoreBenchmark(Benchmark):

  sorted_transpiled_qc_list = {tuple(qubit_set): transpiled_qc}
  # Execute on the backend
- jobs,
+ jobs, time_submit = submit_execute(
  sorted_transpiled_qc_list,
  self.backend,
  self.shots,

@@ -883,13 +885,15 @@ class QScoreBenchmark(Benchmark):
  max_circuits_per_batch=self.configuration.max_circuits_per_batch,
  circuit_compilation_options=self.circuit_compilation_options,
  )
+ total_submit += time_submit
  qc_transpiled_list.append(transpiled_qc)
  qcvv_logger.setLevel(logging.INFO)
  instance_with_edges = set(range(self.num_instances)) - set(no_edge_instances)
  num_instances_with_edges = len(instance_with_edges)
  if self.REM:
+ counts_retrieved, time_retrieve = retrieve_all_counts(jobs)
  rem_counts = apply_readout_error_mitigation(
- backend, transpiled_qc,
+ backend, transpiled_qc, counts_retrieved, self.mit_shots
  )
  execution_results.extend(
  rem_counts[0][instance].nearest_probability_distribution()

@@ -897,8 +901,9 @@ class QScoreBenchmark(Benchmark):
  )
  # execution_results.append(rem_distribution)
  else:
-
-
+ counts_retrieved, time_retrieve = retrieve_all_counts(jobs)
+ execution_results.extend(counts_retrieved)
+ total_retrieve += time_retrieve
  dataset.attrs.update(
  {
  num_nodes: {

@@ -921,6 +926,8 @@ class QScoreBenchmark(Benchmark):
  )

  self.circuits = Circuits([self.transpiled_circuits, self.untranspiled_circuits])
+ dataset.attrs["total_submit_time"] = total_submit
+ dataset.attrs["total_retrieve_time"] = total_retrieve

  return dataset
iqm/benchmarks/quantum_volume/quantum_volume.py CHANGED

@@ -17,7 +17,7 @@ Quantum Volume benchmark
  """

  from copy import deepcopy
- from time import strftime
+ from time import strftime, time
  from typing import Any, Dict, List, Literal, Optional, Sequence, Tuple, Type

  from matplotlib.figure import Figure

@@ -716,6 +716,8 @@ class QuantumVolumeBenchmark(Benchmark):
  """Executes the benchmark."""

  self.execution_timestamp = strftime("%Y%m%d-%H%M%S")
+ total_submit: float = 0
+ total_retrieve: float = 0

  dataset = xr.Dataset()
  self.add_all_meta_to_dataset(dataset)

@@ -780,7 +782,9 @@ class QuantumVolumeBenchmark(Benchmark):
  all_op_counts[str(qubits)] = count_native_gates(backend, transpiled_qc_list)

  # Submit
+ t_start = time()
  all_qv_jobs.append(self.submit_single_qv_job(backend, qubits, sorted_transpiled_qc_list))
+ total_submit += time() - t_start
  qcvv_logger.info(f"Job for layout {qubits} submitted successfully!")

  # Retrieve counts of jobs for all qubit layouts

@@ -791,7 +795,7 @@ class QuantumVolumeBenchmark(Benchmark):
  execution_results, time_retrieve = retrieve_all_counts(job_dict["jobs"], str(qubits))
  # Retrieve all job meta data
  all_job_metadata = retrieve_all_job_metadata(job_dict["jobs"])
-
+ total_retrieve += time_retrieve
  # Export all to dataset
  dataset.attrs.update(
  {

@@ -829,7 +833,8 @@ class QuantumVolumeBenchmark(Benchmark):
  self.mit_shots,
  )
  dataset.attrs.update({"REM_quasidistributions": rem_quasidistros})
-
+ dataset.attrs["total_submit_time"] = total_submit
+ dataset.attrs["total_retrieve_time"] = total_retrieve
  qcvv_logger.info(f"QV experiment execution concluded !")
  return dataset
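In quantum_volume.py and the randomized benchmarking files below, the submit calls (self.submit_single_qv_job, submit_parallel_rb_job, submit_sequential_rb_jobs, self.submit_single_drb_job, self.submit_single_mrb_job) are instead bracketed with a manual stopwatch built from time.time(), presumably because those helpers do not themselves return a duration. A rough, runnable sketch of that variant, with a hypothetical stand-in for the submit helper:

    from time import time

    # Hypothetical stand-in for a submit helper that returns only the job
    # handle, not a duration, so the caller times the call itself.
    def submit_single_qv_job(backend, qubits, circuits):
        return {"backend": backend, "qubits": qubits, "jobs": list(circuits)}

    total_submit: float = 0.0

    t_start = time()
    job = submit_single_qv_job("fake-backend", [0, 1, 2], ["qc_0", "qc_1"])
    total_submit += time() - t_start

    print(f"Submission wall-clock time accumulated so far: {total_submit:.6f} s")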
iqm/benchmarks/randomized_benchmarking/clifford_rb/clifford_rb.py CHANGED

@@ -16,7 +16,7 @@
  'Standard' Clifford Randomized Benchmarking.
  """

- from time import strftime
+ from time import strftime, time
  from typing import Any, Dict, List, Sequence, Type

  import numpy as np

@@ -252,6 +252,8 @@ class CliffordRandomizedBenchmarking(Benchmark):
  # Submit jobs for all qubit layouts
  all_rb_jobs: List[Dict[str, Any]] = []
  time_circuit_generation: Dict[str, float] = {}
+ total_submit: float = 0
+ total_retrieve: float = 0

  # Initialize the variable to contain the circuits for each layout
  self.untranspiled_circuits = BenchmarkCircuit("untranspiled_circuits")

@@ -290,6 +292,7 @@ class CliffordRandomizedBenchmarking(Benchmark):
  # Submit all
  flat_qubits_array = [x for y in self.qubits_array for x in y]
  sorted_transpiled_qc_list = {tuple(flat_qubits_array): parallel_transpiled_rb_circuits[seq_length]}
+ t_start = time()
  all_rb_jobs.append(
  submit_parallel_rb_job(
  backend,

@@ -302,6 +305,7 @@ class CliffordRandomizedBenchmarking(Benchmark):
  self.configuration.max_circuits_per_batch,
  )
  )
+ total_submit += time() - t_start
  qcvv_logger.info(f"Job for sequence length {seq_length} submitted successfully!")

  self.untranspiled_circuits.circuit_groups.append(

@@ -351,6 +355,7 @@ class CliffordRandomizedBenchmarking(Benchmark):
  )

  # Submit
+ t_start = time()
  all_rb_jobs.extend(
  submit_sequential_rb_jobs(
  qubits,

@@ -363,6 +368,7 @@ class CliffordRandomizedBenchmarking(Benchmark):
  circuit_compilation_options=self.circuit_compilation_options,
  )
  )
+ total_submit += time() - t_start
  qcvv_logger.info(
  f"All jobs for qubits {qubits} and sequence lengths {self.sequence_lengths} submitted successfully!"
  )

@@ -386,6 +392,7 @@ class CliffordRandomizedBenchmarking(Benchmark):
  execution_results, time_retrieve = retrieve_all_counts(job_dict["jobs"], identifier)
  # Retrieve all job meta data
  all_job_metadata = retrieve_all_job_metadata(job_dict["jobs"])
+ total_retrieve += time_retrieve
  # Export all to dataset
  dataset.attrs[qubit_idx[str(qubits)]].update(
  {

@@ -401,6 +408,8 @@ class CliffordRandomizedBenchmarking(Benchmark):
  qcvv_logger.info(f"Adding counts of qubits {qubits} and depth {depth} run to the dataset")
  dataset, _ = add_counts_to_dataset(execution_results, identifier, dataset)

+ dataset.attrs["total_submit_time"] = total_submit
+ dataset.attrs["total_retrieve_time"] = total_retrieve
  qcvv_logger.info(f"RB experiment concluded !")
  self.circuits = Circuits([self.transpiled_circuits, self.untranspiled_circuits])
iqm/benchmarks/randomized_benchmarking/direct_rb/direct_rb.py CHANGED

@@ -3,7 +3,7 @@ Direct Randomized Benchmarking.
  """

  import random
- from time import strftime
+ from time import strftime, time
  from typing import Any, Dict, List, Literal, Optional, Sequence, Tuple, Type, cast

  import numpy as np

@@ -751,6 +751,8 @@ class DirectRandomizedBenchmarking(Benchmark):
  xr.Dataset: Dataset containing benchmark results and metadata
  """
  self.execution_timestamp = strftime("%Y%m%d-%H%M%S")
+ total_submit: float = 0
+ total_retrieve: float = 0

  dataset = xr.Dataset()

@@ -816,6 +818,7 @@ class DirectRandomizedBenchmarking(Benchmark):
  # Submit all
  flat_qubits_array = [x for y in loop_qubits_sequence for x in y]
  sorted_transpiled_qc_list = {tuple(flat_qubits_array): parallel_drb_circuits[depth]["transpiled"]}
+ t_start = time()
  all_drb_jobs.append(
  submit_parallel_rb_job(
  backend,

@@ -828,6 +831,7 @@ class DirectRandomizedBenchmarking(Benchmark):
  max_circuits_per_batch=self.configuration.max_circuits_per_batch,
  )
  )
+ total_submit += time() - t_start
  qcvv_logger.info(f"Job for depth {depth} submitted successfully!")

  self.untranspiled_circuits.circuit_groups.append(

@@ -884,6 +888,7 @@ class DirectRandomizedBenchmarking(Benchmark):
  sorted_transpiled_qc_list = {
  cast(Tuple[int, ...], tuple(qubits)): drb_transpiled_circuits_lists[depth]
  }
+ t_start = time()
  all_drb_jobs.append(
  self.submit_single_drb_job(
  backend,

@@ -892,7 +897,7 @@ class DirectRandomizedBenchmarking(Benchmark):
  cast(dict[tuple[int, ...], list[Any]], sorted_transpiled_qc_list),
  )
  )
-
+ total_submit += time() - t_start
  qcvv_logger.info(f"Job for layout {qubits} & depth {depth} submitted successfully!")

  self.untranspiled_circuits.circuit_groups.append(

@@ -916,6 +921,7 @@ class DirectRandomizedBenchmarking(Benchmark):
  execution_results, time_retrieve = retrieve_all_counts(
  job_dict["jobs"], f"qubits_{str(qubits)}_depth_{str(depth)}"
  )
+ total_retrieve += time_retrieve
  # Retrieve all job meta data
  all_job_metadata = retrieve_all_job_metadata(job_dict["jobs"])
  # Export all to dataset

@@ -934,7 +940,8 @@ class DirectRandomizedBenchmarking(Benchmark):
  dataset, _ = add_counts_to_dataset(execution_results, f"qubits_{str(qubits)}_depth_{str(depth)}", dataset)

  self.circuits = Circuits([self.transpiled_circuits, self.untranspiled_circuits])
-
+ dataset.attrs["total_submit_time"] = total_submit
+ dataset.attrs["total_retrieve_time"] = total_retrieve
  qcvv_logger.info(f"DRB experiment execution concluded!")

  return dataset
iqm/benchmarks/randomized_benchmarking/interleaved_rb/interleaved_rb.py CHANGED

@@ -16,12 +16,13 @@
  Interleaved Clifford Randomized Benchmarking.
  """

- from time import strftime
+ from time import strftime, time
  from typing import Any, Dict, List, Literal, Optional, Sequence, Type

  from matplotlib.figure import Figure
  import numpy as np
  import xarray as xr
+ from pycparser.ply.ctokens import t_STRING

  from iqm.benchmarks.benchmark import BenchmarkConfigurationBase
  from iqm.benchmarks.benchmark_definition import (

@@ -323,6 +324,8 @@ class InterleavedRandomizedBenchmarking(Benchmark):
  # Submit jobs for all qubit layouts
  all_rb_jobs: Dict[str, List[Dict[str, Any]]] = {}  # Label by Clifford or Interleaved
  time_circuit_generation: Dict[str, float] = {}
+ total_submit: float = 0
+ total_retrieve: float = 0

  # Initialize the variable to contain the circuits for each layout

@@ -398,6 +401,7 @@ class InterleavedRandomizedBenchmarking(Benchmark):
  sorted_transpiled_interleaved_rb_qc_list = {
  tuple(flat_qubits_array): parallel_transpiled_interleaved_rb_circuits[seq_length]
  }
+ t_start = time()
  all_rb_jobs["clifford"].append(
  submit_parallel_rb_job(
  backend,

@@ -422,6 +426,7 @@ class InterleavedRandomizedBenchmarking(Benchmark):
  self.configuration.max_circuits_per_batch,
  )
  )
+ total_submit += time() - t_start
  qcvv_logger.info(f"Both jobs for sequence length {seq_length} submitted successfully!")

  self.untranspiled_circuits.circuit_groups.append(

@@ -507,6 +512,7 @@ class InterleavedRandomizedBenchmarking(Benchmark):
  time_circuit_generation[str(qubits)] = t_clifford + t_inter

  # Submit Clifford then Interleaved
+ t_start = time()
  all_rb_jobs["clifford"].extend(
  submit_sequential_rb_jobs(
  qubits,

@@ -531,6 +537,7 @@ class InterleavedRandomizedBenchmarking(Benchmark):
  circuit_compilation_options=self.circuit_compilation_options,
  )
  )
+ total_submit += time() - t_start
  qcvv_logger.info(
  f"All jobs for qubits {qubits} and sequence lengths {self.sequence_lengths} submitted successfully!"
  )

@@ -568,6 +575,7 @@ class InterleavedRandomizedBenchmarking(Benchmark):
  execution_results, time_retrieve = retrieve_all_counts(job_dict["jobs"], identifier)
  # Retrieve all job meta data
  all_job_metadata = retrieve_all_job_metadata(job_dict["jobs"])
+ total_retrieve += time_retrieve
  # Export all to dataset
  dataset.attrs[qubit_idx[str(qubits)]].update(
  {

@@ -585,6 +593,8 @@ class InterleavedRandomizedBenchmarking(Benchmark):
  qcvv_logger.info(f"Adding counts of qubits {qubits} and depth {depth} run to the dataset")
  dataset, _ = add_counts_to_dataset(execution_results, identifier, dataset)

+ dataset.attrs["total_submit_time"] = total_submit
+ dataset.attrs["total_retrieve_time"] = total_retrieve
  qcvv_logger.info(f"Interleaved RB experiment concluded !")
  self.circuits = Circuits([self.transpiled_circuits, self.untranspiled_circuits])
iqm/benchmarks/randomized_benchmarking/mirror_rb/mirror_rb.py CHANGED

@@ -2,7 +2,7 @@
  Mirror Randomized Benchmarking.
  """

- from time import strftime
+ from time import strftime, time
  from typing import Any, Dict, List, Literal, Optional, Sequence, Tuple, Type
  import warnings

@@ -592,6 +592,8 @@ class MirrorRandomizedBenchmarking(Benchmark):
  # Submit jobs for all qubit layouts
  all_mrb_jobs: List[Dict[str, Any]] = []
  time_circuit_generation: Dict[str, float] = {}
+ total_submit: float = 0
+ total_retrieve: float = 0

  # The depths should be assigned to each set of qubits!
  # The real final MRB depths are twice the originally specified, must be taken into account here!

@@ -653,7 +655,9 @@ class MirrorRandomizedBenchmarking(Benchmark):

  # Submit
  sorted_transpiled_qc_list = {tuple(qubits): mrb_transpiled_circuits_lists[depth]}
+ t_start = time()
  all_mrb_jobs.append(self.submit_single_mrb_job(backend, qubits, depth, sorted_transpiled_qc_list))
+ total_retrieve += time() - t_start
  qcvv_logger.info(f"Job for layout {qubits} & depth {depth} submitted successfully!")

  self.untranspiled_circuits.circuit_groups.append(

@@ -675,6 +679,7 @@ class MirrorRandomizedBenchmarking(Benchmark):
  )
  # Retrieve all job meta data
  all_job_metadata = retrieve_all_job_metadata(job_dict["jobs"])
+ total_retrieve += time_retrieve
  # Export all to dataset
  dataset.attrs[qubit_idx[str(qubits)]].update(
  {

@@ -690,6 +695,8 @@ class MirrorRandomizedBenchmarking(Benchmark):
  qcvv_logger.info(f"Adding counts of qubits {qubits} and depth {depth} run to the dataset")
  dataset, _ = add_counts_to_dataset(execution_results, f"qubits_{str(qubits)}_depth_{str(depth)}", dataset)

+ dataset.attrs["total_submit_time"] = total_submit
+ dataset.attrs["total_retrieve_time"] = total_retrieve
  self.circuits = Circuits([self.transpiled_circuits, self.untranspiled_circuits])

  qcvv_logger.info(f"MRB experiment execution concluded !")
{iqm_benchmarks-2.47.dist-info → iqm_benchmarks-2.49.dist-info}/METADATA CHANGED

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: iqm-benchmarks
- Version: 2.
+ Version: 2.49
  Summary: A package for implementation of Quantum Characterization, Verification and Validation (QCVV) techniques on IQM's hardware at gate level abstraction
  Author-email: IQM Finland Oy <developers@meetiqm.com>, Adrian Auer <adrian.auer@meetiqm.com>, Raphael Brieger <raphael.brieger@meetiqm.com>, Alessio Calzona <alessio.calzona@meetiqm.com>, Pedro Figueroa Romero <pedro.romero@meetiqm.com>, Amin Hosseinkhani <amin.hosseinkhani@meetiqm.com>, Miikka Koistinen <miikka@meetiqm.com>, Nadia Milazzo <nadia.milazzo@meetiqm.com>, Vicente Pina Canelles <vicente.pina@meetiqm.com>, Aniket Rath <aniket.rath@meetiqm.com>, Jami Rönkkö <jami@meetiqm.com>, Stefan Seegerer <stefan.seegerer@meetiqm.com>
  Project-URL: Homepage, https://github.com/iqm-finland/iqm-benchmarks

@@ -8,7 +8,7 @@ Classifier: Development Status :: 4 - Beta
  Classifier: Programming Language :: Python :: 3 :: Only
  Classifier: Topic :: Scientific/Engineering :: Physics
  Classifier: Intended Audience :: Science/Research
- Requires-Python:
+ Requires-Python: <3.13,>=3.11
  Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: lmfit>=1.2

@@ -19,7 +19,7 @@ Requires-Dist: networkx<4.0,>=3.3
  Requires-Dist: rustworkx>=0.16.0
  Requires-Dist: numpy<2.0,>=1.25.2
  Requires-Dist: qiskit<=1.4.2,>=1.2.4
- Requires-Dist: iqm-client[qiskit]<
+ Requires-Dist: iqm-client[qiskit]<31.0,>=30.1.0
  Requires-Dist: iqm-station-control-client>=9.3.0
  Requires-Dist: requests<3.0,>=2.32.3
  Requires-Dist: scikit-optimize<0.11.0,>=0.10.2

@@ -28,7 +28,8 @@ Requires-Dist: uncertainties<3.3.0,>=3.2.2
  Requires-Dist: pycurl<8.0,>=7.45.3
  Requires-Dist: xarray<2025.0.0,>=2024.6.0
  Requires-Dist: types-requests
- Requires-Dist: myst-nb==1.
+ Requires-Dist: myst-nb==1.3.0
+ Requires-Dist: pyyaml!=5.4.0,!=5.4.1,!=6.0.0
  Provides-Extra: cicd
  Requires-Dist: build==1.0.3; extra == "cicd"
  Requires-Dist: pip-licenses==5.0.0; extra == "cicd"
{iqm_benchmarks-2.47.dist-info → iqm_benchmarks-2.49.dist-info}/RECORD CHANGED

@@ -8,45 +8,45 @@ iqm/benchmarks/utils.py,sha256=kJz9T9nJXpLl_iFYYUDtSq83N-Y3JFQBuvW1o7-AVSM,44137
  iqm/benchmarks/utils_plots.py,sha256=CaqA9fJNgRnrbYqwBdpzFUlhwvKw5lhZX3KfRlroQV4,24420
  iqm/benchmarks/utils_shadows.py,sha256=e77PV_uaAO5m_woox9lAzompKAvFeDJ-0AKJrNJ7NFg,9728
  iqm/benchmarks/coherence/__init__.py,sha256=yeyhk-_Lp8IbJ-f5lQj0HP5Q1HSKK_FzuXHazotUrVY,704
- iqm/benchmarks/coherence/coherence.py,sha256=
+ iqm/benchmarks/coherence/coherence.py,sha256=RvbOxV2sJpAyr_bXqjBfSjk9v8XjRN-nrkK8w7RyfH0,21596
  iqm/benchmarks/compressive_gst/__init__.py,sha256=LneifgYXtcwo2jcXo7GdUEHL6_peipukShhkrdaTRCA,929
- iqm/benchmarks/compressive_gst/compressive_gst.py,sha256=
+ iqm/benchmarks/compressive_gst/compressive_gst.py,sha256=_thQfc9qmIJqAcS3Kg4ITEYl8Ofi8xgC_oZotrmyzVk,28484
  iqm/benchmarks/compressive_gst/gst_analysis.py,sha256=H6EQGbpI_sig69Jy6hflg6alMTtjB0t9tHftygzA2YA,41240
  iqm/benchmarks/entanglement/__init__.py,sha256=sHVVToRWRCz0LSntk1rQaoSNNeyZLPoiTjUKWZWrk1E,778
- iqm/benchmarks/entanglement/ghz.py,sha256=
- iqm/benchmarks/entanglement/graph_states.py,sha256=
+ iqm/benchmarks/entanglement/ghz.py,sha256=bM0bqKnyyT3gnN4QNQfzOS8lXp7bqo1pNlUfo4LK3ug,41586
+ iqm/benchmarks/entanglement/graph_states.py,sha256=6qACedd3UXpiowXc9GW4QhSwO-CzHXnBA3dIC6nCIbE,62788
  iqm/benchmarks/optimization/__init__.py,sha256=_ajW_OibYLCtzU5AUv5c2zuuVYn8ZNeZUcUUSIGt51M,747
- iqm/benchmarks/optimization/qscore.py,sha256=
+ iqm/benchmarks/optimization/qscore.py,sha256=KmRv0eapeohr2fdomzk33GlysG1b7Gtfr18d2vX_4UM,37779
  iqm/benchmarks/quantum_volume/__init__.py,sha256=i-Q4SpDWELBw7frXnxm1j4wJRcxbIyrS5uEK_v06YHo,951
  iqm/benchmarks/quantum_volume/clops.py,sha256=EUtO-_OYBYvwqb4xY3aubI2gc2Z6cBokRzt_E0608WA,31242
- iqm/benchmarks/quantum_volume/quantum_volume.py,sha256=
+ iqm/benchmarks/quantum_volume/quantum_volume.py,sha256=af9C4SdEPcYyZgQgtJYy2h_F8QWv1a0hEtN6hr4KeM0,36861
  iqm/benchmarks/randomized_benchmarking/__init__.py,sha256=IkKo-7zUChxZZd3my_csQCJfJfZNsV3-JTvdG8uqys4,734
  iqm/benchmarks/randomized_benchmarking/clifford_1q.pkl,sha256=yrmSJqhv7Lb1yqiqU9-2baqTljJPNmTUPQR-AH6GGfc,7800
  iqm/benchmarks/randomized_benchmarking/clifford_2q.pkl,sha256=mJQLubWPOb-DbmFi4oKYJqAMW_Yyo3eJjRjLGl9Sqmo,10282247
  iqm/benchmarks/randomized_benchmarking/multi_lmfit.py,sha256=Se1ygR4mXn_2_P82Ch31KBnCmY-g_A9NKzE9Ir8nEvw,3247
  iqm/benchmarks/randomized_benchmarking/randomized_benchmarking_common.py,sha256=1lXaqUq6BagOjsaEdKZIN4GAZ1jphk_3khZcaid65n0,54296
  iqm/benchmarks/randomized_benchmarking/clifford_rb/__init__.py,sha256=bTDA156LAl7OLGcMec--1nzDrV1XpPRVq3CquTmucgE,677
- iqm/benchmarks/randomized_benchmarking/clifford_rb/clifford_rb.py,sha256=
+ iqm/benchmarks/randomized_benchmarking/clifford_rb/clifford_rb.py,sha256=1BOYA9xRM1fhCElZCFU2EXuLAXrzMb3w5eoU20yqG7Q,19085
  iqm/benchmarks/randomized_benchmarking/direct_rb/__init__.py,sha256=lCIIeWMFZHnMUUEUTjUBvrhhUur6uBTHIVkxFBSfHC4,681
- iqm/benchmarks/randomized_benchmarking/direct_rb/direct_rb.py,sha256=
+ iqm/benchmarks/randomized_benchmarking/direct_rb/direct_rb.py,sha256=X4MVojCcv1KabopKql4-58sNXP_q0WcFZBeUbDTY83w,49399
  iqm/benchmarks/randomized_benchmarking/eplg/__init__.py,sha256=1MeGZTErElXJypQV2rQf7hwqLLvIp_JNVbwNhaP5vyI,696
  iqm/benchmarks/randomized_benchmarking/eplg/eplg.py,sha256=3A_gxzAs6mi3APKvqCwYDcNwRogIZNy5SDL33Cro89E,17036
  iqm/benchmarks/randomized_benchmarking/interleaved_rb/__init__.py,sha256=sq6MgN_hwlpkOj10vyCU4e6eKSX-oLcF2L9na6W2Gt4,681
- iqm/benchmarks/randomized_benchmarking/interleaved_rb/interleaved_rb.py,sha256=
+ iqm/benchmarks/randomized_benchmarking/interleaved_rb/interleaved_rb.py,sha256=OHoAWajCE48dRDInwQUT8VvtzKad0ExefdqvZFTaYzs,28918
  iqm/benchmarks/randomized_benchmarking/mirror_rb/__init__.py,sha256=jRKbivWCZ3xdO1k0sx-ygC3s5DUkGSModd975PoAtcg,692
- iqm/benchmarks/randomized_benchmarking/mirror_rb/mirror_rb.py,sha256=
- iqm_benchmarks-2.
+ iqm/benchmarks/randomized_benchmarking/mirror_rb/mirror_rb.py,sha256=ijieNymik3BeEUpXS-m64mtgdHz9iAFELuLooHeZY0E,33252
+ iqm_benchmarks-2.49.dist-info/licenses/LICENSE,sha256=2Ncb40-hqkTil78RPv3-YiJfKaJ8te9USJgliKqIdSY,11558
  mGST/LICENSE,sha256=TtHNq55cUcbglb7uhVudeBLUh_qPdUoAEvU0BBwFz-k,1098
  mGST/README.md,sha256=v_5kw253csHF4-RfE-44KqFmBXIsSMRmOtN0AUPrRxE,5050
  mGST/additional_fns.py,sha256=MV0Pm5ap59IjhT_E3QhsZyM7lXOF1RZ9SD11zoaf43A,31781
- mGST/algorithm.py,sha256=
+ mGST/algorithm.py,sha256=07q9S4nivYIlR-0xxb295l1mwGuqcoBj1GMSnlAFoXA,27493
  mGST/compatibility.py,sha256=00DsPnNfOtrQcDTvxBDs-0aMhmuXmOIIxl_Ohy-Emkg,8920
  mGST/low_level_jit.py,sha256=Ih1MxcwU0GnSRu0TI9GaYxpB94CMuJ8Is1eTeOwsfA8,30772
  mGST/optimization.py,sha256=x9tJ9wMQ5aONWpNpBMVtK0rwE6DRcOU33htNgrt0tx4,11015
  mGST/qiskit_interface.py,sha256=uCdn-Q9CXI2f4FQSxGUy8GmmzQhr9NhCOFb2VPj0gTs,10061
  mGST/reporting/figure_gen.py,sha256=xFPAHx1Trdqz7swn0kRqwc_jbRaNxhG9Nvx0jeitooo,25847
  mGST/reporting/reporting.py,sha256=Wss1-zFsMEhzrrXKfP-RICau80ezjDIzcN555KhSehc,34160
- iqm_benchmarks-2.
- iqm_benchmarks-2.
- iqm_benchmarks-2.
- iqm_benchmarks-2.
+ iqm_benchmarks-2.49.dist-info/METADATA,sha256=LbHTeCSeWZw8_DIXm3IGc41yfJz2MzGjfdg_u_qRb4o,10961
+ iqm_benchmarks-2.49.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ iqm_benchmarks-2.49.dist-info/top_level.txt,sha256=3G23Z-1LGf-IOzTCUl6QwWqiQ3USz25Zt90Ihq192to,9
+ iqm_benchmarks-2.49.dist-info/RECORD,,
mGST/algorithm.py CHANGED

@@ -303,7 +303,7 @@ def gd(K, E, rho, y, J, d, r, rK, fixed_gates, ls="COBYLA", mle=False):
  Delta = tangent_proj(K, Delta, d, rK)

  res = minimize(
- lineobjf_isom_geodesic, 1e-8, args=(Delta, K, E, rho, J, y, mle),
+ lineobjf_isom_geodesic, 1e-8, args=(Delta, K, E, rho, J, y, mle), options={"maxiter": 200}
  )
  a = res.x
  K_new = update_K_geodesic(K, Delta, a)
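The single mGST change passes an options dict to scipy.optimize.minimize, capping the inner line search at 200 iterations. A toy sketch of the same call shape is below; the objective and the explicit method="COBYLA" are illustrative only (gd() defaults its ls argument to "COBYLA", and the original call's full argument list is truncated above).

    from scipy.optimize import minimize

    def objective(a):
        # Toy 1-D objective standing in for lineobjf_isom_geodesic.
        return (a[0] - 0.3) ** 2

    # options={"maxiter": 200} bounds the number of COBYLA iterations.
    res = minimize(objective, x0=1e-8, method="COBYLA", options={"maxiter": 200})
    print(res.x, res.message)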
{iqm_benchmarks-2.47.dist-info → iqm_benchmarks-2.49.dist-info}/WHEEL: File without changes
{iqm_benchmarks-2.47.dist-info → iqm_benchmarks-2.49.dist-info}/licenses/LICENSE: File without changes
{iqm_benchmarks-2.47.dist-info → iqm_benchmarks-2.49.dist-info}/top_level.txt: File without changes