pyGSTi 0.9.12.1-cp310-cp310-win_amd64.whl → 0.9.13-cp310-cp310-win_amd64.whl

Files changed (221)
  1. pyGSTi-0.9.13.dist-info/METADATA +197 -0
  2. {pyGSTi-0.9.12.1.dist-info → pyGSTi-0.9.13.dist-info}/RECORD +207 -217
  3. {pyGSTi-0.9.12.1.dist-info → pyGSTi-0.9.13.dist-info}/WHEEL +1 -1
  4. pygsti/_version.py +2 -2
  5. pygsti/algorithms/contract.py +1 -1
  6. pygsti/algorithms/core.py +42 -28
  7. pygsti/algorithms/fiducialselection.py +17 -8
  8. pygsti/algorithms/gaugeopt.py +2 -2
  9. pygsti/algorithms/germselection.py +87 -77
  10. pygsti/algorithms/mirroring.py +0 -388
  11. pygsti/algorithms/randomcircuit.py +165 -1333
  12. pygsti/algorithms/rbfit.py +0 -234
  13. pygsti/baseobjs/basis.py +94 -396
  14. pygsti/baseobjs/errorgenbasis.py +0 -132
  15. pygsti/baseobjs/errorgenspace.py +0 -10
  16. pygsti/baseobjs/label.py +52 -168
  17. pygsti/baseobjs/opcalc/fastopcalc.cp310-win_amd64.pyd +0 -0
  18. pygsti/baseobjs/opcalc/fastopcalc.pyx +2 -2
  19. pygsti/baseobjs/polynomial.py +13 -595
  20. pygsti/baseobjs/statespace.py +1 -0
  21. pygsti/circuits/__init__.py +1 -1
  22. pygsti/circuits/circuit.py +682 -505
  23. pygsti/circuits/circuitconstruction.py +0 -4
  24. pygsti/circuits/circuitlist.py +47 -5
  25. pygsti/circuits/circuitparser/__init__.py +8 -8
  26. pygsti/circuits/circuitparser/fastcircuitparser.cp310-win_amd64.pyd +0 -0
  27. pygsti/circuits/circuitstructure.py +3 -3
  28. pygsti/circuits/cloudcircuitconstruction.py +1 -1
  29. pygsti/data/datacomparator.py +2 -7
  30. pygsti/data/dataset.py +46 -44
  31. pygsti/data/hypothesistest.py +0 -7
  32. pygsti/drivers/bootstrap.py +0 -49
  33. pygsti/drivers/longsequence.py +2 -1
  34. pygsti/evotypes/basereps_cython.cp310-win_amd64.pyd +0 -0
  35. pygsti/evotypes/chp/opreps.py +0 -61
  36. pygsti/evotypes/chp/statereps.py +0 -32
  37. pygsti/evotypes/densitymx/effectcreps.cpp +9 -10
  38. pygsti/evotypes/densitymx/effectreps.cp310-win_amd64.pyd +0 -0
  39. pygsti/evotypes/densitymx/effectreps.pyx +1 -1
  40. pygsti/evotypes/densitymx/opreps.cp310-win_amd64.pyd +0 -0
  41. pygsti/evotypes/densitymx/opreps.pyx +2 -2
  42. pygsti/evotypes/densitymx/statereps.cp310-win_amd64.pyd +0 -0
  43. pygsti/evotypes/densitymx/statereps.pyx +1 -1
  44. pygsti/evotypes/densitymx_slow/effectreps.py +7 -23
  45. pygsti/evotypes/densitymx_slow/opreps.py +16 -23
  46. pygsti/evotypes/densitymx_slow/statereps.py +10 -3
  47. pygsti/evotypes/evotype.py +39 -2
  48. pygsti/evotypes/stabilizer/effectreps.cp310-win_amd64.pyd +0 -0
  49. pygsti/evotypes/stabilizer/effectreps.pyx +0 -4
  50. pygsti/evotypes/stabilizer/opreps.cp310-win_amd64.pyd +0 -0
  51. pygsti/evotypes/stabilizer/opreps.pyx +0 -4
  52. pygsti/evotypes/stabilizer/statereps.cp310-win_amd64.pyd +0 -0
  53. pygsti/evotypes/stabilizer/statereps.pyx +1 -5
  54. pygsti/evotypes/stabilizer/termreps.cp310-win_amd64.pyd +0 -0
  55. pygsti/evotypes/stabilizer/termreps.pyx +0 -7
  56. pygsti/evotypes/stabilizer_slow/effectreps.py +0 -22
  57. pygsti/evotypes/stabilizer_slow/opreps.py +0 -4
  58. pygsti/evotypes/stabilizer_slow/statereps.py +0 -4
  59. pygsti/evotypes/statevec/effectreps.cp310-win_amd64.pyd +0 -0
  60. pygsti/evotypes/statevec/effectreps.pyx +1 -1
  61. pygsti/evotypes/statevec/opreps.cp310-win_amd64.pyd +0 -0
  62. pygsti/evotypes/statevec/opreps.pyx +2 -2
  63. pygsti/evotypes/statevec/statereps.cp310-win_amd64.pyd +0 -0
  64. pygsti/evotypes/statevec/statereps.pyx +1 -1
  65. pygsti/evotypes/statevec/termreps.cp310-win_amd64.pyd +0 -0
  66. pygsti/evotypes/statevec/termreps.pyx +0 -7
  67. pygsti/evotypes/statevec_slow/effectreps.py +0 -3
  68. pygsti/evotypes/statevec_slow/opreps.py +0 -5
  69. pygsti/extras/__init__.py +0 -1
  70. pygsti/extras/drift/stabilityanalyzer.py +3 -1
  71. pygsti/extras/interpygate/__init__.py +12 -0
  72. pygsti/extras/interpygate/core.py +0 -36
  73. pygsti/extras/interpygate/process_tomography.py +44 -10
  74. pygsti/extras/rpe/rpeconstruction.py +0 -2
  75. pygsti/forwardsims/__init__.py +1 -0
  76. pygsti/forwardsims/forwardsim.py +14 -55
  77. pygsti/forwardsims/mapforwardsim.py +69 -18
  78. pygsti/forwardsims/mapforwardsim_calc_densitymx.cp310-win_amd64.pyd +0 -0
  79. pygsti/forwardsims/mapforwardsim_calc_densitymx.pyx +65 -66
  80. pygsti/forwardsims/mapforwardsim_calc_generic.py +91 -13
  81. pygsti/forwardsims/matrixforwardsim.py +63 -15
  82. pygsti/forwardsims/termforwardsim.py +8 -110
  83. pygsti/forwardsims/termforwardsim_calc_stabilizer.cp310-win_amd64.pyd +0 -0
  84. pygsti/forwardsims/termforwardsim_calc_statevec.cp310-win_amd64.pyd +0 -0
  85. pygsti/forwardsims/termforwardsim_calc_statevec.pyx +0 -651
  86. pygsti/forwardsims/torchfwdsim.py +265 -0
  87. pygsti/forwardsims/weakforwardsim.py +2 -2
  88. pygsti/io/__init__.py +1 -2
  89. pygsti/io/mongodb.py +0 -2
  90. pygsti/io/stdinput.py +6 -22
  91. pygsti/layouts/copalayout.py +10 -12
  92. pygsti/layouts/distlayout.py +0 -40
  93. pygsti/layouts/maplayout.py +103 -25
  94. pygsti/layouts/matrixlayout.py +99 -60
  95. pygsti/layouts/prefixtable.py +1534 -52
  96. pygsti/layouts/termlayout.py +1 -1
  97. pygsti/modelmembers/instruments/instrument.py +3 -3
  98. pygsti/modelmembers/instruments/tpinstrument.py +2 -2
  99. pygsti/modelmembers/modelmember.py +0 -17
  100. pygsti/modelmembers/operations/__init__.py +2 -4
  101. pygsti/modelmembers/operations/affineshiftop.py +1 -0
  102. pygsti/modelmembers/operations/composederrorgen.py +1 -1
  103. pygsti/modelmembers/operations/composedop.py +1 -24
  104. pygsti/modelmembers/operations/denseop.py +5 -5
  105. pygsti/modelmembers/operations/eigpdenseop.py +2 -2
  106. pygsti/modelmembers/operations/embeddederrorgen.py +1 -1
  107. pygsti/modelmembers/operations/embeddedop.py +0 -1
  108. pygsti/modelmembers/operations/experrorgenop.py +2 -2
  109. pygsti/modelmembers/operations/fullarbitraryop.py +1 -0
  110. pygsti/modelmembers/operations/fullcptpop.py +2 -2
  111. pygsti/modelmembers/operations/fulltpop.py +28 -6
  112. pygsti/modelmembers/operations/fullunitaryop.py +5 -4
  113. pygsti/modelmembers/operations/lindbladcoefficients.py +93 -78
  114. pygsti/modelmembers/operations/lindbladerrorgen.py +268 -441
  115. pygsti/modelmembers/operations/linearop.py +7 -27
  116. pygsti/modelmembers/operations/opfactory.py +1 -1
  117. pygsti/modelmembers/operations/repeatedop.py +1 -24
  118. pygsti/modelmembers/operations/staticstdop.py +1 -1
  119. pygsti/modelmembers/povms/__init__.py +3 -3
  120. pygsti/modelmembers/povms/basepovm.py +7 -36
  121. pygsti/modelmembers/povms/complementeffect.py +4 -9
  122. pygsti/modelmembers/povms/composedeffect.py +0 -320
  123. pygsti/modelmembers/povms/computationaleffect.py +1 -1
  124. pygsti/modelmembers/povms/computationalpovm.py +3 -1
  125. pygsti/modelmembers/povms/effect.py +3 -5
  126. pygsti/modelmembers/povms/marginalizedpovm.py +0 -79
  127. pygsti/modelmembers/povms/tppovm.py +74 -2
  128. pygsti/modelmembers/states/__init__.py +2 -5
  129. pygsti/modelmembers/states/composedstate.py +0 -317
  130. pygsti/modelmembers/states/computationalstate.py +3 -3
  131. pygsti/modelmembers/states/cptpstate.py +4 -4
  132. pygsti/modelmembers/states/densestate.py +6 -4
  133. pygsti/modelmembers/states/fullpurestate.py +0 -24
  134. pygsti/modelmembers/states/purestate.py +1 -1
  135. pygsti/modelmembers/states/state.py +5 -6
  136. pygsti/modelmembers/states/tpstate.py +28 -10
  137. pygsti/modelmembers/term.py +3 -6
  138. pygsti/modelmembers/torchable.py +50 -0
  139. pygsti/modelpacks/_modelpack.py +1 -1
  140. pygsti/modelpacks/smq1Q_ZN.py +3 -1
  141. pygsti/modelpacks/smq2Q_XXYYII.py +2 -1
  142. pygsti/modelpacks/smq2Q_XY.py +3 -3
  143. pygsti/modelpacks/smq2Q_XYI.py +2 -2
  144. pygsti/modelpacks/smq2Q_XYICNOT.py +3 -3
  145. pygsti/modelpacks/smq2Q_XYICPHASE.py +3 -3
  146. pygsti/modelpacks/smq2Q_XYXX.py +1 -1
  147. pygsti/modelpacks/smq2Q_XYZICNOT.py +3 -3
  148. pygsti/modelpacks/smq2Q_XYZZ.py +1 -1
  149. pygsti/modelpacks/stdtarget.py +0 -121
  150. pygsti/models/cloudnoisemodel.py +1 -2
  151. pygsti/models/explicitcalc.py +3 -3
  152. pygsti/models/explicitmodel.py +3 -13
  153. pygsti/models/fogistore.py +5 -3
  154. pygsti/models/localnoisemodel.py +1 -2
  155. pygsti/models/memberdict.py +0 -12
  156. pygsti/models/model.py +800 -65
  157. pygsti/models/modelconstruction.py +4 -4
  158. pygsti/models/modelnoise.py +2 -2
  159. pygsti/models/modelparaminterposer.py +1 -1
  160. pygsti/models/oplessmodel.py +1 -1
  161. pygsti/models/qutrit.py +15 -14
  162. pygsti/objectivefns/objectivefns.py +73 -138
  163. pygsti/objectivefns/wildcardbudget.py +2 -7
  164. pygsti/optimize/__init__.py +1 -0
  165. pygsti/optimize/arraysinterface.py +28 -0
  166. pygsti/optimize/customcg.py +0 -12
  167. pygsti/optimize/customlm.py +129 -323
  168. pygsti/optimize/customsolve.py +2 -2
  169. pygsti/optimize/optimize.py +0 -84
  170. pygsti/optimize/simplerlm.py +841 -0
  171. pygsti/optimize/wildcardopt.py +19 -598
  172. pygsti/protocols/confidenceregionfactory.py +28 -14
  173. pygsti/protocols/estimate.py +31 -14
  174. pygsti/protocols/gst.py +142 -68
  175. pygsti/protocols/modeltest.py +6 -10
  176. pygsti/protocols/protocol.py +9 -37
  177. pygsti/protocols/rb.py +450 -79
  178. pygsti/protocols/treenode.py +8 -2
  179. pygsti/protocols/vb.py +108 -206
  180. pygsti/protocols/vbdataframe.py +1 -1
  181. pygsti/report/factory.py +0 -15
  182. pygsti/report/fogidiagram.py +1 -17
  183. pygsti/report/modelfunction.py +12 -3
  184. pygsti/report/mpl_colormaps.py +1 -1
  185. pygsti/report/plothelpers.py +8 -2
  186. pygsti/report/reportables.py +41 -37
  187. pygsti/report/templates/offline/pygsti_dashboard.css +6 -0
  188. pygsti/report/templates/offline/pygsti_dashboard.js +12 -0
  189. pygsti/report/workspace.py +2 -14
  190. pygsti/report/workspaceplots.py +326 -504
  191. pygsti/tools/basistools.py +9 -36
  192. pygsti/tools/edesigntools.py +124 -96
  193. pygsti/tools/fastcalc.cp310-win_amd64.pyd +0 -0
  194. pygsti/tools/fastcalc.pyx +35 -81
  195. pygsti/tools/internalgates.py +151 -15
  196. pygsti/tools/jamiolkowski.py +5 -5
  197. pygsti/tools/lindbladtools.py +19 -11
  198. pygsti/tools/listtools.py +0 -114
  199. pygsti/tools/matrixmod2.py +1 -1
  200. pygsti/tools/matrixtools.py +173 -339
  201. pygsti/tools/nameddict.py +1 -1
  202. pygsti/tools/optools.py +154 -88
  203. pygsti/tools/pdftools.py +0 -25
  204. pygsti/tools/rbtheory.py +3 -320
  205. pygsti/tools/slicetools.py +64 -12
  206. pyGSTi-0.9.12.1.dist-info/METADATA +0 -155
  207. pygsti/algorithms/directx.py +0 -711
  208. pygsti/evotypes/qibo/__init__.py +0 -33
  209. pygsti/evotypes/qibo/effectreps.py +0 -78
  210. pygsti/evotypes/qibo/opreps.py +0 -376
  211. pygsti/evotypes/qibo/povmreps.py +0 -98
  212. pygsti/evotypes/qibo/statereps.py +0 -174
  213. pygsti/extras/rb/__init__.py +0 -13
  214. pygsti/extras/rb/benchmarker.py +0 -957
  215. pygsti/extras/rb/dataset.py +0 -378
  216. pygsti/extras/rb/io.py +0 -814
  217. pygsti/extras/rb/simulate.py +0 -1020
  218. pygsti/io/legacyio.py +0 -385
  219. pygsti/modelmembers/povms/denseeffect.py +0 -142
  220. {pyGSTi-0.9.12.1.dist-info → pyGSTi-0.9.13.dist-info}/LICENSE +0 -0
  221. {pyGSTi-0.9.12.1.dist-info → pyGSTi-0.9.13.dist-info}/top_level.txt +0 -0
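The largest deletions in this release come from removing the deprecated pygsti.extras.rb package (items 213-217 above); the diff below is the full deletion of pygsti/extras/rb/benchmarker.py (item 214, -957 lines). One piece of the deleted code worth noting before reading it: the Benchmarker's 'auto' rescaler mapped raw success probabilities onto a scale where uniform random guessing over a width-w qubit region sits at 0 and perfect success sits at 1 (see rescale_function in the diff). A minimal standalone sketch of that transformation, using only NumPy; the function name here is ours, not pyGSTi's:

    import numpy as np

    def rescale_success_probabilities(data, width):
        # Map the uniform-guessing baseline 1/2**width to 0 and a perfect
        # success probability of 1.0 to 1, matching the 'auto'
        # rescale_function in the deleted Benchmarker below.
        baseline = 1 / 2**width
        return list((np.array(data) - baseline) / (1 - baseline))

    # e.g. rescale_success_probabilities([0.5, 0.75, 1.0], width=1) -> [0.0, 0.5, 1.0]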
pygsti/extras/rb/benchmarker.py
@@ -1,957 +0,0 @@
- """ Encapsulates RB results and dataset objects """
- #***************************************************************************************************
- # Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
- # Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
- # in this software.
- # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- # in compliance with the License. You may obtain a copy of the License at
- # http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
- #***************************************************************************************************
-
- import copy as _copy
- import warnings as _warnings
- from itertools import cycle as _cycle
-
- import numpy as _np
-
- from pygsti.data import dataset as _stdds, multidataset as _multids, datacomparator as _dcomp
- from pygsti.models import oplessmodel as _oplessmodel
-
- #from . import analysis as _analysis
- _analysis = None # MOVED - and this module is deprecated & broken now, so just set to None
-
-
- class Benchmarker(object):
-     """
-     todo
-
-     """
-
-     def __init__(self, specs, ds=None, summary_data=None, predicted_summary_data=None,
-                  dstype='standard', success_outcome='success', success_key='target',
-                  dscomparator=None):
-         """
-         todo
-
-         dstype : ('success-fail', 'standard')
-
-         specs: dictionary of (name, RBSpec) key-value pairs. The names are arbitrary
-
-         """
-         if ds is not None:
-             assert(dstype in ('success-fail', 'standard', 'dict')), "Unknown format for the dataset!"
-             self.dstype = dstype
-             if self.dstype == 'success-fail' or self.dstype == 'dict':
-                 self.success_outcome = success_outcome
-             else:
-                 self.success_outcome = None
-             if self.dstype == 'standard' or self.dstype == 'dict':
-                 self.success_key = success_key
-             else:
-                 self.success_key = None
-
-             if dstype == 'dict':
-                 assert('standard' in ds.keys() and 'success-fail' in ds.keys())
-                 self.multids = ds
-             else:
-                 self.multids = {}
-                 if isinstance(ds, _stdds.DataSet):
-                     self.multids[self.dstype] = _multids.MultiDataSet()
-                     self.multids[self.dstype].add_dataset(0, ds)
-                 elif isinstance(ds, list):
-                     self.multids[self.dstype] = _multids.MultiDataSet()
-                     for i, subds in enumerate(ds):
-                         self.multids[self.dstype].add_dataset(i, ds)
-                 elif isinstance(ds, _multids.MultiDataSet):
-                     self.multids[self.dstype] = ds
-                 else:
-                     raise ValueError("If specified, `ds` must be a DataSet, a list of DataSets,"
-                                      + " a MultiDataSet or a dictionary of MultiDataSets!")
-
-             self.numpasses = len(self.multids[list(self.multids.keys())[0]])
-         else:
-             assert(summary_data is not None), "Must specify one or more DataSets or a summary data dict!"
-             self.multids = None
-             self.success_outcome = None
-             self.success_key = None
-             self
-
-         self.dscomparator = _copy.deepcopy(dscomparator)
-
-         self._specs = tuple(specs.values())
-         self._speckeys = tuple(specs.keys())
-
-         if summary_data is None:
-             self.pass_summary_data = {}
-             self.global_summary_data = {}
-             self.aux = {}
-         else:
-             assert(isinstance(summary_data, dict)), "The summary data must be a dictionary"
-             self.pass_summary_data = summary_data['pass'].copy()
-             self.global_summary_data = summary_data['global'].copy()
-             self.aux = summary_data.get('aux', {}).copy()
-             if self.multids is None:
-                 arbqubits = self._specs[0].get_structure()[0]
-                 arbkey = list(self.pass_summary_data[0][arbqubits].keys())[0]
-                 arbdepth = list(self.pass_summary_data[0][arbqubits][arbkey].keys())[0]
-                 self.numpasses = len(self.pass_summary_data[0][arbqubits][arbkey][arbdepth])
-
-         if predicted_summary_data is None:
-             self.predicted_summary_data = {}
-         else:
-             self.predicted_summary_data = predicted_summary_data.copy()
-
-     def select_volumetric_benchmark_regions(self, depths, boundary, widths='all', datatype='success_probabilities',
-                                             statistic='mean', merit='aboveboundary', specs=None, aggregate=True,
-                                             passnum=None, rescaler='auto'):
-
-         # Selected regions encodes the selected regions, but in the slighty obtuse format of a dictionary of spec
-         # indices and a list of tuples of qubit regions. (so, e.g., if 1- and 2-qubit circuit are run in parallel
-         # the width-1 and width-2 spec chosen could by encoded as the index of that spec and a length-2 list of those
-         # regions.). A less obtuse way to represent the region selection should maybe be used in the future.
-         selected_regions = {}
-         assert(statistic in ('max', 'mean', 'min'))
-
-         if specs is None:
-             specs = self._specs
-
-         specsbywidth = {}
-         for ind, structure in specs.items():
-             for qs in structure:
-                 w = len(qs)
-                 if widths == 'all' or w in widths:
-                     if w not in specsbywidth.keys():
-                         specsbywidth[w] = []
-                     specsbywidth[w].append((ind, qs))
-
-         if not aggregate:
-             assert(passnum is not None), "Must specify the passnumber data to use for selection if not aggregating!"
-
-         for w, specsforw in specsbywidth.items():
-
-             if len(specsforw) == 1: # There's no decision to make: only one benchmark of one region of the size w.
-                 (ind, qs) = specsforw[0]
-                 if ind not in selected_regions:
-                     selected_regions[ind] = [qs, ]
-                 else:
-                     selected_regions[ind].append(qs)
-
-             else: # There's data for more than one region (and/or multiple benchmarks of a single region) of size w
-                 best_boundary_index = 0
-                 best_vb_at_best_boundary_index = None
-                 for (ind, qs) in specsforw:
-                     vbdata = self.volumetric_benchmark_data(depths, widths=[w, ], datatype=datatype,
-                                                             statistic=statistic, specs={ind: [qs, ]},
-                                                             aggregate=aggregate, rescaler=rescaler)['data']
-                     # Only looking at 1 width, so drop the width key, and keep only the depths with data
-                     if not aggregate:
-                         vbdata = {d: vbdata[d][w][passnum] for d in vbdata.keys() if w in vbdata[d].keys()}
-                     else:
-                         vbdata = {d: vbdata[d][w] for d in vbdata.keys() if w in vbdata[d].keys()}
-
-                     # We calcluate the depth index of the largest depth at which the data is above/below the boundary,
-                     # ignoring cases where there's data missing at some depths as long as we're still above/below the
-                     # boundard at a larger depth.
-                     if merit == 'aboveboundary':
-                         x = [vbdata[d] > boundary if d in vbdata.keys() else None for d in depths]
-                     if merit == 'belowboundary':
-                         x = [vbdata[d] < boundary if d in vbdata.keys() else None for d in depths]
-                     try:
-                         x = x[:x.index(False)]
-                     except:
-                         pass
-                     x.reverse()
-                     try:
-                         boundary_index = len(x) - 1 - x.index(True)
-                         #print("There's a non-zero boundary!", str(w), qs)
-                     except:
-                         boundary_index = 0
-                         #print("Zero boundary!", str(w), qs)
-
-                     if boundary_index > best_boundary_index:
-                         best_boundary_index = boundary_index
-                         selected_region_at_w = (ind, qs)
-                         best_vb_at_best_boundary_index = vbdata[depths[boundary_index]]
-                     elif boundary_index == best_boundary_index:
-                         if best_vb_at_best_boundary_index is None:
-                             # On first run through we automatically select that region
-                             selected_region_at_w = (ind, qs)
-                             best_vb_at_best_boundary_index = vbdata[depths[boundary_index]]
-                         else:
-                             if merit == 'aboveboundary' \
-                                and vbdata[depths[boundary_index]] > best_vb_at_best_boundary_index:
-                                 selected_region_at_w = (ind, qs)
-                                 best_vb_at_best_boundary_index = vbdata[depths[boundary_index]]
-                             if merit == 'belowboundary' \
-                                and vbdata[depths[boundary_index]] < best_vb_at_best_boundary_index:
-                                 selected_region_at_w = (ind, qs)
-                                 best_vb_at_best_boundary_index = vbdata[depths[boundary_index]]
-                     else:
-                         pass
-
-                 (ind, qs) = selected_region_at_w
-                 if ind not in selected_regions:
-                     selected_regions[ind] = [qs, ]
-                 else:
-                     selected_regions[ind].append(qs)
-
-         return selected_regions
-
-     def volumetric_benchmark_data(self, depths, widths='all', datatype='success_probabilities',
-                                   statistic='mean', specs=None, aggregate=True, rescaler='auto'):
-
-         # maxmax : max over all depths/widths larger or equal
-         # minmin : min over all deoths/widths smaller or equal.
-
-         assert(statistic in ('max', 'mean', 'min', 'dist', 'maxmax', 'minmin'))
-
-         if isinstance(widths, str):
-             assert(widths == 'all')
-         else:
-             assert(isinstance(widths, list) or isinstance(widths, tuple))
-
-         if specs is None: # If we're not given a filter, we use all of the data.
-             specs = {i: [qs for qs in spec.get_structure()] for i, spec in enumerate(self._specs)}
-
-         width_to_spec = {}
-         for i, structure in specs.items():
-             for qs in structure:
-                 w = len(qs)
-                 if widths == 'all' or w in widths:
-                     if w not in width_to_spec:
-                         width_to_spec[w] = (i, qs)
-                     else:
-                         raise ValueError(("There are multiple qubit subsets of size {} benchmarked! "
-                                           "Cannot have specs as None!").format(w))
-
-         if widths == 'all':
-             widths = list(width_to_spec.keys())
-             widths.sort()
-         else:
-             assert(set(widths) == set(list(width_to_spec.keys())))
-
-         if isinstance(rescaler, str):
-             if rescaler == 'auto':
-                 if datatype == 'success_probabilities':
-                     def rescale_function(data, width):
-                         return list((_np.array(data) - 1 / 2**width) / (1 - 1 / 2**width))
-                 else:
-                     def rescale_function(data, width):
-                         return data
-             elif rescaler == 'none':
-
-                 def rescale_function(data, width):
-                     return data
-
-             else:
-                 raise ValueError("Unknown rescaling option!")
-
-         else:
-             rescale_function = rescaler
-
-         # if samecircuitpredictions:
-         #     predvb = {d: {} for d in depths}
-         # else:
-         #     predvb = None
-
-         qs = self._specs[0].get_structure()[0] # An arbitrary key
-         if datatype in self.pass_summary_data[0][qs].keys():
-             datadict = self.pass_summary_data
-             globaldata = False
-         elif datatype in self.global_summary_data[0][qs].keys():
-             datadict = self.global_summary_data
-             globaldata = True
-         else:
-             raise ValueError("Unknown datatype!")
-
-         if aggregate or globaldata:
-             vb = {d: {} for d in depths}
-             fails = {d: {} for d in depths}
-         else:
-             vb = [{d: {} for d in depths} for i in range(self.numpasses)]
-             fails = [{d: {} for d in depths} for i in range(self.numpasses)]
-
-         if len(self.predicted_summary_data) > 0:
-             arbkey = list(self.predicted_summary_data.keys())[0]
-             dopredictions = datatype in self.predicted_summary_data[arbkey][0][qs].keys()
-             if dopredictions:
-                 pkeys = self.predicted_summary_data.keys()
-                 predictedvb = {pkey: {d: {} for d in depths} for pkey in pkeys}
-             else:
-                 predictedvb = {pkey: None for pkey in self.predicted_summary_data.keys()}
-
-         for w in widths:
-             (i, qs) = width_to_spec[w]
-             data = datadict[i][qs][datatype]
-             if dopredictions:
-                 preddata = {pkey: self.predicted_summary_data[pkey][i][qs][datatype] for pkey in pkeys}
-             for d in depths:
-                 if d in data.keys():
-
-                     dline = data[d]
-
-                     if globaldata:
-
-                         failcount = _np.sum(_np.isnan(dline))
-                         fails[d][w] = (len(dline) - failcount, failcount)
-
-                         if statistic == 'dist':
-                             vb[d][w] = rescale_function(dline, w)
-                         else:
-                             if not _np.isnan(rescale_function(dline, w)).all():
-                                 if statistic == 'max' or statistic == 'maxmax':
-                                     vb[d][w] = _np.nanmax(rescale_function(dline, w))
-                                 elif statistic == 'mean':
-                                     vb[d][w] = _np.nanmean(rescale_function(dline, w))
-                                 elif statistic == 'min' or statistic == 'minmin':
-                                     vb[d][w] = _np.nanmin(rescale_function(dline, w))
-                             else:
-                                 vb[d][w] = _np.nan
-
-                     else:
-                         failline = [(len(dpass) - _np.sum(_np.isnan(dpass)), _np.sum(_np.isnan(dpass)))
-                                     for dpass in dline]
-
-                         if statistic == 'max' or statistic == 'maxmax':
-                             vbdataline = [_np.nanmax(rescale_function(dpass, w))
-                                           if not _np.isnan(rescale_function(dpass, w)).all() else _np.nan
-                                           for dpass in dline]
-                         elif statistic == 'mean':
-                             vbdataline = [_np.nanmean(rescale_function(dpass, w))
-                                           if not _np.isnan(rescale_function(dpass, w)).all() else _np.nan
-                                           for dpass in dline]
-                         elif statistic == 'min' or statistic == 'minmin':
-                             vbdataline = [_np.nanmin(rescale_function(dpass, w))
-                                           if not _np.isnan(rescale_function(dpass, w)).all() else _np.nan
-                                           for dpass in dline]
-                         elif statistic == 'dist':
-                             vbdataline = [rescale_function(dpass, w) for dpass in dline]
-
-                         if not aggregate:
-                             for i in range(len(vb)):
-                                 vb[i][d][w] = vbdataline[i]
-                                 fails[i][d][w] = failline[i]
-
-                         if aggregate:
-
-                             successcount = 0
-                             failcount = 0
-                             for (successcountpass, failcountpass) in failline:
-                                 successcount += successcountpass
-                                 failcount += failcountpass
-                             fails[d][w] = (successcount, failcount)
-
-                             if statistic == 'dist':
-                                 vb[d][w] = [item for sublist in vbdataline for item in sublist]
-                             else:
-                                 if not _np.isnan(vbdataline).all():
-                                     if statistic == 'max' or statistic == 'maxmax':
-                                         vb[d][w] = _np.nanmax(vbdataline)
-                                     elif statistic == 'mean':
-                                         vb[d][w] = _np.nanmean(vbdataline)
-                                     elif statistic == 'min' or statistic == 'minmin':
-                                         vb[d][w] = _np.nanmin(vbdataline)
-                                 else:
-                                     vb[d][w] = _np.nan
-
-                     # Repeat the process for the predictions, but with simpler code as don't have to
-                     # deal with passes or NaNs.
-                     if dopredictions:
-                         pdline = {pkey: preddata[pkey][d] for pkey in pkeys}
-                         for pkey in pkeys:
-                             if statistic == 'dist':
-                                 predictedvb[pkey][d][w] = rescale_function(pdline[pkey], w)
-                             if statistic == 'max' or statistic == 'maxmax':
-                                 predictedvb[pkey][d][w] = _np.max(rescale_function(pdline[pkey], w))
-                             if statistic == 'mean':
-                                 predictedvb[pkey][d][w] = _np.mean(rescale_function(pdline[pkey], w))
-                             if statistic == 'min' or statistic == 'minmin':
-                                 predictedvb[pkey][d][w] = _np.min(rescale_function(pdline[pkey], w))
-
-         if statistic == 'minmin' or statistic == 'maxmax':
-             if aggregate:
-                 for d in vb.keys():
-                     for w in vb[d].keys():
-                         for d2 in vb.keys():
-                             for w2 in vb[d2].keys():
-                                 if statistic == 'minmin' and d2 <= d and w2 <= w and vb[d2][w2] < vb[d][w]:
-                                     vb[d][w] = vb[d2][w2]
-                                 if statistic == 'maxmax' and d2 >= d and w2 >= w and vb[d2][w2] > vb[d][w]:
-                                     vb[d][w] = vb[d2][w2]
-             else:
-                 for i in range(self.numpasses):
-                     for d in vb[i].keys():
-                         for w in vb[i][d].keys():
-                             for d2 in vb[i].keys():
-                                 for w2 in vb[i][d2].keys():
-                                     if statistic == 'minmin' and d2 <= d and w2 <= w and vb[i][d2][w2] < vb[i][d][w]:
-                                         vb[i][d][w] = vb[i][d2][w2]
-                                     if statistic == 'maxmax' and d2 >= d and w2 >= w and vb[i][d2][w2] > vb[i][d][w]:
-                                         vb[i][d][w] = vb[i][d2][w2]
-
-         out = {'data': vb, 'fails': fails, 'predictions': predictedvb}
-
-         return out
-
-     def flattened_data(self, specs=None, aggregate=True):
-
-         flattened_data = {}
-
-         if specs is None:
-             specs = self.filter_experiments()
-
-         qubits = self._specs[0].get_structure()[0] # An arbitrary key in the dict of the summary data.
-         if aggregate:
-             flattened_data = {dtype: [] for dtype in self.pass_summary_data[0][qubits].keys()}
-         else:
-             flattened_data = {dtype: [[] for i in range(self.numpasses)]
-                               for dtype in self.pass_summary_data[0][qubits].keys()}
-         flattened_data.update({dtype: [] for dtype in self.global_summary_data[0][qubits].keys()})
-         flattened_data.update({dtype: [] for dtype in self.aux[0][qubits].keys()})
-         flattened_data.update({'predictions': {pkey: {'success_probabilities': []}
-                                                for pkey in self.predicted_summary_data.keys()}})
-
-         for specind, structure in specs.items():
-             for qubits in structure:
-                 for dtype, data in self.pass_summary_data[specind][qubits].items():
-                     for depth, dataline in data.items():
-                         #print(specind, qubits, dtype, depth)
-                         if aggregate:
-                             aggregatedata = _np.array(dataline[0])
-                             # print(aggregatedata)
-                             # print(type(aggregatedata))
-                             # print(type(aggregatedata[0]))
-                             for i in range(1, self.numpasses):
-                                 # print(dataline[i])
-                                 # print(type(dataline[i]))
-                                 # print(type(dataline[i][0]))
-                                 aggregatedata = aggregatedata + _np.array(dataline[i])
-                             flattened_data[dtype] += list(aggregatedata)
-                         else:
-                             for i in range(self.numpasses):
-                                 flattened_data[dtype][i] += dataline[i]
-
-                 for dtype, data in self.global_summary_data[specind][qubits].items():
-                     for depth, dataline in data.items():
-                         flattened_data[dtype] += dataline
-                 for dtype, data in self.aux[specind][qubits].items():
-                     for depth, dataline in data.items():
-                         flattened_data[dtype] += dataline
-                 for pkey in self.predicted_summary_data.keys():
-                     data = self.predicted_summary_data[pkey][specind][qubits]
-                     if 'success_probabilities' in data.keys():
-                         for depth, dataline in data['success_probabilities'].items():
-                             flattened_data['predictions'][pkey]['success_probabilities'] += dataline
-                     else:
-                         for (depth, dataline1), dataline2 in zip(data['success_counts'].items(),
-                                                                  data['total_counts'].values()):
-                             flattened_data['predictions'][pkey]['success_probabilities'] += list(
-                                 _np.array(dataline1) / _np.array(dataline2))
-
-         # Only do this if we've not already stored the success probabilities in the benchamrker.
-         if ('success_counts' in flattened_data) and ('total_counts' in flattened_data) \
-            and ('success_probabilities' not in flattened_data):
-             if aggregate:
-                 flattened_data['success_probabilities'] = [sc / tc if tc > 0 else _np.nan for sc,
-                                                            tc in zip(flattened_data['success_counts'],
-                                                                      flattened_data['total_counts'])]
-             else:
-                 flattened_data['success_probabilities'] = [[sc / tc if tc > 0 else _np.nan for sc, tc in zip(
-                     scpass, tcpass)] for scpass, tcpass in zip(flattened_data['success_counts'],
-                                                                flattened_data['total_counts'])]
-
-         return flattened_data
-
-     def test_pass_stability(self, formatdata=False, verbosity=1):
-
-         assert(self.multids is not None), \
-             "Can only run the stability analysis if a MultiDataSet is contained in this Benchmarker!"
-
-         if not formatdata:
-             assert('success-fail' in self.multids.keys()), "Must have generated/imported a success-fail format DataSet!"
-         else:
-             if 'success-fail' not in self.multids.keys():
-                 if verbosity > 0:
-                     print("No success/fail dataset found, so first creating this dataset from the full data...", end='')
-                 self.generate_success_or_fail_dataset()
-                 if verbosity > 0:
-                     print("complete.")
-
-         if len(self.multids['success-fail']) > 1:
-             self.dscomparator = _dcomp.DataComparator(self.multids['success-fail'], allow_bad_circuits=True)
-             self.dscomparator.run(verbosity=verbosity)
-
-     def generate_success_or_fail_dataset(self, overwrite=False):
-         """
-         """
-
-         assert('standard' in self.multids.keys())
-         if not overwrite:
-             assert('success-fail' not in self.multids.keys())
-
-         sfmultids = _multids.MultiDataSet()
-
-         for ds_ind, ds in self.multids['standard'].items():
-             sfds = _stdds.DataSet(outcome_labels=['success', 'fail'], collision_action=ds.collisionAction)
-             for circ, dsrow in ds.items(strip_occurrence_tags=True):
-                 try:
-                     scounts = dsrow[dsrow.aux[self.success_key]]
-                 except:
-                     scounts = 0
-                 tcounts = dsrow.total
-                 sfds.add_count_dict(circ, {'success': scounts, 'fail': tcounts - scounts}, aux=dsrow.aux)
-
-             sfds.done_adding_data()
-             sfmultids.add_dataset(ds_ind, sfds)
-
-         self.multids['success-fail'] = sfmultids
-
-     # def get_all_data(self):
-
-     #     for circ
-
-     def summary_data(self, datatype, specindex, qubits=None):
-
-         spec = self._specs[specindex]
-         structure = spec.get_structure()
-         if len(structure) == 1:
-             if qubits is None:
-                 qubits = structure[0]
-
-         assert(qubits in structure), "Invalid choice of qubits for this spec!"
-
-         return self.pass_summary_data[specindex][qubits][datatype]
-
-     #def getauxillary_data(self, datatype, specindex, qubits=None):
-
-     #def get_predicted_summary_data(self, prediction, datatype, specindex, qubits=None):
-
-     def create_summary_data(self, predictions=None, verbosity=2, auxtypes=None):
-         """
-         todo
-         """
-         if predictions is None:
-             predictions = dict()
-         if auxtypes is None:
-             auxtypes = []
-         assert(self.multids is not None), "Cannot generate summary data without a DataSet!"
-         assert('standard' in self.multids.keys()), "Currently only works for standard dataset!"
-         useds = 'standard'
-         # We can't use the success-fail dataset if there's any simultaneous benchmarking. Not in
-         # it's current format anyway.
-
-         summarydata = {}
-         aux = {}
-         globalsummarydata = {}
-         predsummarydata = {}
-         predds = None
-         preddskey = None
-         for pkey in predictions.keys():
-             predsummarydata[pkey] = {}
-             if isinstance(predictions[pkey], _stdds.DataSet):
-                 assert(predds is None), "Can't have two DataSet predictions!"
-                 predds = predictions[pkey]
-                 preddskey = pkey
-             else:
-                 assert(isinstance(predictions[pkey], _oplessmodel.SuccessFailModel)
-                        ), "If not a DataSet must be an ErrorRatesModel!"
-
-         datatypes = ['success_counts', 'total_counts', 'hamming_distance_counts', 'success_probabilities']
-         if self.dscomparator is not None:
-             stabdatatypes = ['tvds', 'pvals', 'jsds', 'llrs', 'sstvds']
-         else:
-             stabdatatypes = []
-
-         #preddtypes = ('success_probabilities', )
-         auxtypes = ['twoQgate_count', 'depth', 'target', 'width', 'circuit_index'] + auxtypes
-
-         def _get_datatype(datatype, dsrow, circ, target, qubits):
-
-             if datatype == 'success_counts':
-                 return _analysis.marginalized_success_counts(dsrow, circ, target, qubits)
-             elif datatype == 'total_counts':
-                 return dsrow.total
-             elif datatype == 'hamming_distance_counts':
-                 return _analysis.marginalized_hamming_distance_counts(dsrow, circ, target, qubits)
-             elif datatype == 'success_probabilities':
-                 sc = _analysis.marginalized_success_counts(dsrow, circ, target, qubits)
-                 tc = dsrow.total
-                 if tc == 0:
-                     return _np.nan
-                 else:
-                     return sc / tc
-             else:
-                 raise ValueError("Unknown data type!")
-
-         numpasses = len(self.multids[useds].keys())
-
-         for ds_ind in self.multids[useds].keys():
-
-             if verbosity > 0:
-                 print(" - Processing data from pass {} of {}. Percent complete:".format(ds_ind + 1,
-                                                                                         len(self.multids[useds])))
-
-             #circuits = {}
-             numcircuits = len(self.multids[useds][ds_ind].keys())
-             percent = 0
-
-             if preddskey is None or ds_ind > 0:
-                 iterator = zip(self.multids[useds][ds_ind].items(strip_occurrence_tags=True),
-                                self.multids[useds].auxInfo.values(), _cycle(zip([None, ], [None, ])))
-             else:
-                 iterator = zip(self.multids[useds][ds_ind].items(strip_occurrence_tags=True),
-                                self.multids[useds].auxInfo.values(),
-                                predds.items(strip_occurrence_tags=True))
-
-             for i, ((circ, dsrow), auxdict, (pcirc, pdsrow)) in enumerate(iterator):
-
-                 if pcirc is not None:
-                     if not circ == pcirc:
-                         print('-{}-'.format(i))
-                         pdsrow = predds[circ]
-                         _warnings.warn("Predicted DataSet is ordered differently to the main DataSet!"
-                                        + "Reverting to potentially slow dictionary hashing!")
-
-                 if verbosity > 0:
-                     if _np.floor(100 * i / numcircuits) >= percent:
-                         percent += 1
-                         if percent in (1, 26, 51, 76):
-                             print("\n {},".format(percent), end='')
-                         else:
-                             print("{},".format(percent), end='')
-                         if percent == 100:
-                             print('')
-
-                 speckeys = auxdict['spec']
-                 try:
-                     depth = auxdict['depth']
-                 except:
-                     depth = auxdict['length']
-                 target = auxdict['target']
-
-                 if isinstance(speckeys, str):
-                     speckeys = [speckeys]
-
-                 for speckey in speckeys:
-                     specind = self._speckeys.index(speckey)
-                     spec = self._specs[specind]
-                     structure = spec.get_structure()
-
-                     # If we've not yet encountered this specind, we create the required dictionaries to store the
-                     # summary data from the circuits associated with that spec.
-                     if specind not in summarydata.keys():
-
-                         assert(ds_ind == 0)
-                         summarydata[specind] = {qubits: {datatype: {}
-                                                          for datatype in datatypes} for qubits in structure}
-                         aux[specind] = {qubits: {auxtype: {} for auxtype in auxtypes} for qubits in structure}
-
-                         # Only do predictions on the first pass dataset.
-                         for pkey in predictions.keys():
-                             predsummarydata[pkey][specind] = {}
-                         for pkey in predictions.keys():
-                             if pkey == preddskey:
-                                 predsummarydata[pkey][specind] = {qubits: {datatype: {} for datatype in datatypes}
-                                                                   for qubits in structure}
-                             else:
-                                 predsummarydata[pkey][specind] = {
-                                     qubits: {'success_probabilities': {}} for qubits in structure}
-
-                         globalsummarydata[specind] = {qubits: {datatype: {}
-                                                                for datatype in stabdatatypes} for qubits in structure}
-
-                     # If we've not yet encountered this depth, we create the list where the data for that depth
-                     # is stored.
-                     for qubits in structure:
-                         if depth not in summarydata[specind][qubits][datatypes[0]].keys():
-
-                             assert(ds_ind == 0)
-                             for datatype in datatypes:
-                                 summarydata[specind][qubits][datatype][depth] = [[] for i in range(numpasses)]
-                             for auxtype in auxtypes:
-                                 aux[specind][qubits][auxtype][depth] = []
-
-                             for pkey in predictions.keys():
-                                 if pkey == preddskey:
-                                     for datatype in datatypes:
-                                         predsummarydata[pkey][specind][qubits][datatype][depth] = []
-                                 else:
-                                     predsummarydata[pkey][specind][qubits]['success_probabilities'][depth] = []
-
-                             for datatype in stabdatatypes:
-                                 globalsummarydata[specind][qubits][datatype][depth] = []
-
-                     #print('---', i)
-                     for qubits_ind, qubits in enumerate(structure):
-                         for datatype in datatypes:
-                             x = _get_datatype(datatype, dsrow, circ, target, qubits)
-                             summarydata[specind][qubits][datatype][depth][ds_ind].append(x)
-                             # Only do predictions on the first pass dataset.
-                             if preddskey is not None and ds_ind == 0:
-                                 x = _get_datatype(datatype, pdsrow, circ, target, qubits)
-                                 predsummarydata[preddskey][specind][qubits][datatype][depth].append(x)
-
-                         # Only do predictions and aux on the first pass dataset.
-                         if ds_ind == 0:
-                             for auxtype in auxtypes:
-                                 if auxtype == 'twoQgate_count':
-                                     auxdata = circ.two_q_gate_count()
-                                 elif auxtype == 'depth':
-                                     auxdata = circ.depth
-                                 elif auxtype == 'target':
-                                     auxdata = target
-                                 elif auxtype == 'circuit_index':
-                                     auxdata = i
-                                 elif auxtype == 'width':
-                                     auxdata = len(qubits)
-                                 else:
-                                     auxdata = auxdict.get(auxtype, None)
-
-                                 aux[specind][qubits][auxtype][depth].append(auxdata)
-
-                             for pkey, predmodel in predictions.items():
-                                 if pkey != preddskey:
-                                     if set(circ.line_labels) != set(qubits):
-                                         trimmedcirc = circ.copy(editable=True)
-                                         for q in circ.line_labels:
-                                             if q not in qubits:
-                                                 trimmedcirc.delete_lines(q)
-                                     else:
-                                         trimmedcirc = circ
-
-                                     predsp = predmodel.probabilities(trimmedcirc)[('success',)]
-                                     predsummarydata[pkey][specind][qubits]['success_probabilities'][depth].append(
-                                         predsp)
-
-                             for datatype in stabdatatypes:
-                                 if datatype == 'tvds':
-                                     x = self.dscomparator.tvds.get(circ, _np.nan)
-                                 elif datatype == 'pvals':
-                                     x = self.dscomparator.pVals.get(circ, _np.nan)
-                                 elif datatype == 'jsds':
-                                     x = self.dscomparator.jsds.get(circ, _np.nan)
-                                 elif datatype == 'llrs':
-                                     x = self.dscomparator.llrs.get(circ, _np.nan)
-                                 globalsummarydata[specind][qubits][datatype][depth].append(x)
-
-             if verbosity > 0:
-                 print('')
-
-         # Record the data in the object at the end.
-         self.predicted_summary_data = predsummarydata
-         self.pass_summary_data = summarydata
-         self.global_summary_data = globalsummarydata
-         self.aux = aux
-
-     def analyze(self, specindices=None, analysis='adjusted', bootstraps=200, verbosity=1):
-         """
-         todo
-
-         todo: this partly ignores specindices
-         """
-         #self.create_summary_data(specindices=specindices, datatype=analysis, verbosity=verbosity)
-
-         for i, rbdatadict in self._summary_data.items():
-             #if not isinstance(rbdata, dict):
-             #    self._rbresults[i] = rb.analysis.std_practice_analysis(rbdata)
-             #else:
-             #self._rbresults[i] = {}
-             #for key in rbdata.items():
-             if verbosity > 0:
-                 print('- Running analysis for {} of {}'.format(i, len(self._summary_data)))
-             self._rbresults['adjusted'][i] = {}
-             self._rbresults['raw'][i] = {}
-             for j, (key, rbdata) in enumerate(rbdatadict.items()):
-                 if verbosity > 1:
-                     print(' - Running analysis for qubits {} ({} of {})'.format(key, j, len(rbdatadict)))
-                 if analysis == 'all' or analysis == 'raw':
-                     self._rbresults['raw'][i][key] = _analysis.std_practice_analysis(
-                         rbdata, bootstrap_samples=bootstraps, datatype='raw')
-                 if (analysis == 'all' and rbdata.datatype == 'hamming_distance_counts') or analysis == 'adjusted':
-                     self._rbresults['adjusted'][i][key] = _analysis.std_practice_analysis(
-                         rbdata, bootstrap_samples=bootstraps, datatype='adjusted')
-
-     def filter_experiments(self, numqubits=None, containqubits=None, onqubits=None, sampler=None,
-                            two_qubit_gate_prob=None, prefilter=None, benchmarktype=None):
-         """
-         todo
-
-         """
-
-         kept = {}
-         for i, spec in enumerate(self._specs):
-             structures = spec.get_structure()
-             for qubits in structures:
-
-                 keep = True
-
-                 if keep:
-                     if benchmarktype is not None:
-                         if spec.type != benchmarktype:
-                             keep = False
-
-                 if keep:
-                     if numqubits is not None:
-                         if len(qubits) != numqubits:
-                             keep = False
-
-                 if keep:
-                     if containqubits is not None:
-                         if not set(containqubits).issubset(qubits):
-                             keep = False
-
-                 if keep:
-                     if onqubits is not None:
-                         if set(qubits) != set(onqubits):
-                             keep = False
-
-                 if keep:
-                     if sampler is not None:
-                         if not spec._sampler == sampler:
-                             keep = False
-
-                 if keep:
-                     if two_qubit_gate_prob is not None:
-                         if not _np.allclose(two_qubit_gate_prob, spec.get_twoQgate_rate()):
-                             keep = False
-
-                 if keep:
-                     if i not in kept.keys():
-                         kept[i] = []
-                     kept[i].append(qubits)
-
-         if prefilter is not None:
-             dellist = []
-             for key in kept.keys():
-                 if key not in prefilter.keys():
-                     dellist.append(key)
-                 else:
-                     newlist = []
-                     for qubits in kept[key]:
-                         if qubits in prefilter[key]:
-                             newlist.append(qubits)
-                     if len(newlist) == 0:
-                         dellist.append(key)
-                     else:
-                         kept[key] = newlist
-
-             for key in dellist:
-                 del kept[key]
-
-         return kept
-
-     # for i, rbdata in self._adjusted_summary_data.items():
-     #     #if not isinstance(rbdata, dict):
-     #     #    self._rbresults[i] = rb.analysis.std_practice_analysis(rbdata)
-     #     #else:
-     #     #self._rbresults[i] = {}
-     #     #for key in rbdata.items():
-     #     self._adjusted_rbresults[i] = rb.analysis.std_practice_analysis(rbdata, bootstrap_samples=0,
-     #                                                                     asymptote=1/4**rbdata.number_of_qubits)
-
-
- # class RBResults(object):
- #     """
- #     An object to contain the results of an RB analysis
- #     """
-
- #     def __init__(self, data, rtype, fits):
- #         """
- #         Initialize an RBResults object.
-
- #         Parameters
- #         ----------
- #         data : RBSummaryDataset
- #             The RB summary data that the analysis was performed for.
-
- #         rtype : {'IE','AGI'}
- #             The type of RB error rate, corresponding to different dimension-dependent
- #             re-scalings of (1-p), where p is the RB decay constant in A + B*p^m.
-
- #         fits : dict
- #             A dictionary containing FitResults objects, obtained from one or more
- #             fits of the data (e.g., a fit with all A, B and p as free parameters and
- #             a fit with A fixed to 1/2^n).
- #         """
- #         self.data = data
- #         self.rtype = rtype
- #         self.fits = fits
-
- #     def plot(self, fitkey=None, decay=True, success_probabilities=True, size=(8, 5), ylim=None, xlim=None,
- #              legend=True, title=None, figpath=None):
- #         """
- #         Plots RB data and, optionally, a fitted exponential decay.
-
- #         Parameters
- #         ----------
- #         fitkey : dict key, optional
- #             The key of the self.fits dictionary to plot the fit for. If None, will
- #             look for a 'full' key (the key for a full fit to A + Bp^m if the standard
- #             analysis functions are used) and plot this if possible. It otherwise checks
- #             that there is only one key in the dict and defaults to this. If there are
- #             multiple keys and none of them are 'full', `fitkey` must be specified when
- #             `decay` is True.
-
- #         decay : bool, optional
- #             Whether to plot a fit, or just the data.
-
- #         success_probabilities : bool, optional
- #             Whether to plot the success probabilities distribution, as a violin plot. (as well
- #             as the *average* success probabilities at each length).
-
- #         size : tuple, optional
- #             The figure size
-
- #         ylim, xlim : tuple, optional
- #             The x and y limits for the figure.
-
- #         legend : bool, optional
- #             Whether to show a legend.
-
- #         title : str, optional
- #             A title to put on the figure.
-
- #         figpath : str, optional
- #             If specified, the figure is saved with this filename.
- #         """
-
- #         # Future : change to a plotly plot.
- #         try: import matplotlib.pyplot as _plt
- #         except ImportError: raise ValueError("This function requires you to install matplotlib!")
-
- #         if decay and fitkey is None:
- #             allfitkeys = list(self.fits.keys())
- #             if 'full' in allfitkeys: fitkey = 'full'
- #             else:
- #                 assert(len(allfitkeys) == 1), \
- #                     "There are multiple fits and none have the key 'full'. Please specify the fit to plot!"
- #                 fitkey = allfitkeys[0]
-
- #         _plt.figure(figsize=size)
- #         _plt.plot(self.data.lengths, self.data.ASPs, 'o', label='Average success probabilities')
-
- #         if decay:
- #             lengths = _np.linspace(0, max(self.data.lengths), 200)
- #             A = self.fits[fitkey].estimates['A']
- #             B = self.fits[fitkey].estimates['B']
- #             p = self.fits[fitkey].estimates['p']
- #             _plt.plot(lengths, A + B * p**lengths,
- #                       label='Fit, r = {:.2} +/- {:.1}'.format(self.fits[fitkey].estimates['r'],
- #                                                               self.fits[fitkey].stds['r']))
-
- #         if success_probabilities:
- #             _plt.violinplot(list(self.data.success_probabilities), self.data.lengths, points=10, widths=1.,
- #                             showmeans=False, showextrema=False, showmedians=False)  # , label='Success probabilities')
-
- #         if title is not None: _plt.title(title)
- #         _plt.ylabel("Success probability")
- #         _plt.xlabel("RB sequence length $(m)$")
- #         _plt.ylim(ylim)
- #         _plt.xlim(xlim)
-
- #         if legend: _plt.legend()
-
- #         if figpath is not None: _plt.savefig(figpath, dpi=1000)
- #         else: _plt.show()
-
- #         return
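Migration note: the deleted module itself marks its analysis dependencies as moved ("MOVED - and this module is deprecated & broken now"), and this release grows pygsti/protocols/rb.py by 450 lines (item 177 above), where RB experiment designs and decay fitting now live. Below is a minimal sketch of the protocol-based Clifford RB workflow that replaces this style of benchmarking, modeled on pyGSTi's RB tutorials; the class names and signatures here are assumptions that should be checked against the 0.9.13 documentation:

    import pygsti
    from pygsti.processors import QubitProcessorSpec, CliffordCompilationRules

    # An illustrative two-qubit device: X/Y pi/2 rotations plus CPHASE.
    qubit_labels = ['Q0', 'Q1']
    gate_names = ['Gxpi2', 'Gxmpi2', 'Gypi2', 'Gympi2', 'Gcphase']
    availability = {'Gcphase': [('Q0', 'Q1')]}
    pspec = QubitProcessorSpec(2, gate_names, availability=availability,
                               qubit_labels=qubit_labels)
    compilations = {
        'absolute': CliffordCompilationRules.create_standard(
            pspec, 'absolute', ('paulis', '1Qcliffords'), verbosity=0),
        'paulieq': CliffordCompilationRules.create_standard(
            pspec, 'paulieq', ('1Qcliffords', 'allcnots'), verbosity=0),
    }

    # Clifford RB experiment design: benchmark depths and circuits per depth.
    design = pygsti.protocols.CliffordRBDesign(pspec, compilations,
                                               depths=[0, 1, 2, 4, 8],
                                               circuits_per_depth=10,
                                               qubit_labels=qubit_labels)

    # For a self-contained demo, simulate data from a depolarizing model; on
    # hardware you would run design.all_circuits_needing_data instead.
    noisy_model = pygsti.models.create_crosstalk_free_model(
        pspec, depolarization_strengths={name: 0.01 for name in gate_names})
    dataset = pygsti.data.simulate_data(noisy_model,
                                        design.all_circuits_needing_data,
                                        num_samples=1000, seed=2024)
    data = pygsti.protocols.ProtocolData(design, dataset)

    # Fit the A + B*p^m RB decay and extract the RB error rate.
    protocol = pygsti.protocols.RandomizedBenchmarking()
    results = protocol.run(data)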