pyGSTi-0.9.12-cp38-cp38-win32.whl → pyGSTi-0.9.13-cp38-cp38-win32.whl

Files changed (225)
  1. pyGSTi-0.9.13.dist-info/METADATA +185 -0
  2. {pyGSTi-0.9.12.dist-info → pyGSTi-0.9.13.dist-info}/RECORD +211 -220
  3. {pyGSTi-0.9.12.dist-info → pyGSTi-0.9.13.dist-info}/WHEEL +1 -1
  4. pygsti/_version.py +2 -2
  5. pygsti/algorithms/contract.py +1 -1
  6. pygsti/algorithms/core.py +62 -35
  7. pygsti/algorithms/fiducialpairreduction.py +95 -110
  8. pygsti/algorithms/fiducialselection.py +17 -8
  9. pygsti/algorithms/gaugeopt.py +2 -2
  10. pygsti/algorithms/germselection.py +87 -77
  11. pygsti/algorithms/mirroring.py +0 -388
  12. pygsti/algorithms/randomcircuit.py +165 -1333
  13. pygsti/algorithms/rbfit.py +0 -234
  14. pygsti/baseobjs/basis.py +94 -396
  15. pygsti/baseobjs/errorgenbasis.py +0 -132
  16. pygsti/baseobjs/errorgenspace.py +0 -10
  17. pygsti/baseobjs/label.py +52 -168
  18. pygsti/baseobjs/opcalc/fastopcalc.cp38-win32.pyd +0 -0
  19. pygsti/baseobjs/opcalc/fastopcalc.pyx +2 -2
  20. pygsti/baseobjs/polynomial.py +13 -595
  21. pygsti/baseobjs/protectedarray.py +72 -132
  22. pygsti/baseobjs/statespace.py +1 -0
  23. pygsti/circuits/__init__.py +1 -1
  24. pygsti/circuits/circuit.py +753 -504
  25. pygsti/circuits/circuitconstruction.py +0 -4
  26. pygsti/circuits/circuitlist.py +47 -5
  27. pygsti/circuits/circuitparser/__init__.py +8 -8
  28. pygsti/circuits/circuitparser/fastcircuitparser.cp38-win32.pyd +0 -0
  29. pygsti/circuits/circuitstructure.py +3 -3
  30. pygsti/circuits/cloudcircuitconstruction.py +27 -14
  31. pygsti/data/datacomparator.py +4 -9
  32. pygsti/data/dataset.py +51 -46
  33. pygsti/data/hypothesistest.py +0 -7
  34. pygsti/drivers/bootstrap.py +0 -49
  35. pygsti/drivers/longsequence.py +46 -10
  36. pygsti/evotypes/basereps_cython.cp38-win32.pyd +0 -0
  37. pygsti/evotypes/chp/opreps.py +0 -61
  38. pygsti/evotypes/chp/statereps.py +0 -32
  39. pygsti/evotypes/densitymx/effectcreps.cpp +9 -10
  40. pygsti/evotypes/densitymx/effectreps.cp38-win32.pyd +0 -0
  41. pygsti/evotypes/densitymx/effectreps.pyx +1 -1
  42. pygsti/evotypes/densitymx/opreps.cp38-win32.pyd +0 -0
  43. pygsti/evotypes/densitymx/opreps.pyx +2 -2
  44. pygsti/evotypes/densitymx/statereps.cp38-win32.pyd +0 -0
  45. pygsti/evotypes/densitymx/statereps.pyx +1 -1
  46. pygsti/evotypes/densitymx_slow/effectreps.py +7 -23
  47. pygsti/evotypes/densitymx_slow/opreps.py +16 -23
  48. pygsti/evotypes/densitymx_slow/statereps.py +10 -3
  49. pygsti/evotypes/evotype.py +39 -2
  50. pygsti/evotypes/stabilizer/effectreps.cp38-win32.pyd +0 -0
  51. pygsti/evotypes/stabilizer/effectreps.pyx +0 -4
  52. pygsti/evotypes/stabilizer/opreps.cp38-win32.pyd +0 -0
  53. pygsti/evotypes/stabilizer/opreps.pyx +0 -4
  54. pygsti/evotypes/stabilizer/statereps.cp38-win32.pyd +0 -0
  55. pygsti/evotypes/stabilizer/statereps.pyx +1 -5
  56. pygsti/evotypes/stabilizer/termreps.cp38-win32.pyd +0 -0
  57. pygsti/evotypes/stabilizer/termreps.pyx +0 -7
  58. pygsti/evotypes/stabilizer_slow/effectreps.py +0 -22
  59. pygsti/evotypes/stabilizer_slow/opreps.py +0 -4
  60. pygsti/evotypes/stabilizer_slow/statereps.py +0 -4
  61. pygsti/evotypes/statevec/effectreps.cp38-win32.pyd +0 -0
  62. pygsti/evotypes/statevec/effectreps.pyx +1 -1
  63. pygsti/evotypes/statevec/opreps.cp38-win32.pyd +0 -0
  64. pygsti/evotypes/statevec/opreps.pyx +2 -2
  65. pygsti/evotypes/statevec/statereps.cp38-win32.pyd +0 -0
  66. pygsti/evotypes/statevec/statereps.pyx +1 -1
  67. pygsti/evotypes/statevec/termreps.cp38-win32.pyd +0 -0
  68. pygsti/evotypes/statevec/termreps.pyx +0 -7
  69. pygsti/evotypes/statevec_slow/effectreps.py +0 -3
  70. pygsti/evotypes/statevec_slow/opreps.py +0 -5
  71. pygsti/extras/__init__.py +0 -1
  72. pygsti/extras/drift/signal.py +1 -1
  73. pygsti/extras/drift/stabilityanalyzer.py +3 -1
  74. pygsti/extras/interpygate/__init__.py +12 -0
  75. pygsti/extras/interpygate/core.py +0 -36
  76. pygsti/extras/interpygate/process_tomography.py +44 -10
  77. pygsti/extras/rpe/rpeconstruction.py +0 -2
  78. pygsti/forwardsims/__init__.py +1 -0
  79. pygsti/forwardsims/forwardsim.py +50 -93
  80. pygsti/forwardsims/mapforwardsim.py +78 -20
  81. pygsti/forwardsims/mapforwardsim_calc_densitymx.cp38-win32.pyd +0 -0
  82. pygsti/forwardsims/mapforwardsim_calc_densitymx.pyx +65 -66
  83. pygsti/forwardsims/mapforwardsim_calc_generic.py +91 -13
  84. pygsti/forwardsims/matrixforwardsim.py +72 -17
  85. pygsti/forwardsims/termforwardsim.py +9 -111
  86. pygsti/forwardsims/termforwardsim_calc_stabilizer.cp38-win32.pyd +0 -0
  87. pygsti/forwardsims/termforwardsim_calc_statevec.cp38-win32.pyd +0 -0
  88. pygsti/forwardsims/termforwardsim_calc_statevec.pyx +0 -651
  89. pygsti/forwardsims/torchfwdsim.py +265 -0
  90. pygsti/forwardsims/weakforwardsim.py +2 -2
  91. pygsti/io/__init__.py +1 -2
  92. pygsti/io/mongodb.py +0 -2
  93. pygsti/io/stdinput.py +6 -22
  94. pygsti/layouts/copalayout.py +10 -12
  95. pygsti/layouts/distlayout.py +0 -40
  96. pygsti/layouts/maplayout.py +103 -25
  97. pygsti/layouts/matrixlayout.py +99 -60
  98. pygsti/layouts/prefixtable.py +1534 -52
  99. pygsti/layouts/termlayout.py +1 -1
  100. pygsti/modelmembers/instruments/instrument.py +3 -3
  101. pygsti/modelmembers/instruments/tpinstrument.py +2 -2
  102. pygsti/modelmembers/modelmember.py +0 -17
  103. pygsti/modelmembers/operations/__init__.py +3 -4
  104. pygsti/modelmembers/operations/affineshiftop.py +206 -0
  105. pygsti/modelmembers/operations/composederrorgen.py +1 -1
  106. pygsti/modelmembers/operations/composedop.py +1 -24
  107. pygsti/modelmembers/operations/denseop.py +5 -5
  108. pygsti/modelmembers/operations/eigpdenseop.py +2 -2
  109. pygsti/modelmembers/operations/embeddederrorgen.py +1 -1
  110. pygsti/modelmembers/operations/embeddedop.py +0 -1
  111. pygsti/modelmembers/operations/experrorgenop.py +5 -2
  112. pygsti/modelmembers/operations/fullarbitraryop.py +1 -0
  113. pygsti/modelmembers/operations/fullcptpop.py +2 -2
  114. pygsti/modelmembers/operations/fulltpop.py +28 -6
  115. pygsti/modelmembers/operations/fullunitaryop.py +5 -4
  116. pygsti/modelmembers/operations/lindbladcoefficients.py +93 -78
  117. pygsti/modelmembers/operations/lindbladerrorgen.py +268 -441
  118. pygsti/modelmembers/operations/linearop.py +7 -27
  119. pygsti/modelmembers/operations/opfactory.py +1 -1
  120. pygsti/modelmembers/operations/repeatedop.py +1 -24
  121. pygsti/modelmembers/operations/staticstdop.py +1 -1
  122. pygsti/modelmembers/povms/__init__.py +3 -3
  123. pygsti/modelmembers/povms/basepovm.py +7 -36
  124. pygsti/modelmembers/povms/complementeffect.py +4 -9
  125. pygsti/modelmembers/povms/composedeffect.py +0 -320
  126. pygsti/modelmembers/povms/computationaleffect.py +1 -1
  127. pygsti/modelmembers/povms/computationalpovm.py +3 -1
  128. pygsti/modelmembers/povms/effect.py +3 -5
  129. pygsti/modelmembers/povms/marginalizedpovm.py +3 -81
  130. pygsti/modelmembers/povms/tppovm.py +74 -2
  131. pygsti/modelmembers/states/__init__.py +2 -5
  132. pygsti/modelmembers/states/composedstate.py +0 -317
  133. pygsti/modelmembers/states/computationalstate.py +3 -3
  134. pygsti/modelmembers/states/cptpstate.py +4 -4
  135. pygsti/modelmembers/states/densestate.py +10 -8
  136. pygsti/modelmembers/states/fullpurestate.py +0 -24
  137. pygsti/modelmembers/states/purestate.py +1 -1
  138. pygsti/modelmembers/states/state.py +5 -6
  139. pygsti/modelmembers/states/tpstate.py +28 -10
  140. pygsti/modelmembers/term.py +3 -6
  141. pygsti/modelmembers/torchable.py +50 -0
  142. pygsti/modelpacks/_modelpack.py +1 -1
  143. pygsti/modelpacks/smq1Q_ZN.py +3 -1
  144. pygsti/modelpacks/smq2Q_XXYYII.py +2 -1
  145. pygsti/modelpacks/smq2Q_XY.py +3 -3
  146. pygsti/modelpacks/smq2Q_XYI.py +2 -2
  147. pygsti/modelpacks/smq2Q_XYICNOT.py +3 -3
  148. pygsti/modelpacks/smq2Q_XYICPHASE.py +3 -3
  149. pygsti/modelpacks/smq2Q_XYXX.py +1 -1
  150. pygsti/modelpacks/smq2Q_XYZICNOT.py +3 -3
  151. pygsti/modelpacks/smq2Q_XYZZ.py +1 -1
  152. pygsti/modelpacks/stdtarget.py +0 -121
  153. pygsti/models/cloudnoisemodel.py +1 -2
  154. pygsti/models/explicitcalc.py +3 -3
  155. pygsti/models/explicitmodel.py +3 -13
  156. pygsti/models/fogistore.py +5 -3
  157. pygsti/models/localnoisemodel.py +1 -2
  158. pygsti/models/memberdict.py +0 -12
  159. pygsti/models/model.py +801 -68
  160. pygsti/models/modelconstruction.py +4 -4
  161. pygsti/models/modelnoise.py +2 -2
  162. pygsti/models/modelparaminterposer.py +1 -1
  163. pygsti/models/oplessmodel.py +1 -1
  164. pygsti/models/qutrit.py +15 -14
  165. pygsti/objectivefns/objectivefns.py +75 -140
  166. pygsti/objectivefns/wildcardbudget.py +2 -7
  167. pygsti/optimize/__init__.py +1 -0
  168. pygsti/optimize/arraysinterface.py +28 -0
  169. pygsti/optimize/customcg.py +0 -12
  170. pygsti/optimize/customlm.py +129 -323
  171. pygsti/optimize/customsolve.py +2 -2
  172. pygsti/optimize/optimize.py +0 -84
  173. pygsti/optimize/simplerlm.py +841 -0
  174. pygsti/optimize/wildcardopt.py +19 -598
  175. pygsti/protocols/confidenceregionfactory.py +28 -14
  176. pygsti/protocols/estimate.py +31 -14
  177. pygsti/protocols/gst.py +238 -142
  178. pygsti/protocols/modeltest.py +19 -12
  179. pygsti/protocols/protocol.py +9 -37
  180. pygsti/protocols/rb.py +450 -79
  181. pygsti/protocols/treenode.py +8 -2
  182. pygsti/protocols/vb.py +108 -206
  183. pygsti/protocols/vbdataframe.py +1 -1
  184. pygsti/report/factory.py +0 -15
  185. pygsti/report/fogidiagram.py +1 -17
  186. pygsti/report/modelfunction.py +12 -3
  187. pygsti/report/mpl_colormaps.py +1 -1
  188. pygsti/report/plothelpers.py +11 -3
  189. pygsti/report/report.py +16 -0
  190. pygsti/report/reportables.py +41 -37
  191. pygsti/report/templates/offline/pygsti_dashboard.css +6 -0
  192. pygsti/report/templates/offline/pygsti_dashboard.js +12 -0
  193. pygsti/report/workspace.py +2 -14
  194. pygsti/report/workspaceplots.py +328 -505
  195. pygsti/tools/basistools.py +9 -36
  196. pygsti/tools/edesigntools.py +124 -96
  197. pygsti/tools/fastcalc.cp38-win32.pyd +0 -0
  198. pygsti/tools/fastcalc.pyx +35 -81
  199. pygsti/tools/internalgates.py +151 -15
  200. pygsti/tools/jamiolkowski.py +5 -5
  201. pygsti/tools/lindbladtools.py +19 -11
  202. pygsti/tools/listtools.py +0 -114
  203. pygsti/tools/matrixmod2.py +1 -1
  204. pygsti/tools/matrixtools.py +173 -339
  205. pygsti/tools/nameddict.py +1 -1
  206. pygsti/tools/optools.py +154 -88
  207. pygsti/tools/pdftools.py +0 -25
  208. pygsti/tools/rbtheory.py +3 -320
  209. pygsti/tools/slicetools.py +64 -12
  210. pyGSTi-0.9.12.dist-info/METADATA +0 -157
  211. pygsti/algorithms/directx.py +0 -711
  212. pygsti/evotypes/qibo/__init__.py +0 -33
  213. pygsti/evotypes/qibo/effectreps.py +0 -78
  214. pygsti/evotypes/qibo/opreps.py +0 -376
  215. pygsti/evotypes/qibo/povmreps.py +0 -98
  216. pygsti/evotypes/qibo/statereps.py +0 -174
  217. pygsti/extras/rb/__init__.py +0 -13
  218. pygsti/extras/rb/benchmarker.py +0 -957
  219. pygsti/extras/rb/dataset.py +0 -378
  220. pygsti/extras/rb/io.py +0 -814
  221. pygsti/extras/rb/simulate.py +0 -1020
  222. pygsti/io/legacyio.py +0 -385
  223. pygsti/modelmembers/povms/denseeffect.py +0 -142
  224. {pyGSTi-0.9.12.dist-info → pyGSTi-0.9.13.dist-info}/LICENSE +0 -0
  225. {pyGSTi-0.9.12.dist-info → pyGSTi-0.9.13.dist-info}/top_level.txt +0 -0
pygsti/protocols/treenode.py CHANGED
@@ -106,11 +106,14 @@ class TreeNode(object):
  #else: # just take from already-loaded edesign
  # child_id_suffixes = preloaded_edesign._dirs.copy()

- self._dirs = {nm: subdir for subdir, nm in doc['children'].items()}
+ def _to_immutable(x):
+ return tuple(x) if isinstance(x, list) else x
+
+ self._dirs = {_to_immutable(nm): subdir for subdir, nm in doc['children'].items()}
  self._vals = {}

  for subdir, child_id in doc['children_ids'].items():
- child_nm = doc['children'][subdir]
+ child_nm = _to_immutable(doc['children'][subdir])
  child_doc = mongodb[doc['children_collection_name']].find_one({'_id': child_id})
  if child_doc is None: # if there's no child document, generate the child value later
  continue # don't load anything - create child value on demand
@@ -134,6 +137,9 @@ class TreeNode(object):
  def __len__(self):
  return len(self._dirs)

+ def __iter__(self):
+ return iter(self._dirs)
+
  def items(self):
  """
  An iterator over the `(child_name, child_node)` pairs of this node.
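
The `_to_immutable` helper added above is needed because child names that were tuples before serialization come back from MongoDB/JSON as lists, and lists cannot be used as dictionary keys. A minimal standalone sketch of the idea (the `children` document below is hypothetical, not taken from the diff):

```python
def _to_immutable(x):
    # Lists are unhashable; convert them back to tuples so they can key a dict.
    return tuple(x) if isinstance(x, list) else x

# Hypothetical document as it might come back from MongoDB: tuple keys became lists.
children = {'subdir_A': ['Q0', 'Q1'], 'subdir_B': 'single_name'}
dirs = {_to_immutable(nm): subdir for subdir, nm in children.items()}
assert ('Q0', 'Q1') in dirs and 'single_name' in dirs
```
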
pygsti/protocols/vb.py CHANGED
@@ -11,11 +11,12 @@ Volumetric Benchmarking Protocol objects
  #***************************************************************************************************

  import numpy as _np
+ import copy as _copy

- from pygsti.protocols import protocol as _proto
- from pygsti.models.oplessmodel import SuccessFailModel as _SuccessFailModel
  from pygsti import tools as _tools
  from pygsti.algorithms import randomcircuit as _rc
+ from pygsti.protocols import protocol as _proto
+ from pygsti.models.oplessmodel import SuccessFailModel as _SuccessFailModel


  class ByDepthDesign(_proto.CircuitListsDesign):
@@ -67,6 +68,25 @@ class ByDepthDesign(_proto.CircuitListsDesign):
  mapped_qubit_labels = self._mapped_qubit_labels(mapper)
  return ByDepthDesign(self.depths, mapped_circuit_lists, mapped_qubit_labels, remove_duplicates=False)

+ def truncate_to_lists(self, list_indices_to_keep):
+ """
+ Truncates this experiment design by only keeping a subset of its circuit lists.
+
+ Parameters
+ ----------
+ list_indices_to_keep : iterable
+ A list of the (integer) list indices to keep.
+
+ Returns
+ -------
+ ByDepthDesign
+ The truncated experiment design.
+ """
+ ret = _copy.deepcopy(self) # Works for derived classes too
+ ret.depths = [self.depths[i] for i in list_indices_to_keep]
+ ret.circuit_lists = [self.circuit_lists[i] for i in list_indices_to_keep]
+ return ret
+

  class BenchmarkingDesign(ByDepthDesign):
  """
@@ -98,12 +118,27 @@ class BenchmarkingDesign(ByDepthDesign):
  Whether to remove duplicates when automatically creating
  all the circuits that need data.
  """
+
+ paired_with_circuit_attrs = None
+ """List of attributes which are paired up with circuit lists
+
+ These will be saved as external files during serialization,
+ and are truncated when circuit lists are truncated.
+ """

  def __init__(self, depths, circuit_lists, ideal_outs, qubit_labels=None, remove_duplicates=False):
  assert(len(depths) == len(ideal_outs))
  super().__init__(depths, circuit_lists, qubit_labels, remove_duplicates)
+
  self.idealout_lists = ideal_outs
- self.auxfile_types['idealout_lists'] = 'json'
+
+ if self.paired_with_circuit_attrs is None:
+ self.paired_with_circuit_attrs = ['idealout_lists']
+ else:
+ self.paired_with_circuit_attrs.insert(0, 'idealout_lists')
+
+ for paired_attr in self.paired_with_circuit_attrs:
+ self.auxfile_types[paired_attr] = 'json'

  def _mapped_circuits_and_idealouts_by_depth(self, mapper):
  """ Used in derived classes """
@@ -133,6 +168,76 @@ class BenchmarkingDesign(ByDepthDesign):
  mapped_qubit_labels = self._mapped_qubit_labels(mapper)
  return BenchmarkingDesign(self.depths, mapped_circuit_lists, list(self.idealout_lists),
  mapped_qubit_labels, remove_duplicates=False)
+
+ def truncate_to_lists(self, list_indices_to_keep):
+ """
+ Truncates this experiment design by only keeping a subset of its circuit lists.
+
+ Parameters
+ ----------
+ list_indices_to_keep : iterable
+ A list of the (integer) list indices to keep.
+
+ Returns
+ -------
+ BenchmarkingDesign
+ The truncated experiment design.
+ """
+ ret = _copy.deepcopy(self) # Works for derived classes too
+ ret.depths = [self.depths[i] for i in list_indices_to_keep]
+ ret.circuit_lists = [self.circuit_lists[i] for i in list_indices_to_keep]
+ for paired_attr in self.paired_with_circuit_attrs:
+ val = getattr(self, paired_attr)
+ new_val = [val[i] for i in list_indices_to_keep]
+ setattr(ret, paired_attr, new_val)
+ return ret
+
+ def _truncate_to_circuits_inplace(self, circuits_to_keep):
+ truncated_circuit_lists = []
+ paired_attr_lists_list = [getattr(self, paired_attr) for paired_attr in self.paired_with_circuit_attrs]
+ truncated_paired_attr_lists_list = [[] for _ in range(len(self.paired_with_circuit_attrs))]
+ for list_idx, circuits in enumerate(self.circuit_lists):
+ paired_attrs = [pal[list_idx] for pal in paired_attr_lists_list]
+ # Do the same filtering as CircuitList.truncate, but drag along any paired attributes
+ new_data = list(zip(*filter(lambda ci: ci[0] in set(circuits_to_keep), zip(circuits, *paired_attrs))))
+ if len(new_data):
+ truncated_circuit_lists.append(new_data[0])
+ for i, attr_data in enumerate(new_data[1:]):
+ truncated_paired_attr_lists_list[i].append(attr_data)
+ else:
+ # If we have truncated all circuits, append empty lists
+ truncated_circuit_lists.append([])
+ truncated_paired_attr_lists_list.append([[] for _ in range(len(self.paired_with_circuit_attrs))])
+
+ self.circuit_lists = truncated_circuit_lists
+ for paired_attr, paired_attr_lists in zip(self.paired_with_circuit_attrs, truncated_paired_attr_lists_list):
+ setattr(self, paired_attr, paired_attr_lists)
+ super()._truncate_to_circuits_inplace(circuits_to_keep)
+
+ def _truncate_to_design_inplace(self, other_design):
+ truncated_circuit_lists = []
+ paired_attr_lists_list = [getattr(self, paired_attr) for paired_attr in self.paired_with_circuit_attrs]
+ truncated_paired_attr_lists_list = [[] for _ in range(len(self.paired_with_circuit_attrs))]
+ for list_idx, circuits in enumerate(self.circuit_lists):
+ paired_attrs = [pal[list_idx] for pal in paired_attr_lists_list]
+ # Do the same filtering as CircuitList.truncate, but drag along any paired attributes
+ new_data = list(zip(*filter(lambda ci: ci[0] in set(other_design.circuit_lists[list_idx]), zip(circuits, *paired_attrs))))
+ if len(new_data):
+ truncated_circuit_lists.append(new_data[0])
+ for i, attr_data in enumerate(new_data[1:]):
+ truncated_paired_attr_lists_list[i].append(attr_data)
+ else:
+ # If we have truncated all circuits, append empty lists
+ truncated_circuit_lists.append([])
+ truncated_paired_attr_lists_list.append([[] for _ in range(len(self.paired_with_circuit_attrs))])
+
+ self.circuit_lists = truncated_circuit_lists
+ for paired_attr, paired_attr_lists in zip(self.paired_with_circuit_attrs, truncated_paired_attr_lists_list):
+ setattr(self, paired_attr, paired_attr_lists)
+ super()._truncate_to_design_inplace(other_design)
+
+ def _truncate_to_available_data_inplace(self, dataset):
+ self._truncate_to_circuits_inplace(set(dataset.keys()))


  class PeriodicMirrorCircuitDesign(BenchmarkingDesign):
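
The truncation helpers above all rely on the same zip/filter idiom: pair each circuit with its per-circuit data, filter on membership, then unzip. A standalone illustration with plain strings in place of `Circuit` objects:

```python
circuits   = ['c0', 'c1', 'c2', 'c3']   # stand-ins for Circuit objects
ideal_outs = ['00', '01', '10', '11']   # paired one-to-one with `circuits`
keep = {'c1', 'c3'}

new_data = list(zip(*filter(lambda ci: ci[0] in keep, zip(circuits, ideal_outs))))
kept_circuits, kept_ideal_outs = new_data
print(kept_circuits)    # ('c1', 'c3')
print(kept_ideal_outs)  # ('01', '11')
```
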
@@ -351,10 +456,6 @@ class PeriodicMirrorCircuitDesign(BenchmarkingDesign):
  self.descriptor)


-
-
-
-

  class SummaryStatistics(_proto.Protocol):
  """
@@ -521,17 +622,6 @@ class SummaryStatistics(_proto.Protocol):

  return self._compute_dict(data, self.circuit_statistics, _get_circuit_values, for_passes="first")

- # def compute_dscmp_data(self, data, dscomparator):
-
- # def get_dscmp_values(icirc, circ, dsrow, idealout):
- # ret = {'tvds': dscomparator.tvds.get(circ, _np.nan),
- # 'pvals': dscomparator.pVals.get(circ, _np.nan),
- # 'jsds': dscomparator.jsds.get(circ, _np.nan),
- # 'llrs': dscomparator.llrs.get(circ, _np.nan)}
- # return ret
-
- # return self.compute_dict(data, "dscmpdata", self.dsmp_statistics, get_dscmp_values, for_passes="none")
-
  def _compute_predicted_probs(self, data, model):
  """
  Compute the predicted success probabilities of `model` given `data`.
@@ -909,17 +999,6 @@ class ByDepthSummaryStatistics(SummaryStatistics):
  results.statistics[statistic_nm] = statistic_per_dwc
  return results

- # This is currently not used I think
- # class PredictedByDepthSummaryStatsConstructor(ByDepthSummaryStatsConstructor):
- # """
- # Runs a volumetric benchmark on success/fail data predicted from a model
-
- # """
- # def __init__(self, model_or_summary_data, depths='all', statistic='mean',
- # dscomparator=None, name=None):
- # super().__init__(depths, 'success_probabilities', statistic,
- # dscomparator, model_or_summary_data, name)
-

  class SummaryStatisticsResults(_proto.ProtocolResults):
  """
@@ -951,180 +1030,3 @@ class SummaryStatisticsResults(_proto.ProtocolResults):
  "SummaryStatisticsResults.statistics dict should be populated with NamedDicts, not %s" % str(type(v))
  stats[k] = v
  return stats
-
-
- #BDB = ByDepthBenchmark
- #VBGrid = VolumetricBenchmarkGrid
- #VBResults = VolumetricBenchmarkingResults # shorthand
-
- #Add something like this?
- #class PassStabilityTest(_proto.Protocol):
- # pass
-
- # Commented out as we are not using this currently. todo: revive or delete this in the future.
- # class VolumetricBenchmarkGrid(Benchmark):
- # """ A protocol that creates an entire depth vs. width grid of volumetric benchmark values """
-
- # def __init__(self, depths='all', widths='all', datatype='success_probabilities',
- # paths='all', statistic='mean', aggregate=True, rescaler='auto',
- # dscomparator=None, name=None):
-
- # super().__init__(name)
- # self.postproc = VolumetricBenchmarkGridPP(depths, widths, datatype, paths, statistic, aggregate, self.name)
- # self.dscomparator = dscomparator
- # self.rescaler = rescaler
-
- # self.auxfile_types['postproc'] = 'protocolobj'
- # self.auxfile_types['dscomparator'] = 'pickle'
- # self.auxfile_types['rescaler'] = 'reset' # punt for now - fix later
-
- # def run(self, data, memlimit=None, comm=None):
- # #Since we know that VolumetricBenchmark protocol objects Create a single results just fill
- # # in data under the result object's 'volumetric_benchmarks' and 'failure_counts'
- # # keys, and these are indexed by width and depth (even though each VolumetricBenchmark
- # # only contains data for a single width), we can just "merge" the VB results of all
- # # the underlying by-depth datas, so long as they're all for different widths.
-
- # #Then run resulting data normally, giving a results object
- # # with "top level" dicts correpsonding to different paths
- # VB = ByDepthBenchmark(self.postproc.depths, self.postproc.datatype, self.postproc.statistic,
- # self.rescaler, self.dscomparator, name=self.name)
- # separate_results = _proto.SimpleRunner(VB).run(data, memlimit, comm)
- # pp_results = self.postproc.run(separate_results, memlimit, comm)
- # pp_results.protocol = self
- # return pp_results
-
-
- # Commented out as we are not using this currently. todo: revive this in the future.
- # class VolumetricBenchmark(_proto.ProtocolPostProcessor):
- # """ A postprocesor that constructs a volumetric benchmark from existing results. """
-
- # def __init__(self, depths='all', widths='all', datatype='polarization',
- # statistic='mean', paths='all', edesigntype=None, aggregate=True,
- # name=None):
-
- # super().__init__(name)
- # self.depths = depths
- # self.widths = widths
- # self.datatype = datatype
- # self.paths = paths if paths == 'all' else sorted(paths) # need to ensure paths are grouped by common prefix
- # self.statistic = statistic
- # self.aggregate = aggregate
- # self.edesigntype = edesigntype
-
- # def run(self, results, memlimit=None, comm=None):
- # data = results.data
- # paths = results.get_tree_paths() if self.paths == 'all' else self.paths
- # #Note: above won't work if given just a results object - needs a dir
-
- # #Process results
- # #Merge/flatten the data from different paths into one depth vs width grid
- # passnames = list(data.passes.keys()) if data.is_multipass() else [None]
- # passresults = []
- # for passname in passnames:
- # vb = _tools.NamedDict('Depth', 'int', None, None)
- # fails = _tools.NamedDict('Depth', 'int', None, None)
- # path_for_gridloc = {}
- # for path in paths:
- # #TODO: need to be able to filter based on widths... - maybe replace .update calls
- # # with something more complicated when width != 'all'
- # #print("Aggregating path = ", path) #TODO - show progress something like this later?
-
- # #Traverse path to get to root of VB data
- # root = results
- # for key in path:
- # root = root[key]
- # root = root.for_protocol.get(self.name, None)
- # if root is None: continue
-
- # if passname: # then we expect final Results are MultiPassResults
- # root = root.passes[passname] # now root should be a BenchmarkingResults
- # assert(isinstance(root, VolumetricBenchmarkingResults))
- # if self.edesigntype is None:
- # assert(isinstance(root.data.edesign, ByDepthDesign)), \
- # "All paths must lead to by-depth exp. design, not %s!" % str(type(root.data.edesign))
- # else:
- # if not isinstance(root.data.edsign, self.edesigntype):
- # continue
-
- # #Get the list of depths we'll extract from this (`root`) sub-results
- # depths = root.data.edesign.depths if (self.depths == 'all') else \
- # filter(lambda d: d in self.depths, root.data.edesign.depths)
- # width = len(root.data.edesign.qubit_labels) # sub-results contains only a single width
- # if self.widths != 'all' and width not in self.widths: continue # skip this one
-
- # for depth in depths:
- # if depth not in vb: # and depth not in fails
- # vb[depth] = _tools.NamedDict('Width', 'int', 'Value', 'float')
- # fails[depth] = _tools.NamedDict('Width', 'int', 'Value', None)
- # path_for_gridloc[depth] = {} # just used for meaningful error message
-
- # if width in path_for_gridloc[depth]:
- # raise ValueError(("Paths %s and %s both give data for depth=%d, width=%d! Set the `paths`
- # " argument of this VolumetricBenchmarkGrid to avoid this.") %
- # (str(path_for_gridloc[depth][width]), str(path), depth, width))
-
- # vb[depth][width] = root.volumetric_benchmarks[depth][width]
- # fails[depth][width] = root.failure_counts[depth][width]
- # path_for_gridloc[depth][width] = path
-
- # if self.statistic in ('minmin', 'maxmax') and not self.aggregate:
- # self._update_vb_minmin_maxmax(vb) # aggregate now since we won't aggregate over passes
-
- # #Create Results
- # results = VolumetricBenchmarkingResults(data, self)
- # results.volumetric_benchmarks = vb
- # results.failure_counts = fails
- # passresults.append(results)
-
- # agg_fn = _get_statistic_function(self.statistic)
-
- # if self.aggregate and len(passnames) > 1: # aggregate pass data into a single set of qty dicts
- # agg_vb = _tools.NamedDict('Depth', 'int', None, None)
- # agg_fails = _tools.NamedDict('Depth', 'int', None, None)
- # template = passresults[0].volumetric_benchmarks # to get widths and depths
-
- # for depth, template_by_width_data in template.items():
- # agg_vb[depth] = _tools.NamedDict('Width', 'int', 'Value', 'float')
- # agg_fails[depth] = _tools.NamedDict('Width', 'int', 'Value', None)
-
- # for width in template_by_width_data.keys():
- # # ppd = "per pass data"
- # vb_ppd = [r.volumetric_benchmarks[depth][width] for r in passresults]
- # fail_ppd = [r.failure_counts[depth][width] for r in passresults]
-
- # successcount = 0
- # failcount = 0
- # for (successcountpass, failcountpass) in fail_ppd:
- # successcount += successcountpass
- # failcount += failcountpass
- # agg_fails[depth][width] = (successcount, failcount)
-
- # if self.statistic == 'dist':
- # agg_vb[depth][width] = [item for sublist in vb_ppd for item in sublist]
- # else:
- # agg_vb[depth][width] = agg_fn(vb_ppd)
-
- # aggregated_results = VolumetricBenchmarkingResults(data, self)
- # aggregated_results.volumetric_benchmarks = agg_vb
- # aggregated_results.failure_counts = agg_fails
-
- # if self.statistic in ('minmin', 'maxmax'):
- # self._update_vb_minmin_maxmax(aggregated_results.qtys['volumetric_benchmarks'])
- # return aggregated_results # replace per-pass results with aggregated results
- # elif len(passnames) > 1:
- # multipass_results = _proto.MultiPassResults(data, self)
- # multipass_results.passes.update({passname: r for passname, r in zip(passnames, passresults)})
- # return multipass_results
- # else:
- # return passresults[0]
-
- # def _update_vb_minmin_maxmax(self, vb):
- # for d in vb.keys():
- # for w in vb[d].keys():
- # for d2 in vb.keys():
- # for w2 in vb[d2].keys():
- # if self.statistic == 'minmin' and d2 <= d and w2 <= w and vb[d2][w2] < vb[d][w]:
- # vb[d][w] = vb[d2][w2]
- # if self.statistic == 'maxmax' and d2 >= d and w2 >= w and vb[d2][w2] > vb[d][w]:
- # vb[d][w] = vb[d2][w2]
pygsti/protocols/vbdataframe.py CHANGED
@@ -19,7 +19,7 @@ def _calculate_summary_statistic(x, statistic, lower_cutoff=None):
  Utility function that returns statistic(x), or the maximum
  of statistic(x) and lower_cutoff if lower_cutoff is not None.
  """
- if len(x) == 0 or _np.all(_np.isnan(x)): return _np.NaN
+ if len(x) == 0 or _np.all(_np.isnan(x)): return _np.nan
  if statistic == 'mean': func = _np.nanmean
  elif statistic == 'max' or statistic == 'monotonic_max': func = _np.nanmax
  elif statistic == 'min' or statistic == 'monotonic_min': func = _np.nanmin
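
The `_np.NaN` → `_np.nan` changes here and in other modules track NumPy 2.0, which removed the `np.NaN` alias; `np.nan` works on both old and new NumPy. For example:

```python
import numpy as np

x = np.array([np.nan, 1.0, 2.0])
print(np.nanmean(x))  # 1.5 -- nan-aware mean, as used by _calculate_summary_statistic
```
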
pygsti/report/factory.py CHANGED
@@ -84,21 +84,6 @@ def _add_new_estimate_labels(running_lbls, estimates, combine_robust):
  return running_lbls


- #def _robust_estimate_has_same_models(estimates, est_lbl):
- # lbl_robust = est_lbl+ROBUST_SUFFIX
- # if lbl_robust not in estimates: return False #no robust estimate
- #
- # for mdl_lbl in list(estimates[est_lbl].goparameters.keys()) \
- # + ['final iteration estimate']:
- # if mdl_lbl not in estimates[lbl_robust].models:
- # return False #robust estimate is missing mdl_lbl!
- #
- # mdl = estimates[lbl_robust].models[mdl_lbl]
- # if estimates[est_lbl].models[mdl_lbl].frobeniusdist(mdl) > 1e-8:
- # return False #model mismatch!
- #
- # return True
-
  def _get_viewable_crf(est, est_lbl, mdl_lbl, verbosity=0):
  printer = _VerbosityPrinter.create_printer(verbosity)

pygsti/report/fogidiagram.py CHANGED
@@ -379,10 +379,6 @@ class FOGIGraphDiagram(FOGIDiagram):
  def _normalize(self, v):
  return -_np.log10(max(v, 10**(-self.MAX_POWER)) * 10**self.MIN_POWER) / (self.MAX_POWER - self.MIN_POWER)

- #def _normalize(v):
- # v = min(max(v, 10**(-MAX_POWER)), 10**(-MIN_POWER))
- # return 1.0 - v / (10**(-MIN_POWER) - 10**(-MAX_POWER))
-
  def _node_HScolor(self, Hvalue, Svalue):
  r, g, b, a = _Hcmap(self._normalize(Hvalue))
  r2, g2, b2, a2 = _Scmap(self._normalize(Svalue))
@@ -622,18 +618,6 @@ class FOGISvgGraphDiagram(FOGIGraphDiagram):
  if filename: d.saveSvg(filename)
  return d

- #def _draw_node_simple(self, drawing, r, theta, coh, sto, op_label, total, val_max):
- # nodes = drawing.nodes
- # back_color, border_color, tcolor, _, labels, _ = self._get_node_colors(coh, sto, total)
- # x, y = r * _np.cos(theta), r * _np.sin(theta)
- # scale = (coh + sto) / val_max
- # node_width = 20 + 40 * scale
- # node_height = 20 + 40 * scale
- # nodes.append(_draw.Rectangle(x - node_width / 2, y - node_height / 2, node_width, node_height, rx=3,
- # fill=back_color, stroke=border_color, stroke_width=2))
- # nodes.append(_draw.Text(labels, self.node_fontsize * (0.5 + scale), x, y, fill=tcolor,
- # text_anchor="middle", valign='middle', font_family='Times'))
-
  def _draw_node(self, drawing, r, theta, coh, sto, op_label, total, val_max, groupid, info):
  nodes = drawing.nodes
  back_color, border_color, tcolor, _, labels, _ = self._get_node_colors(coh, sto, total)
@@ -1038,7 +1022,7 @@ class FOGIMultiscaleGridDiagram(FOGIDiagram):
  for i in range(nOps):
  for j in range(i, nOps):
  total_items[i, j] = sum([len(by_qty_items[qty][i, j]) for qty in all_qtys])
- if total_items[i, j] == 0: totals[i, j] = _np.NaN
+ if total_items[i, j] == 0: totals[i, j] = _np.nan

  box_size_mode = "condensed" # or "inflated"
  if detail_level == 2:
pygsti/report/modelfunction.py CHANGED
@@ -10,6 +10,8 @@ Defines the ModelFunction class
  # http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
  #***************************************************************************************************

+ from pygsti.models.explicitmodel import ExplicitOpModel as _ExplicitOpModel
+ from pygsti.models.localnoisemodel import LocalNoiseModel as _LocalNoiseModel

  class ModelFunction(object):
  """
@@ -233,9 +235,16 @@ def opsfn_factory(fn):

  def evaluate(self, model):
  """ Evaluate this gate-set-function at `model`."""
- return fn(model.operations[self.gl].to_dense(on_space='HilbertSchmidt'),
- self.other_model.operations[self.gl].to_dense(on_space='HilbertSchmidt'),
- model.basis, *self.args, **self.kwargs) # assume functions want *dense* gates
+ if isinstance(model, _ExplicitOpModel):
+ return fn(model.operations[self.gl].to_dense(on_space='HilbertSchmidt'),
+ self.other_model.operations[self.gl].to_dense(on_space='HilbertSchmidt'),
+ model.basis, *self.args, **self.kwargs) # assume functions want *dense* gates
+ elif isinstance(model, _LocalNoiseModel):
+ return fn(model.operation_blks['gates'][self.gl].to_dense(on_space='HilbertSchmidt'),
+ self.other_model.operation_blks['gates'][self.gl].to_dense(on_space='HilbertSchmidt'),
+ model.basis, *self.args, **self.kwargs) # assume functions want *dense* gates
+ else:
+ raise ValueError(f"Unsupported model type: {type(model)}!")

  GSFTemp.__name__ = fn.__name__ + str("_class")
  return GSFTemp
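
The reworked `evaluate` dispatches on model type because explicit models expose dense gates via `model.operations`, while local-noise models keep them under `model.operation_blks['gates']`. A condensed sketch of that dispatch as a free function (illustrative only, mirroring the diff):

```python
def dense_gate(model, gate_label):
    from pygsti.models.explicitmodel import ExplicitOpModel
    from pygsti.models.localnoisemodel import LocalNoiseModel
    if isinstance(model, ExplicitOpModel):
        op = model.operations[gate_label]
    elif isinstance(model, LocalNoiseModel):
        op = model.operation_blks['gates'][gate_label]
    else:
        raise ValueError(f"Unsupported model type: {type(model)}!")
    return op.to_dense(on_space='HilbertSchmidt')
```
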
pygsti/report/mpl_colormaps.py CHANGED
@@ -540,7 +540,7 @@ def plotly_to_matplotlib(pygsti_fig, save_to=None, fontsize=12, prec='compacthp'
  axes.bar(x, y, barWidth, color=color)
  else:
  axes.bar(x, y, barWidth, color=color,
- yerr=yerr.flatten().real)
+ yerr=yerr.ravel().real)

  if xtickvals is not None:
  xtics = _np.array(xtickvals) + 0.5 # _np.arange(plt_data.shape[1])+0.5
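
The `flatten()` → `ravel()` substitutions (here and in plothelpers below) avoid an unconditional copy: `ravel` returns a view whenever the array's memory layout allows it, while `flatten` always copies. For example:

```python
import numpy as np

a = np.arange(6).reshape(2, 3)
r = a.ravel()    # view when possible -> no copy
f = a.flatten()  # always a copy
r[0] = 99
print(a[0, 0])   # 99 -- ravel returned a view into `a`
print(f[0])      # 0  -- flatten copied the original data
```
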
pygsti/report/plothelpers.py CHANGED
@@ -18,6 +18,7 @@ from pygsti import tools as _tools
  from pygsti.objectivefns import objectivefns as _objfns
  from pygsti.circuits.circuitlist import CircuitList as _CircuitList
  from pygsti.baseobjs.smartcache import smart_cached
+ from pygsti.baseobjs import Label


  def small_eigenvalue_err_rate(sigma, direct_gst_models):
@@ -43,7 +44,8 @@ def small_eigenvalue_err_rate(sigma, direct_gst_models):
  """
  if sigma is None: return _np.nan # in plot processing, "None" circuits = no plot output = nan values
  mdl_direct = direct_gst_models[sigma]
- minEigval = min(abs(_np.linalg.eigvals(mdl_direct.operations["GsigmaLbl"])))
+ key = Label('GsigmaLbl') if sigma.line_labels == ('*',) else Label('GsigmaLbl', sigma.line_labels)
+ minEigval = min(abs(_np.linalg.eigvals(mdl_direct.operations[key])))
  # (approximate) per-gate error rate; max averts divide by zero error
  return 1.0 - minEigval**(1.0 / max(len(sigma), 1))

@@ -101,7 +103,7 @@ def _eformat(f, prec):


  def _num_non_nan(array):
- ixs = _np.where(_np.isnan(_np.array(array).flatten()) == False)[0] # noqa: E712
+ ixs = _np.where(_np.isnan(_np.array(array).ravel()) == False)[0] # noqa: E712
  return int(len(ixs))


@@ -145,7 +147,7 @@ def _compute_num_boxes_dof(sub_mxs, sum_up, element_dof):

  # Gets all the non-NaN boxes, flattens the resulting
  # array, and does the sum.
- n_boxes = _np.sum(~_np.isnan(sub_mxs).flatten())
+ n_boxes = _np.sum(~_np.isnan(sub_mxs).ravel())

  return n_boxes, dof_per_box

@@ -156,7 +158,13 @@ def _compute_sub_mxs(gss, model, sub_mx_creation_fn, dataset=None, sub_mx_creati
  for x in gss.used_xs] for y in gss.used_ys]
  #Note: subMxs[y-index][x-index] is proper usage
  return subMxs
+
+ #define a modified version that is meant for working with CircuitList objects of lists of them.
+ #@smart_cached
+ def _compute_sub_mxs_circuit_list(circuit_lists, model, sub_mx_creation_fn, dataset=None, sub_mx_creation_fn_extra_arg=None):
+ subMxs = [sub_mx_creation_fn(circuit_list, sub_mx_creation_fn_extra_arg) for circuit_list in circuit_lists]

+ return subMxs

  @smart_cached
  def dscompare_llr_matrices(gsplaq, dscomparator):
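
The `Label`-based lookup above is needed because direct-GST models key their operations by `Label` objects that carry the circuit's line labels; a bare name only matches when the circuit uses the default `('*',)` lines. A hedged sketch of the distinction:

```python
from pygsti.baseobjs import Label

key_default = Label('GsigmaLbl')               # matches circuits with line_labels == ('*',)
key_q0q1 = Label('GsigmaLbl', ('Q0', 'Q1'))    # matches a circuit on lines ('Q0', 'Q1')
print(key_default == key_q0q1)                 # False -- these are different dictionary keys
```
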
pygsti/report/report.py CHANGED
@@ -206,6 +206,11 @@ class Report:
  who want to tinker with the standard analysis presented in the static
  HTML or LaTeX format reports.

+ Note that interactive cells in report notebooks require JavaScript,
+ and therefore do not work with JupyterLab. Please continue to use
+ classic Jupyter notebooks for PyGSTi report notebooks. To track this issue,
+ see https://github.com/pyGSTio/pyGSTi/issues/205.
+

  Parameters
  ----------
  path : str or path-like object
@@ -249,6 +254,12 @@ class Report:
  nb = _Notebook()
  nb.add_markdown('# {title}\n(Created on {date})'.format(
  title=title, date=_time.strftime("%B %d, %Y")))
+
+ nb.add_markdown("## JupyterLab Incompatibility Warning\n" +
+ "<font color='red'>Note that interactive cells in report notebooks require JavaScript, " +
+ "and therefore do not work with JupyterLab. Please continue to use " +
+ "classic Jupyter notebooks for PyGSTi report notebooks. To track this issue, " +
+ "see https://github.com/pyGSTio/pyGSTi/issues/205.</font>")

  nb.add_code("""\
  import pickle
@@ -353,6 +364,11 @@ class Report:

  printer.log("Report Notebook created as %s" % path)

+ printer.warning("""Note that interactive cells in report notebooks require JavaScript,
+ and therefore do not work with JupyterLab. Please continue to use
+ classic Jupyter notebooks for PyGSTi report notebooks. To track this issue,
+ see https://github.com/pyGSTio/pyGSTi/issues/205.""")
+

  if auto_open:
  port = "auto" if auto_open is True else int(auto_open)
  nb.launch(str(path), port=port)