pyGSTi 0.9.12.1__cp39-cp39-win_amd64.whl → 0.9.13__cp39-cp39-win_amd64.whl

Files changed (221)
  1. pyGSTi-0.9.13.dist-info/METADATA +197 -0
  2. {pyGSTi-0.9.12.1.dist-info → pyGSTi-0.9.13.dist-info}/RECORD +207 -217
  3. {pyGSTi-0.9.12.1.dist-info → pyGSTi-0.9.13.dist-info}/WHEEL +1 -1
  4. pygsti/_version.py +2 -2
  5. pygsti/algorithms/contract.py +1 -1
  6. pygsti/algorithms/core.py +42 -28
  7. pygsti/algorithms/fiducialselection.py +17 -8
  8. pygsti/algorithms/gaugeopt.py +2 -2
  9. pygsti/algorithms/germselection.py +87 -77
  10. pygsti/algorithms/mirroring.py +0 -388
  11. pygsti/algorithms/randomcircuit.py +165 -1333
  12. pygsti/algorithms/rbfit.py +0 -234
  13. pygsti/baseobjs/basis.py +94 -396
  14. pygsti/baseobjs/errorgenbasis.py +0 -132
  15. pygsti/baseobjs/errorgenspace.py +0 -10
  16. pygsti/baseobjs/label.py +52 -168
  17. pygsti/baseobjs/opcalc/fastopcalc.cp39-win_amd64.pyd +0 -0
  18. pygsti/baseobjs/opcalc/fastopcalc.pyx +2 -2
  19. pygsti/baseobjs/polynomial.py +13 -595
  20. pygsti/baseobjs/statespace.py +1 -0
  21. pygsti/circuits/__init__.py +1 -1
  22. pygsti/circuits/circuit.py +682 -505
  23. pygsti/circuits/circuitconstruction.py +0 -4
  24. pygsti/circuits/circuitlist.py +47 -5
  25. pygsti/circuits/circuitparser/__init__.py +8 -8
  26. pygsti/circuits/circuitparser/fastcircuitparser.cp39-win_amd64.pyd +0 -0
  27. pygsti/circuits/circuitstructure.py +3 -3
  28. pygsti/circuits/cloudcircuitconstruction.py +1 -1
  29. pygsti/data/datacomparator.py +2 -7
  30. pygsti/data/dataset.py +46 -44
  31. pygsti/data/hypothesistest.py +0 -7
  32. pygsti/drivers/bootstrap.py +0 -49
  33. pygsti/drivers/longsequence.py +2 -1
  34. pygsti/evotypes/basereps_cython.cp39-win_amd64.pyd +0 -0
  35. pygsti/evotypes/chp/opreps.py +0 -61
  36. pygsti/evotypes/chp/statereps.py +0 -32
  37. pygsti/evotypes/densitymx/effectcreps.cpp +9 -10
  38. pygsti/evotypes/densitymx/effectreps.cp39-win_amd64.pyd +0 -0
  39. pygsti/evotypes/densitymx/effectreps.pyx +1 -1
  40. pygsti/evotypes/densitymx/opreps.cp39-win_amd64.pyd +0 -0
  41. pygsti/evotypes/densitymx/opreps.pyx +2 -2
  42. pygsti/evotypes/densitymx/statereps.cp39-win_amd64.pyd +0 -0
  43. pygsti/evotypes/densitymx/statereps.pyx +1 -1
  44. pygsti/evotypes/densitymx_slow/effectreps.py +7 -23
  45. pygsti/evotypes/densitymx_slow/opreps.py +16 -23
  46. pygsti/evotypes/densitymx_slow/statereps.py +10 -3
  47. pygsti/evotypes/evotype.py +39 -2
  48. pygsti/evotypes/stabilizer/effectreps.cp39-win_amd64.pyd +0 -0
  49. pygsti/evotypes/stabilizer/effectreps.pyx +0 -4
  50. pygsti/evotypes/stabilizer/opreps.cp39-win_amd64.pyd +0 -0
  51. pygsti/evotypes/stabilizer/opreps.pyx +0 -4
  52. pygsti/evotypes/stabilizer/statereps.cp39-win_amd64.pyd +0 -0
  53. pygsti/evotypes/stabilizer/statereps.pyx +1 -5
  54. pygsti/evotypes/stabilizer/termreps.cp39-win_amd64.pyd +0 -0
  55. pygsti/evotypes/stabilizer/termreps.pyx +0 -7
  56. pygsti/evotypes/stabilizer_slow/effectreps.py +0 -22
  57. pygsti/evotypes/stabilizer_slow/opreps.py +0 -4
  58. pygsti/evotypes/stabilizer_slow/statereps.py +0 -4
  59. pygsti/evotypes/statevec/effectreps.cp39-win_amd64.pyd +0 -0
  60. pygsti/evotypes/statevec/effectreps.pyx +1 -1
  61. pygsti/evotypes/statevec/opreps.cp39-win_amd64.pyd +0 -0
  62. pygsti/evotypes/statevec/opreps.pyx +2 -2
  63. pygsti/evotypes/statevec/statereps.cp39-win_amd64.pyd +0 -0
  64. pygsti/evotypes/statevec/statereps.pyx +1 -1
  65. pygsti/evotypes/statevec/termreps.cp39-win_amd64.pyd +0 -0
  66. pygsti/evotypes/statevec/termreps.pyx +0 -7
  67. pygsti/evotypes/statevec_slow/effectreps.py +0 -3
  68. pygsti/evotypes/statevec_slow/opreps.py +0 -5
  69. pygsti/extras/__init__.py +0 -1
  70. pygsti/extras/drift/stabilityanalyzer.py +3 -1
  71. pygsti/extras/interpygate/__init__.py +12 -0
  72. pygsti/extras/interpygate/core.py +0 -36
  73. pygsti/extras/interpygate/process_tomography.py +44 -10
  74. pygsti/extras/rpe/rpeconstruction.py +0 -2
  75. pygsti/forwardsims/__init__.py +1 -0
  76. pygsti/forwardsims/forwardsim.py +14 -55
  77. pygsti/forwardsims/mapforwardsim.py +69 -18
  78. pygsti/forwardsims/mapforwardsim_calc_densitymx.cp39-win_amd64.pyd +0 -0
  79. pygsti/forwardsims/mapforwardsim_calc_densitymx.pyx +65 -66
  80. pygsti/forwardsims/mapforwardsim_calc_generic.py +91 -13
  81. pygsti/forwardsims/matrixforwardsim.py +63 -15
  82. pygsti/forwardsims/termforwardsim.py +8 -110
  83. pygsti/forwardsims/termforwardsim_calc_stabilizer.cp39-win_amd64.pyd +0 -0
  84. pygsti/forwardsims/termforwardsim_calc_statevec.cp39-win_amd64.pyd +0 -0
  85. pygsti/forwardsims/termforwardsim_calc_statevec.pyx +0 -651
  86. pygsti/forwardsims/torchfwdsim.py +265 -0
  87. pygsti/forwardsims/weakforwardsim.py +2 -2
  88. pygsti/io/__init__.py +1 -2
  89. pygsti/io/mongodb.py +0 -2
  90. pygsti/io/stdinput.py +6 -22
  91. pygsti/layouts/copalayout.py +10 -12
  92. pygsti/layouts/distlayout.py +0 -40
  93. pygsti/layouts/maplayout.py +103 -25
  94. pygsti/layouts/matrixlayout.py +99 -60
  95. pygsti/layouts/prefixtable.py +1534 -52
  96. pygsti/layouts/termlayout.py +1 -1
  97. pygsti/modelmembers/instruments/instrument.py +3 -3
  98. pygsti/modelmembers/instruments/tpinstrument.py +2 -2
  99. pygsti/modelmembers/modelmember.py +0 -17
  100. pygsti/modelmembers/operations/__init__.py +2 -4
  101. pygsti/modelmembers/operations/affineshiftop.py +1 -0
  102. pygsti/modelmembers/operations/composederrorgen.py +1 -1
  103. pygsti/modelmembers/operations/composedop.py +1 -24
  104. pygsti/modelmembers/operations/denseop.py +5 -5
  105. pygsti/modelmembers/operations/eigpdenseop.py +2 -2
  106. pygsti/modelmembers/operations/embeddederrorgen.py +1 -1
  107. pygsti/modelmembers/operations/embeddedop.py +0 -1
  108. pygsti/modelmembers/operations/experrorgenop.py +2 -2
  109. pygsti/modelmembers/operations/fullarbitraryop.py +1 -0
  110. pygsti/modelmembers/operations/fullcptpop.py +2 -2
  111. pygsti/modelmembers/operations/fulltpop.py +28 -6
  112. pygsti/modelmembers/operations/fullunitaryop.py +5 -4
  113. pygsti/modelmembers/operations/lindbladcoefficients.py +93 -78
  114. pygsti/modelmembers/operations/lindbladerrorgen.py +268 -441
  115. pygsti/modelmembers/operations/linearop.py +7 -27
  116. pygsti/modelmembers/operations/opfactory.py +1 -1
  117. pygsti/modelmembers/operations/repeatedop.py +1 -24
  118. pygsti/modelmembers/operations/staticstdop.py +1 -1
  119. pygsti/modelmembers/povms/__init__.py +3 -3
  120. pygsti/modelmembers/povms/basepovm.py +7 -36
  121. pygsti/modelmembers/povms/complementeffect.py +4 -9
  122. pygsti/modelmembers/povms/composedeffect.py +0 -320
  123. pygsti/modelmembers/povms/computationaleffect.py +1 -1
  124. pygsti/modelmembers/povms/computationalpovm.py +3 -1
  125. pygsti/modelmembers/povms/effect.py +3 -5
  126. pygsti/modelmembers/povms/marginalizedpovm.py +0 -79
  127. pygsti/modelmembers/povms/tppovm.py +74 -2
  128. pygsti/modelmembers/states/__init__.py +2 -5
  129. pygsti/modelmembers/states/composedstate.py +0 -317
  130. pygsti/modelmembers/states/computationalstate.py +3 -3
  131. pygsti/modelmembers/states/cptpstate.py +4 -4
  132. pygsti/modelmembers/states/densestate.py +6 -4
  133. pygsti/modelmembers/states/fullpurestate.py +0 -24
  134. pygsti/modelmembers/states/purestate.py +1 -1
  135. pygsti/modelmembers/states/state.py +5 -6
  136. pygsti/modelmembers/states/tpstate.py +28 -10
  137. pygsti/modelmembers/term.py +3 -6
  138. pygsti/modelmembers/torchable.py +50 -0
  139. pygsti/modelpacks/_modelpack.py +1 -1
  140. pygsti/modelpacks/smq1Q_ZN.py +3 -1
  141. pygsti/modelpacks/smq2Q_XXYYII.py +2 -1
  142. pygsti/modelpacks/smq2Q_XY.py +3 -3
  143. pygsti/modelpacks/smq2Q_XYI.py +2 -2
  144. pygsti/modelpacks/smq2Q_XYICNOT.py +3 -3
  145. pygsti/modelpacks/smq2Q_XYICPHASE.py +3 -3
  146. pygsti/modelpacks/smq2Q_XYXX.py +1 -1
  147. pygsti/modelpacks/smq2Q_XYZICNOT.py +3 -3
  148. pygsti/modelpacks/smq2Q_XYZZ.py +1 -1
  149. pygsti/modelpacks/stdtarget.py +0 -121
  150. pygsti/models/cloudnoisemodel.py +1 -2
  151. pygsti/models/explicitcalc.py +3 -3
  152. pygsti/models/explicitmodel.py +3 -13
  153. pygsti/models/fogistore.py +5 -3
  154. pygsti/models/localnoisemodel.py +1 -2
  155. pygsti/models/memberdict.py +0 -12
  156. pygsti/models/model.py +800 -65
  157. pygsti/models/modelconstruction.py +4 -4
  158. pygsti/models/modelnoise.py +2 -2
  159. pygsti/models/modelparaminterposer.py +1 -1
  160. pygsti/models/oplessmodel.py +1 -1
  161. pygsti/models/qutrit.py +15 -14
  162. pygsti/objectivefns/objectivefns.py +73 -138
  163. pygsti/objectivefns/wildcardbudget.py +2 -7
  164. pygsti/optimize/__init__.py +1 -0
  165. pygsti/optimize/arraysinterface.py +28 -0
  166. pygsti/optimize/customcg.py +0 -12
  167. pygsti/optimize/customlm.py +129 -323
  168. pygsti/optimize/customsolve.py +2 -2
  169. pygsti/optimize/optimize.py +0 -84
  170. pygsti/optimize/simplerlm.py +841 -0
  171. pygsti/optimize/wildcardopt.py +19 -598
  172. pygsti/protocols/confidenceregionfactory.py +28 -14
  173. pygsti/protocols/estimate.py +31 -14
  174. pygsti/protocols/gst.py +142 -68
  175. pygsti/protocols/modeltest.py +6 -10
  176. pygsti/protocols/protocol.py +9 -37
  177. pygsti/protocols/rb.py +450 -79
  178. pygsti/protocols/treenode.py +8 -2
  179. pygsti/protocols/vb.py +108 -206
  180. pygsti/protocols/vbdataframe.py +1 -1
  181. pygsti/report/factory.py +0 -15
  182. pygsti/report/fogidiagram.py +1 -17
  183. pygsti/report/modelfunction.py +12 -3
  184. pygsti/report/mpl_colormaps.py +1 -1
  185. pygsti/report/plothelpers.py +8 -2
  186. pygsti/report/reportables.py +41 -37
  187. pygsti/report/templates/offline/pygsti_dashboard.css +6 -0
  188. pygsti/report/templates/offline/pygsti_dashboard.js +12 -0
  189. pygsti/report/workspace.py +2 -14
  190. pygsti/report/workspaceplots.py +326 -504
  191. pygsti/tools/basistools.py +9 -36
  192. pygsti/tools/edesigntools.py +124 -96
  193. pygsti/tools/fastcalc.cp39-win_amd64.pyd +0 -0
  194. pygsti/tools/fastcalc.pyx +35 -81
  195. pygsti/tools/internalgates.py +151 -15
  196. pygsti/tools/jamiolkowski.py +5 -5
  197. pygsti/tools/lindbladtools.py +19 -11
  198. pygsti/tools/listtools.py +0 -114
  199. pygsti/tools/matrixmod2.py +1 -1
  200. pygsti/tools/matrixtools.py +173 -339
  201. pygsti/tools/nameddict.py +1 -1
  202. pygsti/tools/optools.py +154 -88
  203. pygsti/tools/pdftools.py +0 -25
  204. pygsti/tools/rbtheory.py +3 -320
  205. pygsti/tools/slicetools.py +64 -12
  206. pyGSTi-0.9.12.1.dist-info/METADATA +0 -155
  207. pygsti/algorithms/directx.py +0 -711
  208. pygsti/evotypes/qibo/__init__.py +0 -33
  209. pygsti/evotypes/qibo/effectreps.py +0 -78
  210. pygsti/evotypes/qibo/opreps.py +0 -376
  211. pygsti/evotypes/qibo/povmreps.py +0 -98
  212. pygsti/evotypes/qibo/statereps.py +0 -174
  213. pygsti/extras/rb/__init__.py +0 -13
  214. pygsti/extras/rb/benchmarker.py +0 -957
  215. pygsti/extras/rb/dataset.py +0 -378
  216. pygsti/extras/rb/io.py +0 -814
  217. pygsti/extras/rb/simulate.py +0 -1020
  218. pygsti/io/legacyio.py +0 -385
  219. pygsti/modelmembers/povms/denseeffect.py +0 -142
  220. {pyGSTi-0.9.12.1.dist-info → pyGSTi-0.9.13.dist-info}/LICENSE +0 -0
  221. {pyGSTi-0.9.12.1.dist-info → pyGSTi-0.9.13.dist-info}/top_level.txt +0 -0
@@ -57,7 +57,7 @@ class _TermCOPALayoutAtom(_DistributableAtom):
         expanded_circuit_outcomes = _collections.OrderedDict()
         for i in group:
             observed_outcomes = None if (dataset is None) else dataset[ds_circuits[i]].outcomes
-            d = unique_complete_circuits[i].expand_instruments_and_separate_povm(model, observed_outcomes)
+            d = model.expand_instruments_and_separate_povm(unique_complete_circuits[i], observed_outcomes)
             expanded_circuit_outcomes_by_unique[i] = d
             expanded_circuit_outcomes.update(d)
 
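This hunk tracks a broader 0.9.13 refactor in which instrument expansion and POVM separation moved from the circuit onto the model (the method name is unchanged; only the receiver and first argument swap). A hedged usage sketch of the new call site follows; the modelpack choice and the `complete_circuit` helper used to prepare the circuit are my assumptions about surrounding API, not taken from this diff:

    from pygsti.modelpacks import smq1Q_XYI
    import pygsti

    mdl = smq1Q_XYI.target_model()
    c = pygsti.circuits.Circuit([('Gxpi2', 0)], line_labels=(0,))
    # 0.9.13: the model, not the circuit, owns the expansion step.
    d = mdl.expand_instruments_and_separate_povm(mdl.complete_circuit(c), None)
    # d is a dict keyed by the instrument-expanded, POVM-separated circuits,
    # which is how the layout atom above consumes it.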
@@ -71,7 +71,7 @@ class Instrument(_mm.ModelMember, _collections.OrderedDict):
             if state_space is None:
                 state_space = _statespace.default_space_for_dim(member_list[0][1].shape[0])
             if evotype is None:
-                evotype = _Evotype.cast('default')
+                evotype = _Evotype.cast('default', state_space=state_space)
             member_list = [(k, v if isinstance(v, _op.LinearOperator) else
                             _op.FullArbitraryOp(v, None, evotype, state_space)) for k, v in member_list]
 
@@ -79,10 +79,10 @@ class Instrument(_mm.ModelMember, _collections.OrderedDict):
                 "Must specify `state_space` when there are no instrument members!"
             assert(len(member_list) > 0 or evotype is not None), \
                 "Must specify `evotype` when there are no instrument members!"
-            evotype = _Evotype.cast(evotype) if (evotype is not None) else member_list[0][1].evotype
             state_space = member_list[0][1].state_space if (state_space is None) \
                 else _statespace.StateSpace.cast(state_space)
-
+            evotype = _Evotype.cast(evotype, state_space=state_space) if (evotype is not None)\
+                else member_list[0][1].evotype
         items = []
         for k, member in member_list:
             assert(evotype == member.evotype), \
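The recurring `_Evotype.cast(..., state_space=state_space)` edit in this and the following hunks reflects that `Evotype.cast` now also takes the state space of the object being built (see the `pygsti/evotypes/evotype.py` entry in the file list), presumably so the chosen evolution type can size or validate its representations up front. A minimal sketch of the new call, using import paths consistent with the file list:

    from pygsti.evotypes import Evotype
    from pygsti.baseobjs import statespace

    ss = statespace.default_space_for_dim(4)            # 1-qubit Hilbert-Schmidt space
    evotype = Evotype.cast('default', state_space=ss)   # 0.9.13 signature used throughout these hunks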
@@ -77,8 +77,6 @@ class TPInstrument(_mm.ModelMember, _collections.OrderedDict):
        self._readonly = False # until init is done
        if len(items) > 0:
            assert(op_matrices is None), "`items` was given when op_matrices != None"
-
-            evotype = _Evotype.cast(evotype)
            self.param_ops = [] # first element is TP sum (MT), following
            #elements are fully-param'd (Mi-Mt) for i=0...n-2
 
@@ -98,6 +96,7 @@ class TPInstrument(_mm.ModelMember, _collections.OrderedDict):
                "Must specify `state_space` when there are no instrument members!"
            state_space = _statespace.default_space_for_dim(matrix_list[0][1].shape[0]) if (state_space is None) \
                else _statespace.StateSpace.cast(state_space)
+            evotype = _Evotype.cast(evotype, state_space=state_space)
 
            # Create gate objects that are used to parameterize this instrument
            MT_mx = sum([v for k, v in matrix_list]) # sum-of-instrument-members matrix
@@ -125,6 +124,7 @@ class TPInstrument(_mm.ModelMember, _collections.OrderedDict):
                # print(k,":\n",v)
        else:
            assert(state_space is not None), "`state_space` cannot be `None` when there are no members!"
+            evotype = _Evotype.cast(evotype, state_space=state_space)
 
        _collections.OrderedDict.__init__(self, items)
        _mm.ModelMember.__init__(self, state_space, evotype)
@@ -340,23 +340,6 @@ class ModelMember(ModelChild, _NicelySerializable):
        if (self.parent is not None) and (force or self.parent._obj_refcount(self) == 0):
            self._parent = None
 
-    # UNUSED - as this doesn't mark parameter for reallocation like it used to
-    #def clear_gpindices(self):
-    #    """
-    #    Sets gpindices to None, along with any submembers' gpindices.
-    #
-    #    This essentially marks these members for parameter re-allocation
-    #    (e.g. if the number - not just the value - of parameters they have
-    #    changes).
-    #
-    #    Returns
-    #    -------
-    #    None
-    #    """
-    #    for subm in self.submembers():
-    #        subm.clear_gpindices()
-    #    self._gpindices = None
-
    def set_gpindices(self, gpindices, parent, memo=None):
        """
        Set the parent and indices into the parent's parameter vector that are used by this ModelMember object.
@@ -475,18 +475,16 @@ def optimize_operation(op_to_optimize, target_op):
        return
 
    from pygsti import optimize as _opt
-    from pygsti.tools import matrixtools as _mt
    assert(target_op.dim == op_to_optimize.dim) # operations must have the same overall dimension
    targetMatrix = target_op.to_dense() if isinstance(target_op, LinearOperator) else target_op
 
    def _objective_func(param_vec):
        op_to_optimize.from_vector(param_vec)
-        return _mt.frobeniusnorm(op_to_optimize.to_dense() - targetMatrix)
+        return _np.linalg.norm(op_to_optimize.to_dense() - targetMatrix)
 
    x0 = op_to_optimize.to_vector()
    minSol = _opt.minimize(_objective_func, x0, method='BFGS', maxiter=10000, maxfev=10000,
                           tol=1e-6, callback=None)
 
    op_to_optimize.from_vector(minSol.x)
-    #print("DEBUG: optimized operation to min frobenius distance %g" %
-    #      _mt.frobeniusnorm(op_to_optimize-targetMatrix))
+    return
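The rewritten objective above relies on `numpy.linalg.norm` defaulting to the Frobenius norm for 2-D input, so on dense matrices it matches the removed `_mt.frobeniusnorm` helper. A quick standalone check:

    import numpy as np

    A = np.arange(6.0).reshape(2, 3)
    assert np.isclose(np.linalg.norm(A), np.sqrt((A * A).sum()))  # 'fro' is the default for 2-D arrays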
@@ -126,6 +126,7 @@ class AffineShiftOp(_DenseOperator):
        numpy array
            The operation parameters as a 1D array with length num_params().
        """
+        # Use flatten (rather than ravel) to ensure a copy is made.
        return self._ptr[1:,0].flatten() # .real in case of complex matrices?
 
    def from_vector(self, v, close=False, dirty_value=True):
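Several of the `to_vector` hunks in this release hinge on NumPy copy semantics: `ndarray.flatten()` always returns a copy, while `ndarray.ravel()` returns a view whenever the data are contiguous. That is why the comment above (and the later `ravel(...).copy()` and `ravel` + `concatenate` variants) are explicit about who owns the memory. A standalone illustration:

    import numpy as np

    m = np.eye(3)
    v_copy = m.flatten()       # always a new array
    v_view = m.ravel()         # a view here, since m is C-contiguous
    v_view[0] = 99.0
    assert m[0, 0] == 99.0     # the view aliases m
    assert v_copy[0] == 1.0    # the copy does not
    v_safe = m.ravel().copy()  # explicit copy, the pattern used below in FullTPOp.to_vector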
@@ -63,7 +63,7 @@ class ComposedErrorgen(_LinearOperator):
 
        if evotype == "auto":
            evotype = errgens_to_compose[0]._evotype
-        evotype = _Evotype.cast(evotype)
+        evotype = _Evotype.cast(evotype, state_space=state_space)
        assert(all([evotype == eg._evotype for eg in errgens_to_compose])), \
            "All error generators must have the same evolution type (%s expected)!" % evotype
 
@@ -69,7 +69,7 @@ class ComposedOp(_LinearOperator):
            evotype = ops_to_compose[0]._evotype
        assert(all([evotype == operation._evotype for operation in ops_to_compose])), \
            "All operations must have the same evolution type (%s expected)!" % evotype
-        evotype = _Evotype.cast(evotype)
+        evotype = _Evotype.cast(evotype, state_space=state_space)
 
        rep = self._create_rep_object(evotype, state_space)
 
@@ -491,10 +491,6 @@ class ComposedOp(_LinearOperator):
 
        self.terms[order] = terms
 
-        #def _decompose_indices(x):
-        #    return tuple(_modelmember._decompose_gpindices(
-        #        self.gpindices, _np.array(x, _np.int64)))
-
        mapvec = _np.ascontiguousarray(_np.zeros(max_polynomial_vars, _np.int64))
        for ii, i in enumerate(gpindices_array):
            mapvec[i] = ii
@@ -555,25 +551,6 @@
                if mag >= min_term_mag:
                    terms.append(_term.compose_terms_with_mag(factors, mag))
        return terms
-        #def _decompose_indices(x):
-        #    return tuple(_modelmember._decompose_gpindices(
-        #        self.gpindices, _np.array(x, _np.int64)))
-        #
-        #mapvec = _np.ascontiguousarray(_np.zeros(max_polynomial_vars,_np.int64))
-        #for ii,i in enumerate(self.gpindices_as_array()):
-        #    mapvec[i] = ii
-        #
-        ##poly_coeffs = [t.coeff.map_indices(_decompose_indices) for t in terms] # with *local* indices
-        #poly_coeffs = [t.coeff.mapvec_indices(mapvec) for t in terms] # with *local* indices
-        #tapes = [poly.compact(complex_coeff_tape=True) for poly in poly_coeffs]
-        #if len(tapes) > 0:
-        #    vtape = _np.concatenate([t[0] for t in tapes])
-        #    ctape = _np.concatenate([t[1] for t in tapes])
-        #else:
-        #    vtape = _np.empty(0, _np.int64)
-        #    ctape = _np.empty(0, complex)
-        #coeffs_as_compact_polys = (vtape, ctape)
-        #self.local_term_poly_coeffs[order] = coeffs_as_compact_polys
 
    @property
    def total_term_magnitude(self):
@@ -313,7 +313,7 @@ class DenseOperator(DenseOperatorInterface, _KrausOperatorInterface, _LinearOper
        mx = _LinearOperator.convert_to_matrix(mx)
        state_space = _statespace.default_space_for_dim(mx.shape[0]) if (state_space is None) \
            else _statespace.StateSpace.cast(state_space)
-        evotype = _Evotype.cast(evotype)
+        evotype = _Evotype.cast(evotype, state_space=state_space)
        self._basis = _Basis.cast(basis, state_space.dim) if (basis is not None) else None # for Hilbert-Schmidt space
        rep = evotype.create_dense_superop_rep(mx, self._basis, state_space)
        _LinearOperator.__init__(self, rep, evotype)
@@ -416,11 +416,11 @@ class DenseOperator(DenseOperatorInterface, _KrausOperatorInterface, _LinearOper
        #CHECK 1 (to unit test?) REMOVE
        #tmp_std = _bt.change_basis(superop_mx, self._basis, 'std')
        #B = _bt.basis_matrices('std', superop_mx.shape[0])
-        #check_superop = sum([ choi_mx[i,j] * _np.kron(B[i], B[j].T) for i in range(d*d) for j in range(d*d)])
+        #check_superop = sum([ choi_mx[i,j] * _np.kron(B[i], B[j].conjugate()) for i in range(d*d) for j in range(d*d)])
        #assert(_np.allclose(check_superop, tmp_std))
 
-        evals, evecs = _np.linalg.eig(choi_mx)
-        #assert(_np.allclose(evecs @ _np.diag(evals) @ (evecs.conjugate().T), choi_mx))
+        evals, evecs = _np.linalg.eigh(choi_mx)
+        assert(_np.allclose(evecs @ _np.diag(evals) @ (evecs.conjugate().T), choi_mx))
        TOL = 1e-7 # consider lowering this tolerance as it leads to errors of this order in the Kraus decomp
        if any([ev <= -TOL for ev in evals]):
            raise ValueError("Cannot compute Kraus decomposition of non-positive-definite superoperator!")
@@ -533,7 +533,7 @@ class DenseUnitaryOperator(DenseOperatorInterface, _KrausOperatorInterface, _Lin
        state_space = _statespace.default_space_for_udim(mx.shape[0]) if (state_space is None) \
            else _statespace.StateSpace.cast(state_space)
        basis = _Basis.cast(basis, state_space.dim) # basis for Hilbert-Schmidt (superop) space
-        evotype = _Evotype.cast(evotype)
+        evotype = _Evotype.cast(evotype, state_space=state_space)
 
        #Try to create a dense unitary rep. If this fails, see if a dense superop rep
        # can be created, as this type of rep can also hold arbitrary unitary ops.
@@ -432,13 +432,13 @@ class EigenvalueParamDenseOp(_DenseOperator):
            dMx = _np.zeros((self.dim, self.dim), 'complex')
            for prefactor, (i, j) in pdesc:
                dMx[i, j] = prefactor
-            tmp = _np.dot(self.B, _np.dot(dMx, self.Bi))
+            tmp = self.B @ (dMx @ self.Bi)
            if _np.linalg.norm(tmp.imag) >= IMAG_TOL: # just a warning until we figure this out.
                print("EigenvalueParamDenseOp deriv_wrt_params WARNING:"
                      " Imag part = ", _np.linalg.norm(tmp.imag), " pdesc = ", pdesc) # pragma: no cover
            #assert(_np.linalg.norm(tmp.imag) < IMAG_TOL), \
            #    "Imaginary mag = %g!" % _np.linalg.norm(tmp.imag)
-            derivMx[:, k] = tmp.real.flatten()
+            derivMx[:, k] = tmp.real.ravel()
 
        if wrt_filter is None:
            return derivMx
@@ -11,10 +11,10 @@ The EmbeddedErrorgen class and supporting functionality.
 #***************************************************************************************************
 
 import collections as _collections
+from pygsti.baseobjs.basis import Basis as _Basis
 import warnings as _warnings
 
 from pygsti.modelmembers.operations.embeddedop import EmbeddedOp as _EmbeddedOp
-from pygsti.baseobjs.basis import Basis as _Basis, EmbeddedBasis as _EmbeddedBasis
 
 
 # Idea:
@@ -18,7 +18,6 @@ import scipy.sparse as _sps
 
 from pygsti.modelmembers.operations.linearop import LinearOperator as _LinearOperator
 from pygsti.modelmembers import modelmember as _modelmember
-from pygsti.baseobjs.basis import EmbeddedBasis as _EmbeddedBasis
 from pygsti.baseobjs.statespace import StateSpace as _StateSpace
 
 
@@ -699,9 +699,9 @@ class ExpErrorgenOp(_LinearOperator, _ErrorGeneratorContainer):
 
            #just act on postfactor and Lindbladian exponent:
            if typ == "prep":
-                mx = _mt.safe_dot(Uinv, mx)
+                mx = Uinv @ mx
            else:
-                mx = _mt.safe_dot(mx, U)
+                mx = mx @ U
            self.set_dense(mx) # calls _update_rep() and sets dirty flag
        else:
            raise ValueError("Invalid transform for this LindbladErrorgen: type %s"
@@ -93,6 +93,7 @@ class FullArbitraryOp(_DenseOperator):
        numpy array
            The operation parameters as a 1D array with length num_params().
        """
+        # Use flatten (rather than ravel) to ensure a copy is made.
        return self._ptr.flatten()
 
    def from_vector(self, v, close=False, dirty_value=True):
@@ -42,7 +42,7 @@ class FullCPTPOp(_KrausOperatorInterface, _LinearOperator):
        choi_mx = _LinearOperator.convert_to_matrix(choi_mx)
        state_space = _statespace.default_space_for_dim(choi_mx.shape[0]) if (state_space is None) \
            else _statespace.StateSpace.cast(state_space)
-        evotype = _Evotype.cast(evotype)
+        evotype = _Evotype.cast(evotype, state_space=state_space)
        self._basis = _Basis.cast(basis, state_space.dim) if (basis is not None) else None # for Hilbert-Schmidt space
 
        #scratch space
@@ -93,7 +93,7 @@ class FullCPTPOp(_KrausOperatorInterface, _LinearOperator):
        Lmx = _np.linalg.cholesky(choi_mx)
 
        #check TP condition: that diagonal els of Lmx squared add to 1.0
-        Lmx_norm = _np.trace(_np.dot(Lmx.T.conjugate(), Lmx)) # sum of magnitude^2 of all els
+        Lmx_norm = _np.linalg.norm(Lmx) # = sqrt(tr(Lmx' Lmx))
        assert(_np.isclose(Lmx_norm, 1.0)), "Cholesky decomp didn't preserve trace=1!"
 
        self.params = _np.empty(dim**2, 'd')
@@ -10,14 +10,25 @@ The FullTPOp class and supporting functionality.
 # http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
 #***************************************************************************************************
 
-import numpy as _np
+from __future__ import annotations
+from typing import Tuple, TYPE_CHECKING
+if TYPE_CHECKING:
+    import torch as _torch
+try:
+    import torch as _torch
+except ImportError:
+    pass
 
+import numpy as _np
 from pygsti.modelmembers.operations.denseop import DenseOperator as _DenseOperator
 from pygsti.modelmembers.operations.linearop import LinearOperator as _LinearOperator
 from pygsti.baseobjs.protectedarray import ProtectedArray as _ProtectedArray
+from pygsti.modelmembers.torchable import Torchable as _Torchable
+
+
 
 
-class FullTPOp(_DenseOperator):
+class FullTPOp(_DenseOperator, _Torchable):
    """
    A trace-preserving operation matrix.
 
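The import block above is the usual optional-dependency pattern: the `TYPE_CHECKING` branch gives static type checkers a `torch` name for the annotations, while the runtime `try/except ImportError` simply leaves `_torch` undefined when PyTorch is not installed, so only torch-specific code paths can fail. A generic standalone sketch of the same pattern (using `None` as the runtime sentinel, a small variation on the hunk above):

    from __future__ import annotations
    from typing import TYPE_CHECKING

    if TYPE_CHECKING:        # seen only by type checkers
        import torch
    try:                     # torch is optional at runtime
        import torch
    except ImportError:
        torch = None

    def double(x: "torch.Tensor") -> "torch.Tensor":
        if torch is None:
            raise RuntimeError("this function requires PyTorch")
        return 2 * x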
@@ -52,11 +63,9 @@ class FullTPOp(_DenseOperator):
    """
 
    def __init__(self, m, basis=None, evotype="default", state_space=None):
-        #LinearOperator.__init__(self, LinearOperator.convert_to_matrix(m))
        mx = _LinearOperator.convert_to_matrix(m)
        assert(_np.isrealobj(mx)), "FullTPOp must have *real* values!"
-        if not (_np.isclose(mx[0, 0], 1.0)
-                and _np.allclose(mx[0, 1:], 0.0)):
+        if not (_np.isclose(mx[0, 0], 1.0) and _np.allclose(mx[0, 1:], 0.0)):
            raise ValueError("Cannot create FullTPOp: "
                             "invalid form for 1st row!")
        _DenseOperator.__init__(self, mx, basis, evotype, state_space)
@@ -122,7 +131,7 @@ class FullTPOp(_DenseOperator):
        numpy array
            The operation parameters as a 1D array with length num_params().
        """
-        return self._ptr.flatten()[self.dim:] # .real in case of complex matrices?
+        return self._ptr.ravel()[self.dim:].copy() # .real in case of complex matrices?
 
    def from_vector(self, v, close=False, dirty_value=True):
        """
@@ -155,6 +164,19 @@ class FullTPOp(_DenseOperator):
        self._ptr_has_changed() # because _rep.base == _ptr (same memory)
        self.dirty = dirty_value
 
+    def stateless_data(self) -> Tuple[int]:
+        return (self.dim,)
+
+    @staticmethod
+    def torch_base(sd: Tuple[int], t_param: _torch.Tensor) -> _torch.Tensor:
+        dim = sd[0]
+        t_const = _torch.zeros(size=(1, dim), dtype=_torch.double)
+        t_const[0,0] = 1.0
+        t_param_mat = t_param.reshape((dim - 1, dim))
+        t = _torch.row_stack((t_const, t_param_mat))
+        return t
+
+
    def deriv_wrt_params(self, wrt_filter=None):
        """
        The element-wise derivative this operation.
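FullTPOp is one of the model members gaining the new `Torchable` interface in 0.9.13 (see `pygsti/modelmembers/torchable.py` and the new `pygsti/forwardsims/torchfwdsim.py` in the file list): `stateless_data()` reports the fixed shape information, and `torch_base` rebuilds the dense superoperator as a torch tensor whose first row is pinned to (1, 0, ..., 0), so gradients flow only through the free rows. A hedged usage sketch; the construction of `op` and the dtype handling are my assumptions, not taken from the diff:

    import numpy as np
    import torch
    from pygsti.modelmembers.operations.fulltpop import FullTPOp

    op = FullTPOp(np.identity(4, 'd'))          # a 1-qubit TP superoperator
    sd = op.stateless_data()                    # (dim,) == (4,)
    t_param = torch.tensor(op.to_vector(), dtype=torch.double, requires_grad=True)
    t_mx = FullTPOp.torch_base(sd, t_param)     # (4, 4) tensor; row 0 is the constant [1, 0, 0, 0]
    assert np.allclose(t_mx.detach().numpy(), op.to_dense())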
@@ -98,7 +98,8 @@ class FullUnitaryOp(_DenseUnitaryOperator):
        numpy array
            The operation parameters as a 1D array with length num_params().
        """
-        return _np.concatenate((self._ptr.real.flatten(), self._ptr.imag.flatten()), axis=0)
+        # _np.concatenate will make a copy for us, so use ravel instead of flatten.
+        return _np.concatenate((self._ptr.real.ravel(), self._ptr.imag.ravel()), axis=0)
 
    def from_vector(self, v, close=False, dirty_value=True):
        """
@@ -200,7 +201,7 @@ class FullUnitaryOp(_DenseUnitaryOperator):
        Uinv = s.transform_matrix_inverse
 
        my_superop_mx = _ot.unitary_to_superop(self._ptr, self._basis)
-        my_superop_mx = _mt.safe_dot(Uinv, _mt.safe_dot(my_superop_mx, U))
+        my_superop_mx = Uinv @ (my_superop_mx @ U)
 
        self._ptr[:, :] = _ot.superop_to_unitary(my_superop_mx, self._basis)
        self._ptr_has_changed()
@@ -250,9 +251,9 @@
 
        #Note: this code may need to be tweaked to work with sparse matrices
        if typ == "prep":
-            my_superop_mx = _mt.safe_dot(Uinv, my_superop_mx)
+            my_superop_mx = Uinv @ my_superop_mx
        else:
-            my_superop_mx = _mt.safe_dot(my_superop_mx, U)
+            my_superop_mx = my_superop_mx @ U
 
        self._ptr[:, :] = _ot.superop_to_unitary(my_superop_mx, self._basis)
        self._ptr_has_changed()
@@ -8,78 +8,73 @@ import warnings as _warnings
 from pygsti.tools import lindbladtools as _lt
 from pygsti.tools import matrixtools as _mt
 from pygsti.tools import optools as _ot
+from pygsti.tools import fastcalc as _fc
 from pygsti.baseobjs.basis import Basis as _Basis, BuiltinBasis as _BuiltinBasis
 from pygsti.modelmembers import term as _term
 from pygsti.baseobjs.polynomial import Polynomial as _Polynomial
 from pygsti.baseobjs.nicelyserializable import NicelySerializable as _NicelySerializable
 
+from functools import lru_cache
+
 IMAG_TOL = 1e-7 # tolerance for imaginary part being considered zero
 
 
 class LindbladCoefficientBlock(_NicelySerializable):
-    """ SCRATCH:
-    This routine computes the Hamiltonian and Non-Hamiltonian ("other")
-    superoperator generators which correspond to the terms of the Lindblad
-    expression:
-
-    L(rho) = sum_i( h_i [A_i,rho] ) +
-            sum_ij( o_ij * (B_i rho B_j^dag -
-                0.5( rho B_j^dag B_i + B_j^dag B_i rho) ) )
-
-    where {A_i} and {B_i} are bases (possibly the same) for Hilbert Schmidt
-    (density matrix) space with the identity element removed so that each
-    A_i and B_i are traceless. If we write L(rho) in terms of superoperators
-    H_i and O_ij,
-
-    L(rho) = sum_i( h_i H_i(rho) ) + sum_ij( o_ij O_ij(rho) )
-
-    then this function computes the matrices for H_i and O_ij using the given
-    density matrix basis. Thus, if `dmbasis` is expressed in the standard
-    basis (as it should be), the returned matrices are also in this basis.
-
-    If these elements are used as projectors it may be usedful to normalize
-    them (by setting `normalize=True`). Note, however, that these projectors
-    are not all orthogonal - in particular the O_ij's are not orthogonal to
-    one another.
-
-    Parameters
-    ----------
-    dmbasis_ham : list
-        A list of basis matrices {B_i} *including* the identity as the first
-        element, for the returned Hamiltonian-type error generators. This
-        argument is easily obtained by call to :func:`pp_matrices` or a
-        similar function. The matrices are expected to be in the standard
-        basis, and should be traceless except for the identity. Matrices
-        should be NumPy arrays or SciPy CSR sparse matrices.
-
-    dmbasis_other : list
-        A list of basis matrices {B_i} *including* the identity as the first
-        element, for the returned Stochastic-type error generators. This
-        argument is easily obtained by call to :func:`pp_matrices` or a
-        similar function. The matrices are expected to be in the standard
-        basis, and should be traceless except for the identity. Matrices
-        should be NumPy arrays or SciPy CSR sparse matrices.
-
-    normalize : bool
-        Whether or not generators should be normalized so that
-        numpy.linalg.norm(generator.flat) == 1.0 Note that the generators
-        will still, in general, be non-orthogonal.
-
-    other_mode : {"diagonal", "diag_affine", "all"}
-        Which non-Hamiltonian Lindblad error generators to construct.
-        Allowed values are: `"diagonal"` (only the diagonal Stochastic
-        generators are returned; that is, the generators corresponding to the
-        `i==j` terms in the Lindblad expression.), `"diag_affine"` (diagonal +
-        affine generators), and `"all"` (all generators).
+    """
+    Class for storing and managing the parameters associated with particular subblocks of error-generator
+    parameters. Responsible for management of different internal representations utilized when employing
+    various error generator constraints.
    """
 
    _superops_cache = {} # a custom cache for create_lindblad_term_superoperators method calls
 
    def __init__(self, block_type, basis, basis_element_labels=None, initial_block_data=None, param_mode='static',
                 truncate=False):
+        """
+        Parameters
+        ----------
+        block_type : str
+            String specifying the type of error generator parameters contained within this block. Allowed
+            values are 'ham' (for Hamiltonian error generators), 'other_diagonal' (for Pauli stochastic error generators),
+            and 'other' (for Pauli stochastic, Pauli correlation and active error generators).
+
+        basis : `Basis`
+            `Basis` object to be used by this coefficient block. Not this must be an actual `Basis` object, and not
+            a string (as the coefficient block doesn't have the requisite dimensionality information needed for casting).
+
+        basis_element_labels : list or tuple of str
+            Iterable of strings corresponding to the basis element subscripts used by the error generators managed by
+            this coefficient block.
+
+        initial_block_data : _np.ndarray, optional (default None)
+            Numpy array with initial parameter values to use in setting initial state of this coefficient block.
+
+        param_mode : str, optional (default 'static')
+            String specifying the type of internal parameterization used by this coefficient block. Allowed options are:
+
+            - For all block types: 'static'
+            - For 'ham': 'elements'
+            - For 'other_diagonal': 'elements', 'cholesky', 'depol', 'reldepol'
+            - For 'other': 'elements', 'cholesky'
+
+            Note that the most commonly encounted settings in practice are 'elements' and 'cholesky',
+            which when used in the right combination are utilized in the construction of GLND and CPTPLND
+            parameterized models. For both GLND and CPTPLND the 'ham' block used the 'elements' `param_mode`.
+            GLND the 'other' block uses 'elements', and for CPTPLND it uses 'cholesky'.
+
+            'depol' and 'reldepol' are special modes used only for Pauli stochastic only coefficient blocks
+            (i.e. 'other_diagonal'), and correspond to special reduced parameterizations applicable to depolarizing
+            channels. (TODO: Add better explanation of the difference between depol and reldepol).
+
+        truncate : bool, optional (default False)
+            Flag specifying whether to truncate the parameters given by `initial_block_data` in order to meet
+            constraints (e.g. to preserve CPTP) when necessary. If False, then an error is thrown when the
+            given intial data cannot be parameterized as specified.
+        """
+
        super().__init__()
        self._block_type = block_type # 'ham' or 'other' or 'other_diagonal'
-        self._param_mode = param_mode # 'static', 'elements', 'cholesky', or 'real_cholesky', 'depol', 'reldepol'
+        self._param_mode = param_mode # 'static', 'elements', 'cholesky', 'depol', 'reldepol'
        self._basis = basis # must be a full Basis object, not just a string, as we otherwise don't know dimension
        self._bel_labels = tuple(basis_element_labels) if (basis_element_labels is not None) \
            else tuple(basis.labels[1:]) # Note: don't include identity
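The new `__init__` docstring above spells out the allowed `block_type`/`param_mode` combinations. A minimal construction sketch consistent with it; the choice of a 1-qubit Pauli-product basis is my own example, not from the diff:

    from pygsti.baseobjs import Basis
    from pygsti.modelmembers.operations.lindbladcoefficients import LindbladCoefficientBlock

    pp = Basis.cast('pp', 4)   # 1-qubit Pauli-product basis (Hilbert-Schmidt dim 4)
    ham_block = LindbladCoefficientBlock('ham', pp, param_mode='elements')
    other_block = LindbladCoefficientBlock('other', pp, param_mode='cholesky')   # CPTPLND-style block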
@@ -195,7 +190,7 @@ class LindbladCoefficientBlock(_NicelySerializable):
        if sparse:
            #Note: complex OK here sometimes, as only linear combos of "other" gens
            # (like (i,j) + (j,i) terms) need to be real.
-            superops = [_mt.safe_dot(leftTrans, _mt.safe_dot(mx, rightTrans)) for mx in superops]
+            superops = [leftTrans @ (mx @ rightTrans) for mx in superops]
            for mx in superops: mx.sort_indices()
        else:
            #superops = _np.einsum("ik,akl,lj->aij", leftTrans, superops, rightTrans)
@@ -816,12 +811,19 @@
            # encodes a lower-triangular matrix "cache_mx" via:
            # cache_mx[i,i] = params[i,i]
            # cache_mx[i,j] = params[i,j] + 1j*params[j,i] (i > j)
+
            cache_mx = self._cache_mx
-            iparams = 1j * params
-            for i in range(num_bels):
-                cache_mx[i, i] = params[i, i]
-                cache_mx[i, :i] = params[i, :i] + iparams[:i, i]
 
+            params_upper_indices = _fc.fast_triu_indices(num_bels)
+            params_upper = 1j*params[params_upper_indices]
+            params_lower = (params.T)[params_upper_indices]
+
+            cache_mx_trans = cache_mx.T
+            cache_mx_trans[params_upper_indices] = params_lower + params_upper
+
+            diag_indices = cached_diag_indices(num_bels)
+            cache_mx[diag_indices] = params[diag_indices]
+
            #The matrix of (complex) "other"-coefficients is build by assuming
            # cache_mx is its Cholesky decomp; means otherCoeffs is pos-def.
 
@@ -830,27 +832,28 @@
            # matrix, but we don't care about this uniqueness criteria and so
            # the diagonal els of cache_mx can be negative and that's fine -
            # block_data will still be posdef.
-            self.block_data[:, :] = _np.dot(cache_mx, cache_mx.T.conjugate())
+            self.block_data[:, :] = cache_mx@cache_mx.T.conj()
 
-            #DEBUG - test for pos-def
-            #evals = _np.linalg.eigvalsh(block_data)
-            #DEBUG_TOL = 1e-16; #print("EVALS DEBUG = ",evals)
-            #assert(all([ev >= -DEBUG_TOL for ev in evals]))
 
        elif self._param_mode == "elements": # params mx stores block_data (hermitian) directly
            #params holds block_data real and imaginary parts directly
-            iparams = 1j * params
-            for i in range(num_bels):
-                self.block_data[i, i] = params[i, i]
-                self.block_data[i, :i] = params[i, :i] + iparams[:i, i]
-                self.block_data[:i, i] = params[i, :i] - iparams[:i, i]
+            params_upper_indices = _fc.fast_triu_indices(num_bels)
+            params_upper = -1j*params[params_upper_indices]
+            params_lower = (params.T)[params_upper_indices]
+
+            block_data_trans = self.block_data.T
+            self.block_data[params_upper_indices] = params_lower + params_upper
+            block_data_trans[params_upper_indices] = params_lower - params_upper
+
+            diag_indices = cached_diag_indices(num_bels)
+            self.block_data[diag_indices] = params[diag_indices]
+
        else:
            raise ValueError("Internal error: invalid parameter mode (%s) for block type %s!"
                             % (self._param_mode, self._block_type))
    else:
        raise ValueError("Internal error: invalid block type!")
 
-    #def paramvals_to_coefficients_deriv(self, parameter_values, cache_mx=None):
    def deriv_wrt_params(self, v=None):
        """
        Construct derivative of Lindblad coefficients (for this block) from a set of parameter values.
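The two rewritten branches above vectorize the encoding stated in the comment: a real parameter matrix packs a Hermitian matrix (or a Cholesky factor of one) by storing real parts in the lower triangle, imaginary parts in the upper triangle, and the diagonal as-is. A plain-NumPy restatement of that mapping, using `numpy.triu_indices` in place of pyGSTi's `fastcalc` helper (illustrative only, not the shipped implementation):

    import numpy as np

    def cholesky_cache_from_params(params):
        """Build lower-triangular L with L[i, i] = params[i, i] and
        L[i, j] = params[i, j] + 1j * params[j, i] for i > j."""
        n = params.shape[0]
        L = np.zeros((n, n), complex)
        iu = np.triu_indices(n, k=1)               # strictly-upper index pairs (i < j)
        L.T[iu] = params.T[iu] + 1j * params[iu]   # writes the strictly-lower part of L
        L[np.diag_indices(n)] = params[np.diag_indices(n)]
        return L

    params = np.random.default_rng(1).standard_normal((3, 3))
    L = cholesky_cache_from_params(params)
    block_data = L @ L.conj().T                    # positive semidefinite by construction
    assert np.allclose(block_data, block_data.conj().T)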
@@ -1112,11 +1115,11 @@ class LindbladCoefficientBlock(_NicelySerializable):
            if self._param_mode == "depol":
                #d2Odp2 = _np.einsum('alj->lj', self.otherGens)[:,:,None,None] * 2
                d2Odp2 = _np.sum(superops, axis=0)[:, :, None, None] * 2
-            elif self.parameterization.param_mode == "cptp":
+            elif self._param_mode == "cholesky":
                assert(nP == num_bels)
                #d2Odp2 = _np.einsum('alj,aq->ljaq', self.otherGens, 2*_np.identity(nP,'d'))
                d2Odp2 = _np.transpose(superops, (1, 2, 0))[:, :, :, None] * 2 * _np.identity(nP, 'd')
-            else: # param_mode == "unconstrained" or "reldepol"
+            else: # param_mode == "elements" or "reldepol"
                assert(nP == num_bels)
                d2Odp2 = _np.zeros((superops.shape[1], superops.shape[2], nP, nP), 'd')
 
@@ -1124,7 +1127,10 @@ class LindbladCoefficientBlock(_NicelySerializable):
            if self._param_mode == "cholesky":
                if superops_are_flat: # then un-flatten
                    superops = superops.reshape((num_bels, num_bels, superops.shape[1], superops.shape[2]))
-                d2Odp2 = _np.zeros([superops.shape[2], superops.shape[3], nP, nP, nP, nP], 'complex')
+                sqrt_nP = _np.sqrt(nP)
+                snP = int(sqrt_nP)
+                assert snP == sqrt_nP == num_bels
+                d2Odp2 = _np.zeros([superops.shape[2], superops.shape[3], snP, snP, snP, snP], 'complex')
                # yikes! maybe make this SPARSE in future?
 
                #Note: correspondence w/Erik's notes: a=alpha, b=beta, q=gamma, r=delta
@@ -1136,11 +1142,11 @@ class LindbladCoefficientBlock(_NicelySerializable):
                    parameter indices s.t. ab > base and qr > base. If
                    ab_inc_eq == True then the > becomes a >=, and likewise
                    for qr_inc_eq. Used for looping over nonzero hessian els. """
-                    for _base in range(nP):
+                    for _base in range(snP):
                        start_ab = _base if ab_inc_eq else _base + 1
                        start_qr = _base if qr_inc_eq else _base + 1
-                        for _ab in range(start_ab, nP):
-                            for _qr in range(start_qr, nP):
+                        for _ab in range(start_ab, snP):
+                            for _qr in range(start_qr, snP):
                                yield (_base, _ab, _qr)
 
                for base, a, q in iter_base_ab_qr(True, True): # Case1: base=b=r, ab=a, qr=q
@@ -1153,7 +1159,12 @@ class LindbladCoefficientBlock(_NicelySerializable):
                    d2Odp2[:, :, base, b, base, r] = superops[b, r] + superops[r, b]
 
            elif self._param_mode == 'elements': # unconstrained
-                d2Odp2 = _np.zeros([superops.shape[2], superops.shape[3], nP, nP, nP, nP], 'd') # all params linear
+                if superops_are_flat: # then un-flatten
+                    superops = superops.reshape((num_bels, num_bels, superops.shape[1], superops.shape[2]))
+                sqrt_nP = _np.sqrt(nP)
+                snP = int(sqrt_nP)
+                assert snP == sqrt_nP == num_bels
+                d2Odp2 = _np.zeros([superops.shape[2], superops.shape[3], snP, snP, snP, snP], 'd') # all params linear
            else:
                raise ValueError("Internal error: invalid parameter mode (%s) for block type %s!"
                                 % (self._param_mode, self._block_type))
@@ -1204,3 +1215,7 @@ class LindbladCoefficientBlock(_NicelySerializable):
        if len(self._bel_labels) < 10:
            s += " Coefficients are:\n" + str(_np.round(self.block_data, 4))
        return s
+
+@lru_cache(maxsize=16)
+def cached_diag_indices(n):
+    return _np.diag_indices(n)