pyGSTi 0.9.12.1__cp39-cp39-win32.whl → 0.9.13__cp39-cp39-win32.whl

Files changed (221)
  1. pyGSTi-0.9.13.dist-info/METADATA +197 -0
  2. {pyGSTi-0.9.12.1.dist-info → pyGSTi-0.9.13.dist-info}/RECORD +207 -217
  3. {pyGSTi-0.9.12.1.dist-info → pyGSTi-0.9.13.dist-info}/WHEEL +1 -1
  4. pygsti/_version.py +2 -2
  5. pygsti/algorithms/contract.py +1 -1
  6. pygsti/algorithms/core.py +42 -28
  7. pygsti/algorithms/fiducialselection.py +17 -8
  8. pygsti/algorithms/gaugeopt.py +2 -2
  9. pygsti/algorithms/germselection.py +87 -77
  10. pygsti/algorithms/mirroring.py +0 -388
  11. pygsti/algorithms/randomcircuit.py +165 -1333
  12. pygsti/algorithms/rbfit.py +0 -234
  13. pygsti/baseobjs/basis.py +94 -396
  14. pygsti/baseobjs/errorgenbasis.py +0 -132
  15. pygsti/baseobjs/errorgenspace.py +0 -10
  16. pygsti/baseobjs/label.py +52 -168
  17. pygsti/baseobjs/opcalc/fastopcalc.cp39-win32.pyd +0 -0
  18. pygsti/baseobjs/opcalc/fastopcalc.pyx +2 -2
  19. pygsti/baseobjs/polynomial.py +13 -595
  20. pygsti/baseobjs/statespace.py +1 -0
  21. pygsti/circuits/__init__.py +1 -1
  22. pygsti/circuits/circuit.py +682 -505
  23. pygsti/circuits/circuitconstruction.py +0 -4
  24. pygsti/circuits/circuitlist.py +47 -5
  25. pygsti/circuits/circuitparser/__init__.py +8 -8
  26. pygsti/circuits/circuitparser/fastcircuitparser.cp39-win32.pyd +0 -0
  27. pygsti/circuits/circuitstructure.py +3 -3
  28. pygsti/circuits/cloudcircuitconstruction.py +1 -1
  29. pygsti/data/datacomparator.py +2 -7
  30. pygsti/data/dataset.py +46 -44
  31. pygsti/data/hypothesistest.py +0 -7
  32. pygsti/drivers/bootstrap.py +0 -49
  33. pygsti/drivers/longsequence.py +2 -1
  34. pygsti/evotypes/basereps_cython.cp39-win32.pyd +0 -0
  35. pygsti/evotypes/chp/opreps.py +0 -61
  36. pygsti/evotypes/chp/statereps.py +0 -32
  37. pygsti/evotypes/densitymx/effectcreps.cpp +9 -10
  38. pygsti/evotypes/densitymx/effectreps.cp39-win32.pyd +0 -0
  39. pygsti/evotypes/densitymx/effectreps.pyx +1 -1
  40. pygsti/evotypes/densitymx/opreps.cp39-win32.pyd +0 -0
  41. pygsti/evotypes/densitymx/opreps.pyx +2 -2
  42. pygsti/evotypes/densitymx/statereps.cp39-win32.pyd +0 -0
  43. pygsti/evotypes/densitymx/statereps.pyx +1 -1
  44. pygsti/evotypes/densitymx_slow/effectreps.py +7 -23
  45. pygsti/evotypes/densitymx_slow/opreps.py +16 -23
  46. pygsti/evotypes/densitymx_slow/statereps.py +10 -3
  47. pygsti/evotypes/evotype.py +39 -2
  48. pygsti/evotypes/stabilizer/effectreps.cp39-win32.pyd +0 -0
  49. pygsti/evotypes/stabilizer/effectreps.pyx +0 -4
  50. pygsti/evotypes/stabilizer/opreps.cp39-win32.pyd +0 -0
  51. pygsti/evotypes/stabilizer/opreps.pyx +0 -4
  52. pygsti/evotypes/stabilizer/statereps.cp39-win32.pyd +0 -0
  53. pygsti/evotypes/stabilizer/statereps.pyx +1 -5
  54. pygsti/evotypes/stabilizer/termreps.cp39-win32.pyd +0 -0
  55. pygsti/evotypes/stabilizer/termreps.pyx +0 -7
  56. pygsti/evotypes/stabilizer_slow/effectreps.py +0 -22
  57. pygsti/evotypes/stabilizer_slow/opreps.py +0 -4
  58. pygsti/evotypes/stabilizer_slow/statereps.py +0 -4
  59. pygsti/evotypes/statevec/effectreps.cp39-win32.pyd +0 -0
  60. pygsti/evotypes/statevec/effectreps.pyx +1 -1
  61. pygsti/evotypes/statevec/opreps.cp39-win32.pyd +0 -0
  62. pygsti/evotypes/statevec/opreps.pyx +2 -2
  63. pygsti/evotypes/statevec/statereps.cp39-win32.pyd +0 -0
  64. pygsti/evotypes/statevec/statereps.pyx +1 -1
  65. pygsti/evotypes/statevec/termreps.cp39-win32.pyd +0 -0
  66. pygsti/evotypes/statevec/termreps.pyx +0 -7
  67. pygsti/evotypes/statevec_slow/effectreps.py +0 -3
  68. pygsti/evotypes/statevec_slow/opreps.py +0 -5
  69. pygsti/extras/__init__.py +0 -1
  70. pygsti/extras/drift/stabilityanalyzer.py +3 -1
  71. pygsti/extras/interpygate/__init__.py +12 -0
  72. pygsti/extras/interpygate/core.py +0 -36
  73. pygsti/extras/interpygate/process_tomography.py +44 -10
  74. pygsti/extras/rpe/rpeconstruction.py +0 -2
  75. pygsti/forwardsims/__init__.py +1 -0
  76. pygsti/forwardsims/forwardsim.py +14 -55
  77. pygsti/forwardsims/mapforwardsim.py +69 -18
  78. pygsti/forwardsims/mapforwardsim_calc_densitymx.cp39-win32.pyd +0 -0
  79. pygsti/forwardsims/mapforwardsim_calc_densitymx.pyx +65 -66
  80. pygsti/forwardsims/mapforwardsim_calc_generic.py +91 -13
  81. pygsti/forwardsims/matrixforwardsim.py +63 -15
  82. pygsti/forwardsims/termforwardsim.py +8 -110
  83. pygsti/forwardsims/termforwardsim_calc_stabilizer.cp39-win32.pyd +0 -0
  84. pygsti/forwardsims/termforwardsim_calc_statevec.cp39-win32.pyd +0 -0
  85. pygsti/forwardsims/termforwardsim_calc_statevec.pyx +0 -651
  86. pygsti/forwardsims/torchfwdsim.py +265 -0
  87. pygsti/forwardsims/weakforwardsim.py +2 -2
  88. pygsti/io/__init__.py +1 -2
  89. pygsti/io/mongodb.py +0 -2
  90. pygsti/io/stdinput.py +6 -22
  91. pygsti/layouts/copalayout.py +10 -12
  92. pygsti/layouts/distlayout.py +0 -40
  93. pygsti/layouts/maplayout.py +103 -25
  94. pygsti/layouts/matrixlayout.py +99 -60
  95. pygsti/layouts/prefixtable.py +1534 -52
  96. pygsti/layouts/termlayout.py +1 -1
  97. pygsti/modelmembers/instruments/instrument.py +3 -3
  98. pygsti/modelmembers/instruments/tpinstrument.py +2 -2
  99. pygsti/modelmembers/modelmember.py +0 -17
  100. pygsti/modelmembers/operations/__init__.py +2 -4
  101. pygsti/modelmembers/operations/affineshiftop.py +1 -0
  102. pygsti/modelmembers/operations/composederrorgen.py +1 -1
  103. pygsti/modelmembers/operations/composedop.py +1 -24
  104. pygsti/modelmembers/operations/denseop.py +5 -5
  105. pygsti/modelmembers/operations/eigpdenseop.py +2 -2
  106. pygsti/modelmembers/operations/embeddederrorgen.py +1 -1
  107. pygsti/modelmembers/operations/embeddedop.py +0 -1
  108. pygsti/modelmembers/operations/experrorgenop.py +2 -2
  109. pygsti/modelmembers/operations/fullarbitraryop.py +1 -0
  110. pygsti/modelmembers/operations/fullcptpop.py +2 -2
  111. pygsti/modelmembers/operations/fulltpop.py +28 -6
  112. pygsti/modelmembers/operations/fullunitaryop.py +5 -4
  113. pygsti/modelmembers/operations/lindbladcoefficients.py +93 -78
  114. pygsti/modelmembers/operations/lindbladerrorgen.py +268 -441
  115. pygsti/modelmembers/operations/linearop.py +7 -27
  116. pygsti/modelmembers/operations/opfactory.py +1 -1
  117. pygsti/modelmembers/operations/repeatedop.py +1 -24
  118. pygsti/modelmembers/operations/staticstdop.py +1 -1
  119. pygsti/modelmembers/povms/__init__.py +3 -3
  120. pygsti/modelmembers/povms/basepovm.py +7 -36
  121. pygsti/modelmembers/povms/complementeffect.py +4 -9
  122. pygsti/modelmembers/povms/composedeffect.py +0 -320
  123. pygsti/modelmembers/povms/computationaleffect.py +1 -1
  124. pygsti/modelmembers/povms/computationalpovm.py +3 -1
  125. pygsti/modelmembers/povms/effect.py +3 -5
  126. pygsti/modelmembers/povms/marginalizedpovm.py +0 -79
  127. pygsti/modelmembers/povms/tppovm.py +74 -2
  128. pygsti/modelmembers/states/__init__.py +2 -5
  129. pygsti/modelmembers/states/composedstate.py +0 -317
  130. pygsti/modelmembers/states/computationalstate.py +3 -3
  131. pygsti/modelmembers/states/cptpstate.py +4 -4
  132. pygsti/modelmembers/states/densestate.py +6 -4
  133. pygsti/modelmembers/states/fullpurestate.py +0 -24
  134. pygsti/modelmembers/states/purestate.py +1 -1
  135. pygsti/modelmembers/states/state.py +5 -6
  136. pygsti/modelmembers/states/tpstate.py +28 -10
  137. pygsti/modelmembers/term.py +3 -6
  138. pygsti/modelmembers/torchable.py +50 -0
  139. pygsti/modelpacks/_modelpack.py +1 -1
  140. pygsti/modelpacks/smq1Q_ZN.py +3 -1
  141. pygsti/modelpacks/smq2Q_XXYYII.py +2 -1
  142. pygsti/modelpacks/smq2Q_XY.py +3 -3
  143. pygsti/modelpacks/smq2Q_XYI.py +2 -2
  144. pygsti/modelpacks/smq2Q_XYICNOT.py +3 -3
  145. pygsti/modelpacks/smq2Q_XYICPHASE.py +3 -3
  146. pygsti/modelpacks/smq2Q_XYXX.py +1 -1
  147. pygsti/modelpacks/smq2Q_XYZICNOT.py +3 -3
  148. pygsti/modelpacks/smq2Q_XYZZ.py +1 -1
  149. pygsti/modelpacks/stdtarget.py +0 -121
  150. pygsti/models/cloudnoisemodel.py +1 -2
  151. pygsti/models/explicitcalc.py +3 -3
  152. pygsti/models/explicitmodel.py +3 -13
  153. pygsti/models/fogistore.py +5 -3
  154. pygsti/models/localnoisemodel.py +1 -2
  155. pygsti/models/memberdict.py +0 -12
  156. pygsti/models/model.py +800 -65
  157. pygsti/models/modelconstruction.py +4 -4
  158. pygsti/models/modelnoise.py +2 -2
  159. pygsti/models/modelparaminterposer.py +1 -1
  160. pygsti/models/oplessmodel.py +1 -1
  161. pygsti/models/qutrit.py +15 -14
  162. pygsti/objectivefns/objectivefns.py +73 -138
  163. pygsti/objectivefns/wildcardbudget.py +2 -7
  164. pygsti/optimize/__init__.py +1 -0
  165. pygsti/optimize/arraysinterface.py +28 -0
  166. pygsti/optimize/customcg.py +0 -12
  167. pygsti/optimize/customlm.py +129 -323
  168. pygsti/optimize/customsolve.py +2 -2
  169. pygsti/optimize/optimize.py +0 -84
  170. pygsti/optimize/simplerlm.py +841 -0
  171. pygsti/optimize/wildcardopt.py +19 -598
  172. pygsti/protocols/confidenceregionfactory.py +28 -14
  173. pygsti/protocols/estimate.py +31 -14
  174. pygsti/protocols/gst.py +142 -68
  175. pygsti/protocols/modeltest.py +6 -10
  176. pygsti/protocols/protocol.py +9 -37
  177. pygsti/protocols/rb.py +450 -79
  178. pygsti/protocols/treenode.py +8 -2
  179. pygsti/protocols/vb.py +108 -206
  180. pygsti/protocols/vbdataframe.py +1 -1
  181. pygsti/report/factory.py +0 -15
  182. pygsti/report/fogidiagram.py +1 -17
  183. pygsti/report/modelfunction.py +12 -3
  184. pygsti/report/mpl_colormaps.py +1 -1
  185. pygsti/report/plothelpers.py +8 -2
  186. pygsti/report/reportables.py +41 -37
  187. pygsti/report/templates/offline/pygsti_dashboard.css +6 -0
  188. pygsti/report/templates/offline/pygsti_dashboard.js +12 -0
  189. pygsti/report/workspace.py +2 -14
  190. pygsti/report/workspaceplots.py +326 -504
  191. pygsti/tools/basistools.py +9 -36
  192. pygsti/tools/edesigntools.py +124 -96
  193. pygsti/tools/fastcalc.cp39-win32.pyd +0 -0
  194. pygsti/tools/fastcalc.pyx +35 -81
  195. pygsti/tools/internalgates.py +151 -15
  196. pygsti/tools/jamiolkowski.py +5 -5
  197. pygsti/tools/lindbladtools.py +19 -11
  198. pygsti/tools/listtools.py +0 -114
  199. pygsti/tools/matrixmod2.py +1 -1
  200. pygsti/tools/matrixtools.py +173 -339
  201. pygsti/tools/nameddict.py +1 -1
  202. pygsti/tools/optools.py +154 -88
  203. pygsti/tools/pdftools.py +0 -25
  204. pygsti/tools/rbtheory.py +3 -320
  205. pygsti/tools/slicetools.py +64 -12
  206. pyGSTi-0.9.12.1.dist-info/METADATA +0 -155
  207. pygsti/algorithms/directx.py +0 -711
  208. pygsti/evotypes/qibo/__init__.py +0 -33
  209. pygsti/evotypes/qibo/effectreps.py +0 -78
  210. pygsti/evotypes/qibo/opreps.py +0 -376
  211. pygsti/evotypes/qibo/povmreps.py +0 -98
  212. pygsti/evotypes/qibo/statereps.py +0 -174
  213. pygsti/extras/rb/__init__.py +0 -13
  214. pygsti/extras/rb/benchmarker.py +0 -957
  215. pygsti/extras/rb/dataset.py +0 -378
  216. pygsti/extras/rb/io.py +0 -814
  217. pygsti/extras/rb/simulate.py +0 -1020
  218. pygsti/io/legacyio.py +0 -385
  219. pygsti/modelmembers/povms/denseeffect.py +0 -142
  220. {pyGSTi-0.9.12.1.dist-info → pyGSTi-0.9.13.dist-info}/LICENSE +0 -0
  221. {pyGSTi-0.9.12.1.dist-info → pyGSTi-0.9.13.dist-info}/top_level.txt +0 -0
pygsti/optimize/customlm.py
@@ -10,6 +10,7 @@ Custom implementation of the Levenberg-Marquardt Algorithm
  # http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
  #***************************************************************************************************
 
+ import os as _os
  import signal as _signal
  import time as _time
 
@@ -18,94 +19,19 @@ import scipy as _scipy
 
  from pygsti.optimize import arraysinterface as _ari
  from pygsti.optimize.customsolve import custom_solve as _custom_solve
+ from pygsti.optimize.simplerlm import Optimizer, OptimizerResult
  from pygsti.baseobjs.verbosityprinter import VerbosityPrinter as _VerbosityPrinter
  from pygsti.baseobjs.resourceallocation import ResourceAllocation as _ResourceAllocation
  from pygsti.baseobjs.nicelyserializable import NicelySerializable as _NicelySerializable
 
- # from scipy.optimize import OptimizeResult as _optResult
-
- #Make sure SIGINT will generate a KeyboardInterrupt (even if we're launched in the background)
- _signal.signal(_signal.SIGINT, _signal.default_int_handler)
+ # Make sure SIGINT will generate a KeyboardInterrupt (even if we're launched in the background)
+ # This may be problematic for multithreaded parallelism above pyGSTi, e.g. Dask,
+ # so this can be turned off by setting the PYGSTI_NO_CUSTOMLM_SIGINT environment variable
+ if 'PYGSTI_NO_CUSTOMLM_SIGINT' not in _os.environ:
+     _signal.signal(_signal.SIGINT, _signal.default_int_handler)
 
  #constants
  _MACH_PRECISION = 1e-12
- #MU_TOL1 = 1e10 # ??
- #MU_TOL2 = 1e3 # ??
-
-
- class OptimizerResult(object):
-     """
-     The result from an optimization.
-
-     Parameters
-     ----------
-     objective_func : ObjectiveFunction
-         The objective function that was optimized.
-
-     opt_x : numpy.ndarray
-         The optimal argument (x) value. Often a vector of parameters.
-
-     opt_f : numpy.ndarray
-         the optimal objective function (f) value. Often this is the least-squares
-         vector of objective function values.
-
-     opt_jtj : numpy.ndarray, optional
-         the optimial `dot(transpose(J),J)` value, where `J`
-         is the Jacobian matrix. This may be useful for computing
-         approximate error bars.
-
-     opt_unpenalized_f : numpy.ndarray, optional
-         the optimal objective function (f) value with any
-         penalty terms removed.
-
-     chi2_k_distributed_qty : float, optional
-         a value that is supposed to be chi2_k distributed.
-
-     optimizer_specific_qtys : dict, optional
-         a dictionary of additional optimization parameters.
-     """
-     def __init__(self, objective_func, opt_x, opt_f=None, opt_jtj=None,
-                  opt_unpenalized_f=None, chi2_k_distributed_qty=None,
-                  optimizer_specific_qtys=None):
-         self.objective_func = objective_func
-         self.x = opt_x
-         self.f = opt_f
-         self.jtj = opt_jtj  # jacobian.T * jacobian
-         self.f_no_penalties = opt_unpenalized_f
-         self.optimizer_specific_qtys = optimizer_specific_qtys
-         self.chi2_k_distributed_qty = chi2_k_distributed_qty
-
-
- class Optimizer(_NicelySerializable):
-     """
-     An optimizer. Optimizes an objective function.
-     """
-
-     @classmethod
-     def cast(cls, obj):
-         """
-         Cast `obj` to a :class:`Optimizer`.
-
-         If `obj` is already an `Optimizer` it is just returned,
-         otherwise this function tries to create a new object
-         using `obj` as a dictionary of constructor arguments.
-
-         Parameters
-         ----------
-         obj : Optimizer or dict
-             The object to cast.
-
-         Returns
-         -------
-         Optimizer
-         """
-         if isinstance(obj, cls):
-             return obj
-         else:
-             return cls(**obj) if obj else cls()
-
-     def __init__(self):
-         super().__init__()
 
 
  class CustomLMOptimizer(Optimizer):
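The guard added above means the SIGINT-to-KeyboardInterrupt handler can now be switched off when pyGSTi runs under an outer parallel framework such as Dask. A minimal sketch of how a caller might opt out; only the environment-variable name comes from the hunk above, the rest is illustrative:

    import os

    # The check runs at module import time, so the variable must be set
    # before pygsti.optimize.customlm is first imported.  Any value works;
    # only the key's presence is tested.
    os.environ['PYGSTI_NO_CUSTOMLM_SIGINT'] = '1'

    import pygsti  # customlm now leaves the default SIGINT handling alone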
@@ -371,13 +297,6 @@ class CustomLMOptimizer(Optimizer):
          return OptimizerResult(objective, opt_x, norm_f, opt_jtj, unpenalized_normf, chi2k_qty,
                                 {'msg': msg, 'mu': mu, 'nu': nu, 'fvec': f})
 
-         #Scipy version...
-         # opt_x, _, _, msg, flag = \
-         #     _spo.leastsq(objective_func, x0, xtol=tol['relx'], ftol=tol['relf'], gtol=tol['jac'],
-         #                  maxfev=maxfev * (len(x0) + 1), full_output=True, Dfun=jacobian)  # pragma: no cover
-         # printer.log("Least squares message = %s; flag =%s" % (msg, flag), 2)  # pragma: no cover
-         # opt_state = (msg,)
-
 
  def custom_leastsq(obj_fn, jac_fn, x0, f_norm2_tol=1e-6, jac_norm_tol=1e-6,
                     rel_ftol=1e-6, rel_xtol=1e-6, max_iter=100, num_fd_iters=0,
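Per the hunks above, OptimizerResult and Optimizer now live in pygsti.optimize.simplerlm and are re-imported into customlm, so customlm-level references to those names keep resolving. A small, hypothetical sketch of reading the result fields; the attribute names come from the relocated class definition shown above, while summarize and result are placeholders:

    from pygsti.optimize.simplerlm import OptimizerResult

    def summarize(result: OptimizerResult) -> None:
        # Attributes set by OptimizerResult.__init__ (see the class shown above).
        print("optimal x:", result.x)
        print("objective value f:", result.f)
        print("J^T J at optimum:", None if result.jtj is None else result.jtj.shape)
        print("extra quantities:", result.optimizer_specific_qtys)  # e.g. {'msg', 'mu', 'nu', 'fvec'}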
@@ -891,7 +810,6 @@ def custom_leastsq(obj_fn, jac_fn, x0, f_norm2_tol=1e-6, jac_norm_tol=1e-6,
              reject_msg = ""
              if profiler: profiler.memory_check("custom_leastsq: after linsolve")
              if success:  # linear solve succeeded
-                 #dx = _hack_dx(obj_fn, x, dx, Jac, JTJ, JTf, f, norm_f)
 
                  if damping_mode != 'adaptive':
                      new_x[:] = x + dx
@@ -1311,239 +1229,127 @@ def custom_leastsq(obj_fn, jac_fn, x0, f_norm2_tol=1e-6, jac_norm_tol=1e-6,
      #return solution
 
 
- def _hack_dx(obj_fn, x, dx, jac, jtj, jtf, f, norm_f):
-     #HACK1
-     #if nRejects >= 2:
-     #    dx = -(10.0**(1-nRejects))*x
-     #    print("HACK - setting dx = -%gx!" % 10.0**(1-nRejects))
-     #    return dx
-
-     #HACK2
-     if True:
-         print("HACK2 - trying to find a good dx by iteratively stepping in each direction...")
-
-         test_f = obj_fn(x + dx); cmp_normf = _np.dot(test_f, test_f)
-         print("Compare with suggested step => ", cmp_normf)
-         STEP = 0.0001
-
-         #import bpdb; bpdb.set_trace()
-         #gradient = -jtf
-         test_dx = _np.zeros(len(dx), 'd')
-         last_normf = norm_f
-         for ii in range(len(dx)):
-
-             #Try adding
-             while True:
-                 test_dx[ii] += STEP
-                 test_f = obj_fn(x + test_dx); test_normf = _np.dot(test_f, test_f)
-                 if test_normf < last_normf:
-                     last_normf = test_normf
-                 else:
-                     test_dx[ii] -= STEP
-                     break
-
-             if test_dx[ii] == 0:  # then try subtracting
-                 while True:
-                     test_dx[ii] -= STEP
-                     test_f = obj_fn(x + test_dx); test_normf = _np.dot(test_f, test_f)
-                     if test_normf < last_normf:
-                         last_normf = test_normf
-                     else:
-                         test_dx[ii] += STEP
-                         break
-
-             if abs(test_dx[ii]) > 1e-6:
-                 test_prediction = norm_f + _np.dot(-2 * jtf, test_dx)
-                 tp2_f = f + _np.dot(jac, test_dx)
-                 test_prediction2 = _np.dot(tp2_f, tp2_f)
-                 cmp_dx = dx  # -jtf
-                 print(" -> Adjusting index ", ii, ":", x[ii], "+", test_dx[ii], " => ", last_normf, "(cmp w/dx: ",
-                       cmp_dx[ii], test_prediction, test_prediction2, ") ",
-                       "YES" if test_dx[ii] * cmp_dx[ii] > 0 else "NO")
-
-         if _np.linalg.norm(test_dx) > 0 and last_normf < cmp_normf:
-             print("FOUND HACK dx w/norm = ", _np.linalg.norm(test_dx))
-             return test_dx
-         else:
-             print("KEEPING ORIGINAL dx")
-
-     #HACK3
-     if False:
-         print("HACK3 - checking if there's a simple dx that is better...")
-         test_f = obj_fn(x + dx); cmp_normf = _np.dot(test_f, test_f)
-         orig_prediction = norm_f + _np.dot(2 * jtf, dx)
-         Jdx = _np.dot(jac, dx)
-         op2_f = f + Jdx
-         orig_prediction2 = _np.dot(op2_f, op2_f)
-         # main objective = fT*f = norm_f
-         # at new x => (f+J*dx)T * (f+J*dx) = norm_f + JdxT*f + fT*Jdx
-         #           = norm_f + 2*(fT*J)dx (b/c transpose of real# does nothing)
-         #           = norm_f + 2*dxT*(JT*f)
-         # prediction 2 also includes (J*dx)T * (J*dx) term = dxT * (jtj) * dx
-         orig_prediction3 = orig_prediction + _np.dot(Jdx, Jdx)
-         norm_dx = _np.linalg.norm(dx)
-         print("Compare with suggested |dx| = ", norm_dx, " => ", cmp_normf,
-               "(predicted: ", orig_prediction, orig_prediction2, orig_prediction3)
-         STEP = norm_dx  # 0.0001
-
-         #import bpdb; bpdb.set_trace()
-         test_dx = _np.zeros(len(dx), 'd')
-         best_ii = -1; best_normf = norm_f; best_dx = 0
-         for ii in range(len(dx)):
-
-             #Try adding a small amount
-             test_dx[ii] = STEP
-             test_f = obj_fn(x + test_dx); test_normf = _np.dot(test_f, test_f)
-             if test_normf < best_normf:
-                 best_normf = test_normf
-                 best_dx = STEP
-                 best_ii = ii
-             else:
-                 test_dx[ii] = -STEP
-                 test_f = obj_fn(x + test_dx); test_normf = _np.dot(test_f, test_f)
-                 if test_normf < best_normf:
-                     best_normf = test_normf
-                     best_dx = -STEP
-                     best_ii = ii
-                 test_dx[ii] = 0
-
-         test_dx[best_ii] = best_dx
-         test_prediction = norm_f + _np.dot(2 * jtf, test_dx)
-         tp2_f = f + _np.dot(jac, test_dx)
-         test_prediction2 = _np.dot(tp2_f, tp2_f)
-
-         jj = _np.argmax(_np.abs(dx))
-         print("Best decrease = index", best_ii, ":", x[best_ii], '+', best_dx, "==>",
-               best_normf, " (predictions: ", test_prediction, test_prediction2, ")")
-         print(" compare with original dx[", best_ii, "]=", dx[best_ii],
-               "YES" if test_dx[best_ii] * dx[best_ii] > 0 else "NO")
-         print(" max of abs(dx) is index ", jj, ":", dx[jj], "yes" if jj == best_ii else "no")
-
-         if _np.linalg.norm(test_dx) > 0 and best_normf < cmp_normf:
-             print("FOUND HACK dx w/norm = ", _np.linalg.norm(test_dx))
-             return test_dx
-         else:
-             print("KEEPING ORIGINAL dx")
-     return dx
-
-
- #Wikipedia-version of LM algorithm, testing mu and mu/nu damping params and taking
- # mu/nu => new_mu if acceptable...  This didn't seem to perform well, but maybe just
- # needs some tweaking, so leaving it commented here for reference
- #def custom_leastsq_wikip(obj_fn, jac_fn, x0, f_norm_tol=1e-6, jac_norm_tol=1e-6,
- #                         rel_tol=1e-6, max_iter=100, comm=None, verbosity=0, profiler=None):
- #    msg = ""
- #    converged = False
- #    x = x0
- #    f = obj_fn(x)
- #    norm_f = _np.linalg.norm(f)
- #    tau = 1e-3 #initial mu
- #    nu = 1.3
- #    my_cols_slice = None
- #
- #
- #    if not _np.isfinite(norm_f):
- #        msg = "Infinite norm of objective function at initial point!"
- #
- #    for k in range(max_iter): #outer loop
- #        # assume x, f, fnorm hold valid values
- #
- #        if len(msg) > 0:
- #            break #exit outer loop if an exit-message has been set
- #
- #        if norm_f < f_norm_tol:
- #            msg = "norm(objectivefn) is small"
- #            converged = True; break
- #
- #        if verbosity > 0:
- #            print("--- Outer Iter %d: norm_f = %g" % (k,norm_f))
- #
- #        if profiler: profiler.mem_check("custom_leastsq: begin outer iter *before de-alloc*")
- #        jac = None; jtj = None; jtf = None
- #
- #        if profiler: profiler.mem_check("custom_leastsq: begin outer iter")
- #        jac = jac_fn(x)
- #        if profiler: profiler.mem_check("custom_leastsq: after jacobian:"
- #                                        + "shape=%s, GB=%.2f" % (str(jac.shape),
- #                                                                 jac.nbytes/(1024.0**3)) )
- #
- #        tm = _time.time()
- #        if my_cols_slice is None:
- #            my_cols_slice = _mpit.distribute_for_dot(jac.shape[0], comm)
- #        jtj = _mpit.mpidot(jac.T,jac,my_cols_slice,comm) #_np.dot(jac.T,jac)
- #        jtf = _np.dot(jac.T,f)
- #        if profiler: profiler.add_time("custom_leastsq: dotprods",tm)
- #
- #        idiag = _np.diag_indices_from(jtj)
- #        norm_JTf = _np.linalg.norm(jtf) #, ord='inf')
- #        norm_x = _np.linalg.norm(x)
- #        undampled_JTJ_diag = jtj.diagonal().copy()
- #
- #        if norm_JTf < jac_norm_tol:
- #            msg = "norm(jacobian) is small"
- #            converged = True; break
- #
- #        if k == 0:
- #            mu = tau #* _np.max(undampled_JTJ_diag) # initial damping element
- #            #mu = tau #* _np.max(undampled_JTJ_diag) # initial damping element
- #
- #        #determing increment using adaptive damping
- #        while True: #inner loop
- #
- #            ### Evaluate with mu' = mu / nu
- #            mu = mu / nu
- #            if profiler: profiler.mem_check("custom_leastsq: begin inner iter")
- #            jtj[idiag] *= (1.0 + mu) # augment normal equations
- #            #jtj[idiag] += mu # augment normal equations
- #
- #            try:
- #                if profiler: profiler.mem_check("custom_leastsq: before linsolve")
- #                tm = _time.time()
- #                success = True
- #                dx = _np.linalg.solve(jtj, -jtf)
- #                if profiler: profiler.add_time("custom_leastsq: linsolve",tm)
- #            except _np.linalg.LinAlgError:
- #                success = False
- #
- #            if profiler: profiler.mem_check("custom_leastsq: after linsolve")
- #            if success: #linear solve succeeded
- #                new_x = x + dx
- #                norm_dx = _np.linalg.norm(dx)
- #
- #                #if verbosity > 1:
- #                #    print("--- Inner Loop: mu=%g, norm_dx=%g" % (mu,norm_dx))
- #
- #                if norm_dx < rel_tol*norm_x: #use squared qtys instead (speed)?
- #                    msg = "relative change in x is small"
- #                    converged = True; break
- #
- #                if norm_dx > (norm_x+rel_tol)/_MACH_PRECISION:
- #                    msg = "(near-)singular linear system"; break
- #
- #                new_f = obj_fn(new_x)
- #                if profiler: profiler.mem_check("custom_leastsq: after obj_fn")
- #                norm_new_f = _np.linalg.norm(new_f)
- #                if not _np.isfinite(norm_new_f): # avoid infinite loop...
- #                    msg = "Infinite norm of objective function!"; break
- #
- #                dF = norm_f - norm_new_f
- #                if dF > 0: #accept step
- #                    #print(" Accepted!")
- #                    x,f, norm_f = new_x, new_f, norm_new_f
- #                    nu = 1.3
- #                    break # exit inner loop normally
- #                else:
- #                    mu *= nu #increase mu
- #            else:
- #                #Linear solve failed:
- #                mu *= nu #increase mu
- #                nu = 2*nu
- #
- #            jtj[idiag] = undampled_JTJ_diag #restore diagonal for next inner loop iter
- #        #end of inner loop
- #    #end of outer loop
- #    else:
- #        #if no break stmt hit, then we've exceeded max_iter
- #        msg = "Maximum iterations (%d) exceeded" % max_iter
- #
- #    return x, converged, msg
+ """
+ def custom_leastsq_wikip(obj_fn, jac_fn, x0, f_norm_tol=1e-6, jac_norm_tol=1e-6,
+                          rel_tol=1e-6, max_iter=100, comm=None, verbosity=0, profiler=None):
+     #
+     # Wikipedia-version of LM algorithm, testing mu and mu/nu damping params and taking
+     # mu/nu => new_mu if acceptable...  This didn't seem to perform well, but maybe just
+     # needs some tweaking, so leaving it commented here for reference
+     #
+     msg = ""
+     converged = False
+     x = x0
+     f = obj_fn(x)
+     norm_f = _np.linalg.norm(f)
+     tau = 1e-3  #initial mu
+     nu = 1.3
+     my_cols_slice = None
+
+
+     if not _np.isfinite(norm_f):
+         msg = "Infinite norm of objective function at initial point!"
+
+     for k in range(max_iter):  #outer loop
+         # assume x, f, fnorm hold valid values
+
+         if len(msg) > 0:
+             break  #exit outer loop if an exit-message has been set
+
+         if norm_f < f_norm_tol:
+             msg = "norm(objectivefn) is small"
+             converged = True; break
+
+         if verbosity > 0:
+             print("--- Outer Iter %d: norm_f = %g" % (k,norm_f))
+
+         if profiler: profiler.mem_check("custom_leastsq: begin outer iter *before de-alloc*")
+         jac = None; jtj = None; jtf = None
+
+         if profiler: profiler.mem_check("custom_leastsq: begin outer iter")
+         jac = jac_fn(x)
+         if profiler: profiler.mem_check("custom_leastsq: after jacobian:"
+                                         + "shape=%s, GB=%.2f" % (str(jac.shape),
+                                                                  jac.nbytes/(1024.0**3)) )
+
+         tm = _time.time()
+         if my_cols_slice is None:
+             my_cols_slice = _mpit.distribute_for_dot(jac.shape[0], comm)
+         jtj = _mpit.mpidot(jac.T,jac,my_cols_slice,comm)  #_np.dot(jac.T,jac)
+         jtf = _np.dot(jac.T,f)
+         if profiler: profiler.add_time("custom_leastsq: dotprods",tm)
+
+         idiag = _np.diag_indices_from(jtj)
+         norm_JTf = _np.linalg.norm(jtf)  #, ord='inf')
+         norm_x = _np.linalg.norm(x)
+         undampled_JTJ_diag = jtj.diagonal().copy()
+
+         if norm_JTf < jac_norm_tol:
+             msg = "norm(jacobian) is small"
+             converged = True; break
+
+         if k == 0:
+             mu = tau  #* _np.max(undampled_JTJ_diag) # initial damping element
+             #mu = tau #* _np.max(undampled_JTJ_diag) # initial damping element
+
+         #determing increment using adaptive damping
+         while True:  #inner loop
+
+             ### Evaluate with mu' = mu / nu
+             mu = mu / nu
+             if profiler: profiler.mem_check("custom_leastsq: begin inner iter")
+             jtj[idiag] *= (1.0 + mu)  # augment normal equations
+             #jtj[idiag] += mu # augment normal equations
+
+             try:
+                 if profiler: profiler.mem_check("custom_leastsq: before linsolve")
+                 tm = _time.time()
+                 success = True
+                 dx = _np.linalg.solve(jtj, -jtf)
+                 if profiler: profiler.add_time("custom_leastsq: linsolve",tm)
+             except _np.linalg.LinAlgError:
+                 success = False
+
+             if profiler: profiler.mem_check("custom_leastsq: after linsolve")
+             if success:  #linear solve succeeded
+                 new_x = x + dx
+                 norm_dx = _np.linalg.norm(dx)
+
+                 #if verbosity > 1:
+                 #    print("--- Inner Loop: mu=%g, norm_dx=%g" % (mu,norm_dx))
+
+                 if norm_dx < rel_tol*norm_x:  #use squared qtys instead (speed)?
+                     msg = "relative change in x is small"
+                     converged = True; break
+
+                 if norm_dx > (norm_x+rel_tol)/_MACH_PRECISION:
+                     msg = "(near-)singular linear system"; break
+
+                 new_f = obj_fn(new_x)
+                 if profiler: profiler.mem_check("custom_leastsq: after obj_fn")
+                 norm_new_f = _np.linalg.norm(new_f)
+                 if not _np.isfinite(norm_new_f):  # avoid infinite loop...
+                     msg = "Infinite norm of objective function!"; break
+
+                 dF = norm_f - norm_new_f
+                 if dF > 0:  #accept step
+                     #print(" Accepted!")
+                     x, f, norm_f = new_x, new_f, norm_new_f
+                     nu = 1.3
+                     break  # exit inner loop normally
+                 else:
+                     mu *= nu  #increase mu
+             else:
+                 #Linear solve failed:
+                 mu *= nu  #increase mu
+                 nu = 2*nu
+
+             jtj[idiag] = undampled_JTJ_diag  #restore diagonal for next inner loop iter
+         #end of inner loop
+     #end of outer loop
+     else:
+         #if no break stmt hit, then we've exceeded max_iter
+         msg = "Maximum iterations (%d) exceeded" % max_iter
+
+     return x, converged, msg
+ """
pygsti/optimize/customsolve.py
@@ -13,7 +13,7 @@ A custom MPI-enabled linear solver.
  import numpy as _np
  import scipy as _scipy
 
- from pygsti.optimize.arraysinterface import UndistributedArraysInterface as _UndistributedArraysInterface
+ from pygsti.optimize.arraysinterface import DistributedArraysInterface as _DistributedArraysInterface
  from pygsti.tools import sharedmemtools as _smt
  from pygsti.tools import slicetools as _slct
 
@@ -90,7 +90,7 @@ def custom_solve(a, b, x, ari, resource_alloc, proc_threshold=100):
      host_comm = resource_alloc.host_comm
      ok_buf = _np.empty(1, _np.int64)
 
-     if comm is None or isinstance(ari, _UndistributedArraysInterface):
+     if comm is None or (not isinstance(ari, _DistributedArraysInterface)):
          x[:] = _scipy.linalg.solve(a, b, assume_a='pos')
          return
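With the rewritten test, the serial fallback is taken whenever the arrays interface is not a DistributedArraysInterface, rather than only for the specific UndistributedArraysInterface type, so any non-distributed layout drops straight to SciPy. That fallback is the one-line positive-definite solve in the context above; a standalone equivalent with illustrative array names (in the LM setting, `a` plays the role of the damped J^T J and `b` of -J^T f):

    import numpy as np
    from scipy.linalg import solve

    rng = np.random.default_rng(0)
    J = rng.standard_normal((50, 4))
    f = rng.standard_normal(50)
    a = J.T @ J            # symmetric positive definite (almost surely, for random J)
    b = -J.T @ f

    x = solve(a, b, assume_a='pos')   # the same call the serial branch makes
    print(np.allclose(a @ x, b))      # True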
 
pygsti/optimize/optimize.py
@@ -662,90 +662,6 @@ def _fmin_evolutionary(f, x0, num_generations, num_individuals, printer):
      return solution
 
 
- #def fmin_homebrew(f, x0, maxiter):
- #    """
- #    Cooked up by Erik, this algorithm is similar to basinhopping but with some tweaks.
- #
- #    Parameters
- #    ----------
- #    fn : function
- #        The function to minimize.
- #
- #    x0 : numpy array
- #        The starting point (argument to fn).
- #
- #    maxiter : int
- #        The maximum number of iterations.
- #
- #    Returns
- #    -------
- #    scipy.optimize.Result object
- #        Includes members 'x', 'fun', 'success', and 'message'.
- #    """
- #
- #    STEP = 0.01
- #    MAX_STEPS = int(2.0 / STEP)  # allow a change of at most 2.0
- #    MAX_DIR_TRIES = 1000
- #    T = 1.0
- #
- #    global_best_params = cur_x0 = x0
- #    global_best = cur_f = f(x0)
- #    N = len(x0)
- #    trial_x0 = x0.copy()
- #
- #    for it in range(maxiter):
- #
- #        #Minimize using L-BFGS-B
- #        opts = {'maxiter': maxiter, 'maxfev': maxiter, 'disp': False }
- #        soln = _spo.minimize(f,trial_x0,options=opts, method='L-BFGS-B',callback=None, tol=1e-8)
- #
- #        # Update global best
- #        if soln.fun < global_best:
- #            global_best_params = soln.x
- #            global_best = soln.fun
- #
- #        #check if we accept the new minimum
- #        if soln.fun < cur_f or _np.random.random() < _np.exp( -(soln.fun - cur_f)/T ):
- #            cur_x0 = soln.x; cur_f = soln.fun
- #            print "Iter %d: f=%g accepted -- global best = %g" % (it, cur_f, global_best)
- #        else:
- #            print "Iter %d: f=%g declined" % (it, cur_f)
- #
- #        trial_x0 = None; numTries = 0
- #        while trial_x0 is None and numTries < MAX_DIR_TRIES:
- #            #choose a random direction
- #            direction = _np.random.random( N )
- #            numTries += 1
- #
- #            #print "DB: test dir %d" % numTries  #DEBUG
- #
- #            #kick solution along random direction until the value of f starts to get smaller again (if it ever does)
- #            # (this indicates we've gone over a maximum along this direction)
- #            last_f = cur_f
- #            for i in range(1,MAX_STEPS):
- #                test_x = cur_x0 + i*STEP * direction
- #                test_f = f(test_x)
- #                #print "DB: test step=%f: f=%f" % (i*STEP, test_f)
- #                if test_f < last_f:
- #                    trial_x0 = test_x
- #                    print "Found new direction in %d tries, new f(x0) = %g" % (numTries,test_f)
- #                    break
- #                last_f = test_f
- #
- #        if trial_x0 is None:
- #            raise ValueError("Maximum number of direction tries exceeded")
- #
- #    solution = _optResult()
- #    solution.x = global_best_params; solution.fun = global_best
- #    solution.success = True
- ##    if it < maxiter:
- ##        solution.success = True
- ##    else:
- ##        solution.success = False
- ##        solution.message = "Maximum iterations exceeded"
- #    return solution
-
-
  def create_objfn_printer(obj_func, start_time=None):
      """
      Create a callback function that prints the value of an objective function.
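The deleted fmin_homebrew prototype described itself as "similar to basinhopping but with some tweaks" and used L-BFGS-B for its local minimizations. Anyone who wants that style of global search can reach for SciPy's stock implementation instead; a small sketch with a placeholder objective:

    import numpy as np
    from scipy.optimize import basinhopping

    def objective(x):
        # Placeholder objective: a simple quadratic bowl with minimum at x = 1.
        return float(np.sum((x - 1.0) ** 2))

    result = basinhopping(objective, x0=np.zeros(3), niter=50,
                          minimizer_kwargs={'method': 'L-BFGS-B'})
    print(result.x, result.fun)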