pyGSTi-0.9.12-cp39-cp39-win32.whl → pyGSTi-0.9.13-cp39-cp39-win32.whl

Files changed (225)
  1. pyGSTi-0.9.13.dist-info/METADATA +197 -0
  2. {pyGSTi-0.9.12.dist-info → pyGSTi-0.9.13.dist-info}/RECORD +211 -220
  3. {pyGSTi-0.9.12.dist-info → pyGSTi-0.9.13.dist-info}/WHEEL +1 -1
  4. pygsti/_version.py +2 -2
  5. pygsti/algorithms/contract.py +1 -1
  6. pygsti/algorithms/core.py +62 -35
  7. pygsti/algorithms/fiducialpairreduction.py +95 -110
  8. pygsti/algorithms/fiducialselection.py +17 -8
  9. pygsti/algorithms/gaugeopt.py +2 -2
  10. pygsti/algorithms/germselection.py +87 -77
  11. pygsti/algorithms/mirroring.py +0 -388
  12. pygsti/algorithms/randomcircuit.py +165 -1333
  13. pygsti/algorithms/rbfit.py +0 -234
  14. pygsti/baseobjs/basis.py +94 -396
  15. pygsti/baseobjs/errorgenbasis.py +0 -132
  16. pygsti/baseobjs/errorgenspace.py +0 -10
  17. pygsti/baseobjs/label.py +52 -168
  18. pygsti/baseobjs/opcalc/fastopcalc.cp39-win32.pyd +0 -0
  19. pygsti/baseobjs/opcalc/fastopcalc.pyx +2 -2
  20. pygsti/baseobjs/polynomial.py +13 -595
  21. pygsti/baseobjs/protectedarray.py +72 -132
  22. pygsti/baseobjs/statespace.py +1 -0
  23. pygsti/circuits/__init__.py +1 -1
  24. pygsti/circuits/circuit.py +753 -504
  25. pygsti/circuits/circuitconstruction.py +0 -4
  26. pygsti/circuits/circuitlist.py +47 -5
  27. pygsti/circuits/circuitparser/__init__.py +8 -8
  28. pygsti/circuits/circuitparser/fastcircuitparser.cp39-win32.pyd +0 -0
  29. pygsti/circuits/circuitstructure.py +3 -3
  30. pygsti/circuits/cloudcircuitconstruction.py +27 -14
  31. pygsti/data/datacomparator.py +4 -9
  32. pygsti/data/dataset.py +51 -46
  33. pygsti/data/hypothesistest.py +0 -7
  34. pygsti/drivers/bootstrap.py +0 -49
  35. pygsti/drivers/longsequence.py +46 -10
  36. pygsti/evotypes/basereps_cython.cp39-win32.pyd +0 -0
  37. pygsti/evotypes/chp/opreps.py +0 -61
  38. pygsti/evotypes/chp/statereps.py +0 -32
  39. pygsti/evotypes/densitymx/effectcreps.cpp +9 -10
  40. pygsti/evotypes/densitymx/effectreps.cp39-win32.pyd +0 -0
  41. pygsti/evotypes/densitymx/effectreps.pyx +1 -1
  42. pygsti/evotypes/densitymx/opreps.cp39-win32.pyd +0 -0
  43. pygsti/evotypes/densitymx/opreps.pyx +2 -2
  44. pygsti/evotypes/densitymx/statereps.cp39-win32.pyd +0 -0
  45. pygsti/evotypes/densitymx/statereps.pyx +1 -1
  46. pygsti/evotypes/densitymx_slow/effectreps.py +7 -23
  47. pygsti/evotypes/densitymx_slow/opreps.py +16 -23
  48. pygsti/evotypes/densitymx_slow/statereps.py +10 -3
  49. pygsti/evotypes/evotype.py +39 -2
  50. pygsti/evotypes/stabilizer/effectreps.cp39-win32.pyd +0 -0
  51. pygsti/evotypes/stabilizer/effectreps.pyx +0 -4
  52. pygsti/evotypes/stabilizer/opreps.cp39-win32.pyd +0 -0
  53. pygsti/evotypes/stabilizer/opreps.pyx +0 -4
  54. pygsti/evotypes/stabilizer/statereps.cp39-win32.pyd +0 -0
  55. pygsti/evotypes/stabilizer/statereps.pyx +1 -5
  56. pygsti/evotypes/stabilizer/termreps.cp39-win32.pyd +0 -0
  57. pygsti/evotypes/stabilizer/termreps.pyx +0 -7
  58. pygsti/evotypes/stabilizer_slow/effectreps.py +0 -22
  59. pygsti/evotypes/stabilizer_slow/opreps.py +0 -4
  60. pygsti/evotypes/stabilizer_slow/statereps.py +0 -4
  61. pygsti/evotypes/statevec/effectreps.cp39-win32.pyd +0 -0
  62. pygsti/evotypes/statevec/effectreps.pyx +1 -1
  63. pygsti/evotypes/statevec/opreps.cp39-win32.pyd +0 -0
  64. pygsti/evotypes/statevec/opreps.pyx +2 -2
  65. pygsti/evotypes/statevec/statereps.cp39-win32.pyd +0 -0
  66. pygsti/evotypes/statevec/statereps.pyx +1 -1
  67. pygsti/evotypes/statevec/termreps.cp39-win32.pyd +0 -0
  68. pygsti/evotypes/statevec/termreps.pyx +0 -7
  69. pygsti/evotypes/statevec_slow/effectreps.py +0 -3
  70. pygsti/evotypes/statevec_slow/opreps.py +0 -5
  71. pygsti/extras/__init__.py +0 -1
  72. pygsti/extras/drift/signal.py +1 -1
  73. pygsti/extras/drift/stabilityanalyzer.py +3 -1
  74. pygsti/extras/interpygate/__init__.py +12 -0
  75. pygsti/extras/interpygate/core.py +0 -36
  76. pygsti/extras/interpygate/process_tomography.py +44 -10
  77. pygsti/extras/rpe/rpeconstruction.py +0 -2
  78. pygsti/forwardsims/__init__.py +1 -0
  79. pygsti/forwardsims/forwardsim.py +50 -93
  80. pygsti/forwardsims/mapforwardsim.py +78 -20
  81. pygsti/forwardsims/mapforwardsim_calc_densitymx.cp39-win32.pyd +0 -0
  82. pygsti/forwardsims/mapforwardsim_calc_densitymx.pyx +65 -66
  83. pygsti/forwardsims/mapforwardsim_calc_generic.py +91 -13
  84. pygsti/forwardsims/matrixforwardsim.py +72 -17
  85. pygsti/forwardsims/termforwardsim.py +9 -111
  86. pygsti/forwardsims/termforwardsim_calc_stabilizer.cp39-win32.pyd +0 -0
  87. pygsti/forwardsims/termforwardsim_calc_statevec.cp39-win32.pyd +0 -0
  88. pygsti/forwardsims/termforwardsim_calc_statevec.pyx +0 -651
  89. pygsti/forwardsims/torchfwdsim.py +265 -0
  90. pygsti/forwardsims/weakforwardsim.py +2 -2
  91. pygsti/io/__init__.py +1 -2
  92. pygsti/io/mongodb.py +0 -2
  93. pygsti/io/stdinput.py +6 -22
  94. pygsti/layouts/copalayout.py +10 -12
  95. pygsti/layouts/distlayout.py +0 -40
  96. pygsti/layouts/maplayout.py +103 -25
  97. pygsti/layouts/matrixlayout.py +99 -60
  98. pygsti/layouts/prefixtable.py +1534 -52
  99. pygsti/layouts/termlayout.py +1 -1
  100. pygsti/modelmembers/instruments/instrument.py +3 -3
  101. pygsti/modelmembers/instruments/tpinstrument.py +2 -2
  102. pygsti/modelmembers/modelmember.py +0 -17
  103. pygsti/modelmembers/operations/__init__.py +3 -4
  104. pygsti/modelmembers/operations/affineshiftop.py +206 -0
  105. pygsti/modelmembers/operations/composederrorgen.py +1 -1
  106. pygsti/modelmembers/operations/composedop.py +1 -24
  107. pygsti/modelmembers/operations/denseop.py +5 -5
  108. pygsti/modelmembers/operations/eigpdenseop.py +2 -2
  109. pygsti/modelmembers/operations/embeddederrorgen.py +1 -1
  110. pygsti/modelmembers/operations/embeddedop.py +0 -1
  111. pygsti/modelmembers/operations/experrorgenop.py +5 -2
  112. pygsti/modelmembers/operations/fullarbitraryop.py +1 -0
  113. pygsti/modelmembers/operations/fullcptpop.py +2 -2
  114. pygsti/modelmembers/operations/fulltpop.py +28 -6
  115. pygsti/modelmembers/operations/fullunitaryop.py +5 -4
  116. pygsti/modelmembers/operations/lindbladcoefficients.py +93 -78
  117. pygsti/modelmembers/operations/lindbladerrorgen.py +268 -441
  118. pygsti/modelmembers/operations/linearop.py +7 -27
  119. pygsti/modelmembers/operations/opfactory.py +1 -1
  120. pygsti/modelmembers/operations/repeatedop.py +1 -24
  121. pygsti/modelmembers/operations/staticstdop.py +1 -1
  122. pygsti/modelmembers/povms/__init__.py +3 -3
  123. pygsti/modelmembers/povms/basepovm.py +7 -36
  124. pygsti/modelmembers/povms/complementeffect.py +4 -9
  125. pygsti/modelmembers/povms/composedeffect.py +0 -320
  126. pygsti/modelmembers/povms/computationaleffect.py +1 -1
  127. pygsti/modelmembers/povms/computationalpovm.py +3 -1
  128. pygsti/modelmembers/povms/effect.py +3 -5
  129. pygsti/modelmembers/povms/marginalizedpovm.py +3 -81
  130. pygsti/modelmembers/povms/tppovm.py +74 -2
  131. pygsti/modelmembers/states/__init__.py +2 -5
  132. pygsti/modelmembers/states/composedstate.py +0 -317
  133. pygsti/modelmembers/states/computationalstate.py +3 -3
  134. pygsti/modelmembers/states/cptpstate.py +4 -4
  135. pygsti/modelmembers/states/densestate.py +10 -8
  136. pygsti/modelmembers/states/fullpurestate.py +0 -24
  137. pygsti/modelmembers/states/purestate.py +1 -1
  138. pygsti/modelmembers/states/state.py +5 -6
  139. pygsti/modelmembers/states/tpstate.py +28 -10
  140. pygsti/modelmembers/term.py +3 -6
  141. pygsti/modelmembers/torchable.py +50 -0
  142. pygsti/modelpacks/_modelpack.py +1 -1
  143. pygsti/modelpacks/smq1Q_ZN.py +3 -1
  144. pygsti/modelpacks/smq2Q_XXYYII.py +2 -1
  145. pygsti/modelpacks/smq2Q_XY.py +3 -3
  146. pygsti/modelpacks/smq2Q_XYI.py +2 -2
  147. pygsti/modelpacks/smq2Q_XYICNOT.py +3 -3
  148. pygsti/modelpacks/smq2Q_XYICPHASE.py +3 -3
  149. pygsti/modelpacks/smq2Q_XYXX.py +1 -1
  150. pygsti/modelpacks/smq2Q_XYZICNOT.py +3 -3
  151. pygsti/modelpacks/smq2Q_XYZZ.py +1 -1
  152. pygsti/modelpacks/stdtarget.py +0 -121
  153. pygsti/models/cloudnoisemodel.py +1 -2
  154. pygsti/models/explicitcalc.py +3 -3
  155. pygsti/models/explicitmodel.py +3 -13
  156. pygsti/models/fogistore.py +5 -3
  157. pygsti/models/localnoisemodel.py +1 -2
  158. pygsti/models/memberdict.py +0 -12
  159. pygsti/models/model.py +801 -68
  160. pygsti/models/modelconstruction.py +4 -4
  161. pygsti/models/modelnoise.py +2 -2
  162. pygsti/models/modelparaminterposer.py +1 -1
  163. pygsti/models/oplessmodel.py +1 -1
  164. pygsti/models/qutrit.py +15 -14
  165. pygsti/objectivefns/objectivefns.py +75 -140
  166. pygsti/objectivefns/wildcardbudget.py +2 -7
  167. pygsti/optimize/__init__.py +1 -0
  168. pygsti/optimize/arraysinterface.py +28 -0
  169. pygsti/optimize/customcg.py +0 -12
  170. pygsti/optimize/customlm.py +129 -323
  171. pygsti/optimize/customsolve.py +2 -2
  172. pygsti/optimize/optimize.py +0 -84
  173. pygsti/optimize/simplerlm.py +841 -0
  174. pygsti/optimize/wildcardopt.py +19 -598
  175. pygsti/protocols/confidenceregionfactory.py +28 -14
  176. pygsti/protocols/estimate.py +31 -14
  177. pygsti/protocols/gst.py +238 -142
  178. pygsti/protocols/modeltest.py +19 -12
  179. pygsti/protocols/protocol.py +9 -37
  180. pygsti/protocols/rb.py +450 -79
  181. pygsti/protocols/treenode.py +8 -2
  182. pygsti/protocols/vb.py +108 -206
  183. pygsti/protocols/vbdataframe.py +1 -1
  184. pygsti/report/factory.py +0 -15
  185. pygsti/report/fogidiagram.py +1 -17
  186. pygsti/report/modelfunction.py +12 -3
  187. pygsti/report/mpl_colormaps.py +1 -1
  188. pygsti/report/plothelpers.py +11 -3
  189. pygsti/report/report.py +16 -0
  190. pygsti/report/reportables.py +41 -37
  191. pygsti/report/templates/offline/pygsti_dashboard.css +6 -0
  192. pygsti/report/templates/offline/pygsti_dashboard.js +12 -0
  193. pygsti/report/workspace.py +2 -14
  194. pygsti/report/workspaceplots.py +328 -505
  195. pygsti/tools/basistools.py +9 -36
  196. pygsti/tools/edesigntools.py +124 -96
  197. pygsti/tools/fastcalc.cp39-win32.pyd +0 -0
  198. pygsti/tools/fastcalc.pyx +35 -81
  199. pygsti/tools/internalgates.py +151 -15
  200. pygsti/tools/jamiolkowski.py +5 -5
  201. pygsti/tools/lindbladtools.py +19 -11
  202. pygsti/tools/listtools.py +0 -114
  203. pygsti/tools/matrixmod2.py +1 -1
  204. pygsti/tools/matrixtools.py +173 -339
  205. pygsti/tools/nameddict.py +1 -1
  206. pygsti/tools/optools.py +154 -88
  207. pygsti/tools/pdftools.py +0 -25
  208. pygsti/tools/rbtheory.py +3 -320
  209. pygsti/tools/slicetools.py +64 -12
  210. pyGSTi-0.9.12.dist-info/METADATA +0 -157
  211. pygsti/algorithms/directx.py +0 -711
  212. pygsti/evotypes/qibo/__init__.py +0 -33
  213. pygsti/evotypes/qibo/effectreps.py +0 -78
  214. pygsti/evotypes/qibo/opreps.py +0 -376
  215. pygsti/evotypes/qibo/povmreps.py +0 -98
  216. pygsti/evotypes/qibo/statereps.py +0 -174
  217. pygsti/extras/rb/__init__.py +0 -13
  218. pygsti/extras/rb/benchmarker.py +0 -957
  219. pygsti/extras/rb/dataset.py +0 -378
  220. pygsti/extras/rb/io.py +0 -814
  221. pygsti/extras/rb/simulate.py +0 -1020
  222. pygsti/io/legacyio.py +0 -385
  223. pygsti/modelmembers/povms/denseeffect.py +0 -142
  224. {pyGSTi-0.9.12.dist-info → pyGSTi-0.9.13.dist-info}/LICENSE +0 -0
  225. {pyGSTi-0.9.12.dist-info → pyGSTi-0.9.13.dist-info}/top_level.txt +0 -0
pygsti/extras/rb/io.py DELETED
@@ -1,814 +0,0 @@
- #***************************************************************************************************
- # Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
- # Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
- # in this software.
- # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
- # in compliance with the License. You may obtain a copy of the License at
- # http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
- #***************************************************************************************************
-
- import ast as _ast
- import json as _json
- import os as _os
- import pickle as _pickle
- import warnings as _warnings
-
- from pygsti.extras.rb import benchmarker as _benchmarker
- from pygsti.extras.rb import dataset as _dataset
- # todo : update
- from pygsti.extras.rb import sample as _sample
- from pygsti import io as _io
- from pygsti.circuits import circuit as _cir
- from pygsti.data import multidataset as _mds
-
-
- #def load_benchmarking_data(basedir):
-
- def load_benchmarker(directory, load_datasets=True, verbosity=1):
-     """
-
-     """
-     with open(directory + '/global.txt', 'r') as f:
-         globaldict = _json.load(f)
-
-     numpasses = globaldict['numpasses']
-     speckeys = globaldict['speckeys']
-     success_key = globaldict['success_key']
-     success_outcome = globaldict['success_outcome']
-     dscomparator = globaldict['dscomparator']
-
-     if load_datasets:
-         dskeys = [dskey.name for dskey in _os.scandir(directory + '/data') if dskey.is_dir()]
-         multidsdict = {dskey: _mds.MultiDataSet()for dskey in dskeys}
-
-         for dskey in dskeys:
-             for passnum in range(numpasses):
-                 dsfn = directory + '/data/{}/ds{}.txt'.format(dskey, passnum)
-                 ds = _io.read_dataset(dsfn, collision_action='keepseparate', record_zero_counts=False,
-                                       ignore_zero_count_lines=False, verbosity=verbosity)
-                 multidsdict[dskey].add_dataset(passnum, ds)
-     else:
-         multidsdict = None
-
-     specs = {}
-     for i, speckey in enumerate(speckeys):
-         specs[speckey] = load_benchmarkspec(directory + '/specs/{}.txt'.format(i))
-
-     summary_data = {'global': {}, 'pass': {}, 'aux': {}}
-     predictionkeys = [pkey.name for pkey in _os.scandir(directory + '/predictions') if pkey.is_dir()]
-     predicted_summary_data = {pkey: {} for pkey in predictionkeys}
-
-     for i, spec in enumerate(specs.values()):
-
-         summary_data['pass'][i] = {}
-         summary_data['global'][i] = {}
-         summary_data['aux'][i] = {}
-         for pkey in predictionkeys:
-             predicted_summary_data[pkey][i] = {}
-
-         structure = spec.get_structure()
-
-         for j, qubits in enumerate(structure):
-
-             # Import the summary data for that spec and qubit subset
-             with open(directory + '/summarydata/{}-{}.txt'.format(i, j), 'r') as f:
-                 sd = _json.load(f)
-             summary_data['pass'][i][qubits] = {}
-             for dtype, data in sd['pass'].items():
-                 summary_data['pass'][i][qubits][dtype] = {int(key): value for (key, value) in data.items()}
-             summary_data['global'][i][qubits] = {}
-             for dtype, data in sd['global'].items():
-                 summary_data['global'][i][qubits][dtype] = {int(key): value for (key, value) in data.items()}
-
-             # Import the auxillary data
-             with open(directory + '/aux/{}-{}.txt'.format(i, j), 'r') as f:
-                 aux = _json.load(f)
-             summary_data['aux'][i][qubits] = {}
-             for dtype, data in aux.items():
-                 summary_data['aux'][i][qubits][dtype] = {int(key): value for (key, value) in data.items()}
-
-             # Import the predicted summary data for that spec and qubit subset
-             for pkey in predictionkeys:
-                 with open(directory + '/predictions/{}/summarydata/{}-{}.txt'.format(pkey, i, j), 'r') as f:
-                     psd = _json.load(f)
-                 predicted_summary_data[pkey][i][qubits] = {}
-                 for dtype, data in psd.items():
-                     predicted_summary_data[pkey][i][qubits][dtype] = {
-                         int(key): value for (key, value) in data.items()}
-
-     benchmarker = _benchmarker.Benchmarker(specs, ds=multidsdict, summary_data=summary_data,
-                                            predicted_summary_data=predicted_summary_data,
-                                            dstype='dict', success_outcome=success_outcome,
-                                            success_key=success_key, dscomparator=dscomparator)
-
-     return benchmarker
-
-
- def write_benchmarker(benchmarker, outdir, overwrite=False, verbosity=0):
-
-     try:
-         _os.makedirs(outdir)
-         if verbosity > 0:
-             print(" - Created `" + outdir + "` folder to store benchmarker in txt format.")
-     except:
-         if overwrite:
-             if verbosity > 0:
-                 print(" - `" + outdir + "` folder already exists. Will write data into that folder.")
-         else:
-             raise ValueError("Directory already exists! Set overwrite to True or change the directory name!")
-
-     globaldict = {}
-     globaldict['speckeys'] = benchmarker._speckeys
-     globaldict['numpasses'] = benchmarker.numpasses
-     globaldict['success_outcome'] = benchmarker.success_outcome
-     globaldict['success_key'] = benchmarker.success_key
-
-     if benchmarker.dscomparator is not None:
-
-         globaldict['dscomparator'] = {}
-         globaldict['dscomparator']['pVal_pseudothreshold'] = benchmarker.dscomparator.pVal_pseudothreshold
-         globaldict['dscomparator']['llr_pseudothreshold'] = benchmarker.dscomparator.llr_pseudothreshold
-         globaldict['dscomparator']['pVal_pseudothreshold'] = benchmarker.dscomparator.pVal_pseudothreshold
-         globaldict['dscomparator']['jsd_pseudothreshold'] = benchmarker.dscomparator.jsd_pseudothreshold
-         globaldict['dscomparator']['aggregate_llr'] = benchmarker.dscomparator.aggregate_llr
-         globaldict['dscomparator']['aggregate_llr_threshold'] = benchmarker.dscomparator.aggregate_llr_threshold
-         globaldict['dscomparator']['aggregate_nsigma'] = benchmarker.dscomparator.aggregate_nsigma
-         globaldict['dscomparator']['aggregate_nsigma_threshold'] = benchmarker.dscomparator.aggregate_nsigma_threshold
-         globaldict['dscomparator']['aggregate_pVal'] = benchmarker.dscomparator.aggregate_pVal
-         globaldict['dscomparator']['aggregate_pVal_threshold'] = benchmarker.dscomparator.aggregate_pVal_threshold
-         globaldict['dscomparator']['inconsistent_datasets_detected'] = \
-             benchmarker.dscomparator.inconsistent_datasets_detected
-         globaldict['dscomparator']['number_of_significant_sequences'] = int(
-             benchmarker.dscomparator.number_of_significant_sequences)
-         globaldict['dscomparator']['significance'] = benchmarker.dscomparator.significance
-
-     else:
-         globaldict['dscomparator'] = None
-
-     # Write global details to file
-     with open(outdir + '/global.txt', 'w') as f:
-         _json.dump(globaldict, f, indent=4)
-
-     _os.makedirs(outdir + '/specs')
-     _os.makedirs(outdir + '/summarydata')
-     _os.makedirs(outdir + '/aux')
-
-     for pkey in benchmarker.predicted_summary_data.keys():
-         _os.makedirs(outdir + '/predictions/{}/summarydata'.format(pkey))
-
-     for i, spec in enumerate(benchmarker._specs):
-         structure = spec.get_structure()
-         write_benchmarkspec(spec, outdir + '/specs/{}.txt'.format(i), warning=0)
-
-         for j, qubits in enumerate(structure):
-             summarydict = {'pass': benchmarker.pass_summary_data[i][qubits],
-                            'global': benchmarker.global_summary_data[i][qubits]
-                            }
-             fname = outdir + '/summarydata/' + '{}-{}.txt'.format(i, j)
-             with open(fname, 'w') as f:
-                 _json.dump(summarydict, f, indent=4)
-
-             aux = benchmarker.aux[i][qubits]
-             fname = outdir + '/aux/' + '{}-{}.txt'.format(i, j)
-             with open(fname, 'w') as f:
-                 _json.dump(aux, f, indent=4)
-
-             for pkey in benchmarker.predicted_summary_data.keys():
-                 summarydict = benchmarker.predicted_summary_data[pkey][i][qubits]
-                 fname = outdir + '/predictions/{}/summarydata/'.format(pkey) + '{}-{}.txt'.format(i, j)
-                 with open(fname, 'w') as f:
-                     _json.dump(summarydict, f, indent=4)
-
-     for dskey in benchmarker.multids.keys():
-         fdir = outdir + '/data/{}'.format(dskey)
-         _os.makedirs(fdir)
-         for dsind in benchmarker.multids[dskey].keys():
-             fname = fdir + '/ds{}.txt'.format(dsind)
-             _io.write_dataset(fname, benchmarker.multids[dskey][dsind], fixed_column_mode=False)
-
-
- def create_benchmarker(dsfilenames, predictions=None, test_stability=True, auxtypes=None, verbosity=1):
-     if predictions is None:
-         predictions = dict()
-     if auxtypes is None:
-         auxtypes = []
-     benchmarker = load_data_into_benchmarker(dsfilenames, verbosity=verbosity)
-     if test_stability:
-         if verbosity > 0:
-             print(" - Running stability analysis...", end='')
-         benchmarker.test_pass_stability(formatdata=True, verbosity=0)
-         if verbosity > 0:
-             print("complete.")
-
-     benchmarker.create_summary_data(predictions=predictions, auxtypes=auxtypes)
-
-     return benchmarker
-
- # Todo : just make this and create_benchmarker a single function? This import has been superceded
- # by load_benchmarker
-
-
- def load_data_into_benchmarker(dsfilenames=None, summarydatasets_filenames=None, summarydatasets_folder=None,
-                                predicted_summarydatasets_folders=None, verbosity=1):
-     """
-     todo
-
-     """
-     if predicted_summarydatasets_folders is None:
-         predicted_summarydatasets_folders = dict()
-     elif len(predicted_summarydatasets_folders) > 0:
-         assert(summarydatasets_folder is not None)
-         #if len(predicted_summarydatasets_folders) > 1:
-         #    raise NotImplementedError("This is not yet supported!")
-
-     if dsfilenames is not None:
-
-         # If it is a filename, then we import the dataset from file.
-         if isinstance(dsfilenames, str):
-             dsfilenames = [dsfilenames, ]
-         elif not isinstance(dsfilenames, list):
-             raise ValueError("dsfilenames must be a str or a list of strings!")
-
-         mds = _mds.MultiDataSet()
-         for dsfn_ind, dsfn in enumerate(dsfilenames):
-
-             if dsfn[-4:] == '.txt':
-                 print(dsfn)
-                 mds.add_dataset(dsfn_ind, _io.read_dataset(dsfn,
-                                                            collision_action='keepseparate',
-                                                            record_zero_counts=False,
-                                                            ignore_zero_count_lines=False,
-                                                            verbosity=verbosity))
-
-             elif dsfn[-4:] == '.pkl':
-
-                 if verbosity > 0:
-                     print(" - Loading DataSet from pickle file...", end='')
-                 with open(dsfn, 'rb') as f:
-                     mds.add_dataset(dsfn_ind, _pickle.load(f))
-                 if verbosity > 0:
-                     print("complete.")
-
-             else:
-                 raise ValueError("File must end in .pkl or .txt!")
-
-         # # If it isn't a string, we assume that `dsfilenames` is a DataSet.
-         # else:
-
-         #     ds = dsfilenames
-
-         if verbosity > 0: print(" - Extracting metadata from the DataSet...", end='')
-
-         # To store the aux information about the RB experiments.
-         all_spec_filenames = []
-         # circuits_for_specfile = {}
-         # outdslist = []
-
-         # We go through the dataset and extract all the necessary auxillary information.
-         for circ in mds[mds.keys()[0]].keys():
-
-             # The spec filename or names for this circuits
-             specfns_forcirc = mds.auxInfo[circ]['spec']
-             # The RB length for this circuit
-             # try:
-             #     l = mds.auxInfo[circ]['depth']
-             # except:
-             #     l = mds.auxInfo[circ]['length']
-             # The target bitstring for this circuit.
-             # target = mds.auxInfo[circ]['target']
-
-             # This can be a string (a single spec filename) or a list, so make always a list.
-             if isinstance(specfns_forcirc, str):
-                 specfns_forcirc = [specfns_forcirc, ]
-
-             for sfn_forcirc in specfns_forcirc:
-                 # If this is the first instance of seeing this filename then...
-                 if sfn_forcirc not in all_spec_filenames:
-                     # ... we store it in the list of all spec filenames to import later.
-                     all_spec_filenames.append(sfn_forcirc)
-                     # And it won't yet be a key in the circuits_for_specfile dict, so we add it.
-                     # circuits_for_specfile[sfn_forcirc] = {}
-
-             # # If we've not yet had this length for that spec filename, we add that as a key.
-             # if l not in circuits_for_specfile[sfn_forcirc].keys():
-             #     circuits_for_specfile[sfn_forcirc][l] = []
-
-             # # We add the circuit and target output to the dict for the corresponding spec files.
-             # circuits_for_specfile[sfn_forcirc][l].append((circ, target))
-
-             # circ_specindices = []
-             # for sfn_forcirc in specfns_forcirc:
-             #     circ_specindices.append(all_spec_filenames.index(sfn_forcirc))
-
-         if verbosity > 0:
-             print("complete.")
-             print(" - Reading in the metadata from the extracted filenames...", end='')
-
-         # We put RB specs that we create via file import (and the circuits above) into this dict
-         rbspecdict = {}
-
-         # We look for spec files in the same directory as the datafiles, so we find what that is.
-         # THIS REQUIRES ALL THE FILES TO BE IN THE SAME DIRECTORY
-         directory = dsfilenames[0].split('/')
-         directory = '/'.join(directory[: -1])
-         if len(directory) > 0:
-             directory += '/'
-
-         for specfilename in all_spec_filenames:
-
-             # Import the RB spec file.
-             rbspec = load_benchmarkspec(directory + specfilename)
-             # Add in the circuits that correspond to each spec, extracted from the dataset.
-             # rbspec.add_circuits(circuits_for_specfile[specfilename])
-             # Record the spec in a list, to be given to an RBAnalyzer object.
-             rbspecdict[specfilename] = rbspec
-
-         if verbosity > 0:
-             print("complete.")
-             print(" - Recording all of the data in a Benchmarker...", end='')
-
-         # Put everything into an RBAnalyzer object, which is a container for RB data, and return this.
-         benchmarker = _benchmarker.Benchmarker(rbspecdict, ds=mds, summary_data=None)
-
-         if verbosity > 0: print("complete.")
-
-         return benchmarker
-
-     elif (summarydatasets_filenames is not None) or (summarydatasets_folder is not None):
-
-         rbspecdict = {}
-
-         # If a dict, its just the keys of the dict that are the rbspec file names.
-         if summarydatasets_filenames is not None:
-
-             specfiles = list(summarydatasets_filenames.keys())
-
-         # If a folder, we look for files in that folder with the standard name format.
-         elif summarydatasets_folder is not None:
-             specfiles = []
-             specfilefound = True
-             i = 0
-             while specfilefound:
-                 try:
-                     filename = summarydatasets_folder + "/spec{}.txt".format(i)
-                     with open(filename, 'r') as f:
-                         if verbosity > 0:
-                             print(filename + " found")
-                         specfiles.append(filename)
-                         i += 1
-                 except:
-                     specfilefound = False
-                     if verbosity > 0:
-                         print(filename + " not found so terminating spec file search.")
-
-         for sfn_ind, specfilename in enumerate(specfiles):
-
-             rbspec = load_benchmarkspec(specfilename)
-             rbspecdict[sfn_ind] = rbspec
-
-         summary_data = {}
-         predicted_summary_data = {pkey: {} for pkey in predicted_summarydatasets_folders.keys()}
-
-         for i, (specfilename, rbspec) in enumerate(zip(specfiles, rbspecdict.values())):
-
-             structure = rbspec.get_structure()
-             summary_data[i] = {}
-             for pkey in predicted_summarydatasets_folders.keys():
-                 predicted_summary_data[pkey][i] = {}
-
-             if summarydatasets_filenames is not None:
-                 sds_filenames = summarydatasets_filenames[specfilename]
-             elif summarydatasets_folder is not None:
-                 sds_filenames = [summarydatasets_folder + '/{}-{}.txt'.format(i, j) for j in range(len(structure))]
-                 predsds_filenames_dict = {}
-                 for pkey, pfolder in predicted_summarydatasets_folders.items():
-                     predsds_filenames_dict[pkey] = [pfolder + '/{}-{}.txt'.format(i, j) for j in range(len(structure))]
-
-             for sdsfn, qubits in zip(sds_filenames, structure):
-                 summary_data[i][qubits] = import_rb_summary_data(sdsfn, len(qubits), verbosity=verbosity)
-
-             for pkey, predsds_filenames in predsds_filenames_dict.items():
-                 for sdsfn, qubits in zip(predsds_filenames, structure):
-                     predicted_summary_data[pkey][i][qubits] = import_rb_summary_data(
-                         sdsfn, len(qubits), verbosity=verbosity)
-
-         benchmarker = _benchmarker.Benchmarker(rbspecdict, ds=None, summary_data=summary_data,
-                                                predicted_summary_data=predicted_summary_data)
-
-         return benchmarker
-
-     else:
-         raise ValueError("Either a filename for a DataSet or filenames for a set of RBSpecs "
-                          + "and RBSummaryDatasets must be provided!")
-
-
- def load_benchmarkspec(filename, circuitsfilename=None):
-     """
-     todo
-
-     """
-     #d = {}
-     with open(filename) as f:
-         d = _json.load(f)
-         # for line in f:
-         #     if len(line) > 0 and line[0] != '#':
-         #         line = line.strip('\n')
-         #         line = line.split(' ', 1)
-         #         try:
-         #             d[line[0]] = _ast.literal_eval(line[1])
-         #         except:
-         #             d[line[0]] = line[1]
-
-     #assert(d.get('type', None) == 'rb'), "This is for importing RB specs!"
-
-     try:
-         rbtype = d['type']
-     except:
-         raise ValueError("Input file does not contain a line specifying the RB type!")
-     assert(isinstance(rbtype, str)), "The RB type (specified as rbtype) must be a string!"
-
-     try:
-         structure = d['structure']
-     except:
-         raise ValueError("Input file does not contain a line specifying the structure!")
-     if isinstance(structure, list):
-         structure = tuple([tuple(qubits) for qubits in structure])
-     assert(isinstance(structure, tuple)), "The structure must be a tuple!"
-
-     try:
-         sampler = d['sampler']
-     except:
-         raise ValueError("Input file does not contain a line specifying the circuit layer sampler!")
-     assert(isinstance(sampler, str)), "The sampler name must be a string!"
-
-     samplerargs = d.get('samplerargs', None)
-     depths = d.get('depths', None)
-     numcircuits = d.get('numcircuits', None)
-     subtype = d.get('subtype', None)
-
-     if samplerargs is not None:
-         assert(isinstance(samplerargs, dict)), "The samplerargs must be a dict!"
-
-     if depths is not None:
-         assert(isinstance(depths, list) or isinstance(depths, tuple)), "The depths must be a list or tuple!"
-
-     if numcircuits is not None:
-         assert(isinstance(numcircuits, list) or isinstance(numcircuits, int)), "numcircuits must be an int or list!"
-
-     spec = _sample.BenchmarkSpec(rbtype, structure, sampler, samplerargs, depths=depths,
-                                  numcircuits=numcircuits, subtype=subtype)
-
-     return spec
-
-
- def write_benchmarkspec(spec, filename, circuitsfilename=None, warning=1):
-     """
-     todo
-
-     """
-     if spec.circuits is not None:
-         if circuitsfilename is not None:
-             circuitlist = [circ for sublist in [spec.circuits[l] for l in spec.depths] for circ in sublist]
-             _io.write_circuit_list(circuitsfilename, circuitlist)
-         elif warning > 0:
-             _warnings.warn("The circuits recorded in this RBSpec are not being written to file!")
-
-     # with open(filename, 'w') as f:
-     #     f.write('type rb\n')
-     #     f.write('rbtype ' + rbspec._rbtype + '\n')
-     #     f.write('structure ' + str(rbspec._structure) + '\n')
-     #     f.write('sampler ' + rbspec._sampler + '\n')
-     #     f.write('lengths ' + str(rbspec._lengths) + '\n')
-     #     f.write('numcircuits ' + str(rbspec._numcircuits) + '\n')
-     #     f.write('rbsubtype ' + str(rbspec._rbsubtype) + '\n')
-     #     f.write('samplerargs ' + str(rbspec._samplerargs) + '\n')
-
-     specdict = spec.to_dict()
-     del specdict['circuits'] # Don't write the circuits to this file.
-
-     with open(filename, 'w') as f:
-         _json.dump(specdict, f, indent=4)
-
-
- def import_rb_summary_data(filename, numqubits, datatype='auto', verbosity=1):
-     """
-     todo
-
-     """
-     try:
-         with open(filename, 'r') as f:
-             if verbosity > 0: print("Importing " + filename + "...", end='')
-     except:
-         raise ValueError("Date import failed! File does not exist or the format is incorrect.")
-
-     aux = []
-     descriptor = ''
-     # Work out the type of data we're importing
-     with open(filename, 'r') as f:
-         for line in f:
-
-             if (len(line) == 0 or line[0] != '#'): break
-
-             elif line.startswith("# "):
-                 descriptor += line[2:]
-
-             elif line.startswith("## "):
-
-                 line = line.strip('\n')
-                 line = line.split(' ')
-                 del line[0]
-
-                 if line[0:2] == ['rblength', 'success_probabilities']:
-
-                     auxind = 2
-                     if datatype == 'auto':
-                         datatype = 'success_probabilities'
-                     else:
-                         assert(datatype == 'success_probabilities'), "The data format appears to be " + \
-                             "success probabilities!"
-
-                 elif line[0:3] == ['rblength', 'success_counts', 'total_counts']:
-
-                     auxind = 3
-                     if datatype == 'auto':
-                         datatype = 'success_counts'
-                     else:
-                         assert(datatype == 'success_counts'), "The data format appears to be success counts!"
-
-                 elif line[0: numqubits + 2] == ['rblength', ] + ['hd{}c'.format(i) for i in range(numqubits + 1)]:
-
-                     auxind = numqubits + 2
-                     if datatype == 'auto':
-                         datatype = 'hamming_distance_counts'
-                     else:
-                         assert(datatype == 'hamming_distance_counts'), "The data format appears to be Hamming " + \
-                             "distance counts!"
-
-                 elif line[0: numqubits + 2] == ['rblength', ] + ['hd{}p'.format(i) for i in range(numqubits + 1)]:
-
-                     auxind = numqubits + 2
-                     if datatype == 'auto':
-                         datatype = 'hamming_distance_probabilities'
-                     else:
-                         assert(datatype == 'hamming_distance_probabilities'), "The data format appears to be " + \
-                             "Hamming distance probabilities!"
-
-                 else:
-                     raise ValueError("Invalid file format!")
-
-                 if len(line) > auxind:
-                     assert(line[auxind] == '#')
-                     if len(line) > auxind + 1:
-                         auxlabels = line[auxind + 1:]
-                 else:
-                     auxlabels = []
-
-                 break
-
-     # Prepare an aux dict to hold any auxillary data
-     aux = {key: {} for key in auxlabels}
-
-     # Read in the data, using a different parser depending on the data type.
-     if datatype == 'success_counts':
-
-         success_counts = {}
-         total_counts = {}
-         finitecounts = True
-         hamming_distance_counts = None
-
-         with open(filename, 'r') as f:
-             for line in f:
-                 if (len(line) > 0 and line[0] != '#'):
-
-                     line = line.strip('\n')
-                     line = line.split(' ')
-                     l = int(line[0])
-
-                     if l not in success_counts:
-                         success_counts[l] = []
-                         total_counts[l] = []
-                         for key in auxlabels:
-                             aux[key][l] = []
-
-                     success_counts[l].append(float(line[1]))
-                     total_counts[l].append(float(line[2]))
-
-                     if len(aux) > 0:
-                         assert(line[3] == '#'), "Auxillary data must be divided from the core data!"
-                         for i, key in enumerate(auxlabels):
-                             if key != 'target' and key != 'circuit':
-                                 aux[key][l].append(_ast.literal_eval(line[4 + i]))
-                             else:
-                                 if key == 'target':
-                                     aux[key][l].append(line[4 + i])
-                                 if key == 'circuit':
-                                     aux[key][l].append(_cir.Circuit(line[4 + i]))
-
-     elif datatype == 'success_probabilities':
-
-         success_counts = {}
-         total_counts = None
-         finitecounts = False
-         hamming_distance_counts = None
-
-         with open(filename, 'r') as f:
-             for line in f:
-                 if (len(line) > 0 and line[0] != '#'):
-
-                     line = line.strip('\n')
-                     line = line.split(' ')
-                     l = int(line[0])
-
-                     if l not in success_counts:
-                         success_counts[l] = []
-                         for key in auxlabels:
-                             aux[key][l] = []
-
-                     success_counts[l].append(float(line[1]))
-
-                     if len(aux) > 0:
-                         assert(line[2] == '#'), "Auxillary data must be divided from the core data!"
-                         for i, key in enumerate(auxlabels):
-                             if key != 'target' and key != 'circuit':
-                                 aux[key][l].append(_ast.literal_eval(line[3 + i]))
-                             else:
-                                 if key == 'target':
-                                     aux[key][l].append(line[3 + i])
-                                 if key == 'circuit':
-                                     aux[key][l].append(_cir.Circuit(line[3 + i]))
-
-     elif datatype == 'hamming_distance_counts' or datatype == 'hamming_distance_probabilities':
-
-         hamming_distance_counts = {}
-         success_counts = None
-         total_counts = None
-
-         if datatype == 'hamming_distance_counts': finitecounts = True
-         if datatype == 'hamming_distance_probabilities': finitecounts = False
-
-         with open(filename, 'r') as f:
-             for line in f:
-                 if (len(line) > 0 and line[0] != '#'):
-
-                     line = line.strip('\n')
-                     line = line.split(' ')
-                     l = int(line[0])
-
-                     if l not in hamming_distance_counts:
-                         hamming_distance_counts[l] = []
-                         for key in auxlabels:
-                             aux[key][l] = []
-
-                     hamming_distance_counts[l].append([float(line[1 + i]) for i in range(0, numqubits + 1)])
-
-                     if len(aux) > 0:
-                         assert(line[numqubits + 2] == '#'), "Auxillary data must be divided from the core data!"
-                         for i, key in enumerate(auxlabels):
-                             if key != 'target' and key != 'circuit':
-                                 aux[key][l].append(_ast.literal_eval(line[numqubits + 3 + i]))
-                             else:
-                                 if key == 'target':
-                                     aux[key][l].append(line[numqubits + 3 + i])
-                                 if key == 'circuit':
-                                     aux[key][l].append(line[numqubits + 3 + i])
-                                     #aux[key][l].append(_cir.Circuit(line[numqubits + 3 + i]))
-     else:
-         raise ValueError("The data format couldn't be extracted from the file!")
-
-     rbdataset = _dataset.RBSummaryDataset(numqubits, success_counts=success_counts, total_counts=total_counts,
-                                           hamming_distance_counts=hamming_distance_counts, aux=aux,
-                                           finitecounts=finitecounts, descriptor=descriptor)
-
-     if verbosity > 0:
-         print('complete')
-
-     return rbdataset
-
-
- def write_rb_summary_data_to_file(ds, filename):
-     """
-     todo
-
-     """
-     numqubits = ds.num_qubits
-     with open(filename, 'w') as f:
-
-         descriptor_string = ds.descriptor.split("\n")
-
-         for s in descriptor_string:
-             if len(s) > 0:
-                 f.write("# " + s + "\n")
-
-         if ds.datatype == 'success_counts':
-             if ds.finitecounts:
-                 topline = '## rblength success_counts total_counts'
-             else:
-                 topline = '## rblength success_probabilities'
-
-         elif ds.datatype == 'hamming_distance_counts':
-             if ds.finitecounts:
-                 topline = '## rblength' + ''.join([' hd{}c'.format(i) for i in range(0, numqubits + 1)])
-             else:
-                 topline = '## rblength' + ''.join([' hd{}p'.format(i) for i in range(0, numqubits + 1)])
-
-         auxlabels = list(ds.aux.keys())
-         if len(auxlabels) > 0:
-             topline += ' #'
-             for key in auxlabels: topline += ' ' + key
-
-         f.write(topline + '\n')
-
-         for l, counts in ds.counts.items():
-
-             for i, c in enumerate(counts):
-
-                 if ds.datatype == 'success_counts':
-                     if ds.finitecounts:
-                         dataline = str(l) + ' ' + str(c) + ' ' + str(ds._total_counts[l][i])
-                     else:
-                         dataline = str(l) + ' ' + str(c)
-                 elif ds.datatype == 'hamming_distance_counts':
-                     dataline = str(l) + ''.join([' ' + str(c[i]) for i in range(0, numqubits + 1)])
-
-                 if len(auxlabels) > 0:
-                     dataline += ' #' + ''.join([' ' + str(ds.aux[key][l][i]) for key in auxlabels])
-
-                 f.write(dataline + '\n')
-
-     return
-
-
- # # todo update this.
- # def import_rb_summary_data(filenames, numqubits, type='auto', verbosity=1):
- #     """
- #     todo : redo
- #     Reads in one or more text files of summary RB data into a RBSummaryDataset object. This format
- #     is appropriate for using the RB analysis functions. The datafile(s) should have one of the
- #     following two formats:
-
- #     Format 1 (`is_counts_data` is True):
-
- #         # The number of qubits
- #         The number of qubits (this line is optional if `num_qubits` is specified)
- #         # RB length // Success counts // Total counts // Circuit depth // Circuit two-qubit gate count
- #         Between 3 and 5 columns of data (the last two columns are expected only if `contains_circuit_data` is True).
-
- #     Format 2 (`is_counts_data` is False):
-
- #         # The number of qubits
- #         The number of qubits (this line is optional if `num_qubits` is specified)
- #         # RB length // Survival probabilities // Circuit depth // Circuit two-qubit gate count
- #         Between 2 and 4 columns of data (the last two columns are expected only if `contains_circuit_data` is True).
-
- #     Parameters
- #     ----------
- #     filenames : str or list.
- #         The filename, or a list of filenams, where the data is stored. The data from all files is read
- #         into a *single* dataset, so normally it should all be data for a single RB experiment.
-
- #     is_counts_data : bool, optional
- #         Whether the data to be read contains success counts data (True) or survival probability data (False).
-
- #     contains_circuit_data : bool, optional.
- #         Whether the data counts summary circuit data.
-
- #     finitesampling : bool, optional
- #         Records in the RBSummaryDataset whether the survival probability for each circuit was obtained
- #         from finite sampling of the outcome probabilities. This is there to, by default, warn the user
- #         that any finite sampling cannot be taken into account if the input is not counts data (when
- #         they run any analysis on the data). But it is useful to be able to set this to False for simulated
- #         data obtained from perfect outcome sampling.
-
- #     num_qubits : int, optional.
- #         The number of qubits the data is for. Must be specified if this isn't in the input file.
-
- #     total_counts : int, optional
- #         If the data is success probability data, the total counts can optional be input here.
-
- #     verbosity : int, optional
- #         The amount of print-to-screen.
-
- #     Returns
- #     -------
- #     None
- #     """
-
-
- # # todo : update this.
- # def write_rb_summary_data_to_file(RBSdataset, filename):
- #     """
- #     Writes an RBSSummaryDataset to file, in the format that can be read back in by
- #     import_rb_summary_data().
-
- #     Parameters
- #     ----------
- #     RBSdataset : RBSummaryDataset
- #         The data to write to file.
-
- #     filename : str
- #         The filename where the dataset should be written.
-
- #     Returns
- #     -------
- #     None
- #     """