sinter 1.15.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of sinter might be problematic. Click here for more details.

Files changed (62) hide show
  1. sinter/__init__.py +47 -0
  2. sinter/_collection/__init__.py +10 -0
  3. sinter/_collection/_collection.py +480 -0
  4. sinter/_collection/_collection_manager.py +581 -0
  5. sinter/_collection/_collection_manager_test.py +287 -0
  6. sinter/_collection/_collection_test.py +317 -0
  7. sinter/_collection/_collection_worker_loop.py +35 -0
  8. sinter/_collection/_collection_worker_state.py +259 -0
  9. sinter/_collection/_collection_worker_test.py +222 -0
  10. sinter/_collection/_mux_sampler.py +56 -0
  11. sinter/_collection/_printer.py +65 -0
  12. sinter/_collection/_sampler_ramp_throttled.py +66 -0
  13. sinter/_collection/_sampler_ramp_throttled_test.py +144 -0
  14. sinter/_command/__init__.py +0 -0
  15. sinter/_command/_main.py +39 -0
  16. sinter/_command/_main_collect.py +350 -0
  17. sinter/_command/_main_collect_test.py +482 -0
  18. sinter/_command/_main_combine.py +84 -0
  19. sinter/_command/_main_combine_test.py +153 -0
  20. sinter/_command/_main_plot.py +817 -0
  21. sinter/_command/_main_plot_test.py +445 -0
  22. sinter/_command/_main_predict.py +75 -0
  23. sinter/_command/_main_predict_test.py +36 -0
  24. sinter/_data/__init__.py +20 -0
  25. sinter/_data/_anon_task_stats.py +89 -0
  26. sinter/_data/_anon_task_stats_test.py +35 -0
  27. sinter/_data/_collection_options.py +106 -0
  28. sinter/_data/_collection_options_test.py +24 -0
  29. sinter/_data/_csv_out.py +74 -0
  30. sinter/_data/_existing_data.py +173 -0
  31. sinter/_data/_existing_data_test.py +41 -0
  32. sinter/_data/_task.py +311 -0
  33. sinter/_data/_task_stats.py +244 -0
  34. sinter/_data/_task_stats_test.py +140 -0
  35. sinter/_data/_task_test.py +38 -0
  36. sinter/_decoding/__init__.py +16 -0
  37. sinter/_decoding/_decoding.py +419 -0
  38. sinter/_decoding/_decoding_all_built_in_decoders.py +25 -0
  39. sinter/_decoding/_decoding_decoder_class.py +161 -0
  40. sinter/_decoding/_decoding_fusion_blossom.py +193 -0
  41. sinter/_decoding/_decoding_mwpf.py +302 -0
  42. sinter/_decoding/_decoding_pymatching.py +81 -0
  43. sinter/_decoding/_decoding_test.py +480 -0
  44. sinter/_decoding/_decoding_vacuous.py +38 -0
  45. sinter/_decoding/_perfectionist_sampler.py +38 -0
  46. sinter/_decoding/_sampler.py +72 -0
  47. sinter/_decoding/_stim_then_decode_sampler.py +222 -0
  48. sinter/_decoding/_stim_then_decode_sampler_test.py +192 -0
  49. sinter/_plotting.py +619 -0
  50. sinter/_plotting_test.py +108 -0
  51. sinter/_predict.py +381 -0
  52. sinter/_predict_test.py +227 -0
  53. sinter/_probability_util.py +519 -0
  54. sinter/_probability_util_test.py +281 -0
  55. sinter-1.15.0.data/data/README.md +332 -0
  56. sinter-1.15.0.data/data/readme_example_plot.png +0 -0
  57. sinter-1.15.0.data/data/requirements.txt +4 -0
  58. sinter-1.15.0.dist-info/METADATA +354 -0
  59. sinter-1.15.0.dist-info/RECORD +62 -0
  60. sinter-1.15.0.dist-info/WHEEL +5 -0
  61. sinter-1.15.0.dist-info/entry_points.txt +2 -0
  62. sinter-1.15.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,281 @@
1
+ import math
2
+ from typing import Union
3
+
4
+ import numpy as np
5
+ import pytest
6
+
7
+ import sinter
8
+ from sinter._probability_util import (
9
+ binary_search, log_binomial, log_factorial, fit_line_y_at_x, fit_line_slope,
10
+ binary_intercept, least_squares_through_point, fit_binomial, shot_error_rate_to_piece_error_rate,
11
+ )
12
+ from sinter._probability_util import comma_separated_key_values
13
+
14
+
15
@pytest.mark.parametrize(
    "arg,result",
    [
        # Exact values for tiny inputs.
        (0, 0),
        (1, 0),
        (2, math.log(2)),
        (3, math.log(2) + math.log(3)),
        # These reference values were taken from wolfram alpha:
        (10, 15.1044125730755152952257093292510),
        (100, 363.73937555556349014407999336965),
        (1000, 5912.128178488163348878130886725),
        (10000, 82108.9278368143534553850300635),
        (100000, 1051299.2218991218651292781082),
    ],
)
def test_log_factorial(arg, result):
    """log_factorial agrees with exact small values and high-precision references."""
    np.testing.assert_allclose(log_factorial(arg), result, rtol=1e-11)
32
+
33
+
34
@pytest.mark.parametrize(
    "n,p,hits,result",
    [
        # Single shot: probability is just p or 1-p.
        (1, 0.5, 0, np.log(0.5)),
        (1, 0.5, 1, np.log(0.5)),
        (1, 0.1, 0, np.log(0.9)),
        (1, 0.1, 1, np.log(0.1)),
        # Two shots, with p given as a list of probabilities (broadcast over p).
        (2, [0, 1, 0.1, 0.5], 0, [0, -np.inf, np.log(0.9 ** 2), np.log(0.25)]),
        (2, [0, 1, 0.1, 0.5], 1, [-np.inf, -np.inf, np.log(0.1 * 0.9 * 2), np.log(0.5)]),
        (2, [0, 1, 0.1, 0.5], 2, [-np.inf, 0, np.log(0.1 ** 2), np.log(0.25)]),
        # Magic number comes from PDF[BinomialDistribution[10^10, 10^-6], 10000] on wolfram alpha.
        (10 ** 10, 10 ** -6, 10 ** 4, np.log(0.0039893915536591)),
        # Corner cases: p of exactly 0 or 1 makes the other outcome impossible (log prob -inf).
        (1, 0.0, 0, 0),
        (1, 0.0, 1, -np.inf),
        (1, 1.0, 0, -np.inf),
        (1, 1.0, 1, 0),
        # Array broadcast.
        (2, np.array([0.0, 0.5, 1.0]), 0, np.array([0.0, np.log(0.25), -np.inf])),
        (2, np.array([0.0, 0.5, 1.0]), 1, np.array([-np.inf, np.log(0.5), -np.inf])),
        (2, np.array([0.0, 0.5, 1.0]), 2, np.array([-np.inf, np.log(0.25), 0.0])),
    ],
)
def test_log_binomial(
    n: int, p: Union[float, np.ndarray], hits: int, result: Union[float, np.ndarray]
) -> None:
    """log_binomial(n, p, hits) matches hand-computed log-probabilities,
    including the p=0/p=1 edge cases and broadcasting over array-valued p."""
    np.testing.assert_allclose(log_binomial(n=n, p=p, hits=hits), result, rtol=1e-2)
61
+
62
+
63
def test_binary_search():
    """binary_search over x -> x**2 returns the expected integer for various targets."""
    cases = [
        # (target, expected result)
        (100.1, 10),      # just above an exact square
        (100, 10),        # exact square
        (99.9, 10),       # just below an exact square
        (90, 9),
        (92, 10),
        (-100, 0),        # target below the whole range: expect min_x
        (10**300, 10**100),  # target above the whole range: expect max_x
    ]
    for target, expected in cases:
        assert binary_search(func=lambda x: x**2, min_x=0, max_x=10**100, target=target) == expected
71
+
72
+
73
def test_least_squares_through_point():
    """least_squares_through_point fits a line constrained to pass through a required point."""
    xs = np.array([1, 2, 3])
    ys = np.array([2, 3, 4])

    # The data lies exactly on y = x + 1 and the required point (1, 2) is on
    # that line, so the constrained fit recovers it exactly.
    on_line_fit = least_squares_through_point(
        xs=xs, ys=ys, required_x=1, required_y=2)
    np.testing.assert_allclose(on_line_fit.slope, 1)
    np.testing.assert_allclose(on_line_fit.intercept, 1)

    # Forcing the line through (1, 1), which is off the data's line, pulls the
    # fit away from the perfect solution.
    off_line_fit = least_squares_through_point(
        xs=xs, ys=ys, required_x=1, required_y=1)
    np.testing.assert_allclose(off_line_fit.slope, 1.6, rtol=1e-5)
    np.testing.assert_allclose(off_line_fit.intercept, -0.6, atol=1e-5)
89
+
90
+
91
def test_binary_intercept():
    """binary_intercept finds an x where func(x) is within atol of target_y,
    stepping in the direction given by the sign of `step`."""
    # Rightward search on an increasing curve.
    hit = binary_intercept(func=lambda x: x**2, start_x=5, step=1, target_y=82.3, atol=0.01)
    assert hit > 0
    assert abs(hit**2 - 82.3) <= 0.01

    # Rightward search on a decreasing curve.
    hit = binary_intercept(func=lambda x: -x**2, start_x=5, step=1, target_y=-82.3, atol=0.01)
    assert hit > 0
    assert abs(hit**2 - 82.3) <= 0.01

    # Leftward search reaches the negative-x intercept.
    hit = binary_intercept(func=lambda x: x**2, start_x=0, step=-1, target_y=82.3, atol=0.01)
    assert hit < 0
    assert abs(hit**2 - 82.3) <= 0.01

    # Leftward search with a looser tolerance.
    hit = binary_intercept(func=lambda x: -x**2, start_x=0, step=-1, target_y=-82.3, atol=0.2)
    assert hit < 0
    assert abs(hit**2 - 82.3) <= 0.2
100
+
101
+
102
def test_fit_y_at_x():
    """fit_line_y_at_x extrapolates the line through the data to a target x.

    The points (1,1),(2,5),(3,9) lie exactly on y = 4x - 3, so y(100) = 397;
    the best estimate should land near that with a surrounding uncertainty band.
    """
    extrapolation = fit_line_y_at_x(
        xs=[1, 2, 3],
        ys=[1, 5, 9],
        target_x=100,
        max_extra_squared_error=1,
    )
    assert 300 < extrapolation.low < 390
    assert 390 < extrapolation.best < 410
    assert 410 < extrapolation.high < 500
110
+
111
+
112
def test_fit_slope():
    """fit_line_slope recovers the exact slope (4) of the line through
    (1,1),(2,5),(3,9), with a low/high uncertainty band around it."""
    slope_fit = fit_line_slope(
        xs=[1, 2, 3],
        ys=[1, 5, 9],
        max_extra_squared_error=1,
    )
    np.testing.assert_allclose(slope_fit.best, 4)
    assert 3 < slope_fit.low < 3.5
    assert 3.5 < slope_fit.best < 4.5
    assert 4.5 < slope_fit.high < 5
120
+
121
+
122
def test_fit_binomial_shrink_towards_half():
    """fit_binomial's interval around a 50% hit rate narrows as shots grow,
    and narrows as max_likelihood_factor shrinks.

    Fix: the original passed float hit counts (e.g. ``10 ** 5 / 2 == 50000.0``)
    where an integer count of hits is meant; use integer division instead.
    """
    # A max_likelihood_factor below 1 is invalid and must be rejected.
    with pytest.raises(ValueError, match='max_likelihood_factor'):
        fit_binomial(num_shots=10 ** 5, num_hits=10 ** 5 // 2, max_likelihood_factor=0.1)

    cases = [
        # (num_shots, max_likelihood_factor, (expected low, best, high))
        (10 ** 5, 1e3, (0.494122, 0.5, 0.505878)),
        (10 ** 4, 1e3, (0.481422, 0.5, 0.518578)),
        (10 ** 4, 1e2, (0.48483, 0.5, 0.51517)),   # smaller factor -> tighter interval
        (1000, 1e3, (0.44143, 0.5, 0.55857)),
        (100, 1e3, (0.3204, 0.5, 0.6796)),
    ]
    for num_shots, factor, expected in cases:
        fit = fit_binomial(
            num_shots=num_shots,
            num_hits=num_shots // 2,  # exactly half the shots are hits
            max_likelihood_factor=factor,
        )
        np.testing.assert_allclose(
            (fit.low, fit.best, fit.high),
            expected,
            rtol=1e-4,
        )
156
+
157
+
158
@pytest.mark.parametrize("n,c,factor", [
    (100, 50, 1e1),
    (100, 50, 1e2),
    (100, 50, 1e3),
    (1000, 500, 1e3),
    (10**6, 100, 1e3),
    (10**6, 100, 1e2),
])
def test_fit_binomial_vs_log_binomial(n: int, c: int, factor: float):
    """The fit's low/high bounds sit where the binomial likelihood drops below
    the maximum by exactly the requested factor."""
    hits = n - c
    best_p = hits / n

    fit = fit_binomial(num_shots=n, num_hits=hits, max_likelihood_factor=factor)

    # The best estimate is the maximum-likelihood hit rate.
    np.testing.assert_allclose(fit.best, best_p, rtol=1e-4)

    # Log-likelihood at the peak and at each edge of the fitted interval.
    peak = log_binomial(p=best_p, n=n, hits=hits)
    at_low = log_binomial(p=fit.low, n=n, hits=hits)
    at_high = log_binomial(p=fit.high, n=n, hits=hits)

    # Each edge should be less likely than the peak by the requested factor.
    np.testing.assert_allclose(np.exp(peak - at_low), factor, rtol=1e-2)
    np.testing.assert_allclose(np.exp(peak - at_high), factor, rtol=1e-2)
191
+
192
+
193
def test_comma_separated_key_values():
    """comma_separated_key_values parses `k=v` pairs from a path's filename,
    types the values, and strips the file extension.

    Fix: exact-type checks now use ``type(x) is T`` instead of the
    ``type(x) == T`` anti-pattern (identity is the idiomatic exact-type test).
    """
    d = comma_separated_key_values("folder/a=2,b=3.0,c=test.stim")
    assert d == {
        'a': 2,
        'b': 3.0,
        'c': 'test',
    }
    # Dict equality treats 2 == 2.0, so also pin down the parsed types.
    assert type(d['a']) is int
    assert type(d['b']) is float
    # A bare token with no '=' is rejected.
    with pytest.raises(ValueError, match='separated'):
        comma_separated_key_values("folder/a,b=3.0,c=test.stim")
204
+
205
+
206
def test_shot_error_rate_to_piece_error_rate():
    """Inverting the per-piece -> per-shot error combination recovers the
    per-piece rate, including at extremely low error rates."""
    cases = [
        # (per-shot error rate, pieces, expected per-piece rate)
        # Two pieces: shot errs when exactly one piece errs (0.2 each way).
        (0.2 * (1 - 0.2) * 2, 2, 0.2),
        (0.001 * (1 - 0.001) * 2, 2, 0.001),
        # Three pieces: one error (three ways) or all three errors.
        (0.001 * (1 - 0.001)**2 * 3 + 0.001**3, 3, 0.001),
        # Extremely low error rates.
        (1e-100, 100, 1e-102),
    ]
    for shot_rate, pieces, expected in cases:
        np.testing.assert_allclose(
            shot_error_rate_to_piece_error_rate(
                shot_error_rate=shot_rate,
                pieces=pieces,
            ),
            expected,
            rtol=1e-5,
        )
239
+
240
+
241
def test_shot_error_rate_to_piece_error_rate_unions():
    """Same inversion as above, but with values=2 (two tracked values per shot)."""
    cases = [
        # (per-shot error rate, pieces, expected per-piece rate, rtol)
        (0.75, 1, 0.75, 1e-5),
        (0.2, 1, 0.2, 1e-5),
        # Many pieces: the per-piece rate is roughly rate/pieces.
        (0.001, 1000000, 0.001 / 1000000, 1e-2),
        (0.0975, 10, 0.010453280306648605, 1e-5),
    ]
    for shot_rate, pieces, expected, rtol in cases:
        np.testing.assert_allclose(
            shot_error_rate_to_piece_error_rate(
                shot_error_rate=shot_rate,
                pieces=pieces,
                values=2,
            ),
            expected,
            rtol=rtol,
        )
277
+
278
+
279
def test_fit_repr():
    """repr() of a Fit round-trips through eval() back to an equal value."""
    original = sinter.Fit(low=0.25, best=1, high=10)
    reconstructed = eval(repr(original), {"sinter": sinter})
    assert reconstructed == original
@@ -0,0 +1,332 @@
1
+ # sinter: fast QEC sampling
2
+
3
+ Sinter is a software tool/library for doing fast monte carlo sampling of
4
+ quantum error correction circuits.
5
+
6
+ - [How it works](#how_it_works)
7
+ - [How to install](#how_to_install)
8
+ - [How to use: Python API](#how_to_use_python)
9
+ - [Sinter Python API Reference](doc/sinter_api.md)
10
+ - [How to use: Linux Command Line](#how_to_use_linux)
11
+ - [Sinter Command Line Reference](doc/sinter_command_line.md)
12
+ - [The csv format for sample statistics](#csv_format)
13
+
14
+ <a name="how_it_works"></a>
15
+ # How it works
16
+
17
+ Sinter takes Stim circuits annotated with noise, detectors, and logical
18
+ observables.
19
+ It uses stim to sample the circuits and a decoder such as pymatching to predict
20
+ whether the logical observables were flipped or not, given the detector data.
21
+ It records how often this succeeds, and how often it fails (the error rate).
22
+
23
+ Sinter uses python multiprocessing to do parallel sampling across multiple CPU
24
+ cores, dynamically decides which circuits need more samples based on parameters
25
+ specified by the user (such as a target number of errors), saves the results to
26
+ a simple CSV format, and has some basic plotting functionality for viewing the
27
+ results.
28
+
29
+ Sinter doesn't support cloud compute, but it does scale well on a single
30
+ machine.
31
+ I've tested it on 2 core machines, 4 core machines, and 96 core machines.
32
+ Although there are potential pitfalls (e.g. setting batch sizes too large causes
33
+ thrashing), sinter generally achieves good resource utilization of the processes
34
+ you assign to it.
35
+
36
+ <a name="how_to_install"></a>
37
+ # How to install
38
+
39
+ Sinter is available as a pypi package. It can be installed using pip:
40
+
41
+ ```
42
+ pip install sinter
43
+ ```
44
+
45
+ When you are in a python virtual environment with sinter installed, you have
46
+ access to a command line command `sinter` which can be used to perform tasks
47
+ from the command line. You can also `import sinter` in a python program in order
48
+ to use sinter's python API.
49
+
50
+ <a name="how_to_use_python"></a>
51
+ # How to use: Python API
52
+
53
+ This example assumes you are in a python environment with `sinter` and
54
+ `pymatching` installed.
55
+
56
+ ```python
57
+ import stim
58
+ import sinter
59
+ import matplotlib.pyplot as plt
60
+
61
+
62
+ # Generates surface code circuit tasks using Stim's circuit generation.
63
+ def generate_example_tasks():
64
+ for p in [0.001, 0.005, 0.01]:
65
+ for d in [3, 5]:
66
+ yield sinter.Task(
67
+ circuit=stim.Circuit.generated(
68
+ rounds=d,
69
+ distance=d,
70
+ after_clifford_depolarization=p,
71
+ code_task=f'surface_code:rotated_memory_x',
72
+ ),
73
+ json_metadata={
74
+ 'p': p,
75
+ 'd': d,
76
+ },
77
+ )
78
+
79
+
80
+ def main():
81
+ # Collect the samples (takes a few minutes).
82
+ samples = sinter.collect(
83
+ num_workers=4,
84
+ max_shots=1_000_000,
85
+ max_errors=1000,
86
+ tasks=generate_example_tasks(),
87
+ decoders=['pymatching'],
88
+ )
89
+
90
+ # Print samples as CSV data.
91
+ print(sinter.CSV_HEADER)
92
+ for sample in samples:
93
+ print(sample.to_csv_line())
94
+
95
+ # Render a matplotlib plot of the data.
96
+ fig, ax = plt.subplots(1, 1)
97
+ sinter.plot_error_rate(
98
+ ax=ax,
99
+ stats=samples,
100
+ group_func=lambda stat: f"Rotated Surface Code d={stat.json_metadata['d']}",
101
+ x_func=lambda stat: stat.json_metadata['p'],
102
+ )
103
+ ax.loglog()
104
+ ax.set_ylim(1e-5, 1)
105
+ ax.grid()
106
+ ax.set_title('Logical Error Rate vs Physical Error Rate')
107
+ ax.set_ylabel('Logical Error Probability (per shot)')
108
+ ax.set_xlabel('Physical Error Rate')
109
+ ax.legend()
110
+
111
+ # Save to file and also open in a window.
112
+ fig.savefig('plot.png')
113
+ plt.show()
114
+
115
+
116
+ # NOTE: This is actually necessary! If the code inside 'main()' was at the
117
+ # module level, the multiprocessing children spawned by sinter.collect would
118
+ # also attempt to run that code.
119
+ if __name__ == '__main__':
120
+ main()
121
+ ```
122
+
123
+ Example output to stdout:
124
+
125
+ ```
126
+ shots, errors, discards, seconds,decoder,strong_id,json_metadata
127
+ 1000000, 837, 0, 36.6,pymatching,9f7e20c54fec45b6aef7491b774dd5c0a3b9a005aa82faf5b9c051d6e40d60a9,"{""d"":3,""p"":0.001}"
128
+ 53498, 1099, 0, 6.52,pymatching,3f40432443a99b933fb548b831fb54e7e245d9d73a35c03ea5a2fb2ce270f8c8,"{""d"":3,""p"":0.005}"
129
+ 16269, 1023, 0, 3.23,pymatching,17b2e0c99560d20307204494ac50e31b33e50721b4ebae99d9e3577ae7248874,"{""d"":3,""p"":0.01}"
130
+ 1000000, 151, 0, 77.3,pymatching,e179a18739201250371ffaae0197d8fa19d26b58dfc2942f9f1c85568645387a,"{""d"":5,""p"":0.001}"
131
+ 11363, 1068, 0, 12.5,pymatching,a4dec28934a033215ff1389651a26114ecc22016a6e122008830cf7dd04ba5ad,"{""d"":5,""p"":0.01}"
132
+ 61569, 1001, 0, 24.5,pymatching,2fefcc356752482fb4c6d912c228f6d18762f5752796c668b6abeb7775f5de92,"{""d"":5,""p"":0.005}"
133
+ ```
134
+
135
+ and the corresponding image saved to `plot.png`:
136
+
137
+ ![Example plot](readme_example_plot.png)
138
+
139
+ ## python API utility methods
140
+
141
+ Sinter's python module exposes a variety of methods that are handy for plotting
142
+ or analyzing QEC data.
143
+ See the [sinter API reference](https://github.com/quantumlib/Stim/blob/main/doc/sinter_api.md).
144
+
145
+ <a name="how_to_use_linux"></a>
146
+ # How to use: Linux Command Line
147
+
148
+ This example assumes you are using a linux command line in a python virtualenv with `sinter` installed.
149
+
150
+ ## pick circuits
151
+
152
+ For this example, we will use Stim's circuit generation functionality to produce
153
+ circuits to benchmark.
154
+ We will make rotated surface code circuits with various physical error rates,
155
+ with filenames like `rotated_d5_p0.001_surface_code.stim`.
156
+
157
+ ```bash
158
+ mkdir -p circuits
159
+ python -c "
160
+
161
+ import stim
162
+
163
+ for p in [0.001, 0.005, 0.01]:
164
+ for d in [3, 5]:
165
+ with open(f'circuits/d={d},p={p},b=X,type=rotated_surface_memory.stim', 'w') as f:
166
+ c = stim.Circuit.generated(
167
+ rounds=d,
168
+ distance=d,
169
+ after_clifford_depolarization=p,
170
+ after_reset_flip_probability=p,
171
+ before_measure_flip_probability=p,
172
+ before_round_data_depolarization=p,
173
+ code_task=f'surface_code:rotated_memory_x')
174
+ print(c, file=f)
175
+ "
176
+ ```
177
+
178
+ Normally, making the circuit files is the hardest step, because they are what
179
+ specifies the problem you are sampling from.
180
+ Almost all of the work you do will generally involve creating the exact perfect
181
+ circuit file for your needs.
182
+ But this is just an example, so we'll use normal surface code circuits.
183
+
184
+ # collect
185
+
186
+ You can use sinter to collect statistics on each circuit by using the `sinter collect` command.
187
+ This command takes options specifying how much data to collect, how to do decoding, etc.
188
+
189
+ The `processes` argument decides how many workers to use. Set it to `auto` to set
190
+ it to the number of CPUs on your machine.
191
+
192
+ The `metadata_func` argument can be used to specify a custom python expression that turns the `path`
193
+ into a dictionary or other JSON object associated with the circuit.
194
+ If you set `metadata_func` to `auto` then sinter will use the method
195
+ `sinter.comma_separated_key_values(path)` which parses
196
+ stim circuit paths like `folder/a=2,b=test.stim` into a dictionary like `{'a': 2, 'b': 'test'}`.
197
+
198
+ By default, sinter writes the collected statistics to stdout as CSV data.
199
+ One particularly important option that changes this behavior is `--save_resume_filepath`,
200
+ which allows the command to be interrupted and restarted without losing data.
201
+ Any data already at the file specified by `--save_resume_filepath` will count towards the
202
+ amount of statistics asked to be collected, and sinter will append new statistics to this file
203
+ instead of overwriting it.
204
+
205
+ ```bash
206
+ sinter collect \
207
+ --processes auto \
208
+ --circuits circuits/*.stim \
209
+ --metadata_func auto \
210
+ --decoders pymatching \
211
+ --max_shots 1_000_000 \
212
+ --max_errors 1000 \
213
+ --save_resume_filepath stats.csv
214
+ ```
215
+
216
+ Beware that if you SIGKILL or SIGTERM sinter, instead of just using SIGINT, it's possible
217
+ (though unlikely) that you are killing it just as it writes a row of CSV data. This truncates
218
+ the data, which requires manual intervention on your part to fix (e.g. by deleting the partial row
219
+ using a text editor).
220
+
221
+ # combine
222
+
223
+ Note that the CSV data written by sinter will contain multiple rows for each case, because
224
+ sinter starts by running small batches to see roughly what the error rate is before moving
225
+ to larger batch sizes.
226
+
227
+ You can get a single-row-per-case CSV file by using `sinter combine`:
228
+
229
+ ```bash
230
+ sinter combine stats.csv
231
+ ```
232
+
233
+ ```
234
+ shots, errors, discards, seconds,decoder,strong_id,json_metadata
235
+ 58591, 1067, 0, 5.50,pymatching,bb46c8fca4d9fd9d4d27a5039686332ac5e24011a7f2aea5a65f6040445567c0,"{""b"":""X"",""d"":3,""p"":0.005,""type"":""rotated_surface_memory""}"
236
+ 1000000, 901, 0, 73.4,pymatching,4c0780830fe1747ab22767b69d1178f803943c83dd4afa6d241acf02e6dfa71f,"{""b"":""X"",""d"":3,""p"":0.001,""type"":""rotated_surface_memory""}"
237
+ 16315, 1026, 0, 2.39,pymatching,64d81b177ef1a455644ac3e03f374394cd8ad385ba2ee0ac147b2405107564fc,"{""b"":""X"",""d"":3,""p"":0.01,""type"":""rotated_surface_memory""}"
238
+ 1000000, 157, 0, 116.5,pymatching,100855c078af0936d098cecbd8bfb7591c0951ae69527c002c9c5f4c79bde129,"{""b"":""X"",""d"":5,""p"":0.001,""type"":""rotated_surface_memory""}"
239
+ 61677, 1005, 0, 21.2,pymatching,6d7b8b312a5460c7fe08119d3c7a040daa25bd34d524611160e4aac6196293fe,"{""b"":""X"",""d"":5,""p"":0.005,""type"":""rotated_surface_memory""}"
240
+ 10891, 1021, 0, 7.43,pymatching,477252e968f0f22f64ccb058c0e1e9c77b765f60f74df8b6707de7ec65ed13b7,"{""b"":""X"",""d"":5,""p"":0.01,""type"":""rotated_surface_memory""}"
241
+ ```
242
+
243
+ # plot
244
+
245
+ You can use `sinter plot` to view the results you've collected.
246
+ This command takes a CSV file, an argument `--group_func` indicating how to
247
+ group the statistics into curves, an argument `--x_func` indicating how to
248
+ pick the X coordinate of each point, and various other arguments. Each `*_func`
249
+ argument takes a string that will be evaluated as a python expression, with
250
+ various useful values in scope such as a `metadata` value containing the
251
+ json metadata for the various points being evaluated. There is also a special
252
+ `m` value where `m.key` is shorthand for `metadata.get('key', None)`.
253
+
254
+ Here is an example of a `sinter plot` command:
255
+
256
+ ```bash
257
+ sinter plot \
258
+ --in stats.csv \
259
+ --group_func "f'''Rotated Surface Code d={m.d}'''" \
260
+ --x_func m.p \
261
+ --xaxis "[log]Physical Error Rate" \
262
+ --fig_size 1024 1024 \
263
+ --out surface_code_figure.png \
264
+ --show
265
+ ```
266
+
267
+ Which will save a png image of, and also open a window showing, a plot like this one:
268
+
269
+ ![Example plot](readme_example_plot.png)
270
+
271
+ <a name="csv_format"></a>
272
+ # The csv format for sample statistics
273
+
274
+ Sinter saves samples as a table using a Comma Separated Value format.
275
+ For example:
276
+
277
+ ```
278
+ shots,errors,discards,seconds,decoder,strong_id,json_metadata
279
+ 1000000, 837, 0, 36.6,pymatching,9f7e20c54fec45b6aef7491b774dd5c0a3b9a005aa82faf5b9c051d6e40d60a9,"{""d"":3,""p"":0.001}"
280
+ 53498, 1099, 0, 6.52,pymatching,3f40432443a99b933fb548b831fb54e7e245d9d73a35c03ea5a2fb2ce270f8c8,"{""d"":3,""p"":0.005}"
281
+ 16269, 1023, 0, 3.23,pymatching,17b2e0c99560d20307204494ac50e31b33e50721b4ebae99d9e3577ae7248874,"{""d"":3,""p"":0.01}"
282
+ 1000000, 151, 0, 77.3,pymatching,e179a18739201250371ffaae0197d8fa19d26b58dfc2942f9f1c85568645387a,"{""d"":5,""p"":0.001}"
283
+ 11363, 1068, 0, 12.5,pymatching,a4dec28934a033215ff1389651a26114ecc22016a6e122008830cf7dd04ba5ad,"{""d"":5,""p"":0.01}"
284
+ 61569, 1001, 0, 24.5,pymatching,2fefcc356752482fb4c6d912c228f6d18762f5752796c668b6abeb7775f5de92,"{""d"":5,""p"":0.005}"
285
+ ```
286
+
287
+ The columns are:
288
+
289
+ - `shots` (unsigned int): How many times the circuit was sampled.
290
+ - `errors` (unsigned int): How many times the decoder failed to predict any logical observable.
291
+ - `discards` (unsigned int): How many times a shot was discarded because a postselected detector fired or because the decoder incorrectly predicted the value of a postselected observable. Discarded shots never count as errors.
292
+ - `seconds` (non-negative float): How many CPU core seconds it took to simulate and decode these shots.
293
+ - `decoder` (str): Which decoder was used.
294
+ - `strong_id` (str):
295
+ Hex representation of a cryptographic hash of the problem
296
+ being sampled from.
297
+ The hashed data includes the exact circuit that was simulated,
298
+ the decoder that was used,
299
+ the exact detector error model that was given to the decoder,
300
+ the postselection rules that were applied,
301
+ and the metadata associated with the circuit.
302
+ The purpose of the strong id is to make it impossible to accidentally combine
303
+ shots that were from separate circuits or separate versions of a circuit.
304
+ - `json_metadata` (json): A free form field that can store any value representable in
305
+ [Java Script Object Notation](https://json.org). For example, this could be a
306
+ dictionary with helpful keys like "noise_level" or "circuit_name". The json
307
+ value is serialized into JSON and then escaped so that it can be put into the
308
+ CSV data (e.g. quotes get doubled up).
309
+ - `custom_counts` (json[Dict[str, int]]): An optional field that can store a
310
+ dictionary from string keys to integer counts represented in
311
+ [Java Script Object Notation](https://json.org).
312
+ The counts can be a huge variety of things, ranging from per-observable error
313
+ counts to detection event counts. In general, any value that should be added
314
+ when merging rows could be in these counters.
315
+
316
+ Note shots may be spread across multiple rows.
317
+ For example, this data:
318
+
319
+ ```
320
+ shots,errors,discards,seconds,decoder,strong_id,json_metadata
321
+ 500000, 437, 0, 20.5,pymatching,9f7e20c54fec45b6aef7491b774dd5c0a3b9a005aa82faf5b9c051d6e40d60a9,"{""d"":3,""p"":0.001}"
322
+ 500000, 400, 0, 16.1,pymatching,9f7e20c54fec45b6aef7491b774dd5c0a3b9a005aa82faf5b9c051d6e40d60a9,"{""d"":3,""p"":0.001}"
323
+ ```
324
+
325
+ has the same total statistics as this data:
326
+
327
+ ```
328
+ shots,errors,discards,seconds,decoder,strong_id,json_metadata
329
+ 1000000, 837, 0, 36.6,pymatching,9f7e20c54fec45b6aef7491b774dd5c0a3b9a005aa82faf5b9c051d6e40d60a9,"{""d"":3,""p"":0.001}"
330
+ ```
331
+
332
+ just split over two rows instead of combined into one.
@@ -0,0 +1,4 @@
1
+ matplotlib
2
+ numpy
3
+ stim
4
+ scipy