sinter 1.15.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release. This version of sinter was flagged as potentially problematic by the registry scanner; see the registry's advisory page for more details.
- sinter/__init__.py +47 -0
- sinter/_collection/__init__.py +10 -0
- sinter/_collection/_collection.py +480 -0
- sinter/_collection/_collection_manager.py +581 -0
- sinter/_collection/_collection_manager_test.py +287 -0
- sinter/_collection/_collection_test.py +317 -0
- sinter/_collection/_collection_worker_loop.py +35 -0
- sinter/_collection/_collection_worker_state.py +259 -0
- sinter/_collection/_collection_worker_test.py +222 -0
- sinter/_collection/_mux_sampler.py +56 -0
- sinter/_collection/_printer.py +65 -0
- sinter/_collection/_sampler_ramp_throttled.py +66 -0
- sinter/_collection/_sampler_ramp_throttled_test.py +144 -0
- sinter/_command/__init__.py +0 -0
- sinter/_command/_main.py +39 -0
- sinter/_command/_main_collect.py +350 -0
- sinter/_command/_main_collect_test.py +482 -0
- sinter/_command/_main_combine.py +84 -0
- sinter/_command/_main_combine_test.py +153 -0
- sinter/_command/_main_plot.py +817 -0
- sinter/_command/_main_plot_test.py +445 -0
- sinter/_command/_main_predict.py +75 -0
- sinter/_command/_main_predict_test.py +36 -0
- sinter/_data/__init__.py +20 -0
- sinter/_data/_anon_task_stats.py +89 -0
- sinter/_data/_anon_task_stats_test.py +35 -0
- sinter/_data/_collection_options.py +106 -0
- sinter/_data/_collection_options_test.py +24 -0
- sinter/_data/_csv_out.py +74 -0
- sinter/_data/_existing_data.py +173 -0
- sinter/_data/_existing_data_test.py +41 -0
- sinter/_data/_task.py +311 -0
- sinter/_data/_task_stats.py +244 -0
- sinter/_data/_task_stats_test.py +140 -0
- sinter/_data/_task_test.py +38 -0
- sinter/_decoding/__init__.py +16 -0
- sinter/_decoding/_decoding.py +419 -0
- sinter/_decoding/_decoding_all_built_in_decoders.py +25 -0
- sinter/_decoding/_decoding_decoder_class.py +161 -0
- sinter/_decoding/_decoding_fusion_blossom.py +193 -0
- sinter/_decoding/_decoding_mwpf.py +302 -0
- sinter/_decoding/_decoding_pymatching.py +81 -0
- sinter/_decoding/_decoding_test.py +480 -0
- sinter/_decoding/_decoding_vacuous.py +38 -0
- sinter/_decoding/_perfectionist_sampler.py +38 -0
- sinter/_decoding/_sampler.py +72 -0
- sinter/_decoding/_stim_then_decode_sampler.py +222 -0
- sinter/_decoding/_stim_then_decode_sampler_test.py +192 -0
- sinter/_plotting.py +619 -0
- sinter/_plotting_test.py +108 -0
- sinter/_predict.py +381 -0
- sinter/_predict_test.py +227 -0
- sinter/_probability_util.py +519 -0
- sinter/_probability_util_test.py +281 -0
- sinter-1.15.0.data/data/README.md +332 -0
- sinter-1.15.0.data/data/readme_example_plot.png +0 -0
- sinter-1.15.0.data/data/requirements.txt +4 -0
- sinter-1.15.0.dist-info/METADATA +354 -0
- sinter-1.15.0.dist-info/RECORD +62 -0
- sinter-1.15.0.dist-info/WHEEL +5 -0
- sinter-1.15.0.dist-info/entry_points.txt +2 -0
- sinter-1.15.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,287 @@
|
|
|
1
|
+
import multiprocessing
|
|
2
|
+
import time
|
|
3
|
+
from typing import Any, List, Union
|
|
4
|
+
|
|
5
|
+
import sinter
|
|
6
|
+
import stim
|
|
7
|
+
|
|
8
|
+
from sinter._collection._collection_manager import CollectionManager
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def _assert_drain_queue(q: multiprocessing.Queue, expected_contents: List[Any]):
|
|
12
|
+
for v in expected_contents:
|
|
13
|
+
assert q.get(timeout=0.1) == v
|
|
14
|
+
if not q.empty():
|
|
15
|
+
assert False, f'queue had another item: {q.get()=}'
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def _put_wait_not_empty(q: Union[multiprocessing.Queue, multiprocessing.SimpleQueue], item: Any):
    """Puts `item` onto `q` and spins until the queue reports it is non-empty.

    multiprocessing queues hand items to a background feeder thread, so a plain
    `put` can return before the item is visible to readers; polling `empty()`
    makes the subsequent test assertions deterministic.
    """
    q.put(item)
    while q.empty():
        time.sleep(0.0001)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def test_manager():
    """Exercises CollectionManager's bookkeeping without real worker processes.

    Workers are never actually started (actually_start_worker_processes=False);
    instead, worker messages are injected by hand into the shared output queue
    and the manager's reactions are checked via state_summary() and by draining
    the per-worker input queues.
    """
    log = []
    # Task 0: huge shot budget but a tight error budget (max_errors=100).
    t0 = sinter.Task(
        circuit=stim.Circuit('H 0'),
        detector_error_model=stim.DetectorErrorModel(),
        decoder='fusion_blossom',
        collection_options=sinter.CollectionOptions(max_shots=100_000_000, max_errors=100),
        json_metadata={'a': 3},
    )
    # Task 1: shot-limited only; no max_errors specified.
    t1 = sinter.Task(
        circuit=stim.Circuit('M 0'),
        detector_error_model=stim.DetectorErrorModel(),
        decoder='pymatching',
        collection_options=sinter.CollectionOptions(max_shots=10_000_000),
        json_metadata=None,
    )
    manager = CollectionManager(
        num_workers=3,
        worker_flush_period=30,
        tasks=[t0, t1],
        progress_callback=log.append,
        existing_data={},
        collection_options=sinter.CollectionOptions(),
        custom_decoders={},
        allowed_cpu_affinity_ids=None,
    )

    # Before any work is distributed, no worker has an assignment.
    assert manager.state_summary() == """
worker 0: asked_to_drop_shots=0 assigned_shots=0 assigned_work_key=None
worker 1: asked_to_drop_shots=0 assigned_shots=0 assigned_work_key=None
worker 2: asked_to_drop_shots=0 assigned_shots=0 assigned_work_key=None
"""

    # Inject the strong-id computations that workers would normally report.
    manager.start_workers(actually_start_worker_processes=False)
    manager.shared_worker_output_queue.put(('computed_strong_id', 2, 'c03f7852e4579e2a99cefac80eeb6b09556907540ab3d7787a3d07309c3333aa'))
    manager.shared_worker_output_queue.put(('computed_strong_id', 1, 'a9165b6e4ab1053c04c017d0739a7bfff0910d62091fc9ee81716833eda7f604'))
    manager.start_distributing_work()

    # t0 (key a916...) goes entirely to worker 0; t1 (key c03f...) is split
    # evenly between workers 1 and 2.
    assert manager.state_summary() == """
worker 0: asked_to_drop_shots=0 assigned_shots=100000000 assigned_work_key=a9165b6e4ab1053c04c017d0739a7bfff0910d62091fc9ee81716833eda7f604
worker 1: asked_to_drop_shots=0 assigned_shots=5000000 assigned_work_key=c03f7852e4579e2a99cefac80eeb6b09556907540ab3d7787a3d07309c3333aa
worker 2: asked_to_drop_shots=0 assigned_shots=5000000 assigned_work_key=c03f7852e4579e2a99cefac80eeb6b09556907540ab3d7787a3d07309c3333aa
task task.strong_id='a9165b6e4ab1053c04c017d0739a7bfff0910d62091fc9ee81716833eda7f604':
    workers_assigned=[0]
    shot_return_requests=0
    shots_left=100000000
    errors_left=100
    shots_unassigned=0
task task.strong_id='c03f7852e4579e2a99cefac80eeb6b09556907540ab3d7787a3d07309c3333aa':
    workers_assigned=[1, 2]
    shot_return_requests=0
    shots_left=10000000
    errors_left=10000000
    shots_unassigned=0
"""

    # Each worker should have received job-change and shot-grant messages.
    _assert_drain_queue(manager.worker_states[0].input_queue, [
        (
            'change_job',
            (t0, sinter.CollectionOptions(max_errors=100), 100),
        ),
        (
            'accept_shots',
            (t0.strong_id(), 100_000_000),
        ),
    ])
    _assert_drain_queue(manager.worker_states[1].input_queue, [
        ('compute_strong_id', t0),
        (
            'change_job',
            (t1, sinter.CollectionOptions(max_errors=10000000), 10000000),
        ),
        (
            'accept_shots',
            (t1.strong_id(), 5_000_000),
        ),
    ])
    _assert_drain_queue(manager.worker_states[2].input_queue, [
        ('compute_strong_id', t1),
        (
            'change_job',
            (t1, sinter.CollectionOptions(max_errors=10000000), 10000000),
        ),
        (
            'accept_shots',
            (t1.strong_id(), 5_000_000),
        ),
    ])

    # Two progress callbacks (one per computed strong id), with no stats yet.
    assert manager.shared_worker_output_queue.empty()
    assert log.pop() is None
    assert log.pop() is None
    assert not log
    # Worker 2 flushes its whole 5M-shot assignment for t1.
    _put_wait_not_empty(manager.shared_worker_output_queue, (
        'flushed_results',
        2,
        (t1.strong_id(), sinter.AnonTaskStats(
            shots=5_000_000,
            errors=123,
            discards=0,
            seconds=1,
        )),
    ))

    # Worker 2 is now idle, so the manager rebalances by asking worker 1 to
    # return half of its remaining shots.
    assert manager.process_message()
    assert manager.state_summary() == """
worker 0: asked_to_drop_shots=0 assigned_shots=100000000 assigned_work_key=a9165b6e4ab1053c04c017d0739a7bfff0910d62091fc9ee81716833eda7f604
worker 1: asked_to_drop_shots=2500000 assigned_shots=5000000 assigned_work_key=c03f7852e4579e2a99cefac80eeb6b09556907540ab3d7787a3d07309c3333aa
worker 2: asked_to_drop_shots=0 assigned_shots=0 assigned_work_key=c03f7852e4579e2a99cefac80eeb6b09556907540ab3d7787a3d07309c3333aa
task task.strong_id='a9165b6e4ab1053c04c017d0739a7bfff0910d62091fc9ee81716833eda7f604':
    workers_assigned=[0]
    shot_return_requests=0
    shots_left=100000000
    errors_left=100
    shots_unassigned=0
task task.strong_id='c03f7852e4579e2a99cefac80eeb6b09556907540ab3d7787a3d07309c3333aa':
    workers_assigned=[1, 2]
    shot_return_requests=1
    shots_left=5000000
    errors_left=9999877
    shots_unassigned=0
"""

    # The flushed stats were forwarded to the progress callback as TaskStats.
    assert log.pop() == sinter.TaskStats(
        strong_id=t1.strong_id(),
        decoder=t1.decoder,
        json_metadata=t1.json_metadata,
        shots=5_000_000,
        errors=123,
        discards=0,
        seconds=1,
    )
    assert not log

    _assert_drain_queue(manager.worker_states[0].input_queue, [])
    _assert_drain_queue(manager.worker_states[1].input_queue, [
        (
            'return_shots',
            (t1.strong_id(), 2_500_000),
        ),
    ])
    _assert_drain_queue(manager.worker_states[2].input_queue, [])

    # Worker 1 returns fewer shots than requested (2M instead of 2.5M).
    _put_wait_not_empty(manager.shared_worker_output_queue, (
        'returned_shots',
        1,
        (t1.strong_id(), 2_000_000),
    ))
    assert manager.process_message()
    assert manager.state_summary() == """
worker 0: asked_to_drop_shots=0 assigned_shots=100000000 assigned_work_key=a9165b6e4ab1053c04c017d0739a7bfff0910d62091fc9ee81716833eda7f604
worker 1: asked_to_drop_shots=0 assigned_shots=3000000 assigned_work_key=c03f7852e4579e2a99cefac80eeb6b09556907540ab3d7787a3d07309c3333aa
worker 2: asked_to_drop_shots=0 assigned_shots=2000000 assigned_work_key=c03f7852e4579e2a99cefac80eeb6b09556907540ab3d7787a3d07309c3333aa
task task.strong_id='a9165b6e4ab1053c04c017d0739a7bfff0910d62091fc9ee81716833eda7f604':
    workers_assigned=[0]
    shot_return_requests=0
    shots_left=100000000
    errors_left=100
    shots_unassigned=0
task task.strong_id='c03f7852e4579e2a99cefac80eeb6b09556907540ab3d7787a3d07309c3333aa':
    workers_assigned=[1, 2]
    shot_return_requests=0
    shots_left=5000000
    errors_left=9999877
    shots_unassigned=0
"""

    # The returned shots are immediately re-granted to the idle worker 2.
    _assert_drain_queue(manager.worker_states[0].input_queue, [])
    _assert_drain_queue(manager.worker_states[1].input_queue, [])
    _assert_drain_queue(manager.worker_states[2].input_queue, [
        (
            'accept_shots',
            (t1.strong_id(), 2_000_000),
        ),
    ])

    # Worker 1 finishes its remaining 3M shots.
    _put_wait_not_empty(manager.shared_worker_output_queue, (
        'flushed_results',
        1,
        (t1.strong_id(), sinter.AnonTaskStats(
            shots=3_000_000,
            errors=444,
            discards=1,
            seconds=2,
        ))
    ))
    assert manager.process_message()
    assert manager.state_summary() == """
worker 0: asked_to_drop_shots=0 assigned_shots=100000000 assigned_work_key=a9165b6e4ab1053c04c017d0739a7bfff0910d62091fc9ee81716833eda7f604
worker 1: asked_to_drop_shots=0 assigned_shots=0 assigned_work_key=c03f7852e4579e2a99cefac80eeb6b09556907540ab3d7787a3d07309c3333aa
worker 2: asked_to_drop_shots=1000000 assigned_shots=2000000 assigned_work_key=c03f7852e4579e2a99cefac80eeb6b09556907540ab3d7787a3d07309c3333aa
task task.strong_id='a9165b6e4ab1053c04c017d0739a7bfff0910d62091fc9ee81716833eda7f604':
    workers_assigned=[0]
    shot_return_requests=0
    shots_left=100000000
    errors_left=100
    shots_unassigned=0
task task.strong_id='c03f7852e4579e2a99cefac80eeb6b09556907540ab3d7787a3d07309c3333aa':
    workers_assigned=[1, 2]
    shot_return_requests=1
    shots_left=2000000
    errors_left=9999433
    shots_unassigned=0
"""

    # Worker 2 finishes the last of t1's shots.
    _put_wait_not_empty(manager.shared_worker_output_queue, (
        'flushed_results',
        2,
        (t1.strong_id(), sinter.AnonTaskStats(
            shots=2_000_000,
            errors=555,
            discards=2,
            seconds=2.5,
        ))
    ))
    assert manager.process_message()
    assert manager.state_summary() == """
worker 0: asked_to_drop_shots=0 assigned_shots=100000000 assigned_work_key=a9165b6e4ab1053c04c017d0739a7bfff0910d62091fc9ee81716833eda7f604
worker 1: asked_to_drop_shots=0 assigned_shots=0 assigned_work_key=c03f7852e4579e2a99cefac80eeb6b09556907540ab3d7787a3d07309c3333aa
worker 2: asked_to_drop_shots=1000000 assigned_shots=0 assigned_work_key=c03f7852e4579e2a99cefac80eeb6b09556907540ab3d7787a3d07309c3333aa
task task.strong_id='a9165b6e4ab1053c04c017d0739a7bfff0910d62091fc9ee81716833eda7f604':
    workers_assigned=[0]
    shot_return_requests=0
    shots_left=100000000
    errors_left=100
    shots_unassigned=0
task task.strong_id='c03f7852e4579e2a99cefac80eeb6b09556907540ab3d7787a3d07309c3333aa':
    workers_assigned=[1, 2]
    shot_return_requests=1
    shots_left=0
    errors_left=9998878
    shots_unassigned=0
"""

    # Worker 2 reports it has no shots left to return; t1 is now complete, so
    # all three workers are reassigned to t0 and worker 0 is asked to share.
    assert manager.shared_worker_output_queue.empty()
    _put_wait_not_empty(manager.shared_worker_output_queue, (
        'returned_shots',
        2,
        (t1.strong_id(), 0)
    ))
    assert manager.process_message()
    assert manager.shared_worker_output_queue.empty()
    assert manager.state_summary() == """
worker 0: asked_to_drop_shots=66666666 assigned_shots=100000000 assigned_work_key=a9165b6e4ab1053c04c017d0739a7bfff0910d62091fc9ee81716833eda7f604
worker 1: asked_to_drop_shots=0 assigned_shots=0 assigned_work_key=a9165b6e4ab1053c04c017d0739a7bfff0910d62091fc9ee81716833eda7f604
worker 2: asked_to_drop_shots=0 assigned_shots=0 assigned_work_key=a9165b6e4ab1053c04c017d0739a7bfff0910d62091fc9ee81716833eda7f604
task task.strong_id='a9165b6e4ab1053c04c017d0739a7bfff0910d62091fc9ee81716833eda7f604':
    workers_assigned=[0, 1, 2]
    shot_return_requests=1
    shots_left=100000000
    errors_left=100
    shots_unassigned=0
"""

    _assert_drain_queue(manager.worker_states[0].input_queue, [
        ('return_shots', (t0.strong_id(), 66666666)),
    ])
    _assert_drain_queue(manager.worker_states[1].input_queue, [
        ('change_job', (t0, sinter.CollectionOptions(max_errors=100), 100)),
    ])
    _assert_drain_queue(manager.worker_states[2].input_queue, [
        ('return_shots', (t1.strong_id(), 1000000)),
        ('change_job', (t0, sinter.CollectionOptions(max_errors=100), 100)),
    ])
|
|
@@ -0,0 +1,317 @@
|
|
|
1
|
+
import collections
|
|
2
|
+
import multiprocessing
|
|
3
|
+
import pathlib
|
|
4
|
+
import tempfile
|
|
5
|
+
import time
|
|
6
|
+
|
|
7
|
+
import pytest
|
|
8
|
+
|
|
9
|
+
import sinter
|
|
10
|
+
import stim
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def test_iter_collect():
    """Streams stats from iter_collect over a noise sweep and checks the totals."""
    result = collections.defaultdict(sinter.AnonTaskStats)
    for sample in sinter.iter_collect(
        num_workers=2,
        tasks=[
            sinter.Task(
                circuit=stim.Circuit.generated(
                    'repetition_code:memory',
                    rounds=3,
                    distance=3,
                    after_clifford_depolarization=p),
                decoder='pymatching',
                json_metadata={'p': p},
                collection_options=sinter.CollectionOptions(
                    max_shots=1000,
                    max_errors=100,
                    start_batch_size=100,
                    max_batch_size=1000,
                ),
            )
            for p in [0.01, 0.02, 0.03, 0.04]
        ],
    ):
        # Accumulate incremental stats keyed by noise strength.
        for stats in sample.new_stats:
            result[stats.json_metadata['p']] += stats.to_anon_stats()
    assert len(result) == 4
    for k, v in result.items():
        # Collection stops once either the shot or error budget is exhausted.
        assert v.shots >= 1000 or v.errors >= 100
        assert v.discards == 0
    # Error counts should grow with the physical error rate.
    assert result[0.01].errors <= 10
    assert result[0.02].errors <= 30
    assert result[0.03].errors <= 70
    assert 1 <= result[0.04].errors <= 100
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
def test_collect():
    """Collects stats for a sweep of noise strengths and sanity-checks the results."""
    noise_levels = [0.01, 0.02, 0.03, 0.04]
    tasks = []
    for p in noise_levels:
        tasks.append(sinter.Task(
            circuit=stim.Circuit.generated(
                'repetition_code:memory',
                rounds=3,
                distance=3,
                after_clifford_depolarization=p),
            decoder='pymatching',
            json_metadata={'p': p},
            collection_options=sinter.CollectionOptions(
                max_shots=1000,
                max_errors=100,
                start_batch_size=100,
                max_batch_size=1000,
            ),
        ))
    results = sinter.collect(
        num_workers=2,
        tasks=tasks,
    )

    # Each noise level should appear exactly once in the results.
    probabilities = [e.json_metadata['p'] for e in results]
    assert len(probabilities) == len(set(probabilities))
    by_p = {e.json_metadata['p']: e for e in results}
    assert len(by_p) == 4
    for entry in by_p.values():
        # Collection stops once either the shot or error budget is exhausted.
        assert entry.shots >= 1000 or entry.errors >= 100
        assert entry.discards == 0
    # Error counts should grow with the physical error rate.
    assert by_p[0.01].errors <= 10
    assert by_p[0.02].errors <= 30
    assert by_p[0.03].errors <= 70
    assert 1 <= by_p[0.04].errors <= 100
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
def test_collect_from_paths():
    """Collects from tasks whose circuits are loaded from files via `circuit_path`."""
    with tempfile.TemporaryDirectory() as d:
        d = pathlib.Path(d)
        tasks = []
        for p in [0.01, 0.02, 0.03, 0.04]:
            # Write each generated circuit to disk and reference it by path
            # instead of passing the circuit object directly.
            path = d / f'tmp{p}.stim'
            stim.Circuit.generated(
                'repetition_code:memory',
                rounds=3,
                distance=3,
                after_clifford_depolarization=p,
            ).to_file(path)
            tasks.append(sinter.Task(
                circuit_path=path,
                decoder='pymatching',
                json_metadata={'p': p},
                collection_options=sinter.CollectionOptions(
                    max_shots=1000,
                    max_errors=100,
                    start_batch_size=100,
                    max_batch_size=1000,
                ),
            ))

        results = sinter.collect(
            num_workers=2,
            tasks=tasks
        )
        # Each noise level should appear exactly once in the results.
        probabilities = [e.json_metadata['p'] for e in results]
        assert len(probabilities) == len(set(probabilities))
        d = {e.json_metadata['p']: e for e in results}
        assert len(d) == 4
        for k, v in d.items():
            # Collection stops once either the shot or error budget is exhausted.
            assert v.shots >= 1000 or v.errors >= 100
            assert v.discards == 0
        # Error counts should grow with the physical error rate.
        assert d[0.01].errors <= 10
        assert d[0.02].errors <= 30
        assert d[0.03].errors <= 70
        assert 1 <= d[0.04].errors <= 100
|
|
122
|
+
|
|
123
|
+
|
|
124
|
+
class AlternatingPredictionsDecoder(sinter.Decoder):
    """Test decoder that ignores detection data and predicts obs=1 on every third shot."""

    def decode_via_files(self,
                         *,
                         num_shots: int,
                         num_dets: int,
                         num_obs: int,
                         dem_path: pathlib.Path,
                         dets_b8_in_path: pathlib.Path,
                         obs_predictions_b8_out_path: pathlib.Path,
                         tmp_dir: pathlib.Path,
                         ) -> None:
        # b8 format packs observables into whole bytes, rounded up.
        shot_bytes = (num_obs + 7) // 8
        with open(obs_predictions_b8_out_path, 'wb') as out_file:
            for shot_index in range(num_shots):
                prediction = shot_index % 3 == 0
                out_file.write(prediction.to_bytes(length=shot_bytes, byteorder='little'))
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
def test_collect_custom_decoder():
    """Checks that sinter.collect routes shots through a user-supplied decoder."""
    task = sinter.Task(
        circuit=stim.Circuit("""
            M(0.1) 0
            DETECTOR rec[-1]
            OBSERVABLE_INCLUDE(0) rec[-1]
        """),
        json_metadata=None,
    )
    results = sinter.collect(
        num_workers=2,
        tasks=[task],
        max_shots=10000,
        decoders=['alternate'],
        custom_decoders={'alternate': AlternatingPredictionsDecoder()},
    )
    assert len(results) == 1
    assert results[0].shots == 10000
    # Loose range: the fixed every-third-shot prediction disagrees with the
    # noisy measurement roughly a third of the time.
    assert 2500 < results[0].errors < 4000
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
def test_iter_collect_list():
    """Same sweep as test_iter_collect, with tasks supplied as a concrete list."""
    result = collections.defaultdict(sinter.AnonTaskStats)
    for sample in sinter.iter_collect(
        num_workers=2,
        tasks=[
            sinter.Task(
                circuit=stim.Circuit.generated(
                    'repetition_code:memory',
                    rounds=3,
                    distance=3,
                    after_clifford_depolarization=p),
                decoder='pymatching',
                json_metadata={'p': p},
                collection_options=sinter.CollectionOptions(
                    max_errors=100,
                    max_shots=1000,
                    start_batch_size=100,
                    max_batch_size=1000,
                ),
            )
            for p in [0.01, 0.02, 0.03, 0.04]
        ],
    ):
        # Accumulate incremental stats keyed by noise strength.
        for stats in sample.new_stats:
            result[stats.json_metadata['p']] += stats.to_anon_stats()
    assert len(result) == 4
    for k, v in result.items():
        # Collection stops once either the shot or error budget is exhausted.
        assert v.shots >= 1000 or v.errors >= 100
        assert v.discards == 0
    # Error counts should grow with the physical error rate.
    assert result[0.01].errors <= 10
    assert result[0.02].errors <= 30
    assert result[0.03].errors <= 70
    assert 1 <= result[0.04].errors <= 100
|
|
196
|
+
|
|
197
|
+
|
|
198
|
+
def test_iter_collect_worker_fails():
    """An invalid decoder name should surface as a RuntimeError from iter_collect."""
    tiny_task = sinter.Task(
        circuit=stim.Circuit.generated('repetition_code:memory', rounds=3, distance=3),
        collection_options=sinter.CollectionOptions(
            max_errors=1,
            max_shots=1,
        ),
    )
    with pytest.raises(RuntimeError, match="Worker failed"):
        _ = list(sinter.iter_collect(
            decoders=['NOT A VALID DECODER'],
            num_workers=1,
            tasks=iter([tiny_task]),
        ))
|
|
213
|
+
|
|
214
|
+
|
|
215
|
+
class FixedSizeSampler(sinter.Sampler, sinter.CompiledSampler):
    """Sampler that ignores the suggested shot count and always returns 1024 shots."""

    def compiled_sampler_for_task(self, task: sinter.Task) -> sinter.CompiledSampler:
        # This sampler is stateless per-task, so it acts as its own compiled form.
        return self

    def sample(self, suggested_shots: int) -> 'sinter.AnonTaskStats':
        return sinter.AnonTaskStats(shots=1024, errors=5)
|
|
224
|
+
|
|
225
|
+
|
|
226
|
+
def test_fixed_size_sampler():
    """A sampler returning fixed 1024-shot batches may only slightly overshoot max_shots."""
    task = sinter.Task(
        circuit=stim.Circuit(),
        decoder='fixed_size_sampler',
        json_metadata={},
        collection_options=sinter.CollectionOptions(
            max_shots=100_000,
            max_errors=1_000,
        ),
    )
    results = sinter.collect(
        num_workers=2,
        tasks=[task],
        custom_decoders={'fixed_size_sampler': FixedSizeSampler()}
    )
    # Overshoot is bounded by the batches already in flight when the budget is hit.
    assert 100_000 <= results[0].shots <= 100_000 + 3000
|
|
243
|
+
|
|
244
|
+
|
|
245
|
+
class MockTimingSampler(sinter.Sampler, sinter.CompiledSampler):
    """Sampler that simulates work: rounds shots up to 1024 and sleeps proportionally."""

    def compiled_sampler_for_task(self, task: sinter.Task) -> sinter.CompiledSampler:
        # Stateless per-task; the sampler is its own compiled form.
        return self

    def sample(self, suggested_shots: int) -> 'sinter.AnonTaskStats':
        # Ceiling-divide to the next multiple of 1024.
        actual_shots = -(-suggested_shots // 1024) * 1024
        simulated_seconds = actual_shots * 0.00001
        time.sleep(simulated_seconds)
        return sinter.AnonTaskStats(
            shots=actual_shots,
            errors=5,
            seconds=simulated_seconds,
        )
|
|
257
|
+
|
|
258
|
+
|
|
259
|
+
def test_mock_timing_sampler():
    """Collects against a sampler with simulated latency and checks shot accounting."""
    task = sinter.Task(
        circuit=stim.Circuit(),
        decoder='MockTimingSampler',
        json_metadata={},
    )
    results = sinter.collect(
        num_workers=12,
        tasks=[task],
        max_shots=1_000_000,
        max_errors=10_000,
        custom_decoders={'MockTimingSampler': MockTimingSampler()},
    )
    # Each of the 12 workers can overshoot by at most one rounded-up batch.
    assert 1_000_000 <= results[0].shots <= 1_000_000 + 12000
|
|
274
|
+
|
|
275
|
+
class BatchSizeTrackingSampler(sinter.Sampler, sinter.CompiledSampler):
    """A sampler that records every suggested batch size it is asked to sample.

    The recording list is injected so tests can pass a multiprocessing proxy
    list and observe requests made inside worker processes.
    """

    def __init__(self, batch_sizes: list[int]):
        self.batch_sizes = batch_sizes

    def compiled_sampler_for_task(self, task: sinter.Task) -> sinter.CompiledSampler:
        # Stateless per-task; the sampler is its own compiled form.
        return self

    def sample(self, suggested_shots: int) -> sinter.AnonTaskStats:
        self.batch_sizes.append(suggested_shots)
        return sinter.AnonTaskStats(
            shots=suggested_shots,
            errors=1,
            seconds=0.001,
        )
|
|
291
|
+
|
|
292
|
+
|
|
293
|
+
def test_ramp_throttled_sampler_respects_max_batch_size():
    """Test that the CollectionManager instantiated RampThrottledSampler respects the `max_batch_size`
    parameter."""

    # Since the RampThrottledSampler and batch sizing happen in the worker process, we need a
    # shared (manager-proxied) list to observe what the sampler was asked to do.
    with multiprocessing.Manager() as manager:
        tracking_sampler = BatchSizeTrackingSampler(manager.list())

        sinter.collect(
            num_workers=1,
            tasks=[
                sinter.Task(
                    circuit=stim.Circuit(),
                    decoder='tracking_sampler',
                    json_metadata={'test': 'small_batch'},
                )
            ],
            max_shots=10_000,
            max_batch_size=128,  # Set a small max batch size.
            custom_decoders={'tracking_sampler': tracking_sampler},
        )
        # The ramp starts at a batch size of one, then grows but never exceeds 128.
        assert tracking_sampler.batch_sizes[0] == 1
        assert 1 < max(tracking_sampler.batch_sizes) <= 128
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
import os
|
|
2
|
+
from typing import Optional, TYPE_CHECKING
|
|
3
|
+
|
|
4
|
+
from sinter._decoding import Sampler
|
|
5
|
+
from sinter._collection._collection_worker_state import CollectionWorkerState
|
|
6
|
+
|
|
7
|
+
if TYPE_CHECKING:
|
|
8
|
+
import multiprocessing
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
def collection_worker_loop(
        flush_period: float,
        worker_id: int,
        sampler: Sampler,
        inp: 'multiprocessing.Queue',
        out: 'multiprocessing.Queue',
        core_affinity: Optional[int],
        custom_error_count_key: Optional[str],
) -> None:
    """Entry point for a collection worker process.

    Optionally pins the process to a CPU core, then builds a
    CollectionWorkerState wired to the given queues and runs its message loop
    until told to stop.

    Args:
        flush_period: Seconds between flushes of accumulated stats to `out`.
        worker_id: Index identifying this worker in messages it sends.
        sampler: The sampler used to collect shots.
        inp: Queue of commands from the manager to this worker.
        out: Shared queue of messages from workers back to the manager.
        core_affinity: CPU id to pin this process to, or None to skip pinning.
        custom_error_count_key: Optional json_metadata key that overrides how
            errors are counted (passed through to CollectionWorkerState).
    """
    try:
        if core_affinity is not None and hasattr(os, 'sched_setaffinity'):
            os.sched_setaffinity(0, {core_affinity})
    except OSError:
        # Pinning affinity is best-effort; ignore OS-level failures and keep
        # going. (Previously a bare `except:`, which would also have swallowed
        # KeyboardInterrupt/SystemExit during process shutdown.)
        pass

    worker = CollectionWorkerState(
        flush_period=flush_period,
        worker_id=worker_id,
        sampler=sampler,
        inp=inp,
        out=out,
        custom_error_count_key=custom_error_count_key,
    )
    worker.run_message_loop()
|