sinter-1.15.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sinter/__init__.py +47 -0
- sinter/_collection/__init__.py +10 -0
- sinter/_collection/_collection.py +480 -0
- sinter/_collection/_collection_manager.py +581 -0
- sinter/_collection/_collection_manager_test.py +287 -0
- sinter/_collection/_collection_test.py +317 -0
- sinter/_collection/_collection_worker_loop.py +35 -0
- sinter/_collection/_collection_worker_state.py +259 -0
- sinter/_collection/_collection_worker_test.py +222 -0
- sinter/_collection/_mux_sampler.py +56 -0
- sinter/_collection/_printer.py +65 -0
- sinter/_collection/_sampler_ramp_throttled.py +66 -0
- sinter/_collection/_sampler_ramp_throttled_test.py +144 -0
- sinter/_command/__init__.py +0 -0
- sinter/_command/_main.py +39 -0
- sinter/_command/_main_collect.py +350 -0
- sinter/_command/_main_collect_test.py +482 -0
- sinter/_command/_main_combine.py +84 -0
- sinter/_command/_main_combine_test.py +153 -0
- sinter/_command/_main_plot.py +817 -0
- sinter/_command/_main_plot_test.py +445 -0
- sinter/_command/_main_predict.py +75 -0
- sinter/_command/_main_predict_test.py +36 -0
- sinter/_data/__init__.py +20 -0
- sinter/_data/_anon_task_stats.py +89 -0
- sinter/_data/_anon_task_stats_test.py +35 -0
- sinter/_data/_collection_options.py +106 -0
- sinter/_data/_collection_options_test.py +24 -0
- sinter/_data/_csv_out.py +74 -0
- sinter/_data/_existing_data.py +173 -0
- sinter/_data/_existing_data_test.py +41 -0
- sinter/_data/_task.py +311 -0
- sinter/_data/_task_stats.py +244 -0
- sinter/_data/_task_stats_test.py +140 -0
- sinter/_data/_task_test.py +38 -0
- sinter/_decoding/__init__.py +16 -0
- sinter/_decoding/_decoding.py +419 -0
- sinter/_decoding/_decoding_all_built_in_decoders.py +25 -0
- sinter/_decoding/_decoding_decoder_class.py +161 -0
- sinter/_decoding/_decoding_fusion_blossom.py +193 -0
- sinter/_decoding/_decoding_mwpf.py +302 -0
- sinter/_decoding/_decoding_pymatching.py +81 -0
- sinter/_decoding/_decoding_test.py +480 -0
- sinter/_decoding/_decoding_vacuous.py +38 -0
- sinter/_decoding/_perfectionist_sampler.py +38 -0
- sinter/_decoding/_sampler.py +72 -0
- sinter/_decoding/_stim_then_decode_sampler.py +222 -0
- sinter/_decoding/_stim_then_decode_sampler_test.py +192 -0
- sinter/_plotting.py +619 -0
- sinter/_plotting_test.py +108 -0
- sinter/_predict.py +381 -0
- sinter/_predict_test.py +227 -0
- sinter/_probability_util.py +519 -0
- sinter/_probability_util_test.py +281 -0
- sinter-1.15.0.data/data/README.md +332 -0
- sinter-1.15.0.data/data/readme_example_plot.png +0 -0
- sinter-1.15.0.data/data/requirements.txt +4 -0
- sinter-1.15.0.dist-info/METADATA +354 -0
- sinter-1.15.0.dist-info/RECORD +62 -0
- sinter-1.15.0.dist-info/WHEEL +5 -0
- sinter-1.15.0.dist-info/entry_points.txt +2 -0
- sinter-1.15.0.dist-info/top_level.txt +1 -0
sinter/_decoding/_sampler.py
@@ -0,0 +1,72 @@
+import abc
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    import sinter
+
+
+class CompiledSampler(metaclass=abc.ABCMeta):
+    """A sampler that has been configured for efficiently sampling some task."""
+
+    @abc.abstractmethod
+    def sample(self, suggested_shots: int) -> 'sinter.AnonTaskStats':
+        """Samples shots and returns statistics.
+
+        Args:
+            suggested_shots: The number of shots being requested. The sampler
+                may perform more shots or fewer shots than this, so technically
+                this argument can just be ignored. If a sampler is optimized for
+                a specific batch size, it can simply return one batch per call
+                regardless of this parameter.
+
+                However, this parameter is a useful hint about the amount of
+                work being done. The sampler can use this to optimize its
+                behavior. For example, it could adjust its batch size downward
+                if the suggested shots is very small. Whereas if the suggested
+                shots is very high, the sampler should focus entirely on
+                achieving the best possible throughput.
+
+                Note that, in typical workloads, the sampler will be called
+                repeatedly with the same value of suggested_shots. Therefore it
+                is reasonable to allocate buffers sized to accommodate the
+                current suggested_shots, expecting them to be useful again for
+                the next call.
+
+        Returns:
+            A sinter.AnonTaskStats saying how many shots were actually taken,
+            how many errors were seen, etc.
+
+            The returned stats must have at least one shot.
+        """
+        pass
+
+    def handles_throttling(self) -> bool:
+        """Return True to disable sinter wrapping samplers with throttling.
+
+        By default, sinter will wrap samplers so that they initially only do
+        a small number of shots then slowly ramp up. Sometimes this behavior
+        is not desired (e.g. in unit tests). Override this method to return True
+        to disable it.
+        """
+        return False
+
+
+class Sampler(metaclass=abc.ABCMeta):
+    """A strategy for producing stats from tasks.
+
+    Call `sampler.compiled_sampler_for_task(task)` to get a compiled sampler for
+    a task, then call `compiled_sampler.sample(shots)` to collect statistics.
+
+    A sampler differs from a `sinter.Decoder` because the sampler is responsible
+    for the full sampling process (e.g. simulating the circuit), whereas a
+    decoder can do nothing except predict observable flips from detection event
+    data. This prevents the decoders from cheating, but makes them less flexible
+    overall. A sampler can do things like use simulators other than stim, or
+    really anything at all as long as it ends with returning statistics about
+    shot counts, error counts, etc.
+    """
+
+    @abc.abstractmethod
+    def compiled_sampler_for_task(self, task: 'sinter.Task') -> 'sinter.CompiledSampler':
+        """Creates, configures, and returns an object for sampling the task."""
+        pass
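
The two classes above form sinter's extension point for custom statistics collection: compiled_sampler_for_task does the per-task setup once, and sample is then called repeatedly in a tight loop to produce AnonTaskStats batches. As a rough sketch of how the contract is meant to be used (this is not part of the package; the class names and the error-probability parameter are invented for illustration, and it assumes these classes are re-exported as sinter.Sampler, sinter.CompiledSampler, sinter.AnonTaskStats, and sinter.Task):

import random
import time

import sinter


class _FixedBatchCompiledSampler(sinter.CompiledSampler):
    """Hypothetical sampler: pretends each shot fails independently with probability p."""

    def __init__(self, p: float, batch_size: int):
        self.p = p
        self.batch_size = batch_size

    def sample(self, suggested_shots: int) -> sinter.AnonTaskStats:
        t0 = time.monotonic()
        # Ignore suggested_shots and always return one fixed-size batch, which
        # the docstring above explicitly allows.
        shots = self.batch_size
        errors = sum(random.random() < self.p for _ in range(shots))
        return sinter.AnonTaskStats(
            shots=shots,
            errors=errors,
            seconds=time.monotonic() - t0,
        )

    def handles_throttling(self) -> bool:
        # The batch size is already fixed, so sinter's ramp-up wrapper adds nothing.
        return True


class FixedBatchSampler(sinter.Sampler):
    """Hypothetical strategy that ignores the task details and fakes statistics."""

    def __init__(self, p: float = 1e-3, batch_size: int = 1024):
        self.p = p
        self.batch_size = batch_size

    def compiled_sampler_for_task(self, task: sinter.Task) -> sinter.CompiledSampler:
        return _FixedBatchCompiledSampler(self.p, self.batch_size)

The point of the two-level split is that per-task setup cost (circuit compilation, buffer allocation) is paid once in compiled_sampler_for_task, while sample stays cheap enough to call many times with the same suggested_shots value.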
sinter/_decoding/_stim_then_decode_sampler.py
@@ -0,0 +1,222 @@
+import collections
+import pathlib
+import random
+import time
+from typing import Optional
+from typing import Union
+
+import numpy as np
+
+from sinter._data import Task, AnonTaskStats
+from sinter._decoding._sampler import Sampler, CompiledSampler
+from sinter._decoding._decoding_decoder_class import Decoder, CompiledDecoder
+
+
+class StimThenDecodeSampler(Sampler):
+    """Samples shots using stim, then decodes using the given decoder.
+
+    This is the default sampler; the one used to wrap decoders with no
+    specified sampler.
+
+    The decoder's goal is to predict the observable flips given the detection
+    event data. Errors are when the prediction is wrong. Discards are when the
+    decoder returns an extra byte of prediction data for each shot, and the
+    extra byte is not zero.
+    """
+    def __init__(
+        self,
+        *,
+        decoder: Decoder,
+        count_observable_error_combos: bool,
+        count_detection_events: bool,
+        tmp_dir: Optional[pathlib.Path],
+    ):
+        self.decoder = decoder
+        self.count_observable_error_combos = count_observable_error_combos
+        self.count_detection_events = count_detection_events
+        self.tmp_dir = tmp_dir
+
+    def compiled_sampler_for_task(self, task: Task) -> CompiledSampler:
+        return _CompiledStimThenDecodeSampler(
+            decoder=self.decoder,
+            task=task,
+            count_detection_events=self.count_detection_events,
+            count_observable_error_combos=self.count_observable_error_combos,
+            tmp_dir=self.tmp_dir,
+        )
+
+
+def classify_discards_and_errors(
+    *,
+    actual_obs: np.ndarray,
+    predictions: np.ndarray,
+    postselected_observables_mask: Union[np.ndarray, None],
+    out_count_observable_error_combos: Union[None, collections.Counter[str]],
+    num_obs: int,
+) -> tuple[int, int]:
+    num_discards = 0
+
+    # Added bytes are used for signalling discards.
+    if predictions.shape[1] == actual_obs.shape[1] + 1:
+        discard_mask = predictions[:, -1] != 0
+        predictions = predictions[:, :-1]
+        num_discards += np.count_nonzero(discard_mask)
+        discard_mask ^= True
+        actual_obs = actual_obs[discard_mask]
+        predictions = predictions[discard_mask]
+
+    # Mispredicted observables can be used for signalling discards.
+    if postselected_observables_mask is not None:
+        discard_mask = np.any((actual_obs ^ predictions) & postselected_observables_mask, axis=1)
+        num_discards += np.count_nonzero(discard_mask)
+        discard_mask ^= True
+        actual_obs = actual_obs[discard_mask]
+        predictions = predictions[discard_mask]
+
+    fail_mask = np.any(actual_obs != predictions, axis=1)
+    if out_count_observable_error_combos is not None:
+        for k in np.flatnonzero(fail_mask):
+            mistakes = np.unpackbits(actual_obs[k] ^ predictions[k], count=num_obs, bitorder='little')
+            err_key = "obs_mistake_mask=" + ''.join('_E'[b] for b in mistakes)
+            out_count_observable_error_combos[err_key] += 1
+
+    num_errors = np.count_nonzero(fail_mask)
+    return num_discards, num_errors
+
+
+class DiskDecoder(CompiledDecoder):
+    def __init__(self, decoder: Decoder, task: Task, tmp_dir: pathlib.Path):
+        self.decoder = decoder
+        self.task = task
+        self.top_tmp_dir: pathlib.Path = tmp_dir
+
+        while True:
+            k = random.randint(0, 2**64)
+            self.top_tmp_dir = tmp_dir / f'disk_decoder_{k}'
+            try:
+                self.top_tmp_dir.mkdir()
+                break
+            except FileExistsError:
+                pass
+        self.decoder_tmp_dir: pathlib.Path = self.top_tmp_dir / 'dec'
+        self.decoder_tmp_dir.mkdir()
+        self.num_obs = task.detector_error_model.num_observables
+        self.num_dets = task.detector_error_model.num_detectors
+        self.dem_path = self.top_tmp_dir / 'dem.dem'
+        self.dets_b8_in_path = self.top_tmp_dir / 'dets.b8'
+        self.obs_predictions_b8_out_path = self.top_tmp_dir / 'obs.b8'
+        self.task.detector_error_model.to_file(self.dem_path)
+
+    def decode_shots_bit_packed(
+        self,
+        *,
+        bit_packed_detection_event_data: np.ndarray,
+    ) -> np.ndarray:
+        num_shots = bit_packed_detection_event_data.shape[0]
+        with open(self.dets_b8_in_path, 'wb') as f:
+            bit_packed_detection_event_data.tofile(f)
+        self.decoder.decode_via_files(
+            num_shots=num_shots,
+            num_obs=self.num_obs,
+            num_dets=self.num_dets,
+            dem_path=self.dem_path,
+            dets_b8_in_path=self.dets_b8_in_path,
+            obs_predictions_b8_out_path=self.obs_predictions_b8_out_path,
+            tmp_dir=self.decoder_tmp_dir,
+        )
+        num_obs_bytes = (self.num_obs + 7) // 8
+        with open(self.obs_predictions_b8_out_path, 'rb') as f:
+            prediction = np.fromfile(f, dtype=np.uint8, count=num_obs_bytes * num_shots)
+        assert prediction.shape == (num_obs_bytes * num_shots,)
+        self.obs_predictions_b8_out_path.unlink()
+        self.dets_b8_in_path.unlink()
+        return prediction.reshape((num_shots, num_obs_bytes))
+
+
+def _compile_decoder_with_disk_fallback(
+    decoder: Decoder,
+    task: Task,
+    tmp_dir: Optional[pathlib.Path],
+) -> CompiledDecoder:
+    try:
+        return decoder.compile_decoder_for_dem(dem=task.detector_error_model)
+    except NotImplementedError:
+        pass
+    if tmp_dir is None:
+        raise ValueError(f"Decoder {task.decoder=} didn't implement `compile_decoder_for_dem`, but no temporary directory was provided for falling back to `decode_via_files`.")
+    return DiskDecoder(decoder, task, tmp_dir)
+
+
+class _CompiledStimThenDecodeSampler(CompiledSampler):
+    def __init__(
+        self,
+        *,
+        decoder: Decoder,
+        task: Task,
+        count_observable_error_combos: bool,
+        count_detection_events: bool,
+        tmp_dir: Optional[pathlib.Path],
+    ):
+        self.task = task
+        self.compiled_decoder = _compile_decoder_with_disk_fallback(decoder, task, tmp_dir)
+        self.stim_sampler = task.circuit.compile_detector_sampler()
+        self.count_observable_error_combos = count_observable_error_combos
+        self.count_detection_events = count_detection_events
+        self.num_det = self.task.circuit.num_detectors
+        self.num_obs = self.task.circuit.num_observables
+
+    def sample(self, max_shots: int) -> AnonTaskStats:
+        t0 = time.monotonic()
+        dets, actual_obs = self.stim_sampler.sample(
+            shots=max_shots,
+            bit_packed=True,
+            separate_observables=True,
+        )
+        num_shots = dets.shape[0]
+
+        custom_counts = collections.Counter()
+        if self.count_detection_events:
+            custom_counts['detectors_checked'] += self.num_det * num_shots
+            for b in range(8):
+                custom_counts['detection_events'] += np.count_nonzero(dets & (1 << b))
+
+        # Discard any shots that contain a postselected detection event.
+        if self.task.postselection_mask is not None:
+            discarded_flags = np.any(dets & self.task.postselection_mask, axis=1)
+            num_discards_1 = np.count_nonzero(discarded_flags)
+            if num_discards_1:
+                dets = dets[~discarded_flags, :]
+                actual_obs = actual_obs[~discarded_flags, :]
+        else:
+            num_discards_1 = 0
+
+        predictions = self.compiled_decoder.decode_shots_bit_packed(bit_packed_detection_event_data=dets)
+        if not isinstance(predictions, np.ndarray):
+            raise ValueError("not isinstance(predictions, np.ndarray)")
+        if predictions.dtype != np.uint8:
+            raise ValueError("predictions.dtype != np.uint8")
+        if len(predictions.shape) != 2:
+            raise ValueError("len(predictions.shape) != 2")
+        if predictions.shape[0] != num_shots:
+            raise ValueError("predictions.shape[0] != num_shots")
+        if predictions.shape[1] < actual_obs.shape[1]:
+            raise ValueError("predictions.shape[1] < actual_obs.shape[1]")
+        if predictions.shape[1] > actual_obs.shape[1] + 1:
+            raise ValueError("predictions.shape[1] > actual_obs.shape[1] + 1")
+
+        num_discards_2, num_errors = classify_discards_and_errors(
+            actual_obs=actual_obs,
+            predictions=predictions,
+            postselected_observables_mask=self.task.postselected_observables_mask,
+            out_count_observable_error_combos=custom_counts if self.count_observable_error_combos else None,
+            num_obs=self.num_obs,
+        )
+        t1 = time.monotonic()
+
+        return AnonTaskStats(
+            shots=num_shots,
+            errors=num_errors,
+            discards=num_discards_1 + num_discards_2,
+            seconds=t1 - t0,
+            custom_counts=custom_counts,
+        )
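
The discard convention documented in StimThenDecodeSampler (one extra prediction byte per shot; a nonzero value means the shot is discarded rather than scored) is what a custom decoder hooks into via decode_shots_bit_packed. Below is a rough sketch of a decoder using that convention; it is not part of sinter, the class names and the max_detection_events heuristic are invented, and it assumes the base classes are available as sinter.Decoder and sinter.CompiledDecoder.

import numpy as np
import sinter
import stim


class _DiscardingCompiledDecoder(sinter.CompiledDecoder):
    """Hypothetical decoder: predicts no observable flips, and asks sinter to
    discard shots that look too noisy (via the extra trailing byte)."""

    def __init__(self, dem: stim.DetectorErrorModel, max_detection_events: int):
        self.num_obs_bytes = (dem.num_observables + 7) // 8
        self.max_detection_events = max_detection_events

    def decode_shots_bit_packed(
        self,
        *,
        bit_packed_detection_event_data: np.ndarray,
    ) -> np.ndarray:
        num_shots = bit_packed_detection_event_data.shape[0]
        # num_obs_bytes prediction columns plus one trailing discard column.
        result = np.zeros((num_shots, self.num_obs_bytes + 1), dtype=np.uint8)
        # Crude confidence heuristic: count detection events per shot and
        # mark shots above the threshold for discard.
        det_counts = np.unpackbits(bit_packed_detection_event_data, axis=1).sum(axis=1)
        result[:, -1] = (det_counts > self.max_detection_events).astype(np.uint8)
        return result


class DiscardingDecoder(sinter.Decoder):
    def __init__(self, max_detection_events: int = 10):
        self.max_detection_events = max_detection_events

    def compile_decoder_for_dem(self, *, dem: stim.DetectorErrorModel) -> sinter.CompiledDecoder:
        return _DiscardingCompiledDecoder(dem, self.max_detection_events)

    def decode_via_files(self, **kwargs) -> None:
        # Only needed if your sinter version requires this method; the compiled
        # path above is always preferred when compile_decoder_for_dem succeeds.
        raise NotImplementedError()

Because compile_decoder_for_dem is implemented, _compile_decoder_with_disk_fallback never needs a tmp_dir or the DiskDecoder round trip for this decoder; decoders that only implement decode_via_files get the on-disk fallback instead.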
sinter/_decoding/_stim_then_decode_sampler_test.py
@@ -0,0 +1,192 @@
+import collections
+
+import numpy as np
+
+from sinter._decoding._stim_then_decode_sampler import \
+    classify_discards_and_errors
+
+
+def test_classify_discards_and_errors():
+    assert classify_discards_and_errors(
+        actual_obs=np.array([
+            [1, 2],
+            [2, 2],
+            [3, 2],
+            [4, 3],
+            [1, 3],
+            [0, 3],
+            [0, 3],
+        ], dtype=np.uint8),
+        predictions=np.array([
+            [1, 2],
+            [2, 2],
+            [3, 2],
+            [4, 3],
+            [1, 3],
+            [0, 3],
+            [0, 3],
+        ], dtype=np.uint8),
+        postselected_observables_mask=None,
+        out_count_observable_error_combos=None,
+        num_obs=16,
+    ) == (0, 0)
+
+    assert classify_discards_and_errors(
+        actual_obs=np.array([
+            [1, 2],
+            [2, 2],
+            [3, 2],
+            [4, 3],
+            [1, 3],
+            [0, 3],
+            [0, 3],
+        ], dtype=np.uint8),
+        predictions=np.array([
+            [0, 0],
+            [2, 2],
+            [3, 2],
+            [4, 1],
+            [1, 3],
+            [0, 3],
+            [0, 3],
+        ], dtype=np.uint8),
+        postselected_observables_mask=None,
+        out_count_observable_error_combos=None,
+        num_obs=16,
+    ) == (0, 2)
+
+    assert classify_discards_and_errors(
+        actual_obs=np.array([
+            [1, 2],
+            [2, 2],
+            [3, 2],
+            [4, 3],
+            [1, 3],
+            [0, 3],
+            [0, 3],
+        ], dtype=np.uint8),
+        predictions=np.array([
+            [0, 0, 0],
+            [2, 2, 0],
+            [3, 2, 0],
+            [4, 1, 0],
+            [1, 3, 0],
+            [0, 3, 0],
+            [0, 3, 0],
+        ], dtype=np.uint8),
+        postselected_observables_mask=None,
+        out_count_observable_error_combos=None,
+        num_obs=16,
+    ) == (0, 2)
+
+    assert classify_discards_and_errors(
+        actual_obs=np.array([
+            [1, 2],
+            [2, 2],
+            [3, 2],
+            [4, 3],
+            [1, 3],
+            [0, 3],
+            [0, 3],
+        ], dtype=np.uint8),
+        predictions=np.array([
+            [0, 0, 0],
+            [2, 2, 1],
+            [3, 2, 0],
+            [4, 1, 0],
+            [1, 3, 0],
+            [0, 3, 0],
+            [0, 3, 0],
+        ], dtype=np.uint8),
+        postselected_observables_mask=None,
+        out_count_observable_error_combos=None,
+        num_obs=16,
+    ) == (1, 2)
+
+    assert classify_discards_and_errors(
+        actual_obs=np.array([
+            [1, 2],
+            [2, 2],
+            [3, 2],
+            [4, 3],
+            [1, 3],
+            [0, 3],
+            [0, 3],
+        ], dtype=np.uint8),
+        predictions=np.array([
+            [0, 0, 1],
+            [2, 2, 0],
+            [3, 2, 0],
+            [4, 1, 0],
+            [1, 3, 0],
+            [0, 3, 0],
+            [0, 3, 0],
+        ], dtype=np.uint8),
+        postselected_observables_mask=None,
+        out_count_observable_error_combos=None,
+        num_obs=16,
+    ) == (1, 1)
+
+    assert classify_discards_and_errors(
+        actual_obs=np.array([
+            [1, 2],
+            [2, 2],
+            [3, 2],
+            [4, 3],
+            [1, 3],
+            [0, 3],
+            [0, 3],
+        ], dtype=np.uint8),
+        predictions=np.array([
+            [0, 0, 1],
+            [2, 2, 1],
+            [3, 2, 0],
+            [4, 1, 0],
+            [1, 3, 0],
+            [0, 3, 0],
+            [0, 3, 0],
+        ], dtype=np.uint8),
+        postselected_observables_mask=None,
+        out_count_observable_error_combos=None,
+        num_obs=16,
+    ) == (2, 1)
+
+    assert classify_discards_and_errors(
+        actual_obs=np.array([
+            [1, 2],
+            [2, 2],
+            [3, 2],
+            [4, 3],
+            [1, 3],
+            [2, 3],
+            [1, 3],
+        ], dtype=np.uint8),
+        predictions=np.array([
+            [0, 0, 1],
+            [2, 2, 1],
+            [3, 2, 0],
+            [4, 1, 0],
+            [1, 3, 0],
+            [0, 3, 0],
+            [0, 3, 0],
+        ], dtype=np.uint8),
+        postselected_observables_mask=np.array([1, 0]),
+        out_count_observable_error_combos=None,
+        num_obs=16,
+    ) == (3, 2)
+
+    counter = collections.Counter()
+    assert classify_discards_and_errors(
+        actual_obs=np.array([
+            [1, 2],
+            [1, 2],
+        ], dtype=np.uint8),
+        predictions=np.array([
+            [1, 0],
+            [1, 2],
+        ], dtype=np.uint8),
+        postselected_observables_mask=np.array([1, 0]),
+        out_count_observable_error_combos=counter,
+        num_obs=13,
+    ) == (0, 1)
+    assert counter == collections.Counter(["obs_mistake_mask=_________E___"])
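
The string in the final assertion follows the obs_mistake_mask encoding used by classify_discards_and_errors: the bit-packed actual and predicted observables are XORed, unpacked little-endian into num_obs bits, and each flipped observable becomes an 'E' in the mask. A small standalone check of that last case, with the values copied from the test above (variable names here are only for illustration):

import numpy as np

actual = np.array([1, 2], dtype=np.uint8)
predicted = np.array([1, 0], dtype=np.uint8)

# XOR the packed observable bytes, then unpack 13 bits in little-endian order.
mistakes = np.unpackbits(actual ^ predicted, count=13, bitorder='little')
key = "obs_mistake_mask=" + ''.join('_E'[b] for b in mistakes)

# Only observable index 9 (bit 1 of the second byte) differs, so the key has a
# single 'E' at position 9 of the 13-character mask.
assert key == "obs_mistake_mask=_________E___"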