sinter 1.15.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of sinter might be problematic. Click here for more details.
- sinter/__init__.py +47 -0
- sinter/_collection/__init__.py +10 -0
- sinter/_collection/_collection.py +480 -0
- sinter/_collection/_collection_manager.py +581 -0
- sinter/_collection/_collection_manager_test.py +287 -0
- sinter/_collection/_collection_test.py +317 -0
- sinter/_collection/_collection_worker_loop.py +35 -0
- sinter/_collection/_collection_worker_state.py +259 -0
- sinter/_collection/_collection_worker_test.py +222 -0
- sinter/_collection/_mux_sampler.py +56 -0
- sinter/_collection/_printer.py +65 -0
- sinter/_collection/_sampler_ramp_throttled.py +66 -0
- sinter/_collection/_sampler_ramp_throttled_test.py +144 -0
- sinter/_command/__init__.py +0 -0
- sinter/_command/_main.py +39 -0
- sinter/_command/_main_collect.py +350 -0
- sinter/_command/_main_collect_test.py +482 -0
- sinter/_command/_main_combine.py +84 -0
- sinter/_command/_main_combine_test.py +153 -0
- sinter/_command/_main_plot.py +817 -0
- sinter/_command/_main_plot_test.py +445 -0
- sinter/_command/_main_predict.py +75 -0
- sinter/_command/_main_predict_test.py +36 -0
- sinter/_data/__init__.py +20 -0
- sinter/_data/_anon_task_stats.py +89 -0
- sinter/_data/_anon_task_stats_test.py +35 -0
- sinter/_data/_collection_options.py +106 -0
- sinter/_data/_collection_options_test.py +24 -0
- sinter/_data/_csv_out.py +74 -0
- sinter/_data/_existing_data.py +173 -0
- sinter/_data/_existing_data_test.py +41 -0
- sinter/_data/_task.py +311 -0
- sinter/_data/_task_stats.py +244 -0
- sinter/_data/_task_stats_test.py +140 -0
- sinter/_data/_task_test.py +38 -0
- sinter/_decoding/__init__.py +16 -0
- sinter/_decoding/_decoding.py +419 -0
- sinter/_decoding/_decoding_all_built_in_decoders.py +25 -0
- sinter/_decoding/_decoding_decoder_class.py +161 -0
- sinter/_decoding/_decoding_fusion_blossom.py +193 -0
- sinter/_decoding/_decoding_mwpf.py +302 -0
- sinter/_decoding/_decoding_pymatching.py +81 -0
- sinter/_decoding/_decoding_test.py +480 -0
- sinter/_decoding/_decoding_vacuous.py +38 -0
- sinter/_decoding/_perfectionist_sampler.py +38 -0
- sinter/_decoding/_sampler.py +72 -0
- sinter/_decoding/_stim_then_decode_sampler.py +222 -0
- sinter/_decoding/_stim_then_decode_sampler_test.py +192 -0
- sinter/_plotting.py +619 -0
- sinter/_plotting_test.py +108 -0
- sinter/_predict.py +381 -0
- sinter/_predict_test.py +227 -0
- sinter/_probability_util.py +519 -0
- sinter/_probability_util_test.py +281 -0
- sinter-1.15.0.data/data/README.md +332 -0
- sinter-1.15.0.data/data/readme_example_plot.png +0 -0
- sinter-1.15.0.data/data/requirements.txt +4 -0
- sinter-1.15.0.dist-info/METADATA +354 -0
- sinter-1.15.0.dist-info/RECORD +62 -0
- sinter-1.15.0.dist-info/WHEEL +5 -0
- sinter-1.15.0.dist-info/entry_points.txt +2 -0
- sinter-1.15.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,817 @@
|
|
|
1
|
+
import math
|
|
2
|
+
import sys
|
|
3
|
+
from typing import Any, Callable, Iterable, List, Optional, TYPE_CHECKING, Tuple, Union, Dict, Sequence, cast
|
|
4
|
+
import argparse
|
|
5
|
+
|
|
6
|
+
import matplotlib.pyplot as plt
|
|
7
|
+
import numpy as np
|
|
8
|
+
|
|
9
|
+
from sinter._command._main_combine import ExistingData
|
|
10
|
+
from sinter._plotting import plot_discard_rate, plot_custom
|
|
11
|
+
from sinter._plotting import plot_error_rate
|
|
12
|
+
from sinter._probability_util import shot_error_rate_to_piece_error_rate, Fit
|
|
13
|
+
|
|
14
|
+
if TYPE_CHECKING:
|
|
15
|
+
import sinter
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def parse_args(args: List[str]) -> Any:
    """Parse the command line arguments for `sinter plot`.

    Args:
        args: The raw command line argument strings (excluding the program name).

    Returns:
        The parsed argparse namespace, with the various `*_func` string
        arguments compiled into callables that take stats and return values.

    Raises:
        ValueError: If mutually-dependent arguments are specified inconsistently
            (e.g. `--type custom_y` without `--y_func`).
    """
    parser = argparse.ArgumentParser(description='Plot collected CSV data.',
                                     prog='sinter plot')
    parser.add_argument('--filter_func',
                        type=str,
                        default="True",
                        help='A python expression that determines whether a case is kept or not.\n'
                             'Values available to the python expression:\n'
                             ' metadata: The parsed value from the json_metadata for the data point.\n'
                             ' m: `m.key` is a shorthand for `metadata.get("key", None)`.\n'
                             ' decoder: The decoder that decoded the data for the data point.\n'
                             ' strong_id: The cryptographic hash of the case that was sampled for the data point.\n'
                             ' stat: The sinter.TaskStats object for the data point.\n'
                             'Expected expression type:\n'
                             ' Something that can be given to `bool` to get True or False.\n'
                             'Examples:\n'
                             ''' --filter_func "decoder=='pymatching'"\n'''
                             ''' --filter_func "0.001 < metadata['p'] < 0.005"\n''')
    parser.add_argument('--preprocess_stats_func',
                        type=str,
                        default=None,
                        help='An expression that operates on a `stats` value, returning a new list of stats to plot.\n'
                             'For example, this could double add a field to json_metadata or merge stats together.\n'
                             'Examples:\n'
                             ''' --preprocess_stats_func "[stat for stat in stats if stat.errors > 0]\n'''
                             ''' --preprocess_stats_func "[stat.with_edits(errors=stat.custom_counts['severe_errors']) for stat in stats]\n'''
                             ''' --preprocess_stats_func "__import__('your_custom_module').your_custom_function(stats)"\n'''
                        )
    parser.add_argument('--x_func',
                        type=str,
                        default="1",
                        help='A python expression that determines where points go on the x axis.\n'
                             'Values available to the python expression:\n'
                             ' metadata: The parsed value from the json_metadata for the data point.\n'
                             ' m: `m.key` is a shorthand for `metadata.get("key", None)`.\n'
                             ' decoder: The decoder that decoded the data for the data point.\n'
                             ' strong_id: The cryptographic hash of the case that was sampled for the data point.\n'
                             ' stat: The sinter.TaskStats object for the data point.\n'
                             'Expected expression type:\n'
                             ' Something that can be given to `float` to get a float.\n'
                             'Examples:\n'
                             ''' --x_func "metadata['p']"\n'''
                             ''' --x_func m.p\n'''
                             ''' --x_func "metadata['path'].split('/')[-1].split('.')[0]"\n'''
                        )
    parser.add_argument('--point_label_func',
                        type=str,
                        default="None",
                        help='A python expression that determines text to put next to data points.\n'
                             'Values available to the python expression:\n'
                             ' metadata: The parsed value from the json_metadata for the data point.\n'
                             ' m: `m.key` is a shorthand for `metadata.get("key", None)`.\n'
                             ' decoder: The decoder that decoded the data for the data point.\n'
                             ' strong_id: The cryptographic hash of the case that was sampled for the data point.\n'
                             ' stat: The sinter.TaskStats object for the data point.\n'
                             'Expected expression type:\n'
                             ' Something Falsy (no label), or something that can be given to `str` to get a string.\n'
                             'Examples:\n'
                             ''' --point_label_func "f'p={m.p}'"\n'''
                        )
    parser.add_argument('--y_func',
                        type=str,
                        default=None,
                        help='A python expression that determines where points go on the y axis.\n'
                             'This argument is not used by error rate or discard rate plots; only\n'
                             'by the "custom_y" type plot.'
                             'Values available to the python expression:\n'
                             ' metadata: The parsed value from the json_metadata for the data point.\n'
                             ' m: `m.key` is a shorthand for `metadata.get("key", None)`.\n'
                             ' decoder: The decoder that decoded the data for the data point.\n'
                             ' strong_id: The cryptographic hash of the case that was sampled for the data point.\n'
                             ' stat: The sinter.TaskStats object for the data point.\n'
                             'Expected expression type:\n'
                             ' A `sinter.Fit` specifying an uncertainty region,.\n'
                             ' or else something that can be given to `float` to get a float.\n'
                             'Examples:\n'
                             ''' --x_func "metadata['p']"\n'''
                             ''' --x_func "metadata['path'].split('/')[-1].split('.')[0]"\n'''
                        )
    parser.add_argument('--fig_size',
                        type=int,
                        nargs=2,
                        default=None,
                        help='Desired figure width and height in pixels.')
    parser.add_argument('--dpi',
                        type=float,
                        default=100,
                        help='Dots per inch. Determines resolution of the figure.')
    parser.add_argument('--group_func',
                        type=str,
                        default="'all data (use -group_func and -x_func to group into curves)'",
                        help='A python expression that determines how points are grouped into curves.\n'
                             'If this evaluates to a dict, different keys control different groupings (e.g. "color" and "marker")\n'
                             'Values available to the python expression:\n'
                             ' metadata: The parsed value from the json_metadata for the data point.\n'
                             ' m: `m.key` is a shorthand for `metadata.get("key", None)`.\n'
                             ' decoder: The decoder that decoded the data for the data point.\n'
                             ' strong_id: The cryptographic hash of the case that was sampled for the data point.\n'
                             ' stat: The sinter.TaskStats object for the data point.\n'
                             'Expected expression type:\n'
                             ' A dict, or something that can be given to `str` to get a useful string.\n'
                             'Recognized dict keys:\n'
                             ' "color": controls color grouping\n'
                             ' "marker": controls marker grouping\n'
                             ' "linestyle": controls linestyle grouping\n'
                             ' "order": controls ordering in the legend\n'
                             ' "label": the text shown in the legend\n'
                             'Examples:\n'
                             ''' --group_func "(decoder, m.d)"\n'''
                             ''' --group_func "{'color': decoder, 'marker': m.d, 'label': (decoder, m.d)}"\n'''
                             ''' --group_func "metadata['path'].split('/')[-2]"\n'''
                        )
    parser.add_argument('--failure_unit_name',
                        type=str,
                        default=None,
                        help='The unit of failure, typically either "shot" (the default) or "round".\n'
                             'If this argument is specified, --failure_units_per_shot_func must also be specified.\n'
                        )
    parser.add_argument('--failure_units_per_shot_func',
                        type=str,
                        default=None,
                        help='A python expression that evaluates to the number of failure units there are per shot.\n'
                             'For example, if the failure unit is rounds, this should be an expression that returns\n'
                             'the number of rounds in a shot. Sinter has no way of knowing what you consider a round\n'
                             'to be, otherwise.\n'
                             '\n'
                             'This value is used to rescale the logical error rate plots. For example, if there are 4\n'
                             'failure units per shot then a shot error rate of 10%% corresponds to a unit failure rate\n'
                             'of 2.7129%%. The conversion formula (assuming less than 50%% error rates) is:\n'
                             '\n'
                             ' P_unit = 0.5 - 0.5 * (1 - 2 * P_shot)**(1/units_per_shot)\n'
                             '\n'
                             'Values available to the python expression:\n'
                             ' metadata: The parsed value from the json_metadata for the data point.\n'
                             ' m: `m.key` is a shorthand for `metadata.get("key", None)`.\n'
                             ' decoder: The decoder that decoded the data for the data point.\n'
                             ' strong_id: The cryptographic hash of the case that was sampled for the data point.\n'
                             ' stat: The sinter.TaskStats object for the data point.\n'
                             '\n'
                             'Expected expression type:\n'
                             ' float.\n'
                             '\n'
                             'Examples:\n'
                             ''' --failure_units_per_shot_func "metadata['rounds']"\n'''
                             ''' --failure_units_per_shot_func m.r\n'''
                             ''' --failure_units_per_shot_func "m.distance * 3"\n'''
                             ''' --failure_units_per_shot_func "10"\n'''
                        )
    parser.add_argument('--failure_values_func',
                        type=str,
                        default=None,
                        help='A python expression that evaluates to the number of independent ways a shot can fail.\n'
                             'For example, if a shot corresponds to a memory experiment preserving two observables,\n'
                             'then the failure unions is 2.\n'
                             '\n'
                             'This value is necessary to correctly rescale the logical error rate plots when using\n'
                             '--failure_values_func. By default it is assumed to be 1.\n'
                             '\n'
                             'Values available to the python expression:\n'
                             ' metadata: The parsed value from the json_metadata for the data point.\n'
                             ' m: `m.key` is a shorthand for `metadata.get("key", None)`.\n'
                             ' decoder: The decoder that decoded the data for the data point.\n'
                             ' strong_id: The cryptographic hash of the case that was sampled for the data point.\n'
                             ' stat: The sinter.TaskStats object for the data point.\n'
                             '\n'
                             'Expected expression type:\n'
                             ' float.\n'
                             '\n'
                             'Examples:\n'
                             ''' --failure_values_func "metadata['num_obs']"\n'''
                             ''' --failure_values_func "2"\n'''
                        )
    parser.add_argument('--plot_args_func',
                        type=str,
                        default='''{}''',
                        help='A python expression used to customize the look of curves.\n'
                             'Values available to the python expression:\n'
                             ' index: A unique integer identifying the curve.\n'
                             ' key: The group key (returned from --group_func) identifying the curve.\n'
                             ' stats: The list of sinter.TaskStats object in the group.\n'
                             ' metadata: (From one arbitrary data point in the group.) The parsed value from the json_metadata for the data point.\n'
                             ' m: `m.key` is a shorthand for `metadata.get("key", None)`.\n'
                             ' decoder: (From one arbitrary data point in the group.) The decoder that decoded the data for the data point.\n'
                             ' strong_id: (From one arbitrary data point in the group.) The cryptographic hash of the case that was sampled for the data point.\n'
                             ' stat: (From one arbitrary data point in the group.) The sinter.TaskStats object for the data point.\n'
                             'Expected expression type:\n'
                             ' A dictionary to give to matplotlib plotting functions as a **kwargs argument.\n'
                             'Examples:\n'
                             """ --plot_args_func "'''{'label': 'curve #' + str(index), 'linewidth': 5}'''"\n"""
                             """ --plot_args_func "'''{'marker': 'ov*sp^<>8PhH+xXDd|'[index %% 18]}'''"\n"""
                        )
    parser.add_argument('--in',
                        type=str,
                        nargs='+',
                        required=True,
                        help='Input files to get data from.')
    parser.add_argument('--type',
                        choices=['error_rate', 'discard_rate', 'custom_y'],
                        nargs='+',
                        default=(),
                        help='Picks the figures to include.')
    parser.add_argument('--out',
                        type=str,
                        default=None,
                        help='Write the plot to a file instead of showing it.\n'
                             '(Use --show to still show the plot.)')
    parser.add_argument('--xaxis',
                        type=str,
                        default='[log]',
                        help='Customize the X axis label. '
                             'Prefix [log] for logarithmic scale. '
                             'Prefix [sqrt] for square root scale.')
    parser.add_argument('--yaxis',
                        type=str,
                        default=None,
                        help='Customize the Y axis label. '
                             'Prefix [log] for logarithmic scale. '
                             'Prefix [sqrt] for square root scale.')
    parser.add_argument('--custom_error_count_keys',
                        type=str,
                        nargs='+',
                        default=None,
                        help="Replaces the stat's error count with one of its custom counts. Stats "
                             "without this count end up with an error count of 0. Adds the json "
                             "metadata field 'custom_error_count_key' to identify the custom count "
                             "used. Specifying multiple values turns each stat into multiple "
                             "stats.")
    parser.add_argument('--show',
                        action='store_true',
                        help='Displays the plot in a window even when --out is specified.')
    parser.add_argument('--xmin',
                        default=None,
                        type=float,
                        help='Forces the minimum value of the x axis.')
    parser.add_argument('--xmax',
                        default=None,
                        type=float,
                        help='Forces the maximum value of the x axis.')
    parser.add_argument('--ymin',
                        default=None,
                        type=float,
                        help='Forces the minimum value of the y axis.')
    parser.add_argument('--ymax',
                        default=None,
                        type=float,
                        help='Forces the maximum value of the y axis.')
    parser.add_argument('--title',
                        default=None,
                        type=str,
                        help='Sets the title of the plot.')
    parser.add_argument('--subtitle',
                        default=None,
                        type=str,
                        help='Sets the subtitle of the plot.\n'
                             '\n'
                             'Note: The pattern "{common}" will expand to text including\n'
                             'all json metadata values that are the same across all stats.')
    parser.add_argument('--highlight_max_likelihood_factor',
                        type=float,
                        default=1000,
                        help='The relative likelihood ratio that determines the color highlights around curves.\n'
                             'Set this to 1 or larger. Set to 1 to disable highlighting.')
    parser.add_argument('--line_fits',
                        action='store_true',
                        help='Adds dashed line fits to every curve.')

    a = parser.parse_args(args=args)

    # Cross-argument validation: --y_func and --type custom_y go together.
    if 'custom_y' in a.type and a.y_func is None:
        raise ValueError("--type custom_y requires --y_func.")
    if a.y_func is not None and a.type and 'custom_y' not in a.type:
        raise ValueError("--y_func only works with --type custom_y.")
    # BUG FIX: the original wrote `list(a.type) == 'custom_y'`, comparing a list
    # to a string (always False), so this validation never fired when the user
    # passed exactly `--type custom_y`. Compare against the one-element list.
    if (len(a.type) == 0 and a.y_func is not None) or list(a.type) == ['custom_y']:
        if a.failure_units_per_shot_func is not None:
            raise ValueError("--failure_units_per_shot_func doesn't affect --type custom_y")
    if (a.failure_units_per_shot_func is not None) != (a.failure_unit_name is not None):
        raise ValueError("--failure_units_per_shot_func and --failure_unit_name can only be specified together.")
    if a.failure_values_func is not None and a.failure_units_per_shot_func is None:
        raise ValueError('Specified --failure_values_func without --failure_units_per_shot_func')
    if a.failure_units_per_shot_func is None:
        a.failure_units_per_shot_func = "1"
    if a.failure_values_func is None:
        a.failure_values_func = "1"
    if a.failure_unit_name is None:
        a.failure_unit_name = 'shot'

    def _compile_argument_into_func(arg_name: str, arg_val: Any = ()):
        """Compiles a command line expression argument into a stat -> value callable.

        NOTE: this deliberately eval()s user-provided command line text; the
        expressions are trusted because they come from the invoking user.
        """
        if arg_val == ():
            arg_val = getattr(a, arg_name)
        raw_func = eval(compile(
            f'lambda *, stat, decoder, metadata, m, strong_id, sinter, math, np: {arg_val}',
            filename=f'{arg_name}:command_line_arg',
            mode='eval',
        ))
        import sinter
        return lambda stat: raw_func(
            sinter=sinter,
            math=math,
            np=np,
            stat=stat,
            decoder=stat.decoder,
            metadata=stat.json_metadata,
            m=_FieldToMetadataWrapper(stat.json_metadata),
            strong_id=stat.strong_id)

    # Turn each expression-string argument into an actual callable.
    a.preprocess_stats_func = None if a.preprocess_stats_func is None else eval(compile(
        f'lambda *, stats: {a.preprocess_stats_func}',
        filename='preprocess_stats_func:command_line_arg',
        mode='eval'))
    a.x_func = _compile_argument_into_func('x_func', a.x_func)
    if a.y_func is not None:
        a.y_func = _compile_argument_into_func('y_func')
    a.point_label_func = _compile_argument_into_func('point_label_func')
    a.group_func = _compile_argument_into_func('group_func')
    a.filter_func = _compile_argument_into_func('filter_func')
    a.failure_units_per_shot_func = _compile_argument_into_func('failure_units_per_shot_func')
    a.failure_values_func = _compile_argument_into_func('failure_values_func')
    raw_plot_args_func = eval(compile(
        f'lambda *, index, key, stats, stat, decoder, metadata, m, strong_id: {a.plot_args_func}',
        filename='plot_args_func:command_line_arg',
        mode='eval'))
    a.plot_args_func = lambda index, group_key, stats: raw_plot_args_func(
        index=index,
        key=group_key,
        stats=stats,
        stat=stats[0],
        decoder=stats[0].decoder,
        metadata=stats[0].json_metadata,
        m=_FieldToMetadataWrapper(stats[0].json_metadata),
        strong_id=stats[0].strong_id)
    return a
|
|
348
|
+
|
|
349
|
+
|
|
350
|
+
def _log_ticks(
|
|
351
|
+
min_v: float,
|
|
352
|
+
max_v: float,
|
|
353
|
+
) -> Tuple[float, float, List[float], List[float]]:
|
|
354
|
+
d0 = math.floor(math.log10(min_v) + 0.0001)
|
|
355
|
+
d1 = math.ceil(math.log10(max_v) - 0.0001)
|
|
356
|
+
if d1 == d0:
|
|
357
|
+
d1 += 1
|
|
358
|
+
d0 -= 1
|
|
359
|
+
return (
|
|
360
|
+
10**d0,
|
|
361
|
+
10**d1,
|
|
362
|
+
[10**k for k in range(d0, d1 + 1)],
|
|
363
|
+
[d*10**k for k in range(d0, d1) for d in range(2, 10)],
|
|
364
|
+
)
|
|
365
|
+
|
|
366
|
+
|
|
367
|
+
def _sqrt_ticks(
|
|
368
|
+
min_v: float,
|
|
369
|
+
max_v: float,
|
|
370
|
+
) -> Tuple[float, float, List[float], List[float]]:
|
|
371
|
+
if max_v == min_v:
|
|
372
|
+
max_v *= 2
|
|
373
|
+
min_v /= 2
|
|
374
|
+
if max_v == min_v:
|
|
375
|
+
max_v = 1
|
|
376
|
+
min_v = 0
|
|
377
|
+
d = max_v - min_v
|
|
378
|
+
step = 10**math.floor(math.log10(d))
|
|
379
|
+
small_step = step / 10
|
|
380
|
+
|
|
381
|
+
start_k = math.floor(min_v / step)
|
|
382
|
+
end_k = math.ceil(max_v / step) + 1
|
|
383
|
+
major_ticks = [step * k for k in range(start_k, end_k)]
|
|
384
|
+
if len(major_ticks) < 5:
|
|
385
|
+
step /= 2
|
|
386
|
+
start_k = math.floor(min_v / step)
|
|
387
|
+
end_k = math.ceil(max_v / step) + 1
|
|
388
|
+
major_ticks = [step * k for k in range(start_k, end_k)]
|
|
389
|
+
|
|
390
|
+
small_start_k = math.floor(major_ticks[0] / small_step)
|
|
391
|
+
small_end_k = math.ceil(major_ticks[-1] / small_step) + 1
|
|
392
|
+
minor_ticks = [small_step * k for k in range(small_start_k, small_end_k)]
|
|
393
|
+
|
|
394
|
+
return (
|
|
395
|
+
major_ticks[0],
|
|
396
|
+
major_ticks[-1],
|
|
397
|
+
major_ticks,
|
|
398
|
+
minor_ticks,
|
|
399
|
+
)
|
|
400
|
+
|
|
401
|
+
|
|
402
|
+
def _pick_min_max(
|
|
403
|
+
*,
|
|
404
|
+
plotted_stats: Sequence['sinter.TaskStats'],
|
|
405
|
+
v_func: Callable[['sinter.TaskStats'], Optional[float]],
|
|
406
|
+
default_min: float,
|
|
407
|
+
default_max: float,
|
|
408
|
+
forced_min: Optional[float],
|
|
409
|
+
forced_max: Optional[float],
|
|
410
|
+
want_positive: bool,
|
|
411
|
+
want_strictly_positive: bool,
|
|
412
|
+
) -> Tuple[float, float]:
|
|
413
|
+
assert default_max >= default_min
|
|
414
|
+
vs = []
|
|
415
|
+
for stat in plotted_stats:
|
|
416
|
+
v = v_func(stat)
|
|
417
|
+
if isinstance(v, (int, float)):
|
|
418
|
+
vs.append(v)
|
|
419
|
+
elif isinstance(v, Fit):
|
|
420
|
+
for e in [v.low, v.best, v.high]:
|
|
421
|
+
if e is not None:
|
|
422
|
+
vs.append(e)
|
|
423
|
+
elif v is None:
|
|
424
|
+
pass
|
|
425
|
+
else:
|
|
426
|
+
raise NotImplementedError(f'{v=}')
|
|
427
|
+
if want_positive:
|
|
428
|
+
vs = [v for v in vs if v > 0]
|
|
429
|
+
|
|
430
|
+
min_v = min(vs, default=default_min)
|
|
431
|
+
max_v = max(vs, default=default_max)
|
|
432
|
+
if forced_min is not None:
|
|
433
|
+
min_v = forced_min
|
|
434
|
+
max_v = max(min_v, max_v)
|
|
435
|
+
if forced_max is not None:
|
|
436
|
+
max_v = forced_max
|
|
437
|
+
min_v = min(min_v, max_v)
|
|
438
|
+
if want_positive:
|
|
439
|
+
assert min_v >= 0
|
|
440
|
+
if want_strictly_positive:
|
|
441
|
+
assert min_v > 0
|
|
442
|
+
assert max_v >= min_v
|
|
443
|
+
|
|
444
|
+
return min_v, max_v
|
|
445
|
+
|
|
446
|
+
|
|
447
|
+
def _set_axis_scale_label_ticks(
        *,
        ax: Optional[plt.Axes],
        y_not_x: bool,
        axis_label: str,
        default_scale: str = 'linear',
        default_min_v: float = 0,
        default_max_v: float = 0,
        v_func: Callable[['sinter.TaskStats'], Optional[float]],
        forced_min_v: Optional[float] = None,
        forced_max_v: Optional[float] = None,
        plotted_stats: Sequence['sinter.TaskStats'],
) -> Optional[str]:
    """Configures one axis (scale, label, limits, ticks) of a plot.

    The axis label may carry a '[log]' or '[sqrt]' prefix selecting the
    scale; otherwise `default_scale` is used. Limits are derived from the
    plotted stats via `v_func` unless forced.

    Returns:
        The name of the scale applied ('linear', 'log', or 'sqrt'), or None
        when `ax` is None.

    Raises:
        NotImplementedError: If an unrecognized scale name is requested.
    """
    if ax is None:
        return None
    # Bind the y-axis or x-axis setter family depending on which axis we own.
    set_scale = ax.set_yscale if y_not_x else ax.set_xscale
    set_label = ax.set_ylabel if y_not_x else ax.set_xlabel
    set_lim = cast(Callable[[Optional[float], Optional[float]], None], ax.set_ylim if y_not_x else ax.set_xlim)
    set_ticks = ax.set_yticks if y_not_x else ax.set_xticks

    # Strip a leading '[scale]' prefix from the label, if present.
    if axis_label.startswith('[') and ']' in axis_label:
        axis_split = axis_label.index(']')
        scale_name = axis_label[1:axis_split]
        axis_label = axis_label[axis_split + 1:]
    else:
        scale_name = default_scale
    set_label(axis_label)

    min_v, max_v = _pick_min_max(
        plotted_stats=plotted_stats,
        v_func=v_func,
        default_min=default_min_v,
        default_max=default_max_v,
        forced_min=forced_min_v,
        forced_max=forced_max_v,
        # Non-linear scales can't show non-positive values; log can't show 0.
        want_positive=scale_name != 'linear',
        want_strictly_positive=scale_name == 'log',
    )

    if scale_name == 'linear':
        set_lim(min_v, max_v)
    elif scale_name == 'log':
        set_scale('log')
        min_v, max_v, major_ticks, minor_ticks = _log_ticks(min_v, max_v)
        # Forced limits win over the tick-derived ones.
        if forced_min_v is not None:
            min_v = forced_min_v
        if forced_max_v is not None:
            max_v = forced_max_v
        set_ticks(major_ticks)
        set_ticks(minor_ticks, minor=True)
        set_lim(min_v, max_v)
    elif scale_name == 'sqrt':
        from matplotlib.scale import FuncScale
        min_v, max_v, major_ticks, minor_ticks = _sqrt_ticks(min_v, max_v)
        if forced_min_v is not None:
            min_v = forced_min_v
        if forced_max_v is not None:
            max_v = forced_max_v
        set_scale(FuncScale(ax, (lambda e: e**0.5, lambda e: e**2)))
        set_ticks(major_ticks)
        set_ticks(minor_ticks, minor=True)
        set_lim(min_v, max_v)
    else:
        # BUG FIX: the original raised `NotImplemented(...)`, but NotImplemented
        # is a non-callable sentinel, so this path crashed with a confusing
        # TypeError instead of the intended exception.
        raise NotImplementedError(f'{scale_name=}')
    return scale_name
|
|
512
|
+
|
|
513
|
+
|
|
514
|
+
def _common_json_properties(stats: List['sinter.TaskStats']) -> Dict[str, Any]:
|
|
515
|
+
vals = {}
|
|
516
|
+
for stat in stats:
|
|
517
|
+
if isinstance(stat.json_metadata, dict):
|
|
518
|
+
for k in stat.json_metadata:
|
|
519
|
+
vals[k] = set()
|
|
520
|
+
for stat in stats:
|
|
521
|
+
if isinstance(stat.json_metadata, dict):
|
|
522
|
+
for k in vals:
|
|
523
|
+
v = stat.json_metadata.get(k)
|
|
524
|
+
if v is None or isinstance(v, (float, str, int)):
|
|
525
|
+
vals[k].add(v)
|
|
526
|
+
if 'decoder' not in vals:
|
|
527
|
+
vals['decoder'] = set()
|
|
528
|
+
for stat in stats:
|
|
529
|
+
vals['decoder'].add(stat.decoder)
|
|
530
|
+
return {k: next(iter(v)) for k, v in vals.items() if len(v) == 1}
|
|
531
|
+
|
|
532
|
+
|
|
533
|
+
def _plot_helper(
|
|
534
|
+
*,
|
|
535
|
+
samples: Union[Iterable['sinter.TaskStats'], ExistingData],
|
|
536
|
+
group_func: Callable[['sinter.TaskStats'], Any],
|
|
537
|
+
filter_func: Callable[['sinter.TaskStats'], Any],
|
|
538
|
+
preprocess_stats_func: Optional[Callable],
|
|
539
|
+
failure_units_per_shot_func: Callable[['sinter.TaskStats'], Any],
|
|
540
|
+
failure_values_func: Callable[['sinter.TaskStats'], Any],
|
|
541
|
+
x_func: Callable[['sinter.TaskStats'], Any],
|
|
542
|
+
y_func: Optional[Callable[['sinter.TaskStats'], Any]],
|
|
543
|
+
failure_unit: str,
|
|
544
|
+
plot_types: Sequence[str],
|
|
545
|
+
highlight_max_likelihood_factor: Optional[float],
|
|
546
|
+
xaxis: str,
|
|
547
|
+
yaxis: Optional[str],
|
|
548
|
+
min_y: Optional[float],
|
|
549
|
+
max_y: Optional[float],
|
|
550
|
+
max_x: Optional[float],
|
|
551
|
+
min_x: Optional[float],
|
|
552
|
+
title: Optional[str],
|
|
553
|
+
subtitle: Optional[str],
|
|
554
|
+
fig_size: Optional[Tuple[int, int]],
|
|
555
|
+
plot_args_func: Callable[[int, Any, List['sinter.TaskStats']], Dict[str, Any]],
|
|
556
|
+
line_fits: bool,
|
|
557
|
+
point_label_func: Callable[['sinter.TaskStats'], Any] = lambda _: None,
|
|
558
|
+
dpi: float,
|
|
559
|
+
) -> Tuple[plt.Figure, List[plt.Axes]]:
|
|
560
|
+
if isinstance(samples, ExistingData):
|
|
561
|
+
total = samples
|
|
562
|
+
else:
|
|
563
|
+
total = ExistingData()
|
|
564
|
+
for sample in samples:
|
|
565
|
+
total.add_sample(sample)
|
|
566
|
+
total.data = {k: v
|
|
567
|
+
for k, v in total.data.items()
|
|
568
|
+
if bool(filter_func(v))}
|
|
569
|
+
|
|
570
|
+
if preprocess_stats_func is not None:
|
|
571
|
+
processed_stats = preprocess_stats_func(stats=list(total.data.values()))
|
|
572
|
+
total.data = {}
|
|
573
|
+
for stat in processed_stats:
|
|
574
|
+
total.add_sample(stat)
|
|
575
|
+
|
|
576
|
+
if not plot_types:
|
|
577
|
+
if y_func is not None:
|
|
578
|
+
plot_types = ['custom_y']
|
|
579
|
+
else:
|
|
580
|
+
plot_types = ['error_rate']
|
|
581
|
+
if any(s.discards for s in total.data.values()):
|
|
582
|
+
plot_types.append('discard_rate')
|
|
583
|
+
include_error_rate_plot = 'error_rate' in plot_types
|
|
584
|
+
include_discard_rate_plot = 'discard_rate' in plot_types
|
|
585
|
+
include_custom_plot = 'custom_y' in plot_types
|
|
586
|
+
num_plots = include_error_rate_plot + include_discard_rate_plot + include_custom_plot
|
|
587
|
+
|
|
588
|
+
fig: plt.Figure
|
|
589
|
+
ax_err: Optional[plt.Axes] = None
|
|
590
|
+
ax_dis: Optional[plt.Axes] = None
|
|
591
|
+
ax_cus: Optional[plt.Axes] = None
|
|
592
|
+
fig, axes = plt.subplots(1, num_plots)
|
|
593
|
+
if num_plots == 1:
|
|
594
|
+
axes = [axes]
|
|
595
|
+
axes = list(axes)
|
|
596
|
+
pop_axes = list(axes)
|
|
597
|
+
if include_custom_plot:
|
|
598
|
+
ax_cus = pop_axes.pop()
|
|
599
|
+
if include_discard_rate_plot:
|
|
600
|
+
ax_dis = pop_axes.pop()
|
|
601
|
+
if include_error_rate_plot:
|
|
602
|
+
ax_err = pop_axes.pop()
|
|
603
|
+
assert not pop_axes
|
|
604
|
+
|
|
605
|
+
plotted_stats: List['sinter.TaskStats'] = [
|
|
606
|
+
stat
|
|
607
|
+
for stat in total.data.values()
|
|
608
|
+
]
|
|
609
|
+
|
|
610
|
+
def stat_to_err_rate(stat: 'sinter.TaskStats') -> Optional[float]:
|
|
611
|
+
if stat.shots <= stat.discards:
|
|
612
|
+
return None
|
|
613
|
+
err_rate = stat.errors / (stat.shots - stat.discards)
|
|
614
|
+
pieces = failure_units_per_shot_func(stat)
|
|
615
|
+
return shot_error_rate_to_piece_error_rate(err_rate, pieces=pieces)
|
|
616
|
+
|
|
617
|
+
x_scale_name: Optional[str] = None
|
|
618
|
+
for ax in [ax_err, ax_dis, ax_cus]:
|
|
619
|
+
v = _set_axis_scale_label_ticks(
|
|
620
|
+
ax=ax,
|
|
621
|
+
y_not_x=False,
|
|
622
|
+
axis_label=xaxis,
|
|
623
|
+
default_scale='linear',
|
|
624
|
+
default_min_v=1,
|
|
625
|
+
default_max_v=10,
|
|
626
|
+
forced_max_v=max_x,
|
|
627
|
+
forced_min_v=min_x,
|
|
628
|
+
plotted_stats=plotted_stats,
|
|
629
|
+
v_func=x_func,
|
|
630
|
+
)
|
|
631
|
+
x_scale_name = x_scale_name or v
|
|
632
|
+
|
|
633
|
+
y_scale_name: Optional[str] = None
|
|
634
|
+
if ax_err is not None:
|
|
635
|
+
y_scale_name = y_scale_name or _set_axis_scale_label_ticks(
|
|
636
|
+
ax=ax_err,
|
|
637
|
+
y_not_x=True,
|
|
638
|
+
axis_label=f"Logical Error Rate (per {failure_unit})" if yaxis is None else yaxis,
|
|
639
|
+
default_scale='log',
|
|
640
|
+
forced_max_v=max_y if max_y is not None else 1 if min_y is None or 1 > min_y else None,
|
|
641
|
+
default_min_v=1e-4,
|
|
642
|
+
default_max_v=1,
|
|
643
|
+
forced_min_v=min_y,
|
|
644
|
+
plotted_stats=plotted_stats,
|
|
645
|
+
v_func=stat_to_err_rate,
|
|
646
|
+
)
|
|
647
|
+
assert x_scale_name is not None
|
|
648
|
+
assert y_scale_name is not None
|
|
649
|
+
plot_error_rate(
|
|
650
|
+
ax=ax_err,
|
|
651
|
+
stats=plotted_stats,
|
|
652
|
+
group_func=group_func,
|
|
653
|
+
x_func=x_func,
|
|
654
|
+
failure_units_per_shot_func=failure_units_per_shot_func,
|
|
655
|
+
failure_values_func=failure_values_func,
|
|
656
|
+
highlight_max_likelihood_factor=highlight_max_likelihood_factor,
|
|
657
|
+
plot_args_func=plot_args_func,
|
|
658
|
+
line_fits=None if not line_fits else (x_scale_name, y_scale_name),
|
|
659
|
+
point_label_func=point_label_func,
|
|
660
|
+
)
|
|
661
|
+
ax_err.grid(which='major', color='#000000')
|
|
662
|
+
ax_err.grid(which='minor', color='#DDDDDD')
|
|
663
|
+
ax_err.legend()
|
|
664
|
+
|
|
665
|
+
if ax_dis is not None:
|
|
666
|
+
plot_discard_rate(
|
|
667
|
+
ax=ax_dis,
|
|
668
|
+
stats=plotted_stats,
|
|
669
|
+
group_func=group_func,
|
|
670
|
+
failure_units_per_shot_func=failure_units_per_shot_func,
|
|
671
|
+
x_func=x_func,
|
|
672
|
+
highlight_max_likelihood_factor=highlight_max_likelihood_factor,
|
|
673
|
+
plot_args_func=plot_args_func,
|
|
674
|
+
point_label_func=point_label_func,
|
|
675
|
+
)
|
|
676
|
+
ax_dis.set_yticks([p / 10 for p in range(11)], labels=[f'{10*p}%' for p in range(11)])
|
|
677
|
+
ax_dis.set_ylim(0, 1)
|
|
678
|
+
ax_dis.grid(which='major', color='#000000')
|
|
679
|
+
ax_dis.grid(which='minor', color='#DDDDDD')
|
|
680
|
+
if yaxis is not None and not include_custom_plot and ax_err is None:
|
|
681
|
+
ax_dis.set_ylabel(yaxis)
|
|
682
|
+
else:
|
|
683
|
+
ax_dis.set_ylabel(f"Discard Rate (per {failure_unit})")
|
|
684
|
+
ax_dis.legend()
|
|
685
|
+
|
|
686
|
+
if ax_cus is not None:
|
|
687
|
+
assert y_func is not None
|
|
688
|
+
y_scale_name = y_scale_name or _set_axis_scale_label_ticks(
|
|
689
|
+
ax=ax_cus,
|
|
690
|
+
y_not_x=True,
|
|
691
|
+
axis_label='custom' if yaxis is None else yaxis,
|
|
692
|
+
default_scale='linear',
|
|
693
|
+
default_min_v=1e-4,
|
|
694
|
+
default_max_v=1,
|
|
695
|
+
plotted_stats=plotted_stats,
|
|
696
|
+
v_func=y_func,
|
|
697
|
+
forced_min_v=min_y,
|
|
698
|
+
forced_max_v=max_y,
|
|
699
|
+
)
|
|
700
|
+
plot_custom(
|
|
701
|
+
ax=ax_cus,
|
|
702
|
+
stats=plotted_stats,
|
|
703
|
+
x_func=x_func,
|
|
704
|
+
y_func=y_func,
|
|
705
|
+
group_func=group_func,
|
|
706
|
+
plot_args_func=plot_args_func,
|
|
707
|
+
line_fits=None if not line_fits else (x_scale_name, y_scale_name),
|
|
708
|
+
point_label_func=point_label_func,
|
|
709
|
+
)
|
|
710
|
+
ax_cus.grid(which='major', color='#000000')
|
|
711
|
+
ax_cus.grid(which='minor', color='#DDDDDD')
|
|
712
|
+
ax_cus.legend()
|
|
713
|
+
|
|
714
|
+
stripped_xaxis = xaxis
|
|
715
|
+
if stripped_xaxis is not None:
|
|
716
|
+
if stripped_xaxis.startswith('[') and ']' in stripped_xaxis:
|
|
717
|
+
stripped_xaxis = stripped_xaxis[stripped_xaxis.index(']') + 1:]
|
|
718
|
+
|
|
719
|
+
vs_suffix = ''
|
|
720
|
+
if stripped_xaxis is not None:
|
|
721
|
+
vs_suffix = f' vs {stripped_xaxis}'
|
|
722
|
+
if ax_err is not None:
|
|
723
|
+
ax_err.set_title(f'Logical Error Rate per {failure_unit}{vs_suffix}')
|
|
724
|
+
if title is not None:
|
|
725
|
+
ax_err.set_title(title)
|
|
726
|
+
if ax_dis is not None:
|
|
727
|
+
ax_dis.set_title(f'Discard Rate per {failure_unit}{vs_suffix}')
|
|
728
|
+
if ax_cus is not None:
|
|
729
|
+
if title is not None:
|
|
730
|
+
ax_cus.set_title(title)
|
|
731
|
+
else:
|
|
732
|
+
ax_cus.set_title(f'Custom Plot')
|
|
733
|
+
if subtitle is not None:
|
|
734
|
+
if '{common}' in subtitle:
|
|
735
|
+
auto_subtitle = ', '.join(f'{k}={v}' for k, v in sorted(_common_json_properties(plotted_stats).items()))
|
|
736
|
+
subtitle = subtitle.replace('{common}', auto_subtitle)
|
|
737
|
+
for ax in axes:
|
|
738
|
+
ax.set_title(ax.title.get_text() + '\n' + subtitle)
|
|
739
|
+
|
|
740
|
+
if fig_size is None:
|
|
741
|
+
fig.set_dpi(dpi)
|
|
742
|
+
fig.set_size_inches(1000 * num_plots / dpi, 1000 / dpi)
|
|
743
|
+
else:
|
|
744
|
+
w, h = fig_size
|
|
745
|
+
fig.set_dpi(dpi)
|
|
746
|
+
fig.set_size_inches(w / dpi, h / dpi)
|
|
747
|
+
fig.tight_layout()
|
|
748
|
+
axs = [e for e in [ax_err, ax_dis] if e is not None]
|
|
749
|
+
return fig, axs
|
|
750
|
+
|
|
751
|
+
|
|
752
|
+
class _FieldToMetadataWrapper:
|
|
753
|
+
def __init__(self, d: Dict):
|
|
754
|
+
self.__private_d = d
|
|
755
|
+
|
|
756
|
+
def __getattr__(self, item):
|
|
757
|
+
if isinstance(self.__private_d, dict):
|
|
758
|
+
return self.__private_d.get(item, None)
|
|
759
|
+
return None
|
|
760
|
+
|
|
761
|
+
|
|
762
|
+
def main_plot(*, command_line_args: List[str]):
    """Entry point for the `sinter plot` command.

    Merges the statistics from every input CSV file, optionally splits out
    the requested custom error count keys (warning about keys that never
    occur), renders the plots via `_plot_helper`, and then saves and/or
    shows the resulting figure.

    Args:
        command_line_args: Command line arguments, excluding the program name.
    """
    args = parse_args(command_line_args)

    # Accumulate statistics from every input file into a single collection.
    total = ExistingData()
    for path in getattr(args, 'in'):
        total += ExistingData.from_file(path)

    if args.custom_error_count_keys:
        # Warn about requested keys that appear in no statistic (likely typos).
        seen_keys = {
            key
            for stat in total.data.values()
            for key in stat.custom_counts
        }
        missing = [
            key
            for key in args.custom_error_count_keys
            if key not in seen_keys
        ]
        if missing:
            print("Warning: the following custom error count keys didn't appear in any statistic:", file=sys.stderr)
            for key in sorted(missing):
                print(f' {key!r}', file=sys.stderr)
            print("Here are the keys that do appear:", file=sys.stderr)
            for key in sorted(seen_keys):
                print(f' {key!r}', file=sys.stderr)

    # Split each statistic into per-custom-key statistics (a no-op split when
    # no custom keys were requested).
    total.data = {
        split_stat.strong_id: split_stat
        for stat in total.data.values()
        for split_stat in stat._split_custom_counts(args.custom_error_count_keys)
    }

    fig, _ = _plot_helper(
        samples=total,
        group_func=args.group_func,
        x_func=args.x_func,
        point_label_func=args.point_label_func,
        y_func=args.y_func,
        filter_func=args.filter_func,
        failure_units_per_shot_func=args.failure_units_per_shot_func,
        failure_values_func=args.failure_values_func,
        plot_args_func=args.plot_args_func,
        failure_unit=args.failure_unit_name,
        plot_types=args.type,
        xaxis=args.xaxis,
        yaxis=args.yaxis,
        fig_size=args.fig_size,
        min_y=args.ymin,
        max_y=args.ymax,
        max_x=args.xmax,
        min_x=args.xmin,
        highlight_max_likelihood_factor=args.highlight_max_likelihood_factor,
        title=args.title,
        subtitle=args.subtitle,
        line_fits=args.line_fits,
        preprocess_stats_func=args.preprocess_stats_func,
        dpi=args.dpi,
    )

    # Save when an output path was given; show interactively when asked to,
    # or when showing is the only way the figure would be seen at all.
    if args.out is not None:
        fig.savefig(args.out, dpi=args.dpi)
    if args.show or args.out is None:
        plt.show()
|