QuLab 2.7.5__cp311-cp311-macosx_10_9_universal2.whl → 2.7.7__cp311-cp311-macosx_10_9_universal2.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: QuLab
- Version: 2.7.5
+ Version: 2.7.7
  Summary: contral instruments and manage data
  Author-email: feihoo87 <feihoo87@gmail.com>
  Maintainer-email: feihoo87 <feihoo87@gmail.com>
@@ -1,20 +1,20 @@
- qulab/__init__.py,sha256=tKRZe2WafDGcwbu7ddlMaePerN842aRbxcrECsu5pas,286
+ qulab/__init__.py,sha256=KJcUcZ5qXY6wlAoirzK_B-dgtDjsLmOE671v3gcXO_c,286
  qulab/__main__.py,sha256=fjaRSL_uUjNIzBGNgjlGswb9TJ2VD5qnkZHW3hItrD4,68
  qulab/dicttree.py,sha256=tRRMpGZYVOLw0TEByE3_2Ss8FdOmzuGL9e1DWbs8qoY,13684
- qulab/fun.cpython-311-darwin.so,sha256=tROCXBE3kaRbmAiAelu3knHSlX8LgSMKBhy8BjLdX5U,126848
- qulab/typing.py,sha256=5xCLfrp1aZpKpDy4p2arbFszw2eK3hGUjZa-XSvC_-8,69
+ qulab/fun.cpython-311-darwin.so,sha256=gdwo5kR7RsUYV69Dp40GEMDtlRvD7QhmrQRED08p3Ow,126848
+ qulab/typing.py,sha256=vg62sGqxuD9CI5677ejlzAmf2fVdAESZCQjAE_xSxPg,69
  qulab/utils.py,sha256=JIXMSmZU0uYfKG_tzawpK7vRNPRir_hJE8JlqkVLX2o,1260
- qulab/version.py,sha256=KaaWLSulRNfH8E5IXWnj4a54KetKPO0aYlKBuIb_6bs,21
+ qulab/version.py,sha256=TXxZaBvn1Y_BQ2YYBZ0iedwKmA3iOfYiZehFXuAoky0,21
  qulab/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  qulab/cli/commands.py,sha256=tgDIkkeIoasQXAifJZ6NU8jDgpNgb2a-B0C4nF0evrE,559
  qulab/cli/config.py,sha256=SdNmWzweWAdyk8M2oKYhMxnkaJ0qIayPlsLGCNlVqME,3108
  qulab/executor/__init__.py,sha256=LosPzOMaljSZY1thy_Fxtbrgq7uubJszMABEB7oM7tU,101
- qulab/executor/cli.py,sha256=glpQUV67_85sMpQoc3i2jw63I2WySwCd5sMXykEYzB0,8913
- qulab/executor/load.py,sha256=QKe2qs-kCPp0PjvnVMR5XVgSa1IkN5BSHkNPZ2EpC38,18007
- qulab/executor/schedule.py,sha256=YRAYs0PgR0mhhy4rJX8Ddi65peSfh7SfsvudVjLJQAU,15248
- qulab/executor/storage.py,sha256=SGnbVujkF3doNfNNDfgXI6alKkarhGbr690-VsjbBBA,10666
- qulab/executor/transform.py,sha256=kaDJePBhVc32gZkQN8YBuA69lYYW9YhDZmCnUKaWuQw,2189
- qulab/executor/utils.py,sha256=XDk3duNrqDsCqZyZUmPqIAIIiykiafcOMBpy9NVAOAw,5686
+ qulab/executor/cli.py,sha256=gGka2M6xccfM8facsIJ2qZ1y2Yx8C4BRhc1JG6nK9mo,8932
+ qulab/executor/load.py,sha256=4FY_SwumLDUewC265v4j_ZGGpfYOgH4c8PtglYcWpBw,18077
+ qulab/executor/schedule.py,sha256=DHQ5dI5YX8_frWplOoLEb9htcfM5-mikiSBNSPWT1io,16725
+ qulab/executor/storage.py,sha256=sBD-aNvj29l5HtoTpk_627qarZkPn33F-hcc80AuF6k,11079
+ qulab/executor/transform.py,sha256=s0kxWQx8Sr9pMIQke1BLNM6KqrSogAkjB6Zkapl8YSU,2189
+ qulab/executor/utils.py,sha256=cF6-2jlvlHyTjNHdxXKG04Fjfm3_3wfDQAF1G8DQphk,5686
  qulab/monitor/__init__.py,sha256=nTHelnDpxRS_fl_B38TsN0njgq8eVTEz9IAnN3NbDlM,42
  qulab/monitor/__main__.py,sha256=w3yUcqq195LzSnXTkQcuC1RSFRhy4oQ_PEBmucXguME,97
  qulab/monitor/config.py,sha256=fQ5JcsMApKc1UwANEnIvbDQZl8uYW0tle92SaYtX9lI,744
@@ -94,9 +94,9 @@ qulab/visualization/plot_seq.py,sha256=UWTS6p9nfX_7B8ehcYo6UnSTUCjkBsNU9jiOeW2ca
  qulab/visualization/qdat.py,sha256=ZeevBYWkzbww4xZnsjHhw7wRorJCBzbG0iEu-XQB4EA,5735
  qulab/visualization/rot3d.py,sha256=lMrEJlRLwYe6NMBlGkKYpp_V9CTipOAuDy6QW_cQK00,734
  qulab/visualization/widgets.py,sha256=6KkiTyQ8J-ei70LbPQZAK35wjktY47w2IveOa682ftA,3180
- QuLab-2.7.5.dist-info/LICENSE,sha256=PRzIKxZtpQcH7whTG6Egvzl1A0BvnSf30tmR2X2KrpA,1065
- QuLab-2.7.5.dist-info/METADATA,sha256=erx--03wAyJYszBgIpIVQQY7u3lx3_14DB4HSKzSidM,3698
- QuLab-2.7.5.dist-info/WHEEL,sha256=K10eKSN6_vzvMOgXxWbVOQNR7Orfl6gBTCpCI8bcYx4,114
- QuLab-2.7.5.dist-info/entry_points.txt,sha256=b0v1GXOwmxY-nCCsPN_rHZZvY9CtTbWqrGj8u1m8yHo,45
- QuLab-2.7.5.dist-info/top_level.txt,sha256=3T886LbAsbvjonu_TDdmgxKYUn939BVTRPxPl9r4cEg,6
- QuLab-2.7.5.dist-info/RECORD,,
+ QuLab-2.7.7.dist-info/LICENSE,sha256=PRzIKxZtpQcH7whTG6Egvzl1A0BvnSf30tmR2X2KrpA,1065
+ QuLab-2.7.7.dist-info/METADATA,sha256=uhQQtVYRkuDdN8Inh-gnDFZDTekIqQRrQEo5V9hPD1A,3698
+ QuLab-2.7.7.dist-info/WHEEL,sha256=K10eKSN6_vzvMOgXxWbVOQNR7Orfl6gBTCpCI8bcYx4,114
+ QuLab-2.7.7.dist-info/entry_points.txt,sha256=b0v1GXOwmxY-nCCsPN_rHZZvY9CtTbWqrGj8u1m8yHo,45
+ QuLab-2.7.7.dist-info/top_level.txt,sha256=3T886LbAsbvjonu_TDdmgxKYUn939BVTRPxPl9r4cEg,6
+ QuLab-2.7.7.dist-info/RECORD,,
qulab/__init__.py CHANGED
@@ -1,5 +1,5 @@
- from .executor.storage import find_result
- from .executor.storage import get_result_by_index as get_result
+ from .executor.storage import find_report
+ from .executor.storage import get_report_by_index as get_report
  from .executor.utils import debug_analyze
  from .scan import Scan, get_record, load_record, lookup, lookup_list
  from .version import __version__
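
Note on the public API: the package-level helpers are renamed from find_result / get_result to find_report / get_report in 2.7.7. A minimal sketch of how downstream code might bridge both names during the upgrade, assuming it only uses these re-exports (the try/except shim is illustrative, not part of QuLab):

    # Hypothetical compatibility shim for the 2.7.5 -> 2.7.7 rename.
    try:
        from qulab import find_report, get_report      # QuLab >= 2.7.7
    except ImportError:
        from qulab import find_result as find_report   # older releases
        from qulab import get_result as get_report

    report = get_report(42)   # look up a saved report by its index
    if report is not None:
        print(report.workflow, report.in_spec, report.bad_data)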
qulab/executor/cli.py CHANGED
@@ -149,7 +149,7 @@ def get(key, api):

  @click.command()
  @click.argument('workflow')
- @click.option('--plot', '-p', is_flag=True, help='Plot the result.')
+ @click.option('--plot', '-p', is_flag=True, help='Plot the report.')
  @click.option('--no-dependents',
  '-n',
  is_flag=True,
@@ -221,7 +221,7 @@ def run(workflow, code, data, api, plot, no_dependents, retry, freeze):
  @click.command()
  @click.argument('workflow')
  @click.option('--retry', '-r', default=1, type=int, help='Retry times.')
- @click.option('--plot', '-p', is_flag=True, help='Plot the result.')
+ @click.option('--plot', '-p', is_flag=True, help='Plot the report.')
  @log_options
  @command_option('maintain')
  def maintain(workflow, code, data, api, retry, plot):
@@ -263,6 +263,7 @@ def maintain(workflow, code, data, api, retry, plot):
  run=False,
  plot=plot,
  freeze=False)
+ break
  except CalibrationFailedError as e:
  if i == retry - 1:
  raise e
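
The break added to the maintain command's retry loop stops retrying as soon as one pass succeeds, instead of exhausting all remaining attempts. A minimal sketch of the pattern (everything except CalibrationFailedError is hypothetical):

    # Retry loop that stops after the first successful attempt.
    class CalibrationFailedError(Exception):
        pass

    def run_with_retry(task, retry: int = 1):
        for i in range(retry):
            try:
                task()       # e.g. one maintain pass over the workflow
                break        # success: stop retrying
            except CalibrationFailedError as e:
                if i == retry - 1:
                    raise e  # last attempt failed: propagate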
qulab/executor/load.py CHANGED
@@ -15,7 +15,7 @@ from typing import Any

  from loguru import logger

- from .storage import Result
+ from .storage import Report


  class SetConfigWorkflow():
@@ -29,7 +29,7 @@ class SetConfigWorkflow():
  def depends(self):
  return []

- def check_state(self, history: Result) -> bool:
+ def check_state(self, history: Report) -> bool:
  from . import transform
  try:
  return self._equal(history.parameters[self.key],
@@ -45,21 +45,21 @@ class SetConfigWorkflow():
  value = eval(input(f'"{self.key}": '))
  return value

- def analyze(self, result: Result, history: Result):
- result.state = 'OK'
- result.parameters = {self.key: result.data}
- return result
+ def analyze(self, report: Report, history: Report):
+ report.state = 'OK'
+ report.parameters = {self.key: report.data}
+ return report

  def check(self):
  return self.calibrate()

- def check_analyze(self, result: Result, history: Result | None):
+ def check_analyze(self, report: Report, history: Report | None):
  if self.check_state(history):
- result.state = 'OK'
- result.parameters = {self.key: history.data}
+ report.state = 'OK'
+ report.parameters = {self.key: history.data}
  else:
- result.state = 'Outdated'
- return result
+ report.state = 'Outdated'
+ return report

  @staticmethod
  def _equal(a, b):
@@ -77,6 +77,9 @@ class SetConfigWorkflow():

  return False

+ def __hash__(self):
+ return hash(self.__workflow_id__)
+

  WorkflowType = ModuleType | SetConfigWorkflow

qulab/executor/schedule.py CHANGED
@@ -7,10 +7,42 @@ from pathlib import Path
  from loguru import logger

  from .load import WorkflowType, get_dependents
- from .storage import (Result, find_result, get_heads, renew_result,
- revoke_result, save_result)
+ from .storage import (Report, find_report, get_heads, renew_report,
+ revoke_report, save_report)
  from .transform import current_config, obey_the_oracle, update_parameters

+ __session_id = None
+ __session_cache = {}
+
+
+ def set_cache(session_id, key, report: Report):
+ global __session_id
+ if __session_id is None:
+ __session_id = session_id
+ if __session_id != session_id:
+ __session_cache.clear()
+ if report.workflow.startswith('cfg:'):
+ __session_cache[key] = report
+ else:
+ __session_cache[key] = report.base_path, report.path
+
+
+ def get_cache(session_id, key) -> Report:
+ from .storage import load_report
+ global __session_id
+ if __session_id is None or __session_id != session_id:
+ return None
+ index = __session_cache.get(key, None)
+ if index is None:
+ return None
+ if isinstance(index, tuple):
+ base_path, path = index
+ return load_report(base_path, path)
+ elif isinstance(index, Report):
+ return index
+ else:
+ return None
+

  class CalibrationFailedError(Exception):
  pass
@@ -24,14 +56,14 @@ def is_pickleable(obj) -> bool:
  return False


- def veryfy_analyzed_result(result: Result, script: str, method: str):
- if not isinstance(result, Result):
- raise TypeError(f'"{script}" : "{method}" must return a Result object')
- if not is_pickleable(result.parameters):
+ def veryfy_analyzed_report(report: Report, script: str, method: str):
+ if not isinstance(report, Report):
+ raise TypeError(f'"{script}" : "{method}" must return a Report object')
+ if not is_pickleable(report.parameters):
  raise TypeError(
  f'"{script}" : "{method}" return not pickleable data in .parameters'
  )
- if not is_pickleable(result.other_infomation):
+ if not is_pickleable(report.other_infomation):
  raise TypeError(
  f'"{script}" : "{method}" return not pickleable data in .other_infomation'
  )
@@ -48,8 +80,8 @@ def check_state(workflow: WorkflowType, code_path: str | Path,
  4. All dependencies pass check state.
  """
  logger.debug(f'check_state: "{workflow.__workflow_id__}"')
- result = find_result(workflow.__workflow_id__, state_path)
- if not result:
+ report = find_report(workflow.__workflow_id__, state_path)
+ if not report:
  logger.debug(
  f'check_state failed: No history found for "{workflow.__workflow_id__}"'
  )
@@ -58,23 +90,23 @@ def check_state(workflow: WorkflowType, code_path: str | Path,
  logger.debug(
  f'check_state: "{workflow.__workflow_id__}" has custom check_state method'
  )
- return workflow.check_state(result)
+ return workflow.check_state(report)
  if workflow.__timeout__ is not None and datetime.now(
- ) > result.checked_time + timedelta(seconds=workflow.__timeout__):
+ ) > report.checked_time + timedelta(seconds=workflow.__timeout__):
  logger.debug(
  f'check_state failed: "{workflow.__workflow_id__}" has expired')
  return False
- if not result.in_spec:
+ if not report.in_spec:
  logger.debug(
  f'check_state failed: "{workflow.__workflow_id__}" is out of spec')
  return False
- if result.bad_data:
+ if report.bad_data:
  logger.debug(
  f'check_state failed: "{workflow.__workflow_id__}" has bad data')
  return False
  for n in get_dependents(workflow, code_path):
- r = find_result(n.__workflow_id__, state_path)
- if r is None or r.checked_time > result.checked_time:
+ r = find_report(n.__workflow_id__, state_path)
+ if r is None or r.checked_time > report.checked_time:
  logger.debug(
  f'check_state failed: "{workflow.__workflow_id__}" has outdated dependencies'
  )
@@ -88,66 +120,115 @@ def check_state(workflow: WorkflowType, code_path: str | Path,
  return True


- def call_analyzer(node,
- result: Result,
- history: Result | None,
+ def call_analyzer(node: WorkflowType,
+ report: Report,
+ history: Report | None,
  check=False,
- plot=False) -> Result:
+ plot=False) -> Report:
  if check:
- result = node.check_analyze(result, history=history)
- veryfy_analyzed_result(result, node.__workflow_id__, "check_analyze")
- result.fully_calibrated = False
+ report = node.check_analyze(report, history=history)
+ veryfy_analyzed_report(report, node.__workflow_id__, "check_analyze")
+ report.fully_calibrated = False
  else:
- result = node.analyze(result, history=history)
- veryfy_analyzed_result(result, node.__workflow_id__, "analyze")
+ report = node.analyze(report, history=history)
+ veryfy_analyzed_report(report, node.__workflow_id__, "analyze")
  if hasattr(node, 'oracle') and callable(node.oracle):
  logger.debug(
  f'"{node.__workflow_id__}" has oracle method, calling ...')
  try:
- result = node.oracle(result,
+ report = node.oracle(report,
  history=history,
- system_state=get_heads(result.base_path))
+ system_state=get_heads(report.base_path))
  except Exception as e:
  logger.exception(e)
- result.oracle = {}
- if not isinstance(result, Result):
+ report.oracle = {}
+ if not isinstance(report, Report):
  raise TypeError(
- f'"{node.__workflow_id__}" : function "oracle" must return a Result object'
+ f'"{node.__workflow_id__}" : function "oracle" must return a Report object'
  )
- if not is_pickleable(result.oracle):
+ if not is_pickleable(report.oracle):
  raise TypeError(
  f'"{node.__workflow_id__}" : function "oracle" return not pickleable data'
  )
- result.fully_calibrated = True
+ report.fully_calibrated = True
  if plot:
- call_plot(node, result)
- return result
+ call_plot(node, report)
+ return report


  @logger.catch()
- def call_plot(node, result: Result, check=False):
+ def call_plot(node: WorkflowType, report: Report, check=False):
  if hasattr(node, 'plot') and callable(node.plot):
- node.plot(result)
+ node.plot(report)
+
+
+ def call_check(workflow: WorkflowType, session_id: str, state_path: Path):
+ report = get_cache(session_id, (workflow, 'check'))
+ if report is not None:
+ logger.debug(f'Cache hit for "{workflow.__workflow_id__}:check"')
+ return report
+
+ data = workflow.check()
+ if not is_pickleable(data):
+ raise TypeError(
+ f'"{workflow.__workflow_id__}" : "check" return not pickleable data'
+ )
+ report = Report(workflow=workflow.__workflow_id__,
+ data=data,
+ config_path=current_config(state_path),
+ base_path=state_path,
+ heads=get_heads(state_path))
+
+ save_report(workflow.__workflow_id__,
+ report,
+ state_path,
+ refresh_heads=False)
+
+ set_cache(session_id, (workflow, 'check'), report)
+ return report
+
+
+ def call_calibrate(workflow: WorkflowType, session_id: str, state_path: Path):
+ report = get_cache(session_id, (workflow, 'calibrate'))
+ if report is not None:
+ logger.debug(f'Cache hit for "{workflow.__workflow_id__}:calibrate"')
+ return report
+
+ data = workflow.calibrate()
+ if not is_pickleable(data):
+ raise TypeError(
+ f'"{workflow.__workflow_id__}" : "calibrate" return not pickleable data'
+ )
+ report = Report(workflow=workflow.__workflow_id__,
+ data=data,
+ config_path=current_config(state_path),
+ base_path=state_path,
+ heads=get_heads(state_path))
+
+ save_report(workflow.__workflow_id__, report, state_path)
+
+ set_cache(session_id, (workflow, 'calibrate'), report)
+ return report


- @functools.lru_cache(maxsize=128)
  def check_data(workflow: WorkflowType, code_path: str | Path,
- state_path: str | Path, plot: bool, session_id: str) -> Result:
+ state_path: str | Path, plot: bool, session_id: str) -> Report:
  """
  check data answers two questions:
  Is the parameter associated with this cal in spec,
  and is the cal scan working as expected?
  """
- history = find_result(workflow.__workflow_id__, state_path)
+ history = find_report(workflow.__workflow_id__, state_path)

  if history is None:
  logger.debug(f'No history found for "{workflow.__workflow_id__}"')
- result = Result(workflow=workflow.__workflow_id__,
+ report = Report(workflow=workflow.__workflow_id__,
  config_path=current_config(state_path),
- base_path=state_path)
- result.in_spec = False
- result.bad_data = False
- return result
+ base_path=state_path,
+ heads=get_heads(state_path))
+ report.in_spec = False
+ report.bad_data = False
+ return report

  if history.bad_data:
  logger.debug(
@@ -165,82 +246,59 @@ def check_data(workflow: WorkflowType, code_path: str | Path,
  workflow, 'check_analyze') and callable(workflow.check_analyze):
  logger.debug(
  f'Checking "{workflow.__workflow_id__}" with "check" method ...')
- data = workflow.check()
- if not is_pickleable(data):
- raise TypeError(
- f'"{workflow.__workflow_id__}" : "check" return not pickleable data'
- )
- result = Result(workflow=workflow.__workflow_id__,
- data=data,
- config_path=current_config(state_path),
- base_path=state_path)
- #save_result(workflow.__workflow_id__, result, state_path)
+
+ report = call_check(workflow, session_id, state_path)

  logger.debug(f'Checked "{workflow.__workflow_id__}" !')
- result = call_analyzer(workflow,
- result,
+ report = call_analyzer(workflow,
+ report,
  history,
  check=True,
  plot=plot)
- if result.in_spec:
+ if report.in_spec:
  logger.debug(
- f'"{workflow.__workflow_id__}": checked in spec, renewing result'
+ f'"{workflow.__workflow_id__}": checked in spec, renewing report'
  )
- renew_result(workflow.__workflow_id__, state_path)
+ renew_report(workflow.__workflow_id__, report, state_path)
  else:
  logger.debug(
- f'"{workflow.__workflow_id__}": checked out of spec, revoking result'
+ f'"{workflow.__workflow_id__}": checked out of spec, revoking report'
  )
- revoke_result(workflow.__workflow_id__, state_path)
+ revoke_report(workflow.__workflow_id__, report, state_path)
  else:
  logger.debug(
  f'Checking "{workflow.__workflow_id__}" with "calibrate" method ...'
  )
- data = workflow.calibrate()
- if not is_pickleable(data):
- raise TypeError(
- f'"{workflow.__workflow_id__}" : "calibrate" return not pickleable data'
- )
- result = Result(workflow=workflow.__workflow_id__,
- data=data,
- config_path=current_config(state_path),
- base_path=state_path)
- save_result(workflow.__workflow_id__, result, state_path)
+
+ report = call_calibrate(workflow, session_id, state_path)

  logger.debug(f'Calibrated "{workflow.__workflow_id__}" !')
- result = call_analyzer(workflow,
- result,
+ report = call_analyzer(workflow,
+ report,
  history,
  check=False,
  plot=plot)
- save_result(workflow.__workflow_id__,
- result,
+ save_report(workflow.__workflow_id__,
+ report,
  state_path,
  overwrite=True)
-
- return result
+ return report


- @functools.lru_cache(maxsize=128)
  def calibrate(workflow: WorkflowType, code_path: str | Path,
- state_path: str | Path, plot: bool, session_id: str) -> Result:
- history = find_result(workflow.__workflow_id__, state_path)
+ state_path: str | Path, plot: bool, session_id: str) -> Report:
+ history = find_report(workflow.__workflow_id__, state_path)

  logger.debug(f'Calibrating "{workflow.__workflow_id__}" ...')
- data = workflow.calibrate()
- if not is_pickleable(data):
- raise TypeError(
- f'"{workflow.__workflow_id__}" : "calibrate" return not pickleable data'
- )
- result = Result(workflow=workflow.__workflow_id__,
- data=data,
- config_path=current_config(state_path),
- base_path=state_path)
- save_result(workflow.__workflow_id__, result, state_path)
+
+ report = call_calibrate(workflow, session_id, state_path)
+
  logger.debug(f'Calibrated "{workflow.__workflow_id__}" !')
- result = call_analyzer(workflow, result, history, check=False, plot=plot)
- save_result(workflow.__workflow_id__, result, state_path, overwrite=True)
- return result
+
+ report = call_analyzer(workflow, report, history, check=False, plot=plot)
+
+ save_report(workflow.__workflow_id__, report, state_path, overwrite=True)
+ return report


  def diagnose(workflow: WorkflowType, code_path: str | Path,
@@ -250,16 +308,16 @@ def diagnose(workflow: WorkflowType, code_path: str | Path,
  '''
  logger.debug(f'diagnose "{workflow.__workflow_id__}"')
  # check_data
- result = check_data(workflow, code_path, state_path, plot, session_id)
+ report = check_data(workflow, code_path, state_path, plot, session_id)
  # in spec case
- if result.in_spec:
+ if report.in_spec:
  logger.debug(
  f'"{workflow.__workflow_id__}": Checked! In spec, no need to diagnose'
  )
  return False
  # bad data case
  recalibrated = []
- if result.bad_data:
+ if report.bad_data:
  logger.debug(
  f'"{workflow.__workflow_id__}": Bad data, diagnosing dependents')
  recalibrated = [
@@ -267,7 +325,7 @@ def diagnose(workflow: WorkflowType, code_path: str | Path,
  for n in get_dependents(workflow, code_path)
  ]
  if not any(recalibrated):
- if result.bad_data:
+ if report.bad_data:
  raise CalibrationFailedError(
  f'"{workflow.__workflow_id__}": bad data but no dependents recalibrated.'
  )
@@ -278,27 +336,27 @@ def diagnose(workflow: WorkflowType, code_path: str | Path,
  logger.debug(
  f'recalibrate "{workflow.__workflow_id__}" because some dependents recalibrated.'
  )
- elif not result.in_spec and not result.bad_data:
+ elif not report.in_spec and not report.bad_data:
  logger.debug(
  f'recalibrate "{workflow.__workflow_id__}" because out of spec.')
- elif result.in_spec:
+ elif report.in_spec:
  logger.error(
  f'Never reach: recalibrate "{workflow.__workflow_id__}" because in spec.'
  )
- elif result.bad_data:
+ elif report.bad_data:
  logger.error(
  f'Never reach: recalibrate "{workflow.__workflow_id__}" because bad data.'
  )
  else:
  logger.error(f'Never reach: recalibrate "{workflow.__workflow_id__}"')

- result = calibrate(workflow, code_path, state_path, plot, session_id)
- if result.bad_data or not result.in_spec:
- obey_the_oracle(result, state_path)
+ report = calibrate(workflow, code_path, state_path, plot, session_id)
+ if report.bad_data or not report.in_spec:
+ obey_the_oracle(report, state_path)
  raise CalibrationFailedError(
  f'"{workflow.__workflow_id__}": All dependents passed, but calibration failed!'
  )
- update_parameters(result, state_path)
+ update_parameters(report, state_path)
  return True


@@ -335,13 +393,13 @@ def maintain(workflow: WorkflowType,
  f'"{workflow.__workflow_id__}": In spec, no need to maintain')
  return
  # check_data
- result = check_data(workflow, code_path, state_path, plot, session_id)
- if result.in_spec:
+ report = check_data(workflow, code_path, state_path, plot, session_id)
+ if report.in_spec:
  if not run:
  logger.debug(
  f'"{workflow.__workflow_id__}": In spec, no need to maintain')
  return
- elif result.bad_data:
+ elif report.bad_data:
  logger.debug(
  f'"{workflow.__workflow_id__}": Bad data, diagnosing dependents')
  for n in get_dependents(workflow, code_path):
@@ -354,15 +412,15 @@ def maintain(workflow: WorkflowType,
  f'"{workflow.__workflow_id__}": All dependents diagnosed')
  # calibrate
  logger.debug(f'recalibrate "{workflow.__workflow_id__}"')
- result = calibrate(workflow, code_path, state_path, plot, session_id)
- if result.bad_data or not result.in_spec:
+ report = calibrate(workflow, code_path, state_path, plot, session_id)
+ if report.bad_data or not report.in_spec:
  if not freeze:
- obey_the_oracle(result, state_path)
+ obey_the_oracle(report, state_path)
  raise CalibrationFailedError(
  f'"{workflow.__workflow_id__}": All dependents passed, but calibration failed!'
  )
  if not freeze:
- update_parameters(result, state_path)
+ update_parameters(report, state_path)
  return


@@ -374,17 +432,17 @@ def run(workflow: WorkflowType,
  freeze: bool = False):
  session_id = uuid.uuid4().hex
  logger.debug(f'run "{workflow.__workflow_id__}" without dependences.')
- result = calibrate(workflow,
+ report = calibrate(workflow,
  code_path,
  state_path,
  plot,
  session_id=session_id)
- if result.bad_data or not result.in_spec:
+ if report.bad_data or not report.in_spec:
  if not freeze:
- obey_the_oracle(result, state_path)
+ obey_the_oracle(report, state_path)
  raise CalibrationFailedError(
  f'"{workflow.__workflow_id__}": All dependents passed, but calibration failed!'
  )
  if not freeze:
- update_parameters(result, state_path)
+ update_parameters(report, state_path)
  return
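
In schedule.py the @functools.lru_cache decorators on check_data and calibrate are removed in favour of an explicit per-session cache: call_check and call_calibrate store each report under a (workflow, method) key via set_cache and reload it with get_cache, so a workflow's check() or calibrate() runs at most once per session even when several dependents request it. A rough standalone sketch of the idea (simplified, not the module's actual code):

    # Session-scoped memoisation, keyed by (workflow, method).
    _session_id = None
    _session_cache = {}

    def cached_call(session_id, workflow, method, func):
        global _session_id
        if _session_id != session_id:      # new session: drop stale entries
            _session_cache.clear()
            _session_id = session_id
        key = (workflow, method)
        if key not in _session_cache:
            _session_cache[key] = func()   # run check()/calibrate() once
        return _session_cache[key]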
qulab/executor/storage.py CHANGED
@@ -16,7 +16,7 @@ __current_config_cache = None


  @dataclass
- class Result():
+ class Report():
  workflow: str = ''
  in_spec: bool = False
  bad_data: bool = False
@@ -27,17 +27,18 @@ class Result():
  parameters: dict = field(default_factory=dict)
  oracle: dict = field(default_factory=dict)
  other_infomation: dict = field(default_factory=dict)
- data: Any = field(default_factory=tuple)
+ data: Any = field(default_factory=tuple, repr=False)
  index: int = -1
- previous_path: Path | None = None
- heads: dict[str, Path] | None = None
- base_path: Path | None = None
- config_path: Path | None = None
+ previous_path: Path | None = field(default=None, repr=False)
+ heads: dict[str, Path] = field(default_factory=dict, repr=False)
+ base_path: Path | None = field(default=None, repr=False)
+ path: Path | None = field(default=None, repr=False)
+ config_path: Path | None = field(default=None, repr=False)

  @property
  def previous(self):
  if self.previous_path is not None and self.base_path is not None:
- return load_result(self.previous_path, self.base_path)
+ return load_report(self.previous_path, self.base_path)
  else:
  return None

@@ -88,7 +89,7 @@ def random_path(base: Path) -> Path:
  return path


- def save_config_key_history(key: str, result: Result,
+ def save_config_key_history(key: str, report: Report,
  base_path: str | Path) -> int:
  global __current_config_cache
  base_path = Path(base_path) / 'state'
@@ -101,14 +102,15 @@ def save_config_key_history(key: str, result: Result,
  else:
  __current_config_cache = {}

- __current_config_cache[key] = result.data, datetime.now()
+ __current_config_cache[
+ key] = report.data, report.calibrated_time, report.checked_time

  with open(base_path / 'parameters.pkl', 'wb') as f:
  pickle.dump(__current_config_cache, f)
  return 0


- def find_config_key_history(key: str, base_path: str | Path) -> Result | None:
+ def find_config_key_history(key: str, base_path: str | Path) -> Report | None:
  global __current_config_cache
  base_path = Path(base_path) / 'state'
  if __current_config_cache is None:
@@ -119,72 +121,77 @@ def find_config_key_history(key: str, base_path: str | Path) -> Result | None:
  __current_config_cache = {}

  if key in __current_config_cache:
- value, checked_time = __current_config_cache.get(key, None)
- result = Result(
+ value, calibrated_time, checked_time = __current_config_cache.get(
+ key, None)
+ report = Report(
  workflow=f'cfg:{key}',
  bad_data=False,
  in_spec=True,
  fully_calibrated=True,
  parameters={key: value},
  data=value,
- calibrated_time=checked_time,
+ calibrated_time=calibrated_time,
  checked_time=checked_time,
  )
- result.bad_data = False
- return result
+ return report
  return None


- def save_result(workflow: str,
- result: Result,
+ def save_report(workflow: str,
+ report: Report,
  base_path: str | Path,
- overwrite: bool = False) -> int:
+ overwrite: bool = False,
+ refresh_heads: bool = True) -> int:
  if workflow.startswith("cfg:"):
- return save_config_key_history(workflow[4:], result, base_path)
+ return save_config_key_history(workflow[4:], report, base_path)

  logger.debug(
- f'Saving result for "{workflow}", {result.in_spec=}, {result.bad_data=}, {result.fully_calibrated=}'
+ f'Saving report for "{workflow}", {report.in_spec=}, {report.bad_data=}, {report.fully_calibrated=}'
  )
  base_path = Path(base_path)
  if overwrite:
- buf = lzma.compress(pickle.dumps(result))
- path = get_head(workflow, base_path)
+ buf = lzma.compress(pickle.dumps(report))
+ path = report.path
+ if path is None:
+ raise ValueError("Report path is None, can't overwrite.")
  with open(base_path / 'objects' / path, "rb") as f:
  index = int.from_bytes(f.read(8), 'big')
- result.index = index
+ report.index = index
  else:
- result.previous_path = get_head(workflow, base_path)
- buf = lzma.compress(pickle.dumps(result))
+ report.previous_path = get_head(workflow, base_path)
+ buf = lzma.compress(pickle.dumps(report))
  path = random_path(base_path / 'objects')
  (base_path / 'objects' / path).parent.mkdir(parents=True,
  exist_ok=True)
- result.index = create_index("result",
+ report.path = path
+ report.index = create_index("report",
  base_path,
  context=str(path),
  width=35)

  with open(base_path / 'objects' / path, "wb") as f:
- f.write(result.index.to_bytes(8, 'big'))
+ f.write(report.index.to_bytes(8, 'big'))
  f.write(buf)
- set_head(workflow, path, base_path)
- return result.index
+ if refresh_heads:
+ set_head(workflow, path, base_path)
+ return report.index


- def load_result(path: str | Path, base_path: str | Path) -> Result | None:
+ def load_report(path: str | Path, base_path: str | Path) -> Report | None:
  base_path = Path(base_path)
  path = base_path / 'objects' / path

  with open(base_path / 'objects' / path, "rb") as f:
  index = int.from_bytes(f.read(8), 'big')
- result = pickle.loads(lzma.decompress(f.read()))
- result.base_path = base_path
- result.index = index
- return result
+ report = pickle.loads(lzma.decompress(f.read()))
+ report.base_path = base_path
+ report.index = index
+ return report


- def find_result(
+ def find_report(
  workflow: str, base_path: str | Path = get_config_value("data", Path)
- ) -> Result | None:
+ ) -> Report | None:
  if workflow.startswith("cfg:"):
  return find_config_key_history(workflow[4:], base_path)

@@ -192,25 +199,25 @@ def find_result(
  path = get_head(workflow, base_path)
  if path is None:
  return None
- return load_result(path, base_path)
+ return load_report(path, base_path)


- def renew_result(workflow: str, base_path: str | Path):
- logger.debug(f'Renewing result for "{workflow}"')
- result = find_result(workflow, base_path)
- if result is not None:
- result.checked_time = datetime.now()
- return save_result(workflow, result, base_path)
+ def renew_report(workflow: str, report, base_path: str | Path):
+ logger.debug(f'Renewing report for "{workflow}"')
+ report = find_report(workflow, base_path)
+ if report is not None:
+ report.checked_time = datetime.now()
+ return save_report(workflow, report, base_path)


- def revoke_result(workflow: str, base_path: str | Path):
- logger.debug(f'Revoking result for "{workflow}"')
+ def revoke_report(workflow: str, report, base_path: str | Path):
+ logger.debug(f'Revoking report for "{workflow}"')
  base_path = Path(base_path)
  path = get_head(workflow, base_path)
  if path is not None:
- result = load_result(path, base_path)
- result.in_spec = False
- return save_result(workflow, result, base_path)
+ report = load_report(path, base_path)
+ report.in_spec = False
+ return save_report(workflow, report, base_path)


  def set_head(workflow: str, path: Path, base_path: str | Path):
@@ -290,12 +297,12 @@ def query_index(name: str, base_path: str | Path, index: int):
  return context.rstrip()


- def get_result_by_index(
+ def get_report_by_index(
  index: int, base_path: str | Path = get_config_value("data", Path)
- ) -> Result | None:
+ ) -> Report | None:
  try:
- path = query_index("result", base_path, index)
- return load_result(path, base_path)
+ path = query_index("report", base_path, index)
+ return load_report(path, base_path)
  except:
  return None

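In storage.py, every workflow has a "head" pointing at its most recently saved report. Two additions matter here: Report now remembers the object path it was written to (the new path field, used when overwrite=True rewrites a report in place), and save_report gains a refresh_heads flag so a report can be persisted without becoming the new head, which is how check-only reports are stored. A standalone sketch of the head-pointer bookkeeping (hypothetical code, not the module itself):

    # Hypothetical head-pointer store: workflow -> path of its latest report.
    heads: dict[str, str] = {}
    objects: dict[str, bytes] = {}

    def save(workflow: str, path: str, blob: bytes, refresh_heads: bool = True):
        objects[path] = blob            # always persist the object itself
        if refresh_heads:
            heads[workflow] = path      # only then does it become the head

    def find(workflow: str) -> bytes | None:
        path = heads.get(workflow)
        return objects.get(path) if path is not None else None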
qulab/executor/transform.py CHANGED
@@ -1,4 +1,4 @@
- from .storage import Result, save_config
+ from .storage import Report, save_config
  __current_config_id = None


@@ -43,16 +43,16 @@ def _export_config() -> dict:
  return parameters


- def obey_the_oracle(result: Result, data_path):
+ def obey_the_oracle(report: Report, data_path):
  global __current_config_id
- update_config(result.oracle)
+ update_config(report.oracle)
  cfg = export_config()
  __current_config_id = save_config(cfg, data_path)


- def update_parameters(result: Result, data_path):
+ def update_parameters(report: Report, data_path):
  global __current_config_id
- update_config(result.parameters)
+ update_config(report.parameters)
  cfg = export_config()
  __current_config_id = save_config(cfg, data_path)

qulab/executor/utils.py CHANGED
@@ -37,7 +37,7 @@ def workflow_template(workflow: str, deps: list[str]) -> str:
  import numpy as np
  from loguru import logger

- from qulab.typing import Result
+ from qulab.typing import Report


  # 多长时间应该检查一次校准实验,单位是秒。
@@ -63,35 +63,35 @@ def calibrate():
  return x, y


- def analyze(result: Result, history: Result | None = None) -> Result:
+ def analyze(report: Report, history: Report | None = None) -> Report:
  \"\"\"
  分析校准结果。

- result: Result
+ report: Report
  本次校准实验的数据。
- history: Result | None
+ history: Report | None
  上次校准实验数据和分析结果,如果有的话。
  \"\"\"
  import random

- # 这里添加你的分析过程,运行 calibrate 得到的数据,在 result.data 里
+ # 这里添加你的分析过程,运行 calibrate 得到的数据,在 report.data 里
  # 你可以得到校准的结果,然后根据这个结果进行分析。
- x, y = result.data
+ x, y = report.data

  # 完整校准后的状态有两种:OK 和 Bad,分别对应校准成功和校准失败。
  # 校准失败是指出现坏数据,无法简单通过重新运行本次校准解决,需要
  # 检查前置步骤。
- result.state = random.choice(['OK', 'Bad'])
+ report.state = random.choice(['OK', 'Bad'])

  # 参数是一个字典,包含了本次校准得到的参数,后续会更新到config表中。
- result.parameters = {{'gate.R.Q1.params.amp':1}}
+ report.parameters = {{'gate.R.Q1.params.amp':1}}

  # 其他信息可以是任何可序列化的内容,你可以将你想要记录的信息放在这里。
  # 下次校准分析时,这些信息也会在 history 参数中一起传入,帮助你在下
  # 次分析时对比参考。
- result.other_infomation = {{}}
+ report.other_infomation = {{}}

- return result
+ return report


  def check():
@@ -111,33 +111,33 @@ def check():
  return x, y


- def check_analyze(result: Result, history: Result | None = None) -> Result:
+ def check_analyze(report: Report, history: Report | None = None) -> Report:
  \"\"\"
  分析检查结果。

- result: Result
+ report: Report
  本次检查实验的数据。
- history: Result | None
+ history: Report | None
  上次检查实验数据和分析结果,如果有的话。
  \"\"\"
  import random

- # 这里添加你的分析过程,运行 check 得到的数据,在 result.data 里
+ # 这里添加你的分析过程,运行 check 得到的数据,在 report.data 里
  # 你可以得到校准的结果,然后根据这个结果进行分析。
- x, y = result.data
+ x, y = report.data

  # 状态有三种:Outdated, OK, Bad,分别对应过时、正常、坏数据。
  # Outdated 是指数据过时,即参数漂了,需要重新校准。
  # OK 是指数据正常,参数也没漂,不用重新校准。
  # Bad 是指数据坏了,无法校准,需要检查前置步骤。
- result.state = random.choice(['Outdated', 'OK', 'Bad'])
+ report.state = random.choice(['Outdated', 'OK', 'Bad'])

- return result
+ return report


- def oracle(result: Result,
- history: Result | None = None,
- system_state: dict[str:str] | None = None) -> Result:
+ def oracle(report: Report,
+ history: Report | None = None,
+ system_state: dict[str:str] | None = None) -> Report:
  \"\"\"
  谕示:指凭直觉或经验判断,改动某些配置,以期望下次校准成功。

@@ -145,31 +145,31 @@ def oracle(result: Result,
  比如通常我们在死活测不到 rabi 或能谱时,会换一个 idle bias 再试试。这
  里我们凭直觉设的那个 bias 值,就是一个谕示,可以通过 oracle 来设定。

- 该函数代入的参数 result 是 analyze 函数的返回值。
+ 该函数代入的参数 report 是 analyze 函数的返回值。
  \"\"\"

- # result.oracle['Q0.bias'] = 0.1
- # result.oracle['Q1.bias'] = -0.03
+ # report.oracle['Q0.bias'] = 0.1
+ # report.oracle['Q1.bias'] = -0.03

- return result
+ return report
  """


  def debug_analyze(
- result_index: int,
+ report_index: int,
  code_path: str | Path = get_config_value('code', Path),
  data_path: str | Path = get_config_value('data', Path),
  ) -> None:
- from .storage import get_result_by_index
+ from .storage import get_report_by_index

- result = get_result_by_index(result_index, data_path)
- if result is None:
- raise ValueError(f'Invalid result index: {result_index}')
- workflow = result.workflow
+ report = get_report_by_index(report_index, data_path)
+ if report is None:
+ raise ValueError(f'Invalid report index: {report_index}')
+ workflow = report.workflow
  wf = load_workflow(workflow, code_path)
  if wf is None:
  raise ValueError(f'Invalid workflow: {workflow}')
- result = wf.analyze(result, result.previous)
+ report = wf.analyze(report, report.previous)
  if hasattr(wf, 'plot'):
- wf.plot(result)
- return result
+ wf.plot(report)
+ return report
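
debug_analyze now resolves a saved report by its index (via get_report_by_index), re-runs the workflow's analyze on it, and calls plot if the workflow defines one. A minimal usage sketch, assuming a report with index 123 exists under the configured data directory (the index value is illustrative):

    # Re-run analysis and plotting for a stored report.
    from qulab import debug_analyze

    report = debug_analyze(123)   # index shown when the report was saved
    print(report.state, report.parameters)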
qulab/fun.cpython-311-darwin.so CHANGED
Binary file
qulab/typing.py CHANGED
@@ -1,2 +1,2 @@
- from .executor.storage import Result
+ from .executor.storage import Report
  from .scan.record import Record
qulab/version.py CHANGED
@@ -1 +1 @@
- __version__ = "2.7.5"
+ __version__ = "2.7.7"