QuLab 2.4.20-cp311-cp311-win_amd64.whl → 2.5.1-cp311-cp311-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.2
  Name: QuLab
- Version: 2.4.20
+ Version: 2.5.1
  Summary: contral instruments and manage data
  Author-email: feihoo87 <feihoo87@gmail.com>
  Maintainer-email: feihoo87 <feihoo87@gmail.com>
@@ -1,17 +1,19 @@
- qulab/__init__.py,sha256=vkFybY8YSsQilYdThPRD83-btPAR41sy_WCXiM-6mME,141
- qulab/__main__.py,sha256=g9iBs8xxX6Yik7cmgllQkpBN8C4JNoZVsEOyCCLCyFU,63
+ qulab/__init__.py,sha256=pdlicy07Dx39pTEGZ0i41Ox9tuuuKKIdsFIrE13bneg,249
+ qulab/__main__.py,sha256=FL4YsGZL1jEtmcPc5WbleArzhOHLMsWl7OH3O-1d1ss,72
  qulab/dicttree.py,sha256=ZoSJVWK4VMqfzj42gPb_n5RqLlM6K1Me0WmLIfLEYf8,14195
- qulab/fun.cp311-win_amd64.pyd,sha256=R96iJRfL-QZSf9reOnccgip5md-3EdMl21n0JSZs4lY,31744
- qulab/version.py,sha256=0EYwz_vOIrGy7c3cr82zYeYGyK4QxRsY1XkcDC_j6SI,22
- qulab/cli/__init__.py,sha256=6xd2eYw32k1NmfAuYSu__1kaP12Oz1QVqwbkYXdWno4,588
- qulab/cli/config.py,sha256=Loee8MwKFuDRWXttG8iBY9ZmD5xSCHDFi3ztt2i0S_0,3139
+ qulab/fun.cp311-win_amd64.pyd,sha256=GySZYYfMEL41c4mC5SdGt6pDagUrmnU-bAtD9HZLWv4,31744
+ qulab/typing.py,sha256=3c0eKa1avEHIi5wPvh3-4l6Of5mu5Rn1MWPnMeLGNX0,71
+ qulab/version.py,sha256=LBVD6Y5IKVHVmwynNRJsCm7zBTye77uIsmOVQJQUroE,21
+ qulab/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ qulab/cli/commands.py,sha256=6xd2eYw32k1NmfAuYSu__1kaP12Oz1QVqwbkYXdWno4,588
+ qulab/cli/config.py,sha256=tNmH4ggdgrFqcQa2WZKLpidiYTg95_UnT0paDJ4fi4c,3204
  qulab/executor/__init__.py,sha256=LosPzOMaljSZY1thy_Fxtbrgq7uubJszMABEB7oM7tU,101
  qulab/executor/cli.py,sha256=owdDSaSuzTPQA5SlNCxX4nHWITU1BF0VZMj7tLqCZhM,5781
- qulab/executor/load.py,sha256=fZNmsOGR_2s1IykVmVwziDKeWCIej-jRJAQzKJRUdaE,11990
- qulab/executor/schedule.py,sha256=bXQzn3e3tCvAVdEBsvh5ecAThP52njyxbsrlrxC9TzA,11922
- qulab/executor/storage.py,sha256=M66Q5_Uc5MMfc_QAuuaaexwAz7wxBPMkeleB5nRpQmI,4621
- qulab/executor/transform.py,sha256=inaOn6eqCs22ZZ0xAQl8s8YCoEACaXSwFNNu7jqdwAk,2148
- qulab/executor/utils.py,sha256=_9EZkx2rBpsWjhDN5BZ-Lq9MboqmHcI1jtwpRyrzSTY,3101
+ qulab/executor/load.py,sha256=i2MKxE5kflAGLt3e9LjME59K7IO7s68EUApP9228LhQ,12125
+ qulab/executor/schedule.py,sha256=4D8o68Cx6vCkpSTMHzULTwuuq1tS8unc2n8WdBA9oB0,12013
+ qulab/executor/storage.py,sha256=Ms4euSGrMnFJPwcs70sg5-ZOOm1B6kGULcZOipsFLk4,7242
+ qulab/executor/transform.py,sha256=AazWdlkEoOBaUJpTYsT5J4f0RanzCEeo-ThwEg8BB4Y,1262
+ qulab/executor/utils.py,sha256=RNDEo17M-n_tge7uLITHxmoNNYSTm9OES75wQygR8dM,4065
  qulab/monitor/__init__.py,sha256=xEVDkJF8issrsDeLqQmDsvtRmrf-UiViFcGTWuzdlFU,43
  qulab/monitor/__main__.py,sha256=k2H1H5Zf9LLXTDLISJkbikLH-z0f1e5i5i6wXXYPOrE,105
  qulab/monitor/config.py,sha256=y_5StMkdrbZO1ziyKBrvIkB7Jclp9RCPK1QbsOhCxnY,785
@@ -91,9 +93,9 @@ qulab/visualization/plot_seq.py,sha256=Uo1-dB1YE9IN_A9tuaOs9ZG3S5dKDQ_l98iD2Wbxp
  qulab/visualization/qdat.py,sha256=HubXFu4nfcA7iUzghJGle1C86G6221hicLR0b-GqhKQ,5887
  qulab/visualization/rot3d.py,sha256=jGHJcqj1lEWBUV-W4GUGONGacqjrYvuFoFCwPse5h1Y,757
  qulab/visualization/widgets.py,sha256=HcYwdhDtLreJiYaZuN3LfofjJmZcLwjMfP5aasebgDo,3266
- QuLab-2.4.20.dist-info/LICENSE,sha256=b4NRQ-GFVpJMT7RuExW3NwhfbrYsX7AcdB7Gudok-fs,1086
- QuLab-2.4.20.dist-info/METADATA,sha256=LzlzxYnUG7CckjTxnZ2-2yJY7yfwUFlf5pll5JI_PV4,3804
- QuLab-2.4.20.dist-info/WHEEL,sha256=yNnHoQL2GZYIUXm9YvoaBpFjGlUoK9qq9oqYeudrWlE,101
- QuLab-2.4.20.dist-info/entry_points.txt,sha256=b0v1GXOwmxY-nCCsPN_rHZZvY9CtTbWqrGj8u1m8yHo,45
- QuLab-2.4.20.dist-info/top_level.txt,sha256=3T886LbAsbvjonu_TDdmgxKYUn939BVTRPxPl9r4cEg,6
- QuLab-2.4.20.dist-info/RECORD,,
+ QuLab-2.5.1.dist-info/LICENSE,sha256=b4NRQ-GFVpJMT7RuExW3NwhfbrYsX7AcdB7Gudok-fs,1086
+ QuLab-2.5.1.dist-info/METADATA,sha256=SpfzBYK0REQ2vkduzKiMwa2hGCl0JRkexh522Lf3HkM,3803
+ QuLab-2.5.1.dist-info/WHEEL,sha256=yNnHoQL2GZYIUXm9YvoaBpFjGlUoK9qq9oqYeudrWlE,101
+ QuLab-2.5.1.dist-info/entry_points.txt,sha256=b0v1GXOwmxY-nCCsPN_rHZZvY9CtTbWqrGj8u1m8yHo,45
+ QuLab-2.5.1.dist-info/top_level.txt,sha256=3T886LbAsbvjonu_TDdmgxKYUn939BVTRPxPl9r4cEg,6
+ QuLab-2.5.1.dist-info/RECORD,,
qulab/__init__.py CHANGED
@@ -1,3 +1,5 @@
+ from .executor.storage import find_result
+ from .executor.storage import get_result_by_index as get_result
  from .scan import Scan, get_record, load_record, lookup, lookup_list
  from .version import __version__
  from .visualization import autoplot
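With these re-exports, calibration results can be fetched straight from the package namespace. A minimal usage sketch, assuming a data directory has already been configured; the workflow name "rabi.py" and the index 42 are placeholders:

    from qulab import find_result, get_result

    latest = find_result("rabi.py")   # head result of a workflow, or None
    by_index = get_result(42)         # a result addressed by its integer index
    if latest is not None:
        print(latest.state, latest.parameters)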
qulab/__main__.py CHANGED
@@ -1,4 +1,4 @@
- from .cli import cli
+ from .cli.commands import cli

  if __name__ == '__main__':
      cli()
qulab/cli/__init__.py CHANGED
@@ -1,29 +0,0 @@
- import click
-
- from ..executor.cli import create, get, maintain, run, set
- from ..monitor.__main__ import main as monitor
- from ..scan.server import server
- from ..sys.net.cli import dht
- from ..visualization.__main__ import plot
-
-
- @click.group()
- def cli():
-     pass
-
-
- @cli.command()
- def hello():
-     """Print hello world."""
-     click.echo('hello, world')
-
-
- cli.add_command(monitor)
- cli.add_command(plot)
- cli.add_command(dht)
- cli.add_command(server)
- cli.add_command(maintain)
- cli.add_command(run)
- cli.add_command(create)
- cli.add_command(set)
- cli.add_command(get)
qulab/cli/commands.py ADDED
@@ -0,0 +1,29 @@
+ import click
+
+ from ..executor.cli import create, get, maintain, run, set
+ from ..monitor.__main__ import main as monitor
+ from ..scan.server import server
+ from ..sys.net.cli import dht
+ from ..visualization.__main__ import plot
+
+
+ @click.group()
+ def cli():
+     pass
+
+
+ @cli.command()
+ def hello():
+     """Print hello world."""
+     click.echo('hello, world')
+
+
+ cli.add_command(monitor)
+ cli.add_command(plot)
+ cli.add_command(dht)
+ cli.add_command(server)
+ cli.add_command(maintain)
+ cli.add_command(run)
+ cli.add_command(create)
+ cli.add_command(set)
+ cli.add_command(get)
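The command group itself is unchanged; only its home moved from qulab/cli/__init__.py (now empty) to qulab/cli/commands.py. A quick smoke test of the relocated entry point with click's test runner, as a sketch rather than part of the package:

    from click.testing import CliRunner
    from qulab.cli.commands import cli

    result = CliRunner().invoke(cli, ["hello"])
    print(result.output)   # hello, world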
qulab/cli/config.py CHANGED
@@ -62,27 +62,35 @@ def log_options(func):

      @click.option("--debug",
                    is_flag=True,
-                   default=lambda: get_config_value("debug", bool) or False,
+                   default=get_config_value("debug", bool),
                    help=f"Enable debug mode")
      @click.option("--log",
                    type=click.Path(),
                    default=lambda: get_config_value("log", Path),
                    help=f"Log file path")
+     @click.option("--quiet",
+                   is_flag=True,
+                   default=get_config_value("quiet", bool),
+                   help=f"Disable log output")
      @functools.wraps(func)
-     def wrapper(*args, log=None, debug=False, **kwargs):
-         if log is None and not debug:
-             logger.remove()
-             logger.add(sys.stderr, level='INFO')
-         elif log is None and debug:
-             logger.remove()
-             logger.add(sys.stderr, level='DEBUG')
-         elif log is not None and not debug:
-             logger.configure(handlers=[dict(sink=log, level='INFO')])
-         elif log is not None and debug:
-             logger.configure(handlers=[
-                 dict(sink=log, level='DEBUG'),
-                 dict(sink=sys.stderr, level='DEBUG')
-             ])
+     def wrapper(*args, **kwargs):
+         debug = bool(kwargs.pop("debug"))
+         log = kwargs.pop("log")
+         quiet = bool(kwargs.pop("quiet"))
+
+         if debug:
+             log_level = "DEBUG"
+         else:
+             log_level = "INFO"
+
+         handlers = []
+         if log is not None:
+             handlers.append(dict(sink=log, level=log_level))
+         if not quiet or debug:
+             handlers.append(dict(sink=sys.stderr, level=log_level))
+
+         logger.configure(handlers=handlers)
+
          return func(*args, **kwargs)

      return wrapper
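The rewritten wrapper builds the loguru handler list in one place instead of branching over four log/debug combinations, and the new --quiet flag drops the stderr sink unless --debug is set. A standalone sketch of the same selection logic; configure_logging is a hypothetical helper, not part of the package:

    import sys
    from loguru import logger

    def configure_logging(log=None, debug=False, quiet=False):
        level = "DEBUG" if debug else "INFO"
        handlers = []
        if log is not None:
            handlers.append(dict(sink=log, level=level))         # file sink from --log
        if not quiet or debug:
            handlers.append(dict(sink=sys.stderr, level=level))   # --quiet removes stderr unless --debug
        logger.configure(handlers=handlers)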
qulab/executor/load.py CHANGED
@@ -40,17 +40,21 @@ class SetConfigWorkflow():
              value = transform.query_config(self.key)
          except:
              value = eval(input(f'"{self.key}": '))
-         return self.key, value
+         return value

-     def analyze(self, key, value, history):
-         return 'OK', {key: value}, {}
+     def analyze(self, result: Result, history: Result):
+         result.state = 'OK'
+         result.parameters = {self.key: result.data}
+         return result

      def check(self):
          from .transform import query_config
-         return self.key, query_config(self.key)
+         return query_config(self.key)

-     def check_analyze(self, key, value, history):
-         return 'Out of Spec', {key: value}, {}
+     def check_analyze(self, result: Result, history: Result):
+         result.state = 'Outdated'
+         result.parameters = {self.key: result.data}
+         return result

      @staticmethod
      def _equal(a, b):
@@ -269,7 +273,7 @@ def load_workflow_from_template(template_path: str,
          str: 已经替换的新字符串。
      """
      pattern = re.compile(r'VAR\s*\(\s*(["\'])(\w+)\1\s*\)')
-     replacement = f'__VAR_{hash_str}' + r'["\2"]'
+     replacement = f'__VAR_{hash_str}' + r'[\1\2\1]'
      new_text = re.sub(pattern, replacement, text)
      return new_text
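The new replacement pattern re-uses whichever quote character the template author wrote (captured as group 1) instead of always emitting double quotes. A small demonstration of the substitution; the hash value "abc123" is a placeholder for the real template hash:

    import re

    hash_str = "abc123"
    pattern = re.compile(r'VAR\s*\(\s*(["\'])(\w+)\1\s*\)')
    replacement = f'__VAR_{hash_str}' + r'[\1\2\1]'

    print(re.sub(pattern, replacement, "amp = VAR('amp')"))
    # amp = __VAR_abc123['amp']   (the original quote character is preserved)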
 
qulab/executor/schedule.py CHANGED
@@ -5,10 +5,10 @@ from pathlib import Path

  from loguru import logger

- from . import transform
  from .load import WorkflowType, get_dependents
- from .storage import (Result, find_result, get_head, renew_result,
-                       revoke_result, save_result)
+ from .storage import (Result, find_result, renew_result, revoke_result,
+                       save_result)
+ from .transform import update_parameters


  class CalibrationFailedError(Exception):
@@ -66,27 +66,26 @@ def check_state(workflow: WorkflowType, code_path: str | Path,
      return True


- def call_analyzer(node, data, history, check=False, plot=False):
+ def call_analyzer(node,
+                   result: Result,
+                   history: Result | None,
+                   check=False,
+                   plot=False) -> Result:
      if check:
-         result = transform.params_to_result(
-             node.check_analyze(*data,
-                                history=transform.result_to_params(history)))
+         result = node.check_analyze(result, history=history)
          result.fully_calibrated = False
      else:
-         result = transform.params_to_result(
-             node.analyze(*data, history=transform.result_to_params(history)))
+         result = node.analyze(result, history=history)
          result.fully_calibrated = True
      if plot:
          call_plot(node, result)
-     result.data = data
      return result


  @logger.catch()
- def call_plot(node, result, check=False):
+ def call_plot(node, result: Result, check=False):
      if hasattr(node, 'plot') and callable(node.plot):
-         state, params, other = transform.result_to_params(result)
-         node.plot(state, params, other)
+         node.plot(result)


  @functools.lru_cache(maxsize=128)
@@ -120,14 +119,19 @@ def check_data(workflow: WorkflowType, code_path: str | Path,

      if hasattr(workflow, 'check') and callable(workflow.check) and hasattr(
              workflow, 'check_analyze') and callable(workflow.check_analyze):
-         logger.debug(f'Checking "{workflow}" with "check" method ...')
+         logger.debug(
+             f'Checking "{workflow.__workflow_id__}" with "check" method ...')
          data = workflow.check()
          result = Result()
         result.data = data
-         save_result(workflow.__workflow_id__, result, state_path)
+         #save_result(workflow.__workflow_id__, result, state_path)

-         logger.debug(f'Checked "{workflow}" !')
-         result = call_analyzer(workflow, data, history, check=True, plot=plot)
+         logger.debug(f'Checked "{workflow.__workflow_id__}" !')
+         result = call_analyzer(workflow,
+                                result,
+                                history,
+                                check=True,
+                                plot=plot)
          if result.in_spec:
              logger.debug(
                  f'"{workflow.__workflow_id__}": checked in spec, renewing result'
@@ -148,9 +152,15 @@ def check_data(workflow: WorkflowType, code_path: str | Path,
          save_result(workflow.__workflow_id__, result, state_path)

          logger.debug(f'Calibrated "{workflow}" !')
-         result = call_analyzer(workflow, data, history, check=False, plot=plot)
-         save_result(workflow.__workflow_id__, result, state_path,
-                     get_head(workflow.__workflow_id__, state_path))
+         result = call_analyzer(workflow,
+                                result,
+                                history,
+                                check=False,
+                                plot=plot)
+         save_result(workflow.__workflow_id__,
+                     result,
+                     state_path,
+                     overwrite=True)

      return result

@@ -166,9 +176,8 @@ def calibrate(workflow: WorkflowType, code_path: str | Path,
      result.data = data
      save_result(workflow.__workflow_id__, result, state_path)
      logger.debug(f'Calibrated "{workflow.__workflow_id__}" !')
-     result = call_analyzer(workflow, data, history, check=False, plot=plot)
-     save_result(workflow.__workflow_id__, result, state_path,
-                 get_head(workflow.__workflow_id__, state_path))
+     result = call_analyzer(workflow, result, history, check=False, plot=plot)
+     save_result(workflow.__workflow_id__, result, state_path, overwrite=True)
      return result


@@ -226,7 +235,7 @@ def diagnose(workflow: WorkflowType, code_path: str | Path,
          raise CalibrationFailedError(
              f'"{workflow.__workflow_id__}": All dependents passed, but calibration failed!'
          )
-     transform.update_parameters(result)
+     update_parameters(result)
      return True


@@ -282,7 +291,7 @@ def maintain(workflow: WorkflowType,
              f'"{workflow.__workflow_id__}": All dependents passed, but calibration failed!'
          )
      if update:
-         transform.update_parameters(result)
+         update_parameters(result)
      return


@@ -304,5 +313,5 @@ def run(workflow: WorkflowType,
              f'"{workflow.__workflow_id__}": All dependents passed, but calibration failed!'
          )
      if update:
-         transform.update_parameters(result)
+         update_parameters(result)
      return
qulab/executor/storage.py CHANGED
@@ -1,11 +1,15 @@
+ import lzma
  import pickle
  import uuid
  from dataclasses import dataclass, field
  from datetime import datetime, timedelta
  from pathlib import Path
+ from typing import Any, Literal

  from loguru import logger

+ from ..cli.config import get_config_value
+

  @dataclass
  class Result():
@@ -15,28 +19,50 @@ class Result():
      calibrated_time: datetime = field(default_factory=datetime.now)
      checked_time: datetime = field(default_factory=datetime.now)
      ttl: timedelta = timedelta(days=3650)
-     params: dict = field(default_factory=dict)
-     info: dict = field(default_factory=dict)
-     data: tuple = field(default_factory=tuple)
-     previous: Path | None = None
-
-
- class Graph:
-
-     def __init__(self):
-         self.nodes = {}
-         self.heads = set()
-         self.roots = set()
-
-     def add_node(self, node: str, deps: list[str]):
-         if node not in self.nodes:
-             self.nodes[node] = deps
-         if not deps:
-             self.heads.add(node)
-         for dep in deps:
-             if dep not in self.nodes:
-                 self.nodes[dep] = []
-             self.roots.discard(dep)
+     parameters: dict = field(default_factory=dict)
+     other_infomation: dict = field(default_factory=dict)
+     data: Any = field(default_factory=tuple)
+     index: int = -1
+     previous_path: Path | None = None
+     base_path: Path | None = None
+
+     @property
+     def previous(self):
+         if self.previous_path is not None and self.base_path is not None:
+             return load_result(self.previous_path, self.base_path)
+         else:
+             return None
+
+     @property
+     def state(self) -> Literal['OK', 'Bad', 'Outdated']:
+         state = 'Bad'
+         match (self.in_spec, self.bad_data):
+             case (True, False):
+                 state = 'OK'
+             case (False, True):
+                 state = 'Bad'
+             case (False, False):
+                 state = 'Outdated'
+         return state
+
+     @state.setter
+     def state(self, state: Literal['OK', 'Bad', 'Outdated', 'In spec',
+                                    'Out of spec', 'Bad data']):
+         if state not in [
+                 'OK', 'Bad', 'Outdated', 'In spec', 'Out of spec', 'Bad data'
+         ]:
+             raise ValueError(
+                 f'Invalid state: {state}, state must be one of "OK", "Bad" and "Outdated"'
+             )
+         if state in ['In spec', 'OK']:
+             self.in_spec = True
+             self.bad_data = False
+         elif state in ['Bad data', 'Bad']:
+             self.bad_data = True
+             self.in_spec = False
+         else:
+             self.bad_data = False
+             self.in_spec = False


  def random_path(base: Path) -> Path:
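The new state property folds the two booleans in_spec and bad_data into a single label, and its setter also accepts the legacy labels used by the old tuple-based API. A short sketch of the round trip, showing only the mapping:

    from qulab.executor.storage import Result

    r = Result()
    r.state = 'In spec'            # legacy label, equivalent to 'OK'
    assert (r.in_spec, r.bad_data) == (True, False)
    assert r.state == 'OK'

    r.state = 'Outdated'           # parameters drifted: neither in spec nor bad data
    assert (r.in_spec, r.bad_data) == (False, False)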
@@ -50,27 +76,55 @@ def random_path(base: Path) -> Path:
  def save_result(workflow: str,
                  result: Result,
                  base_path: str | Path,
-                 path: Path | None = None):
+                 overwrite: bool = False) -> int:
      logger.debug(
          f'Saving result for "{workflow}", {result.in_spec=}, {result.bad_data=}, {result.fully_calibrated=}'
      )
      base_path = Path(base_path)
-     if path is None:
-         path = random_path(base_path)
-     (base_path / 'objects' / path).parent.mkdir(parents=True, exist_ok=True)
-     result.previous = get_head(workflow, base_path)
+     if overwrite:
+         buf = lzma.compress(pickle.dumps(result))
+         path = get_head(workflow, base_path)
+         with open(base_path / 'objects' / path, "rb") as f:
+             index = int.from_bytes(f.read(8), 'big')
+         result.index = index
+     else:
+         result.previous_path = get_head(workflow, base_path)
+         buf = lzma.compress(pickle.dumps(result))
+         path = random_path(base_path / 'objects')
+         (base_path / 'objects' / path).parent.mkdir(parents=True,
+                                                     exist_ok=True)
+         result.index = create_index("result",
+                                     base_path,
+                                     context=str(path),
+                                     width=35)
+
      with open(base_path / 'objects' / path, "wb") as f:
-         pickle.dump(result, f)
+         f.write(result.index.to_bytes(8, 'big'))
+         f.write(buf)
      set_head(workflow, path, base_path)
+     return result.index


- def find_result(workflow: str, base_path: str | Path) -> Result | None:
+ def load_result(path: str | Path, base_path: str | Path) -> Result | None:
+     base_path = Path(base_path)
+     path = base_path / 'objects' / path
+
+     with open(base_path / 'objects' / path, "rb") as f:
+         index = int.from_bytes(f.read(8), 'big')
+         result = pickle.loads(lzma.decompress(f.read()))
+         result.base_path = base_path
+         result.index = index
+     return result
+
+
+ def find_result(
+     workflow: str, base_path: str | Path = get_config_value("data", Path)
+ ) -> Result | None:
      base_path = Path(base_path)
      path = get_head(workflow, base_path)
      if path is None:
          return None
-     with open(base_path / 'objects' / path, "rb") as f:
-         return pickle.load(f)
+     return load_result(path, base_path)


  def renew_result(workflow: str, base_path: str | Path):
@@ -78,7 +132,7 @@ def renew_result(workflow: str, base_path: str | Path):
      result = find_result(workflow, base_path)
      if result is not None:
          result.checked_time = datetime.now()
-         save_result(workflow, result, base_path)
+         return save_result(workflow, result, base_path)


  def revoke_result(workflow: str, base_path: str | Path):
@@ -89,7 +143,7 @@ def revoke_result(workflow: str, base_path: str | Path):
      with open(base_path / 'objects' / path, "rb") as f:
          result = pickle.load(f)
      result.in_spec = False
-     save_result(workflow, result, base_path)
+     return save_result(workflow, result, base_path)


  def set_head(workflow: str, path: Path, base_path: str | Path):
@@ -115,33 +169,56 @@ def get_head(workflow: str, base_path: str | Path) -> Path | None:
      return None


- def get_graph(base_path: str | Path) -> dict[str, list[str]]:
+ def get_heads(base_path: str | Path) -> Path | None:
      base_path = Path(base_path)
      try:
          with open(base_path / "heads", "rb") as f:
              heads = pickle.load(f)
+         return heads
      except:
-         heads = {}
-     graph = {}
-     for workflow, path in heads.items():
-         graph[workflow] = []
-         while path is not None:
-             with open(base_path / 'objects' / path, "rb") as f:
-                 result = pickle.load(f)
-             path = result.previous
-             if path is not None:
-                 graph[workflow].append(path)
-     return graph
-
-
- def update_graph(workflow: str, base_path: str | Path):
-     base_path = Path(base_path)
-     graph = get_graph(base_path)
-     for workflow, deps in graph.items():
-         for dep in deps:
-             if dep not in graph:
-                 graph[dep] = []
-             if workflow not in graph[dep]:
-                 graph[dep].append(workflow)
-     with open(base_path / "graph", "wb") as f:
-         pickle.dump(graph, f)
+         return {}
+
+
+ def create_index(name: str,
+                  base_path: str | Path,
+                  context: str,
+                  width: int,
+                  start: int = 0):
+     path = Path(base_path) / "index" / f"{name}.seq"
+     if path.exists():
+         with open(path, "r") as f:
+             index = int(f.read())
+     else:
+         index = start
+         path.parent.mkdir(parents=True, exist_ok=True)
+     with open(path, "w") as f:
+         f.write(str(index + 1))
+
+     path = Path(base_path) / "index" / f"{name}.width"
+     with open(path, "w") as f:
+         f.write(str(width))
+
+     path = Path(base_path) / "index" / f"{name}.idx"
+     with open(path, "a") as f:
+         f.write(f"{context.ljust(width)}\n")
+
+     return index
+
+
+ def query_index(name: str, base_path: str | Path, index: int):
+     path = Path(base_path) / "index" / f"{name}.width"
+     with open(path, "r") as f:
+         width = int(f.read())
+     path = Path(base_path) / "index" / f"{name}.idx"
+     with open(path, "r") as f:
+         f.seek(index * (width + 1))
+         context = f.read(width)
+     return context.rstrip()
+
+
+ def get_result_by_index(
+     index: int, base_path: str | Path = get_config_value("data", Path)
+ ) -> Result | None:
+     path = query_index("result", base_path, index)
+     return load_result(path, base_path)
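create_index appends the object path as a fixed-width line to <base_path>/index/result.idx and returns its sequence number, so query_index can later locate entry i by seeking to byte i * (width + 1), that is, one padded line plus its newline. A round-trip sketch with placeholder values (a writable "./data" directory and the context string are assumptions for illustration):

    from qulab.executor.storage import create_index, query_index

    idx = create_index("result", "./data", context="3f/9a0b2c", width=35)
    assert query_index("result", "./data", idx) == "3f/9a0b2c"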
qulab/executor/transform.py CHANGED
@@ -1,5 +1,3 @@
- import loguru
-
  from .storage import Result


@@ -32,40 +30,7 @@ def _update_config(updates):


  def update_parameters(result: Result):
-     update_config(result.params)
-
-
- def result_to_params(result: Result | None) -> tuple | None:
-     if result is None:
-         return None
-
-     state = 'Bad data'
-     match (result.in_spec, result.bad_data):
-         case (True, False):
-             state = 'In spec'
-         case (False, True):
-             state = 'Bad data'
-         case (False, False):
-             state = 'Out of spec'
-
-     return state, result.params, result.info
-
-
- def params_to_result(params: tuple) -> Result:
-     state, cali, info = params
-     result = Result()
-     if state in ['In spec', 'OK']:
-         result.in_spec = True
-         result.bad_data = False
-     elif state in ['Bad data', 'Bad']:
-         result.bad_data = True
-         result.in_spec = False
-     else:
-         result.bad_data = False
-         result.in_spec = False
-     result.params = cali
-     result.info = info
-     return result
+     update_config(result.parameters)


  query_config = _query_config
qulab/executor/utils.py CHANGED
@@ -31,12 +31,12 @@ def dependent_tree(node: str, code_path: str | Path) -> dict[str, list[str]]:


  def workflow_template(deps: list[str]) -> str:
-     return f"""
- from loguru import logger
-
- def VAR(s): pass
+     return f"""def VAR(s): pass # 没有实际作用,只是用来抑制编辑器的警告。

  import numpy as np
+ from loguru import logger
+
+ from qulab.typing import Result


  # 多长时间应该检查一次校准实验,单位是秒。
@@ -61,23 +61,35 @@ def calibrate():
      return x, y


- def analyze(*args, history):
+ def analyze(result: Result, history: Result | None) -> Result:
+     \"\"\"
+     分析校准结果。
+
+     result: Result
+         本次校准实验的数据。
+     history: Result | None
+         上次校准实验数据和分析结果,如果有的话。
+     \"\"\"
      import random

+     # 这里添加你的分析过程,运行 calibrate 得到的数据,在 result.data 里
+     # 你可以得到校准的结果,然后根据这个结果进行分析。
+     x, y = result.data
+
      # 完整校准后的状态有两种:OK 和 Bad,分别对应校准成功和校准失败。
      # 校准失败是指出现坏数据,无法简单通过重新运行本次校准解决,需要
      # 检查前置步骤。
-     state = random.choice(['OK', 'Bad'])
+     result.state = random.choice(['OK', 'Bad'])

      # 参数是一个字典,包含了本次校准得到的参数,后续会更新到config表中。
-     parameters = {{'gate.R.Q1.params.amp':1}}
+     result.parameters = {{'gate.R.Q1.params.amp':1}}

      # 其他信息可以是任何可序列化的内容,你可以将你想要记录的信息放在这里。
      # 下次校准分析时,这些信息也会在 history 参数中一起传入,帮助你在下
      # 次分析时对比参考。
-     other_infomation = {{}}
+     result.other_infomation = {{}}

-     return state, parameters, other_infomation
+     return result


  def check():
@@ -96,14 +108,26 @@ def check():
      return x, y


- def check_analyze(*args, history):
+ def check_analyze(result: Result, history: Result) -> Result:
+     \"\"\"
+     分析检查结果。
+
+     result: Result
+         本次检查实验的数据。
+     history: Result | None
+         上次检查实验数据和分析结果,如果有的话。
+     \"\"\"
      import random

+     # 这里添加你的分析过程,运行 check 得到的数据,在 result.data 里
+     # 你可以得到校准的结果,然后根据这个结果进行分析。
+     x, y = result.data
+
      # 状态有三种:Outdated, OK, Bad,分别对应过时、正常、坏数据。
      # Outdated 是指数据过时,即参数漂了,需要重新校准。
      # OK 是指数据正常,参数也没漂,不用重新校准。
      # Bad 是指数据坏了,无法校准,需要检查前置步骤。
-     state = random.choice(['Outdated', 'OK', 'Bad'])
+     result.state = random.choice(['Outdated', 'OK', 'Bad'])

-     return state, {{}}, {{}}
+     return result
  """
qulab/fun.cp311-win_amd64.pyd CHANGED
Binary file (contents not shown)
qulab/typing.py ADDED
@@ -0,0 +1,2 @@
+ from .executor.storage import Result
+ from .scan.record import Record
qulab/version.py CHANGED
@@ -1 +1 @@
- __version__ = "2.4.20"
+ __version__ = "2.5.1"