QuLab-2.4.10-cp311-cp311-win_amd64.whl → QuLab-2.4.12-cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {QuLab-2.4.10.dist-info → QuLab-2.4.12.dist-info}/METADATA +1 -1
- {QuLab-2.4.10.dist-info → QuLab-2.4.12.dist-info}/RECORD +12 -12
- qulab/__main__.py +3 -1
- qulab/executor/__main__.py +74 -6
- qulab/executor/load.py +72 -10
- qulab/executor/schedule.py +127 -87
- qulab/fun.cp311-win_amd64.pyd +0 -0
- qulab/version.py +1 -1
- {QuLab-2.4.10.dist-info → QuLab-2.4.12.dist-info}/LICENSE +0 -0
- {QuLab-2.4.10.dist-info → QuLab-2.4.12.dist-info}/WHEEL +0 -0
- {QuLab-2.4.10.dist-info → QuLab-2.4.12.dist-info}/entry_points.txt +0 -0
- {QuLab-2.4.10.dist-info → QuLab-2.4.12.dist-info}/top_level.txt +0 -0
{QuLab-2.4.10.dist-info → QuLab-2.4.12.dist-info}/RECORD
CHANGED

@@ -1,12 +1,12 @@
 qulab/__init__.py,sha256=vkFybY8YSsQilYdThPRD83-btPAR41sy_WCXiM-6mME,141
-qulab/__main__.py,sha256=
+qulab/__main__.py,sha256=vUnluugYF1cH2UA44odFW1Z4AKecBZIfAihvsbhhSe0,629
 qulab/dicttree.py,sha256=ZoSJVWK4VMqfzj42gPb_n5RqLlM6K1Me0WmLIfLEYf8,14195
-qulab/fun.cp311-win_amd64.pyd,sha256=
-qulab/version.py,sha256=
+qulab/fun.cp311-win_amd64.pyd,sha256=qtM2pxcTw_kvHrb4iIkjhrrfsW-Jl0bBNdSz5-I6GIE,31744
+qulab/version.py,sha256=3Ac_4-IUz5KK-UOcxuXzEVTZr4p9bV93jrLgO3Au_zA,22
 qulab/executor/__init__.py,sha256=LosPzOMaljSZY1thy_Fxtbrgq7uubJszMABEB7oM7tU,101
-qulab/executor/__main__.py,sha256=
-qulab/executor/load.py,sha256=
-qulab/executor/schedule.py,sha256=
+qulab/executor/__main__.py,sha256=qgZcdmx1wBZInHf6ionpuuZytIJnaDje9WRv-a8o2OM,5870
+qulab/executor/load.py,sha256=GSRrP2UXOmTgg5VDTmZy_y4xXk91H3Sd5SEX-tOEBqY,8689
+qulab/executor/schedule.py,sha256=UOace0Gpt9gR0xe1BoNSUfeHxZynaUS0BFHZhmfnB30,11849
 qulab/executor/storage.py,sha256=M66Q5_Uc5MMfc_QAuuaaexwAz7wxBPMkeleB5nRpQmI,4621
 qulab/executor/transform.py,sha256=inaOn6eqCs22ZZ0xAQl8s8YCoEACaXSwFNNu7jqdwAk,2148
 qulab/executor/utils.py,sha256=n3uCSKh-qdDFFeNvOpj7_es2_B4AaC-ASAlV9gPmSO0,3086
@@ -89,9 +89,9 @@ qulab/visualization/plot_seq.py,sha256=Uo1-dB1YE9IN_A9tuaOs9ZG3S5dKDQ_l98iD2Wbxp
 qulab/visualization/qdat.py,sha256=HubXFu4nfcA7iUzghJGle1C86G6221hicLR0b-GqhKQ,5887
 qulab/visualization/rot3d.py,sha256=jGHJcqj1lEWBUV-W4GUGONGacqjrYvuFoFCwPse5h1Y,757
 qulab/visualization/widgets.py,sha256=HcYwdhDtLreJiYaZuN3LfofjJmZcLwjMfP5aasebgDo,3266
-QuLab-2.4.
-QuLab-2.4.
-QuLab-2.4.
-QuLab-2.4.
-QuLab-2.4.
-QuLab-2.4.
+QuLab-2.4.12.dist-info/LICENSE,sha256=b4NRQ-GFVpJMT7RuExW3NwhfbrYsX7AcdB7Gudok-fs,1086
+QuLab-2.4.12.dist-info/METADATA,sha256=SHtCS_P4r8VmZXrsFopdlOZDbdEvgdKejBsJzB4xHJ8,3804
+QuLab-2.4.12.dist-info/WHEEL,sha256=yNnHoQL2GZYIUXm9YvoaBpFjGlUoK9qq9oqYeudrWlE,101
+QuLab-2.4.12.dist-info/entry_points.txt,sha256=b0v1GXOwmxY-nCCsPN_rHZZvY9CtTbWqrGj8u1m8yHo,45
+QuLab-2.4.12.dist-info/top_level.txt,sha256=3T886LbAsbvjonu_TDdmgxKYUn939BVTRPxPl9r4cEg,6
+QuLab-2.4.12.dist-info/RECORD,,
qulab/__main__.py
CHANGED
@@ -1,6 +1,6 @@
 import click
 
-from .executor.__main__ import create, maintain, run
+from .executor.__main__ import create, get, maintain, run, set
 from .monitor.__main__ import main as monitor
 from .scan.server import server
 from .sys.net.cli import dht
@@ -25,6 +25,8 @@ cli.add_command(server)
 cli.add_command(maintain)
 cli.add_command(run)
 cli.add_command(create)
+cli.add_command(set)
+cli.add_command(get)
 
 if __name__ == '__main__':
     cli()
qulab/executor/__main__.py
CHANGED
@@ -1,25 +1,50 @@
 import functools
 import importlib
-import
+import os
+import sys
 from pathlib import Path
 
 import click
 from loguru import logger
 
-from .load import find_unreferenced_workflows
+from .load import find_unreferenced_workflows, load_workflow
 from .schedule import maintain as maintain_workflow
 from .schedule import run as run_workflow
 from .transform import set_config_api
 from .utils import workflow_template
 
 
+def load_config():
+    import yaml
+
+    config_paths = [
+        Path.home() / ".myapp/config.yaml",  # user home directory
+        Path("config.yaml")  # current directory
+    ]
+    for path in config_paths:
+        if path.exists():
+            with open(path) as f:
+                return yaml.safe_load(f)
+    return {"defaults": {"log": "default.log", "debug": False}}
+
+
+def get_config_value(option_name):
+    # 1. try the environment variable first
+    env_value = os.environ.get(f"MYAPP_{option_name.upper()}")
+    if env_value:
+        return env_value
+
+    # 2. fall back to the config file
+    config = load_config()
+    return config["defaults"].get(option_name)
+
+
 def log_options(func):
 
     @click.option("--debug", is_flag=True, help="Enable debug mode.")
     @click.option("--log", type=str, help="Log file path.")
     @functools.wraps(func)  # preserve the wrapped function's metadata
     def wrapper(*args, log=None, debug=False, **kwargs):
-        print(f"{func} {log=}, {debug=}")
         if log is None and not debug:
             logger.remove()
             logger.add(sys.stderr, level='INFO')
@@ -67,6 +92,39 @@ def create(workflow, code):
     click.echo(f'{workflow} created')
 
 
+@click.command()
+@click.argument('key')
+@click.argument('value', type=str)
+@click.option('--api', '-a', default=None, help='The modlule name of the api.')
+def set(key, value, api):
+    """
+    Set a config.
+    """
+    from . import transform
+    if api is not None:
+        api = importlib.import_module(api)
+        set_config_api(api.query_config, api.update_config)
+    try:
+        value = eval(value)
+    except:
+        pass
+    transform.update_config({key: value})
+
+
+@click.command()
+@click.argument('key')
+@click.option('--api', '-a', default=None, help='The modlule name of the api.')
+def get(key, api):
+    """
+    Get a config.
+    """
+    from . import transform
+    if api is not None:
+        api = importlib.import_module(api)
+        set_config_api(api.query_config, api.update_config)
+    click.echo(transform.query_config(key))
+
+
 @click.command()
 @click.argument('workflow')
 @click.option('--code', '-c', default=None, help='The path of the code.')
@@ -94,9 +152,13 @@ def run(workflow, code, data, api, plot, no_dependents):
     data = Path(os.path.expanduser(data))
 
     if no_dependents:
-        run_workflow(workflow, code, data, plot=plot)
+        run_workflow(load_workflow(workflow, code), code, data, plot=plot)
     else:
-        maintain_workflow(workflow, code,
+        maintain_workflow(load_workflow(workflow, code),
+                          code,
+                          data,
+                          run=True,
+                          plot=plot)
 
 
 @click.command()
@@ -121,12 +183,18 @@ def maintain(workflow, code, data, api, plot):
     code = Path(os.path.expanduser(code))
     data = Path(os.path.expanduser(data))
 
-    maintain_workflow(workflow, code,
+    maintain_workflow(load_workflow(workflow, code),
+                      code,
+                      data,
+                      run=False,
+                      plot=plot)
 
 
 cli.add_command(maintain)
 cli.add_command(run)
 cli.add_command(create)
+cli.add_command(set)
+cli.add_command(get)
 
 if __name__ == '__main__':
     cli()
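
The new `set` and `get` subcommands are registered on the same `cli` group as `create`, `run`, and `maintain`. Below is a minimal sketch, not part of the package, of driving them through click's test runner; it assumes the 2.4.12 wheel is installed, `my_cfg_api` is a hypothetical module exposing the `query_config`/`update_config` pair expected by `--api`, and the key name is illustrative.

    # Sketch only: `my_cfg_api` and the key name are placeholders.
    from click.testing import CliRunner

    from qulab.executor.__main__ import cli

    runner = CliRunner()

    # `set` eval()s the value when it can, so "3.14" is stored as a float
    # and "[1, 2]" as a list; otherwise the raw string is kept.
    runner.invoke(cli, ['set', 'readout.power', '3.14', '--api', 'my_cfg_api'])

    # `get` echoes the value back through the same config API.
    result = runner.invoke(cli, ['get', 'readout.power', '--api', 'my_cfg_api'])
    print(result.output)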
qulab/executor/load.py
CHANGED
@@ -1,10 +1,14 @@
+import hashlib
 import inspect
+import pickle
+import re
+import string
 import warnings
 from importlib.util import module_from_spec, spec_from_file_location
 from pathlib import Path
 from types import ModuleType
 
-import
+from loguru import logger
 
 from .storage import Result
 
@@ -124,12 +128,15 @@ def find_unreferenced_workflows(path: str) -> list[str]:
     for file_path in root.rglob("*.py"):
         if file_path.name == "__init__.py":
             continue
+        if file_path.name.endswith("_template.py") or re.match(
+                r'.*_tmp_[0-9a-fA-F]{8}.py', file_path.name):
+            continue
         try:
             rel_path = file_path.relative_to(root)
         except ValueError:
             continue
 
-        module =
+        module = load_workflow_from_module(str(rel_path), root)
 
         if is_workflow(module):
             rel_str = str(rel_path)
@@ -140,7 +147,7 @@ def find_unreferenced_workflows(path: str) -> list[str]:
 
     # Check dependencies for each workflow module
     for rel_str in workflows:
-        module =
+        module = load_workflow_from_module(rel_str, root)
 
         depends_func = getattr(module, "depends", None)
         if depends_func and callable(depends_func):
@@ -156,11 +163,9 @@ def find_unreferenced_workflows(path: str) -> list[str]:
                 continue
 
             if not isinstance(depends_list, list) or not all(
-
-            ):
+                    isinstance(item, str) for item in depends_list):
                 warnings.warn(
-                    f"depends() in {rel_str} did not return a list of strings"
-                )
+                    f"depends() in {rel_str} did not return a list of strings")
                 continue
 
             for dep in depends_list:
@@ -178,9 +183,9 @@ def find_unreferenced_workflows(path: str) -> list[str]:
     return unreferenced
 
 
-def
-
-
+def load_workflow_from_module(file_name: str,
+                              base_path: str | Path,
+                              package='workflows') -> WorkflowType:
     if file_name.startswith('cfg:'):
         return SetConfigWorkflow(file_name[4:])
     base_path = Path(base_path)
@@ -200,3 +205,60 @@ def load_workflow(file_name: str,
     verify_check_method(module)
 
     return module
+
+
+def load_workflow_from_template(file_name: str,
+                                mappping: dict[str, str],
+                                base_path: str | Path,
+                                subtitle: str | None = None,
+                                package='workflows') -> WorkflowType:
+    base_path = Path(base_path)
+    path = Path(file_name)
+
+    with open(base_path / path) as f:
+        content = f.read()
+    template = string.Template(content)
+    content = template.substitute(mappping)
+
+    hash_str = hashlib.md5(pickle.dumps(mappping)).hexdigest()[:8]
+    if subtitle is None:
+        path = path.parent / path.stem.replace('_template',
+                                               f'_tmp{hash_str}.py')
+    else:
+        path = path.parent / path.stem.replace('_template', f'_{subtitle}.py')
+
+    with open(base_path / path, 'w') as f:
+        f.write(content)
+
+    module = load_workflow_from_module(str(path), base_path, package)
+
+    return module
+
+
+def load_workflow(workflow: str | tuple[str, dict],
+                  base_path: str | Path,
+                  package='workflows') -> WorkflowType:
+    if isinstance(workflow, tuple):
+        if len(workflow) == 2:
+            file_name, mapping = workflow
+            w = load_workflow_from_template(file_name, mapping, base_path,
+                                            None, package)
+        elif len(workflow) == 3:
+            file_name, subtitle, mapping = workflow
+            w = load_workflow_from_template(file_name, mapping, base_path,
+                                            subtitle, package)
+        else:
+            raise ValueError(f"Invalid workflow: {workflow}")
+        w.__workflow_id__ = str(Path(w.__file__).relative_to(base_path))
+    elif isinstance(workflow, str):
+        if workflow.startswith('cfg:'):
+            key = workflow[4:]
+            w = SetConfigWorkflow(key)
+            w.__workflow_id__ = workflow
+        else:
+            w = load_workflow_from_module(workflow, base_path, package)
+            w.__workflow_id__ = str(Path(w.__file__).relative_to(base_path))
+    else:
+        raise TypeError(f"Invalid workflow: {workflow}")
+
+    return w
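
For orientation, a short sketch of how the reworked loader might be called; the base directory, file names, and template variables are illustrative, not taken from the package.

    # Sketch only: 'workflows', 'ramsey*.py' and the mapping keys are placeholders.
    from qulab.executor.load import load_workflow

    base = 'workflows'

    # 1. Plain module path relative to the code directory.
    w1 = load_workflow('ramsey.py', base)

    # 2. (template, mapping): the *_template.py file is rendered with
    #    string.Template and written back as *_tmp<md5-prefix>.py.
    w2 = load_workflow(('ramsey_template.py', {'qubit': 'Q1'}), base)

    # 3. (template, subtitle, mapping): the rendered copy is named *_<subtitle>.py.
    w3 = load_workflow(('ramsey_template.py', 'Q1', {'qubit': 'Q1'}), base)

    # In every branch the loaded module is tagged with a __workflow_id__
    # relative to the base path.
    print(w1.__workflow_id__, w2.__workflow_id__)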
qulab/executor/schedule.py
CHANGED
@@ -6,7 +6,7 @@ from pathlib import Path
 from loguru import logger
 
 from . import transform
-from .load import load_workflow
+from .load import WorkflowType, load_workflow
 from .storage import (Result, find_result, get_head, renew_result,
                       revoke_result, save_result)
 
@@ -15,7 +15,7 @@ class CalibrationFailedError(Exception):
     pass
 
 
-def check_state(workflow:
+def check_state(workflow: WorkflowType, code_path: str | Path,
                 state_path: str | Path) -> bool:
     """
     check state should report a pass if and only if the following are satisfied:
@@ -26,35 +26,42 @@ def check_state(workflow: str, code_path: str | Path,
     4. All dependencies pass check state.
     """
     logger.debug(f'check_state: "{workflow}"')
-    result = find_result(workflow, state_path)
+    result = find_result(workflow.__workflow_id__, state_path)
     if not result:
-        logger.debug(
+        logger.debug(
+            f'check_state failed: No history found for "{workflow.__workflow_id__}"'
+        )
        return False
-
-
+    if hasattr(workflow, 'check_state') and callable(workflow.check_state):
+        logger.debug(
+            f'check_state: "{workflow.__workflow_id__}" has custom check_state method'
+        )
+        return workflow.check_state(result)
+    if workflow.__timeout__ is not None and datetime.now(
+    ) > result.checked_time + timedelta(seconds=workflow.__timeout__):
         logger.debug(
-            f'check_state: "{workflow}" has
-        return node.check_state(result)
-    if node.__timeout__ is not None and datetime.now(
-    ) > result.checked_time + timedelta(seconds=node.__timeout__):
-        logger.debug(f'check_state failed: "{workflow}" has expired')
+            f'check_state failed: "{workflow.__workflow_id__}" has expired')
         return False
     if not result.in_spec:
-        logger.debug(
+        logger.debug(
+            f'check_state failed: "{workflow.__workflow_id__}" is out of spec')
         return False
     if result.bad_data:
-        logger.debug(
+        logger.debug(
+            f'check_state failed: "{workflow.__workflow_id__}" has bad data')
         return False
     for n in get_dependents(workflow, code_path):
-        r = find_result(n, state_path)
+        r = find_result(n.__workflow_id__, state_path)
         if r is None or r.checked_time > result.checked_time:
             logger.debug(
-                f'check_state failed: "{workflow}" has outdated dependencies'
+                f'check_state failed: "{workflow.__workflow_id__}" has outdated dependencies'
+            )
             return False
     for n in get_dependents(workflow, code_path):
         if not check_state(n, code_path, state_path):
             logger.debug(
-                f'check_state failed: "{workflow}" has bad dependencies'
+                f'check_state failed: "{workflow.__workflow_id__}" has bad dependencies'
+            )
             return False
     return True
 
@@ -83,132 +90,153 @@ def call_plot(node, result, check=False):
 
 
 @functools.lru_cache(maxsize=128)
-def check_data(workflow:
-               plot: bool, session_id: str) -> Result:
+def check_data(workflow: WorkflowType, code_path: str | Path,
+               state_path: str | Path, plot: bool, session_id: str) -> Result:
     """
     check data answers two questions:
     Is the parameter associated with this cal in spec,
     and is the cal scan working as expected?
     """
-
-    history = find_result(workflow, state_path)
+    history = find_result(workflow.__workflow_id__, state_path)
 
     if history is None:
-        logger.debug(f'No history found for "{workflow}"')
+        logger.debug(f'No history found for "{workflow.__workflow_id__}"')
         result = Result()
         result.in_spec = False
         result.bad_data = False
         return result
 
     if history.bad_data:
-        logger.debug(
+        logger.debug(
+            f'History found for "{workflow.__workflow_id__}", but bad data')
         return history
     if not history.in_spec:
-        logger.debug(
+        logger.debug(
+            f'History found for "{workflow.__workflow_id__}", but out of spec')
         return history
 
-    logger.debug(
+    logger.debug(
+        f'History found for "{workflow.__workflow_id__}", but has expired')
 
-    if hasattr(
-
+    if hasattr(workflow, 'check') and callable(workflow.check) and hasattr(
+            workflow, 'check_analyze') and callable(workflow.check_analyze):
         logger.debug(f'Checking "{workflow}" with "check" method ...')
-        data =
+        data = workflow.check()
         result = Result()
         result.data = data
-        save_result(workflow, result, state_path)
+        save_result(workflow.__workflow_id__, result, state_path)
 
         logger.debug(f'Checked "{workflow}" !')
-        result = call_analyzer(
+        result = call_analyzer(workflow, data, history, check=True, plot=plot)
         if result.in_spec:
-            logger.debug(
-
+            logger.debug(
+                f'"{workflow.__workflow_id__}": checked in spec, renewing result'
+            )
+            renew_result(workflow.__workflow_id__, state_path)
        else:
-            logger.debug(
-
+            logger.debug(
+                f'"{workflow.__workflow_id__}": checked out of spec, revoking result'
+            )
+            revoke_result(workflow.__workflow_id__, state_path)
     else:
-        logger.debug(
-
+        logger.debug(
+            f'Checking "{workflow.__workflow_id__}" with "calibrate" method ...'
+        )
+        data = workflow.calibrate()
         result = Result()
         result.data = data
-        save_result(workflow, result, state_path)
+        save_result(workflow.__workflow_id__, result, state_path)
 
         logger.debug(f'Calibrated "{workflow}" !')
-        result = call_analyzer(
-        save_result(workflow, result, state_path,
-                    get_head(workflow, state_path))
+        result = call_analyzer(workflow, data, history, check=False, plot=plot)
+        save_result(workflow.__workflow_id__, result, state_path,
+                    get_head(workflow.__workflow_id__, state_path))
 
     return result
 
 
 @functools.lru_cache(maxsize=128)
-def calibrate(workflow
-              plot: bool, session_id: str) -> Result:
-
-    history = find_result(workflow, state_path)
+def calibrate(workflow: WorkflowType, code_path: str | Path,
+              state_path: str | Path, plot: bool, session_id: str) -> Result:
+    history = find_result(workflow.__workflow_id__, state_path)
 
-    logger.debug(f'Calibrating "{workflow}" ...')
-    data =
+    logger.debug(f'Calibrating "{workflow.__workflow_id__}" ...')
+    data = workflow.calibrate()
     result = Result()
     result.data = data
-    save_result(workflow, result, state_path)
-    logger.debug(f'Calibrated "{workflow}" !')
-    result = call_analyzer(
-    save_result(workflow, result, state_path,
+    save_result(workflow.__workflow_id__, result, state_path)
+    logger.debug(f'Calibrated "{workflow.__workflow_id__}" !')
+    result = call_analyzer(workflow, data, history, check=False, plot=plot)
+    save_result(workflow.__workflow_id__, result, state_path,
+                get_head(workflow.__workflow_id__, state_path))
     return result
 
 
-def diagnose(
-             session_id: str):
+def diagnose(workflow: WorkflowType, code_path: str | Path,
+             state_path: str | Path, plot: bool, session_id: str):
     '''
     Returns: True if node or dependent recalibrated.
     '''
-    logger.debug(f'diagnose "{
+    logger.debug(f'diagnose "{workflow.__workflow_id__}"')
     # check_data
-    result = check_data(
+    result = check_data(workflow, code_path, state_path, plot, session_id)
     # in spec case
     if result.in_spec:
-        logger.debug(
+        logger.debug(
+            f'"{workflow.__workflow_id__}": Checked! In spec, no need to diagnose'
+        )
         return False
     # bad data case
     recalibrated = []
     if result.bad_data:
-        logger.debug(
+        logger.debug(
+            f'"{workflow.__workflow_id__}": Bad data, diagnosing dependents')
         recalibrated = [
             diagnose(n, code_path, state_path, plot, session_id)
-            for n in get_dependents(
+            for n in get_dependents(workflow, code_path)
         ]
     if not any(recalibrated):
         if result.bad_data:
             raise CalibrationFailedError(
-                f'"{
-
+                f'"{workflow.__workflow_id__}": bad data but no dependents recalibrated.'
+            )
+        logger.debug(
+            f'"{workflow.__workflow_id__}": no dependents recalibrated.')
     # calibrate
     if any(recalibrated):
         logger.debug(
-            f'recalibrate "{
+            f'recalibrate "{workflow.__workflow_id__}" because some dependents recalibrated.'
+        )
     elif not result.in_spec and not result.bad_data:
-        logger.debug(
+        logger.debug(
+            f'recalibrate "{workflow.__workflow_id__}" because out of spec.')
     elif result.in_spec:
-        logger.error(
+        logger.error(
+            f'Never reach: recalibrate "{workflow.__workflow_id__}" because in spec.'
+        )
     elif result.bad_data:
-        logger.error(
+        logger.error(
+            f'Never reach: recalibrate "{workflow.__workflow_id__}" because bad data.'
+        )
     else:
-        logger.error(f'Never reach: recalibrate "{
+        logger.error(f'Never reach: recalibrate "{workflow.__workflow_id__}"')
 
-    result = calibrate(
+    result = calibrate(workflow, code_path, state_path, plot, session_id)
     if result.bad_data or not result.in_spec:
         raise CalibrationFailedError(
-            f'"{
+            f'"{workflow.__workflow_id__}": All dependents passed, but calibration failed!'
+        )
     transform.update_parameters(result)
     return True
 
 
-def get_dependents(workflow:
-
+def get_dependents(workflow: WorkflowType,
+                   code_path: str | Path) -> list[WorkflowType]:
+    return [load_workflow(n, code_path) for n in workflow.depends()[0]]
 
 
 @logger.catch(reraise=True)
-def maintain(
+def maintain(workflow: WorkflowType,
              code_path: str | Path,
             state_path: str | Path,
             session_id: str | None = None,
@@ -216,49 +244,61 @@ def maintain(node,
             plot: bool = False):
     if session_id is None:
         session_id = uuid.uuid4().hex
-    logger.debug(f'run "{
+    logger.debug(f'run "{workflow.__workflow_id__}"'
+                 if run else f'maintain "{workflow.__workflow_id__}"')
     # recursive maintain
-    for n in get_dependents(
-        logger.debug(
+    for n in get_dependents(workflow, code_path):
+        logger.debug(
+            f'maintain "{n.__workflow_id__}" because it is depended by "{workflow.__workflow_id__}"'
+        )
         maintain(n, code_path, state_path, session_id, run=False, plot=plot)
     else:
-        logger.debug(
+        logger.debug(
+            f'"{workflow.__workflow_id__}": All dependents maintained')
     # check_state
-    if check_state(
-        logger.debug(
+    if check_state(workflow, code_path, state_path) and not run:
+        logger.debug(
+            f'"{workflow.__workflow_id__}": In spec, no need to maintain')
         return
     # check_data
-    result = check_data(
+    result = check_data(workflow, code_path, state_path, plot, session_id)
     if result.in_spec:
         if not run:
-            logger.debug(
+            logger.debug(
+                f'"{workflow.__workflow_id__}": In spec, no need to maintain')
             return
     elif result.bad_data:
-        logger.debug(
-
-
+        logger.debug(
+            f'"{workflow.__workflow_id__}": Bad data, diagnosing dependents')
+        for n in get_dependents(workflow, code_path):
+            logger.debug(
+                f'diagnose "{n.__workflow_id__}" because of "{workflow.__workflow_id__}" bad data'
+            )
             diagnose(n, code_path, state_path, plot, session_id)
     else:
-        logger.debug(
+        logger.debug(
+            f'"{workflow.__workflow_id__}": All dependents diagnosed')
     # calibrate
-    logger.debug(f'recalibrate "{
-    result = calibrate(
+    logger.debug(f'recalibrate "{workflow.__workflow_id__}"')
+    result = calibrate(workflow, code_path, state_path, plot, session_id)
     if result.bad_data or not result.in_spec:
         raise CalibrationFailedError(
-            f'"{
+            f'"{workflow.__workflow_id__}": All dependents passed, but calibration failed!'
+        )
     transform.update_parameters(result)
     return
 
 
 @logger.catch(reraise=True)
-def run(
+def run(workflow: WorkflowType,
         code_path: str | Path,
         state_path: str | Path,
         plot: bool = False):
-    logger.debug(f'run "{
-    result = calibrate(
+    logger.debug(f'run "{workflow.__workflow_id__}" without dependences.')
+    result = calibrate(workflow, code_path, state_path, plot)
     if result.bad_data or not result.in_spec:
         raise CalibrationFailedError(
-            f'"{
+            f'"{workflow.__workflow_id__}": All dependents passed, but calibration failed!'
+        )
     transform.update_parameters(result)
     return
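
The scheduling entry points now take the loaded workflow object rather than a bare file name. A minimal sketch of the updated call pattern follows; the directory names and workflow file are illustrative, not taken from the package.

    # Sketch only: 'workflows' and 'logs' are placeholder directories.
    from qulab.executor.load import load_workflow
    from qulab.executor.schedule import maintain, run

    code_path = 'workflows'   # directory holding the workflow modules
    state_path = 'logs'       # directory holding calibration history/state

    workflow = load_workflow('ramsey.py', code_path)

    # Recursively maintain dependencies, then recalibrate only if needed.
    maintain(workflow, code_path, state_path, plot=False)

    # Or calibrate this workflow alone, skipping the dependency walk.
    run(workflow, code_path, state_path, plot=False)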
qulab/fun.cp311-win_amd64.pyd
CHANGED
Binary file
qulab/version.py
CHANGED
@@ -1 +1 @@
-__version__ = "2.4.10"
+__version__ = "2.4.12"
{QuLab-2.4.10.dist-info → QuLab-2.4.12.dist-info}/LICENSE
File without changes

{QuLab-2.4.10.dist-info → QuLab-2.4.12.dist-info}/WHEEL
File without changes

{QuLab-2.4.10.dist-info → QuLab-2.4.12.dist-info}/entry_points.txt
File without changes

{QuLab-2.4.10.dist-info → QuLab-2.4.12.dist-info}/top_level.txt
File without changes