QuLab-2.3.5-cp311-cp311-macosx_10_9_universal2.whl → QuLab-2.4.0-cp311-cp311-macosx_10_9_universal2.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {QuLab-2.3.5.dist-info → QuLab-2.4.0.dist-info}/METADATA +21 -18
- {QuLab-2.3.5.dist-info → QuLab-2.4.0.dist-info}/RECORD +28 -17
- {QuLab-2.3.5.dist-info → QuLab-2.4.0.dist-info}/WHEEL +1 -1
- qulab/__main__.py +11 -7
- qulab/dicttree.py +511 -0
- qulab/executor/__init__.py +5 -0
- qulab/executor/__main__.py +89 -0
- qulab/executor/load.py +202 -0
- qulab/executor/schedule.py +223 -0
- qulab/executor/storage.py +143 -0
- qulab/executor/transform.py +90 -0
- qulab/executor/utils.py +107 -0
- qulab/fun.cpython-311-darwin.so +0 -0
- qulab/scan/curd.py +1 -1
- qulab/scan/expression.py +4 -0
- qulab/scan/models.py +10 -11
- qulab/scan/server.py +6 -3
- qulab/sys/device/basedevice.py +8 -0
- qulab/sys/device/utils.py +46 -13
- qulab/sys/rpc/router.py +35 -0
- qulab/version.py +1 -1
- qulab/visualization/_autoplot.py +4 -3
- qulab/visualization/plot_circ.py +319 -0
- qulab/visualization/plot_seq.py +152 -0
- qulab/visualization/rot3d.py +23 -0
- {QuLab-2.3.5.dist-info → QuLab-2.4.0.dist-info}/LICENSE +0 -0
- {QuLab-2.3.5.dist-info → QuLab-2.4.0.dist-info}/entry_points.txt +0 -0
- {QuLab-2.3.5.dist-info → QuLab-2.4.0.dist-info}/top_level.txt +0 -0
qulab/executor/__main__.py
ADDED
@@ -0,0 +1,89 @@

```python
import importlib
from pathlib import Path

import click

from .load import find_unreferenced_workflows
from .schedule import maintain as maintain_workflow, run as run_workflow
from .transform import set_config_api
from .utils import workflow_template


@click.group()
def cli():
    pass


@click.command()
@click.argument('workflow')
@click.option('--code', '-c', default=None)
def create(workflow, code):
    """
    Create a new workflow file.
    """
    if code is None:
        code = Path.cwd()

    fname = Path(code) / f'{workflow}.py'
    if fname.exists():
        click.echo(f'{workflow}.py already exists')
        return

    fname.parent.mkdir(parents=True, exist_ok=True)
    deps = find_unreferenced_workflows(code)

    with open(fname, 'w') as f:
        f.write(workflow_template(list(deps)))
    click.echo(f'{workflow}.py created')


@click.command()
@click.argument('workflow')
@click.option('--code', '-c', default=None)
@click.option('--data', '-d', default=None)
@click.option('--api', '-g', default=None)
@click.option('--no-dependents', '-n', is_flag=True)
def run(workflow, code, data, api, no_dependents):
    """
    Run a workflow.
    """
    if api is not None:
        api = importlib.import_module(api)
        set_config_api(api.query_config, api.update_config)
    if code is None:
        code = Path.cwd()
    if data is None:
        data = Path(code) / 'logs'

    if no_dependents:
        run_workflow(workflow, code, data)
    else:
        maintain_workflow(workflow, code, data, run=True)


@click.command()
@click.argument('workflow')
@click.option('--code', '-c', default=None)
@click.option('--data', '-d', default=None)
@click.option('--api', '-g', default=None)
def maintain(workflow, code, data, api):
    """
    Maintain a workflow.
    """
    if api is not None:
        api = importlib.import_module(api)
        set_config_api(api.query_config, api.update_config)
    if code is None:
        code = Path.cwd()
    if data is None:
        data = Path(code) / 'logs'

    maintain_workflow(workflow, code, data, run=False)


cli.add_command(maintain)
cli.add_command(run)
cli.add_command(create)

if __name__ == '__main__':
    cli()
```
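The new subcommands can be exercised in-process with click's test runner. A minimal smoke-test sketch follows; the workflow name `rabi` and the `./workflows` directory are hypothetical. Note that `create` takes the bare name (it appends `.py` itself), while `run` and `maintain` take the file name that `load_workflow()` resolves against `--code`:

```python
# Hypothetical smoke test for the executor CLI, using click's test runner.
from click.testing import CliRunner

from qulab.executor.__main__ import cli

runner = CliRunner()

# `create` appends ".py" itself and fills the file from workflow_template().
result = runner.invoke(cli, ['create', 'rabi', '--code', './workflows'])
print(result.output)  # "rabi.py created" (or "rabi.py already exists")

# `run` resolves --data to <code>/logs when omitted, per the fallback above.
result = runner.invoke(cli, ['run', 'rabi.py', '--code', './workflows'])
```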
qulab/executor/load.py
ADDED
@@ -0,0 +1,202 @@

```python
import inspect
import warnings
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path
from types import ModuleType

import loguru

from .storage import Result


class SetConfigWorkflow():
    __timeout__ = None

    def __init__(self, key):
        self.key = key

    def depends(self):
        return [[]]

    def check_state(self, history: Result) -> bool:
        from . import transform
        try:
            return self._equal(history.params[self.key],
                               transform.query_config(self.key))
        except:
            return False

    def calibrate(self):
        from . import transform
        try:
            value = transform.query_config(self.key)
        except:
            value = eval(input(f'"{self.key}": '))
        return self.key, value

    def analyze(self, key, value, history):
        return 'OK', {key: value}, {}

    def check(self):
        from .transform import query_config
        return self.key, query_config(self.key)

    def check_analyze(self, key, value, history):
        return 'Out of Spec', {key: value}, {}

    @staticmethod
    def _equal(a, b):
        import numpy as np

        try:
            return a == b
        except:
            pass

        if isinstance(a, np.ndarray) and isinstance(b, np.ndarray):
            return a.shape == b.shape and np.all(a == b)

        return False


WorkflowType = ModuleType | SetConfigWorkflow


def can_call_without_args(func):
    if not callable(func):
        return False

    # Get the function's signature
    sig = inspect.signature(func)
    for param in sig.parameters.values():
        # If any parameter has no default and is not *args/**kwargs,
        # the function cannot be called without arguments.
        if (param.default is param.empty and param.kind
                not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)):
            return False
    return True


def verify_calibrate_method(module: WorkflowType):
    if not hasattr(module, 'calibrate'):
        raise AttributeError(
            f"Workflow {module.__file__} does not have 'calibrate' function")

    if not can_call_without_args(module.calibrate):
        raise AttributeError(
            f"Workflow {module.__file__} 'calibrate' function should not have any parameters"
        )

    if not hasattr(module, 'analyze'):
        raise AttributeError(
            f"Workflow {module.__file__} does not have 'analyze' function")


def verify_check_method(module: WorkflowType):
    if not hasattr(module, 'check'):
        warnings.warn(
            f"Workflow {module.__file__} does not have 'check' function")
    else:
        if not can_call_without_args(module.check):
            raise AttributeError(
                f"Workflow {module.__file__} 'check' function should not have any parameters"
            )

        if not hasattr(module, 'check_analyze'):
            raise AttributeError(
                f"Workflow {module.__file__} has 'check' function but does not have 'check_analyze' function"
            )


def is_workflow(module: ModuleType) -> bool:
    try:
        verify_calibrate_method(module)
        return True
    except AttributeError:
        return False


def find_unreferenced_workflows(path: str) -> list[str]:
    root = Path(path).resolve()
    workflows = []
    workflow_paths: set[str] = set()

    # Collect all workflow modules
    for file_path in root.rglob("*.py"):
        if file_path.name == "__init__.py":
            continue
        try:
            rel_path = file_path.relative_to(root)
        except ValueError:
            continue

        module = load_workflow(str(rel_path), root)

        if is_workflow(module):
            rel_str = str(rel_path)
            workflows.append(rel_str)
            workflow_paths.add(rel_str)

    dependencies: set[str] = set()

    # Check dependencies for each workflow module
    for rel_str in workflows:
        module = load_workflow(rel_str, root)

        depends_func = getattr(module, "depends", None)
        if depends_func and callable(depends_func):
            if not can_call_without_args(depends_func):
                warnings.warn(
                    f"Skipping depends() in {rel_str} as it requires arguments"
                )
                continue
            try:
                depends_list = depends_func()
            except Exception as e:
                warnings.warn(f"Error calling depends() in {rel_str}: {e}")
                continue

            if not isinstance(depends_list, list) or not all(
                    isinstance(item, str) for item in depends_list
            ):
                warnings.warn(
                    f"depends() in {rel_str} did not return a list of strings"
                )
                continue

            for dep in depends_list:
                dep_full = (root / dep).resolve()
                try:
                    dep_rel = dep_full.relative_to(root)
                except ValueError:
                    continue
                dep_rel_str = str(dep_rel)
                if dep_rel_str in workflow_paths:
                    dependencies.add(dep_rel_str)

    # Determine unreferenced workflows
    unreferenced = [wp for wp in workflows if wp not in dependencies]
    return unreferenced


def load_workflow(file_name: str,
                  base_path: str | Path,
                  package='workflows') -> WorkflowType:
    if file_name.startswith('cfg:'):
        return SetConfigWorkflow(file_name[4:])
    base_path = Path(base_path)
    path = Path(file_name)
    module_name = f"{package}.{'.'.join([*path.parts[:-1], path.stem])}"
    spec = spec_from_file_location(module_name, base_path / path)
    module = module_from_spec(spec)
    spec.loader.exec_module(module)

    if not hasattr(module, '__timeout__'):
        module.__timeout__ = None

    if not hasattr(module, 'depends'):
        module.depends = lambda: [[]]

    verify_calibrate_method(module)
    verify_check_method(module)

    return module
```
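`load_workflow` thus accepts either a module path or a `cfg:<key>` string (which yields the built-in `SetConfigWorkflow`). For illustration, a minimal module that would pass both `verify_calibrate_method` and `verify_check_method` might look like the sketch below. The measurement bodies are placeholders, and the `(status, params, info)` return shape plus the `'OK'` / `'Out of Spec'` status strings are borrowed from `SetConfigWorkflow`, since `transform.params_to_result` itself is not shown in this diff:

```python
# rabi.py -- hypothetical minimal workflow accepted by load_workflow().
# calibrate() and check() must be callable with no arguments (enforced via
# can_call_without_args); analyze()/check_analyze() receive the unpacked
# return value plus the previous result as `history`.

__timeout__ = 3600  # optional; load_workflow() injects None when absent


def depends():
    # Optional; load_workflow() injects `lambda: [[]]` when absent.
    # get_dependents() in schedule.py reads element [0] of this list.
    return [['cfg:qubit.frequency']]


def calibrate():
    data = 0.42  # placeholder for a real calibration scan
    return (data, )


def analyze(data, history=None):
    # Same (status, params, info) shape as SetConfigWorkflow.analyze().
    return 'OK', {'qubit.pi_amp': data}, {}


def check():
    return (0.42, )  # placeholder for a cheaper spot-check scan


def check_analyze(data, history=None):
    return 'OK', {}, {}
```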
qulab/executor/schedule.py
ADDED
@@ -0,0 +1,223 @@

```python
import functools
import uuid
from datetime import datetime, timedelta
from pathlib import Path

from loguru import logger

from . import transform
from .load import load_workflow
from .storage import (Result, find_result, renew_result, revoke_result,
                      save_result)


class CalibrationFailedError(Exception):
    pass


def check_state(workflow: str, code_path: str | Path,
                state_path: str | Path) -> bool:
    """
    check state should report a pass if and only if the following are satisfied:

    1. The cal has had check data or calibrate pass within the timeout period.
    2. The cal has not failed calibrate without resolution.
    3. No dependencies have been recalibrated since the last time check data or calibrate was run on this cal.
    4. All dependencies pass check state.
    """
    logger.debug(f'check_state: "{workflow}"')
    result = find_result(workflow, state_path)
    if not result:
        logger.debug(f'check_state failed: No history found for "{workflow}"')
        return False
    node = load_workflow(workflow, code_path)
    if hasattr(node, 'check_state') and callable(node.check_state):
        logger.debug(
            f'check_state: "{workflow}" has custom check_state method')
        return node.check_state(result)
    if node.__timeout__ is not None and datetime.now(
    ) > result.checked_time + timedelta(seconds=node.__timeout__):
        logger.debug(f'check_state failed: "{workflow}" has expired')
        return False
    if not result.in_spec:
        logger.debug(f'check_state failed: "{workflow}" is out of spec')
        return False
    if result.bad_data:
        logger.debug(f'check_state failed: "{workflow}" has bad data')
        return False
    for n in get_dependents(workflow, code_path):
        r = find_result(n, state_path)
        if r is None or r.checked_time > result.checked_time:
            logger.debug(
                f'check_state failed: "{workflow}" has outdated dependencies')
            return False
    for n in get_dependents(workflow, code_path):
        if not check_state(n, code_path, state_path):
            logger.debug(
                f'check_state failed: "{workflow}" has bad dependencies')
            return False
    return True


@functools.lru_cache(maxsize=128)
def check_data(workflow: str, code_path: str | Path, state_path: str | Path,
               session_id: str) -> Result:
    """
    check data answers two questions:
    Is the parameter associated with this cal in spec,
    and is the cal scan working as expected?
    """
    node = load_workflow(workflow, code_path)
    history = find_result(workflow, state_path)

    if history is None:
        logger.debug(f'No history found for "{workflow}"')
        result = Result()
        result.in_spec = False
        result.bad_data = False
        return result

    if history.bad_data:
        logger.debug(f'History found for "{workflow}", but bad data')
        return history
    if not history.in_spec:
        logger.debug(f'History found for "{workflow}", but out of spec')
        return history

    logger.debug(f'History found for "{workflow}", but has expired')

    if hasattr(node, 'check') and callable(node.check) and hasattr(
            node, 'check_analyze') and callable(node.check_analyze):
        logger.debug(f'Checking "{workflow}" with "check" method ...')
        data = node.check()
        logger.debug(f'Checked "{workflow}" !')
        result = transform.params_to_result(
            node.check_analyze(*data,
                               history=transform.result_to_params(history)))
        result.data = data
        if result.in_spec:
            logger.debug(f'"{workflow}": checked in spec, renewing result')
            renew_result(workflow, state_path)
        else:
            logger.debug(f'"{workflow}": checked out of spec, revoking result')
            revoke_result(workflow, state_path)
    else:
        logger.debug(f'Checking "{workflow}" with "calibrate" method ...')
        data = node.calibrate()
        logger.debug(f'Calibrated "{workflow}" !')
        result = transform.params_to_result(
            node.analyze(*data, history=transform.result_to_params(history)))
        result.data = data
        result.fully_calibrated = True
        save_result(workflow, result, state_path)

    return result


@functools.lru_cache(maxsize=128)
def calibrate(workflow, code_path: str | Path, state_path: str | Path,
              session_id: str) -> Result:
    result = Result()
    node = load_workflow(workflow, code_path)
    history = find_result(workflow, state_path)

    logger.debug(f'Calibrating "{workflow}" ...')
    data = node.calibrate()
    logger.debug(f'Calibrated "{workflow}" !')
    result = transform.params_to_result(
        node.analyze(*data, history=transform.result_to_params(history)))
    result.data = data
    result.fully_calibrated = True
    save_result(workflow, result, state_path)
    return result


def diagnose(node, code_path: str | Path, state_path: str | Path,
             session_id: str):
    '''
    Returns: True if node or dependent recalibrated.
    '''
    logger.debug(f'diagnose "{node}"')
    # check_data
    result = check_data(node, code_path, state_path, session_id)
    # in spec case
    if result.in_spec:
        return False
    # bad data case
    recalibrated = []
    if result.bad_data:
        recalibrated = [
            diagnose(n, code_path, state_path, session_id)
            for n in get_dependents(node, code_path)
        ]
        if not any(recalibrated):
            return False
    # calibrate
    if result.fully_calibrated and result.in_spec:
        pass
    else:
        logger.debug(
            f'recalibrate "{node}" because some dependents recalibrated')
        result = calibrate(node, code_path, state_path, session_id)
    if result.bad_data or not result.in_spec:
        raise CalibrationFailedError(
            f'"{node}": All dependents passed, but calibration failed!')
    transform.update_parameters(result)
    return True


def get_dependents(workflow: str, code_path: str | Path) -> list[str]:
    return [n for n in load_workflow(workflow, code_path).depends()[0]]


#@logger.catch(reraise=True)
def maintain(node,
             code_path: str | Path,
             state_path: str | Path,
             session_id: str | None = None,
             run: bool = False):
    if session_id is None:
        session_id = uuid.uuid4().hex
    logger.debug(f'run "{node}"' if run else f'maintain "{node}"')
    # recursive maintain
    for n in get_dependents(node, code_path):
        logger.debug(f'maintain "{n}" because it is depended by "{node}"')
        maintain(n, code_path, state_path, session_id)
    else:
        logger.debug(f'"{node}": All dependents maintained')
    # check_state
    if check_state(node, code_path, state_path) and not run:
        logger.debug(f'"{node}": In spec, no need to maintain')
        return
    # check_data
    result = check_data(node, code_path, state_path, session_id)
    if result.in_spec:
        if not run:
            logger.debug(f'"{node}": In spec, no need to maintain')
            return
    elif result.bad_data:
        logger.debug(f'"{node}": Bad data, diagnosing dependents')
        for n in get_dependents(node, code_path):
            logger.debug(f'diagnose "{n}" because of "{node}" bad data')
            diagnose(n, code_path, state_path, session_id)
        else:
            logger.debug(f'"{node}": All dependents diagnosed')
    # calibrate
    logger.debug(f'recalibrate "{node}"')
    result = calibrate(node, code_path, state_path, session_id)
    if result.bad_data or not result.in_spec:
        raise CalibrationFailedError(
            f'"{node}": All dependents passed, but calibration failed!')
    transform.update_parameters(result)
    return


def run(node, code_path: str | Path,
        state_path: str | Path):
    logger.debug(f'run "{node}" without dependences.')
    result = calibrate(node, code_path, state_path)
    if result.bad_data or not result.in_spec:
        raise CalibrationFailedError(
            f'"{node}": All dependents passed, but calibration failed!')
    transform.update_parameters(result)
    return
```
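The structure is a maintain/diagnose pass over a calibration dependency graph: dependencies are maintained depth-first, `check_state`/`check_data` gate full recalibration, and `session_id` appears to exist mainly so the `lru_cache` on `check_data` and `calibrate` is scoped to a single maintenance pass. (Note that `run()` as added here passes only three arguments to `calibrate()`, whose signature also requires `session_id`.) Driving the scheduler directly would look roughly like the sketch below; the paths are hypothetical:

```python
# Hypothetical direct use of the scheduler -- what the `run` and
# `maintain` CLI commands in __main__.py call under the hood.
from qulab.executor.schedule import CalibrationFailedError, maintain

try:
    # Maintain all dependencies first, then this node; run=True forces the
    # node itself to recalibrate even when check_state()/check_data() pass.
    maintain('rabi.py', './workflows', './workflows/logs', run=True)
except CalibrationFailedError as err:
    print(f'calibration gave up: {err}')
```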
qulab/executor/storage.py
ADDED
@@ -0,0 +1,143 @@

```python
import pickle
import uuid
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from pathlib import Path

from loguru import logger


@dataclass
class Result():
    in_spec: bool = False
    bad_data: bool = False
    fully_calibrated: bool = False
    calibrated_time: datetime = field(default_factory=datetime.now)
    checked_time: datetime = field(default_factory=datetime.now)
    ttl: timedelta = timedelta(days=3650)
    params: dict = field(default_factory=dict)
    info: dict = field(default_factory=dict)
    data: tuple = field(default_factory=tuple)
    previous: Path | None = None


class Graph:

    def __init__(self):
        self.nodes = {}
        self.heads = set()
        self.roots = set()

    def add_node(self, node: str, deps: list[str]):
        if node not in self.nodes:
            self.nodes[node] = deps
            if not deps:
                self.heads.add(node)
        for dep in deps:
            if dep not in self.nodes:
                self.nodes[dep] = []
            self.roots.discard(dep)


def random_path(base: Path) -> Path:
    while True:
        s = uuid.uuid4().hex
        path = Path(s[:2]) / s[2:4] / s[4:6] / s[6:]
        if not (base / path).exists():
            return path


def save_result(workflow: str, result: Result, base_path: str | Path):
    logger.debug(
        f'Saving result for "{workflow}", {result.in_spec=}, {result.bad_data=}, {result.fully_calibrated=}'
    )
    base_path = Path(base_path)
    path = random_path(base_path)
    (base_path / 'objects' / path).parent.mkdir(parents=True, exist_ok=True)
    result.previous = get_head(workflow, base_path)
    with open(base_path / 'objects' / path, "wb") as f:
        pickle.dump(result, f)
    set_head(workflow, path, base_path)


def find_result(workflow: str, base_path: str | Path) -> Result | None:
    base_path = Path(base_path)
    path = get_head(workflow, base_path)
    if path is None:
        return None
    with open(base_path / 'objects' / path, "rb") as f:
        return pickle.load(f)


def renew_result(workflow: str, base_path: str | Path):
    logger.debug(f'Renewing result for "{workflow}"')
    result = find_result(workflow, base_path)
    if result is not None:
        result.checked_time = datetime.now()
        save_result(workflow, result, base_path)


def revoke_result(workflow: str, base_path: str | Path):
    logger.debug(f'Revoking result for "{workflow}"')
    base_path = Path(base_path)
    path = get_head(workflow, base_path)
    if path is not None:
        with open(base_path / 'objects' / path, "rb") as f:
            result = pickle.load(f)
        result.in_spec = False
        save_result(workflow, result, base_path)


def set_head(workflow: str, path: Path, base_path: str | Path):
    base_path = Path(base_path)
    base_path.mkdir(parents=True, exist_ok=True)
    try:
        with open(base_path / "heads", "rb") as f:
            heads = pickle.load(f)
    except:
        heads = {}
    heads[workflow] = path
    with open(base_path / "heads", "wb") as f:
        pickle.dump(heads, f)


def get_head(workflow: str, base_path: str | Path) -> Path | None:
    base_path = Path(base_path)
    try:
        with open(base_path / "heads", "rb") as f:
            heads = pickle.load(f)
        return heads[workflow]
    except:
        return None


def get_graph(base_path: str | Path) -> dict[str, list[str]]:
    base_path = Path(base_path)
    try:
        with open(base_path / "heads", "rb") as f:
            heads = pickle.load(f)
    except:
        heads = {}
    graph = {}
    for workflow, path in heads.items():
        graph[workflow] = []
        while path is not None:
            with open(base_path / 'objects' / path, "rb") as f:
                result = pickle.load(f)
            path = result.previous
            if path is not None:
                graph[workflow].append(path)
    return graph


def update_graph(workflow: str, base_path: str | Path):
    base_path = Path(base_path)
    graph = get_graph(base_path)
    for workflow, deps in graph.items():
        for dep in deps:
            if dep not in graph:
                graph[dep] = []
            if workflow not in graph[dep]:
                graph[dep].append(workflow)
    with open(base_path / "graph", "wb") as f:
        pickle.dump(graph, f)
```
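Storage behaves like a small append-only object store: `save_result` pickles each `Result` to a random `objects/aa/bb/cc/…` path, chains it to the previous version through `result.previous`, and moves the per-workflow pointer in the pickled `heads` index, which `find_result` then dereferences. A minimal round trip, with a hypothetical state directory:

```python
# Hypothetical round trip through the storage layer.
from qulab.executor.storage import Result, save_result, find_result

base = './logs'  # plays the role of the --data / state_path directory
save_result('rabi.py', Result(in_spec=True, params={'pi_amp': 0.42}), base)

latest = find_result('rabi.py', base)
print(latest.in_spec)   # True
print(latest.previous)  # object path of the prior head, or None
```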