QuLab 2.5.4__cp311-cp311-macosx_10_9_universal2.whl → 2.5.5__cp311-cp311-macosx_10_9_universal2.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {QuLab-2.5.4.dist-info → QuLab-2.5.5.dist-info}/METADATA +1 -1
- {QuLab-2.5.4.dist-info → QuLab-2.5.5.dist-info}/RECORD +11 -11
- qulab/executor/load.py +95 -3
- qulab/executor/schedule.py +36 -8
- qulab/executor/storage.py +31 -21
- qulab/fun.cpython-311-darwin.so +0 -0
- qulab/version.py +1 -1
- {QuLab-2.5.4.dist-info → QuLab-2.5.5.dist-info}/LICENSE +0 -0
- {QuLab-2.5.4.dist-info → QuLab-2.5.5.dist-info}/WHEEL +0 -0
- {QuLab-2.5.4.dist-info → QuLab-2.5.5.dist-info}/entry_points.txt +0 -0
- {QuLab-2.5.4.dist-info → QuLab-2.5.5.dist-info}/top_level.txt +0 -0
{QuLab-2.5.4.dist-info → QuLab-2.5.5.dist-info}/RECORD
CHANGED

@@ -1,17 +1,17 @@
 qulab/__init__.py,sha256=XnSePkDDgfbmEpu5uXK6spygxaqVt9emMubHYKIjSwc,244
 qulab/__main__.py,sha256=fjaRSL_uUjNIzBGNgjlGswb9TJ2VD5qnkZHW3hItrD4,68
 qulab/dicttree.py,sha256=tRRMpGZYVOLw0TEByE3_2Ss8FdOmzuGL9e1DWbs8qoY,13684
-qulab/fun.cpython-311-darwin.so,sha256=
+qulab/fun.cpython-311-darwin.so,sha256=ksCyegtDBabox5ZGHXdheGRIRlYmyLwahz1lmdcIeWk,126848
 qulab/typing.py,sha256=5xCLfrp1aZpKpDy4p2arbFszw2eK3hGUjZa-XSvC_-8,69
-qulab/version.py,sha256=
+qulab/version.py,sha256=MDur6AV_Dwa0PNW31e6addEkEKMAh_dHbKFdazRMbHo,21
 qulab/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 qulab/cli/commands.py,sha256=tgDIkkeIoasQXAifJZ6NU8jDgpNgb2a-B0C4nF0evrE,559
 qulab/cli/config.py,sha256=SdNmWzweWAdyk8M2oKYhMxnkaJ0qIayPlsLGCNlVqME,3108
 qulab/executor/__init__.py,sha256=LosPzOMaljSZY1thy_Fxtbrgq7uubJszMABEB7oM7tU,101
 qulab/executor/cli.py,sha256=ZGnQdUFZmU812gsnqbKhaJzwWLdoPt42CukPbZ7Yv8k,6684
-qulab/executor/load.py,sha256=
-qulab/executor/schedule.py,sha256=
-qulab/executor/storage.py,sha256=
+qulab/executor/load.py,sha256=t48vZQVAViIwGBPnoRbfBafv5jSR4ix3z4pkvyGyX-w,17759
+qulab/executor/schedule.py,sha256=DCZmqtNzrvsA1wOTxEldI22n6UWlHtxEx6QKsHn1S8k,13344
+qulab/executor/storage.py,sha256=u7uAmZU8D-pvd6UNek1zzxbUBIZQpftSiXdag6hlkG8,7533
 qulab/executor/transform.py,sha256=AazWdlkEoOBaUJpTYsT5J4f0RanzCEeo-ThwEg8BB4Y,1262
 qulab/executor/utils.py,sha256=VZ_VPYT2MFIfrCtfEW1I7T4NKFIbOvfVac8Sv377MTY,4221
 qulab/monitor/__init__.py,sha256=nTHelnDpxRS_fl_B38TsN0njgq8eVTEz9IAnN3NbDlM,42
@@ -93,9 +93,9 @@ qulab/visualization/plot_seq.py,sha256=UWTS6p9nfX_7B8ehcYo6UnSTUCjkBsNU9jiOeW2ca
 qulab/visualization/qdat.py,sha256=ZeevBYWkzbww4xZnsjHhw7wRorJCBzbG0iEu-XQB4EA,5735
 qulab/visualization/rot3d.py,sha256=lMrEJlRLwYe6NMBlGkKYpp_V9CTipOAuDy6QW_cQK00,734
 qulab/visualization/widgets.py,sha256=6KkiTyQ8J-ei70LbPQZAK35wjktY47w2IveOa682ftA,3180
-QuLab-2.5.
-QuLab-2.5.
-QuLab-2.5.
-QuLab-2.5.
-QuLab-2.5.
-QuLab-2.5.
+QuLab-2.5.5.dist-info/LICENSE,sha256=PRzIKxZtpQcH7whTG6Egvzl1A0BvnSf30tmR2X2KrpA,1065
+QuLab-2.5.5.dist-info/METADATA,sha256=yaWVrSEOko53q9lByNffnVDYleUm9__SuZM8Rld3NdQ,3698
+QuLab-2.5.5.dist-info/WHEEL,sha256=K10eKSN6_vzvMOgXxWbVOQNR7Orfl6gBTCpCI8bcYx4,114
+QuLab-2.5.5.dist-info/entry_points.txt,sha256=b0v1GXOwmxY-nCCsPN_rHZZvY9CtTbWqrGj8u1m8yHo,45
+QuLab-2.5.5.dist-info/top_level.txt,sha256=3T886LbAsbvjonu_TDdmgxKYUn939BVTRPxPl9r4cEg,6
+QuLab-2.5.5.dist-info/RECORD,,
qulab/executor/load.py
CHANGED
@@ -11,6 +11,7 @@ import warnings
 from importlib.util import module_from_spec, spec_from_file_location
 from pathlib import Path
 from types import ModuleType
+from typing import Any
 
 from loguru import logger
 
@@ -107,7 +108,8 @@ def verify_calibrate_method(module: WorkflowType):
 def verify_check_method(module: WorkflowType):
     if not hasattr(module, 'check'):
         warnings.warn(
-            f"Workflow {module.__file__} does not have 'check' function"
+            f"Workflow {module.__file__} does not have 'check' function, it will be set to 'calibrate' function"
+        )
     else:
         if not can_call_without_args(module.check):
             raise AttributeError(
@@ -120,6 +122,93 @@ def verify_check_method(module: WorkflowType):
            )
 
 
+def verify_dependence_key(workflow: str | tuple[str, dict[str, Any]]
+                          | tuple[str, str, dict[str, Any]]):
+    if isinstance(workflow, str):
+        return
+    if not isinstance(workflow, tuple) or len(workflow) not in [2, 3]:
+        raise ValueError(f"Invalid workflow: {workflow}")
+
+    if len(workflow) == 2:
+        file_name, mapping = workflow
+        if not Path(file_name).exists():
+            raise FileNotFoundError(f"File not found: {file_name}")
+    elif len(workflow) == 3:
+        template_path, target_path, mapping = workflow
+        if not Path(template_path).exists():
+            raise FileNotFoundError(f"File not found: {template_path}")
+        if not isinstance(target_path, (Path, str)) or target_path == '':
+            raise ValueError(f"Invalid target_path: {target_path}")
+        if not isinstance(target_path, (Path, str)):
+            raise ValueError(f"Invalid target_path: {target_path}")
+        if Path(target_path).suffix != '.py':
+            raise ValueError(
+                f"Invalid target_path: {target_path}. Only .py file is supported"
+            )
+    else:
+        raise ValueError(f"Invalid workflow: {workflow}")
+
+    if not isinstance(mapping, dict):
+        raise ValueError(f"Invalid mapping: {mapping}")
+
+    for key, value in mapping.items():
+        if not isinstance(key, str):
+            raise ValueError(
+                f"Invalid key: {key}, should be str type and valid identifier")
+        if not key.isidentifier():
+            raise ValueError(f"Invalid key: {key}, should be identifier")
+        try:
+            pickle.dumps(value)
+        except Exception as e:
+            raise ValueError(
+                f"Invalid value: {key}: {value}, should be pickleable") from e
+    return
+
+
+def verify_depends(module: WorkflowType):
+    if not hasattr(module, 'depends'):
+        return
+
+    deps = []
+
+    if callable(module.depends):
+        if not can_call_without_args(module.depends):
+            raise AttributeError(
+                f"Workflow {module.__file__} 'depends' function should not have any parameters"
+            )
+        deps = list(module.depends())
+    elif isinstance(module.depends, (list, tuple)):
+        deps = module.depends
+    else:
+        raise AttributeError(
+            f"Workflow {module.__file__} 'depends' should be a callable or a list"
+        )
+    for workflow in deps:
+        verify_dependence_key(workflow)
+
+
+def verify_entries(module: WorkflowType):
+    if not hasattr(module, 'entries'):
+        return
+
+    deps = []
+
+    if callable(module.entries):
+        if not can_call_without_args(module.entries):
+            raise AttributeError(
+                f"Workflow {module.__file__} 'entries' function should not have any parameters"
+            )
+        deps = list(module.entries())
+    elif isinstance(module.entries, (list, tuple)):
+        deps = module.entries
+    else:
+        raise AttributeError(
+            f"Workflow {module.__file__} 'entries' should be a callable or a list"
+        )
+    for workflow in deps:
+        verify_dependence_key(workflow)
+
+
 def is_workflow(module: ModuleType) -> bool:
     try:
         verify_calibrate_method(module)
@@ -218,14 +307,15 @@ def load_workflow_from_file(file_name: str,
     module.__mtime__ = (base_path / path).stat().st_mtime
 
     if hasattr(module, 'entries'):
+        verify_entries(module)
         return module
 
     if not hasattr(module, '__timeout__'):
         module.__timeout__ = None
 
     if not hasattr(module, 'depends'):
-        module.depends = lambda: [
-
+        module.depends = lambda: []
+    verify_depends(module)
     verify_calibrate_method(module)
     verify_check_method(module)
 
@@ -415,5 +505,7 @@ def make_graph(workflow: WorkflowType, graph: dict, code_path: str | Path):
     for w in get_dependents(workflow, code_path):
         graph[workflow.__workflow_id__].append(w.__workflow_id__)
         make_graph(w, graph=graph, code_path=code_path)
+    if graph[workflow.__workflow_id__] == []:
+        del graph[workflow.__workflow_id__]
 
     return graph
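
Note: for context, a minimal sketch (not taken from the package) of a workflow module that would pass the new verify_depends/verify_entries checks added above. File names and mapping contents are illustrative assumptions; verify_dependence_key only requires that referenced files exist, that a three-element entry targets a '.py' path, and that mapping keys are valid identifiers with pickleable values. An 'entries' declaration follows the same element format.

    # hypothetical workflow file; the paths and keys below are placeholders
    def depends():
        # may also be a plain list/tuple instead of a zero-argument callable
        return [
            'workflows/s21.py',                                  # str form
            ('workflows/spectrum.py', {'qubit': 'Q1'}),          # (file, mapping)
            ('templates/rabi.py', 'workflows/rabi_Q1.py',
             {'qubit': 'Q1'}),                                   # (template, target, mapping)
        ]

    def calibrate():
        ...  # must be callable without arguments

    def analyze(result, history=None):
        ...  # must return a Result object (now also checked for pickleability)
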
qulab/executor/schedule.py
CHANGED
@@ -1,4 +1,5 @@
 import functools
+import pickle
 import uuid
 from datetime import datetime, timedelta
 from pathlib import Path
@@ -15,6 +16,27 @@ class CalibrationFailedError(Exception):
     pass
 
 
+def is_pickleable(obj) -> bool:
+    try:
+        pickle.dumps(obj)
+        return True
+    except:
+        return False
+
+
+def veryfy_analyzed_result(result: Result, script: str, method: str):
+    if not isinstance(result, Result):
+        raise TypeError(f'"{script}" : "{method}" must return a Result object')
+    if not is_pickleable(result.parameters):
+        raise TypeError(
+            f'"{script}" : "{method}" return not pickleable data in .parameters'
+        )
+    if not is_pickleable(result.other_infomation):
+        raise TypeError(
+            f'"{script}" : "{method}" return not pickleable data in .other_infomation'
+        )
+
+
 def check_state(workflow: WorkflowType, code_path: str | Path,
                 state_path: str | Path) -> bool:
     """
@@ -73,17 +95,11 @@ def call_analyzer(node,
                   plot=False) -> Result:
     if check:
         result = node.check_analyze(result, history=history)
-
-            raise TypeError(
-                f'"{node.__workflow_id__}" : "check_analyze" must return a Result object'
-            )
+        veryfy_analyzed_result(result, node.__workflow_id__, "check_analyze")
         result.fully_calibrated = False
     else:
         result = node.analyze(result, history=history)
-
-            raise TypeError(
-                f'"{node.__workflow_id__}" : "analyze" must return a Result object'
-            )
+        veryfy_analyzed_result(result, node.__workflow_id__, "analyze")
         result.fully_calibrated = True
     if plot:
         call_plot(node, result)
@@ -130,6 +146,10 @@ def check_data(workflow: WorkflowType, code_path: str | Path,
     logger.debug(
         f'Checking "{workflow.__workflow_id__}" with "check" method ...')
     data = workflow.check()
+    if not is_pickleable(data):
+        raise TypeError(
+            f'"{workflow.__workflow_id__}" : "check" return not pickleable data'
+        )
     result = Result()
     result.data = data
     #save_result(workflow.__workflow_id__, result, state_path)
@@ -155,6 +175,10 @@ def check_data(workflow: WorkflowType, code_path: str | Path,
             f'Checking "{workflow.__workflow_id__}" with "calibrate" method ...'
         )
         data = workflow.calibrate()
+        if not is_pickleable(data):
+            raise TypeError(
+                f'"{workflow.__workflow_id__}" : "calibrate" return not pickleable data'
+            )
         result = Result()
         result.data = data
         save_result(workflow.__workflow_id__, result, state_path)
@@ -180,6 +204,10 @@ def calibrate(workflow: WorkflowType, code_path: str | Path,
 
     logger.debug(f'Calibrating "{workflow.__workflow_id__}" ...')
     data = workflow.calibrate()
+    if not is_pickleable(data):
+        raise TypeError(
+            f'"{workflow.__workflow_id__}" : "calibrate" return not pickleable data'
+        )
     result = Result()
     result.data = data
     save_result(workflow.__workflow_id__, result, state_path)
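
Note: the new guards in schedule.py reject any calibrate/check return value (and any Result .parameters / .other_infomation) that pickle cannot serialize. A quick, self-contained illustration of what the is_pickleable pattern accepts and rejects; the example objects are made up and not part of this release:

    import pickle

    def is_pickleable(obj) -> bool:     # mirrors the helper added above
        try:
            pickle.dumps(obj)
            return True
        except Exception:
            return False

    print(is_pickleable({'amp': 0.12, 'freq': 5.3e9}))  # True: plain data round-trips
    print(is_pickleable(lambda x: x))                   # False: lambdas cannot be pickled,
                                                        # so check_data/calibrate now raise TypeError
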
qulab/executor/storage.py
CHANGED
@@ -3,6 +3,7 @@ import pickle
 import uuid
 from dataclasses import dataclass, field
 from datetime import datetime, timedelta
+from functools import lru_cache
 from pathlib import Path
 from typing import Any, Literal
 
@@ -181,36 +182,42 @@ def get_heads(base_path: str | Path) -> Path | None:
 def create_index(name: str,
                  base_path: str | Path,
                  context: str,
-                 width: int,
+                 width: int = -1,
                  start: int = 0):
-
-
-
-
+
+    path = Path(base_path) / "index" / name
+    if width == -1:
+        width = len(context)
     else:
-
-        path.parent.mkdir(parents=True, exist_ok=True)
-        with open(path, "w") as f:
-            f.write(str(index + 1))
+        width = max(width, len(context))
 
-    path
-
-
+    if path.with_suffix('.width').exists():
+        width = int(path.with_suffix('.width').read_text())
+        index = int(path.with_suffix('.seq').read_text())
+    else:
+        index = start
+    if width < len(context):
+        raise ValueError(
+            f"Context '{context}' is too long, existing width of '{name}' is {width}."
+        )
+    if not path.with_suffix('.width').exists():
+        path.parent.mkdir(parents=True, exist_ok=True)
+        path.with_suffix('.width').write_text(str(width))
 
-    path
-    with open(path, "a") as f:
+    path.with_suffix('.seq').write_text(str(index + 1))
 
+    with path.with_suffix('.idx').open("a") as f:
         f.write(f"{context.ljust(width)}\n")
 
     return index
 
 
+@lru_cache(maxsize=4096)
 def query_index(name: str, base_path: str | Path, index: int):
-    path = Path(base_path) / "index" /
-
-
-    path
-    with open(path, "r") as f:
+    path = Path(base_path) / "index" / name
+    width = int(path.with_suffix('.width').read_text())
+
+    with path.with_suffix('.idx').open("r") as f:
         f.seek(index * (width + 1))
         context = f.read(width)
         return context.rstrip()
@@ -219,5 +226,8 @@ def query_index(name: str, base_path: str | Path, index: int):
 def get_result_by_index(
     index: int, base_path: str | Path = get_config_value("data", Path)
 ) -> Result | None:
-
-
+    try:
+        path = query_index("result", base_path, index)
+        return load_result(path, base_path)
+    except:
+        return None
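
Note: the rewritten create_index/query_index pair persists the index as sidecar files under <base_path>/index/ — a '.width' file (fixed record width), a '.seq' file (next sequence number) and an '.idx' file holding one left-justified, newline-terminated record per entry — which is what makes the f.seek(index * (width + 1)) lookup a direct read. A minimal sketch of that record layout, using made-up paths and values:

    from pathlib import Path

    width = 24                               # would normally come from the '.width' file
    idx = Path('/tmp/demo.idx')              # stand-in for <base_path>/index/<name>.idx

    # Append records the way create_index does: padded to 'width', newline-terminated.
    with idx.open('w') as f:
        for context in ['cfg/2024-01-01.json', 'cfg/2024-01-02.json']:
            assert len(context) <= width     # create_index raises ValueError otherwise
            f.write(f"{context.ljust(width)}\n")

    # Random access the way query_index does: each record spans exactly width + 1 characters.
    def lookup(index: int) -> str:
        with idx.open('r') as f:
            f.seek(index * (width + 1))
            return f.read(width).rstrip()

    print(lookup(1))                         # -> cfg/2024-01-02.json
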
qulab/fun.cpython-311-darwin.so
CHANGED
Binary file
qulab/version.py
CHANGED
@@ -1 +1 @@
-__version__ = "2.5.4"
+__version__ = "2.5.5"
{QuLab-2.5.4.dist-info → QuLab-2.5.5.dist-info}/LICENSE
File without changes

{QuLab-2.5.4.dist-info → QuLab-2.5.5.dist-info}/WHEEL
File without changes

{QuLab-2.5.4.dist-info → QuLab-2.5.5.dist-info}/entry_points.txt
File without changes

{QuLab-2.5.4.dist-info → QuLab-2.5.5.dist-info}/top_level.txt
File without changes