QuLab 2.7.13-cp311-cp311-win_amd64.whl → 2.7.15-cp311-cp311-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qulab/executor/cli.py +16 -0
- qulab/executor/load.py +42 -74
- qulab/executor/template.py +186 -0
- qulab/executor/transform.py +2 -0
- qulab/executor/utils.py +12 -0
- qulab/fun.cp311-win_amd64.pyd +0 -0
- qulab/tools/__init__.py +0 -39
- qulab/tools/connection_helper.py +39 -0
- qulab/version.py +1 -1
- {qulab-2.7.13.dist-info → qulab-2.7.15.dist-info}/METADATA +1 -1
- {qulab-2.7.13.dist-info → qulab-2.7.15.dist-info}/RECORD +15 -13
- {qulab-2.7.13.dist-info → qulab-2.7.15.dist-info}/LICENSE +0 -0
- {qulab-2.7.13.dist-info → qulab-2.7.15.dist-info}/WHEEL +0 -0
- {qulab-2.7.13.dist-info → qulab-2.7.15.dist-info}/entry_points.txt +0 -0
- {qulab-2.7.13.dist-info → qulab-2.7.15.dist-info}/top_level.txt +0 -0
qulab/executor/cli.py
CHANGED
@@ -161,6 +161,15 @@ def get(key, api):
 def run(workflow, code, data, api, plot, no_dependents, retry, freeze):
     """
     Run a workflow.
+
+    If the workflow has entries, run all entries.
+    If `--no-dependents` is set, only run the workflow itself.
+    If `--retry` is set, retry the workflow when calibration failed.
+    If `--freeze` is set, freeze the config table.
+    If `--plot` is set, plot the report.
+    If `--api` is set, use the api to get and update the config table.
+    If `--code` is not set, use the current working directory.
+    If `--data` is not set, use the `logs` directory in the code path.
     """
     logger.info(
         f'[CMD]: run {workflow} --code {code} --data {data} --api {api}'
@@ -227,6 +236,13 @@ def run(workflow, code, data, api, plot, no_dependents, retry, freeze):
 def maintain(workflow, code, data, api, retry, plot):
     """
     Maintain a workflow.
+
+    If the workflow has entries, run all entries.
+    If `--retry` is set, retry the workflow when calibration failed.
+    If `--plot` is set, plot the report.
+    If `--api` is set, use the api to get and update the config table.
+    If `--code` is not set, use the current working directory.
+    If `--data` is not set, use the `logs` directory in the code path.
     """
     logger.info(
         f'[CMD]: maintain {workflow} --code {code} --data {data} --api {api}'
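Both hunks in cli.py are documentation-only: the `run` and `maintain` commands gain docstrings spelling out their options (`--retry`, `--plot`, `--api`, `--code`, `--data`, plus `--no-dependents` and `--freeze` for `run`); no behaviour changes in this file.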
qulab/executor/load.py
CHANGED
@@ -1,12 +1,6 @@
-import base64
 import graphlib
-import hashlib
 import inspect
-import lzma
 import pickle
-import re
-import string
-import textwrap
 import warnings
 from importlib.util import module_from_spec, spec_from_file_location
 from pathlib import Path
@@ -16,6 +10,8 @@ from typing import Any
 from loguru import logger
 
 from .storage import Report
+from .template import (TemplateKeyError, TemplateTypeError, decode_mapping,
+                       inject_mapping)
 
 
 class SetConfigWorkflow():
@@ -330,22 +326,6 @@ def load_workflow_from_file(file_name: str,
     return module
 
 
-def encode_mapping(mapping):
-    mapping_bytes = lzma.compress(pickle.dumps(mapping))
-    hash_str = hashlib.md5(mapping_bytes).hexdigest()[:8]
-    mappping_code = '\n'.join(
-        textwrap.wrap(base64.b64encode(mapping_bytes).decode(), 100))
-    return hash_str, mappping_code
-
-
-def decode_mapping(hash_str, mappping_code):
-    mapping_bytes = base64.b64decode(mappping_code.replace('\n', ''))
-    if hash_str != hashlib.md5(mapping_bytes).hexdigest()[:8]:
-        raise ValueError("Hash does not match")
-    mapping = pickle.loads(lzma.decompress(mapping_bytes))
-    return mapping
-
-
 def load_workflow_from_template(template_path: str,
                                 mapping: dict[str, str],
                                 base_path: str | Path,
@@ -359,36 +339,9 @@ def load_workflow_from_template(template_path: str,
         content = f.read()
 
     mtime = max((base_path / template_path).stat().st_mtime, mtime)
-
-
-
-        """
-        将给定文本中的所有 VAR("var") 替换为 __VAR_{hash_str}["var"]。
-
-        Args:
-            text (str): 包含 VAR 调用的字符串。
-
-        Returns:
-            str: 已经替换的新字符串。
-        """
-        pattern = re.compile(r'VAR\s*\(\s*(["\'])(\w+)\1\s*\)')
-        replacement = f'__VAR_{hash_str}' + r'[\1\2\1]'
-        new_text = re.sub(pattern, replacement, text)
-        return new_text
-
-    template = string.Template(replace(content))
-    keys = template.get_identifiers()
-    missing = set(keys) - set(mapping.keys())
-    if missing:
-        raise KeyError(f"{template_path}: Missing keys in mapping: {missing}")
-    content = template.substitute(mapping)
-
-    inject_code = [
-        "from qulab.executor.load import decode_mapping",
-        f"__VAR_{hash_str} = decode_mapping(\"{hash_str}\",",
-        f"\"\"\"{mapping_code}\"\"\")"
-    ]
-    content = '\n'.join(inject_code + [content])
+
+    content, hash_str = inject_mapping(content, mapping, str(path))
+
     if target_path is None:
         if path.stem == 'template':
             path = path.parent / f'tmp{hash_str}.py'
@@ -424,16 +377,27 @@ def load_workflow_from_template(template_path: str,
 def load_workflow(workflow: str | tuple[str, dict],
                   base_path: str | Path,
                   package='workflows',
-                  mtime: float = 0
+                  mtime: float = 0,
+                  inject: dict | None = None) -> WorkflowType:
     if isinstance(workflow, tuple):
         if len(workflow) == 2:
             file_name, mapping = workflow
-
-
+            if inject is None:
+                w = load_workflow_from_template(file_name, mapping, base_path,
+                                                None, package, mtime)
+            else:
+                w = load_workflow_from_template(file_name, inject, base_path,
+                                                None, package, mtime)
         elif len(workflow) == 3:
             template_path, target_path, mapping = workflow
-
-
+            if inject is None:
+                w = load_workflow_from_template(template_path, mapping,
+                                                base_path, target_path,
+                                                package, mtime)
+            else:
+                w = load_workflow_from_template(template_path, inject,
+                                                base_path, target_path,
+                                                package, mtime)
         else:
            raise ValueError(f"Invalid workflow: {workflow}")
         w.__workflow_id__ = str(Path(w.__file__).relative_to(base_path))
@@ -451,6 +415,22 @@ def load_workflow(workflow: str | tuple[str, dict],
     return w
 
 
+def _load_workflow_list(workflow, lst, code_path):
+    ret = []
+    for i, n in enumerate(lst):
+        try:
+            ret.append(load_workflow(n, code_path, mtime=workflow.__mtime__))
+        except TemplateKeyError:
+            raise TemplateKeyError(
+                f"Workflow {workflow.__workflow_id__} missing key in {i}th {n[0]} dependent mapping."
+            )
+        except TemplateTypeError:
+            raise TemplateTypeError(
+                f"Workflow {workflow.__workflow_id__} type error in {i}th {n[0]} dependent mapping."
+            )
+    return ret
+
+
 def get_dependents(workflow: WorkflowType,
                    code_path: str | Path) -> list[WorkflowType]:
     if callable(getattr(workflow, 'depends', None)):
@@ -458,16 +438,10 @@ def get_dependents(workflow: WorkflowType,
         raise AttributeError(
             f'Workflow {workflow.__workflow_id__} "depends" function should not have any parameters'
         )
-        return [
-            load_workflow(n, code_path, mtime=workflow.__mtime__)
-            for n in workflow.depends()
-        ]
+        return _load_workflow_list(workflow, workflow.depends(), code_path)
     elif isinstance(getattr(workflow, 'depends', None), (list, tuple)):
-        return [
-            load_workflow(n, code_path, mtime=workflow.__mtime__)
-            for n in workflow.depends
-        ]
-    elif getattr(workflow, 'entries', None) is None:
+        return _load_workflow_list(workflow, workflow.depends, code_path)
+    elif getattr(workflow, 'depends', None) is None:
         return []
     else:
         raise AttributeError(
@@ -482,15 +456,9 @@ def get_entries(workflow: WorkflowType,
         raise AttributeError(
             f'Workflow {workflow.__workflow_id__} "entries" function should not have any parameters'
         )
-        return [
-            load_workflow(n, code_path, mtime=workflow.__mtime__)
-            for n in workflow.entries()
-        ]
+        return _load_workflow_list(workflow, workflow.entries(), code_path)
     elif isinstance(getattr(workflow, 'entries', None), (list, tuple)):
-        return [
-            load_workflow(n, code_path, mtime=workflow.__mtime__)
-            for n in workflow.entries
-        ]
+        return _load_workflow_list(workflow, workflow.entries, code_path)
     elif getattr(workflow, 'entries', None) is None:
         return []
     else:
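For orientation, a minimal sketch of how the reworked loader is called; the template file name, mapping keys and base path below are hypothetical, only the call shapes (a `(template, mapping)` or `(template, target, mapping)` tuple plus the new optional `inject` argument) come from this diff.

from qulab.executor.load import load_workflow

# Two-element tuple: render the template with `mapping`, then load the result.
w = load_workflow(("rabi_template.py", {"qubit": "Q1"}),
                  base_path="workflows")

# The new `inject` argument, when given, is used in place of the tuple's
# mapping when rendering the template.
w = load_workflow(("rabi_template.py", {"qubit": "Q1"}),
                  base_path="workflows",
                  inject={"qubit": "Q2"})

Errors raised while loading `depends`/`entries` items are now funnelled through `_load_workflow_list`, which reports which dependent mapping was missing a key or carried a non-string value.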
qulab/executor/template.py
ADDED
@@ -0,0 +1,186 @@
+import ast
+import base64
+import hashlib
+import lzma
+import pickle
+import re
+import string
+import textwrap
+from typing import Any
+
+
+def encode_mapping(mapping):
+    mapping_bytes = lzma.compress(pickle.dumps(mapping))
+    hash_str = hashlib.md5(mapping_bytes).hexdigest()[:8]
+    mappping_code = '\n'.join(
+        textwrap.wrap(base64.b64encode(mapping_bytes).decode(),
+                      90,
+                      initial_indent=' ',
+                      subsequent_indent=' '))
+    return hash_str, mappping_code
+
+
+def decode_mapping(hash_str, mappping_code):
+    mapping_bytes = base64.b64decode(mappping_code)
+    if hash_str != hashlib.md5(mapping_bytes).hexdigest()[:8]:
+        raise ValueError("Hash does not match")
+    mapping = pickle.loads(lzma.decompress(mapping_bytes))
+    return mapping
+
+
+class TemplateTypeError(TypeError):
+    pass
+
+
+class TemplateKeyError(KeyError):
+    pass
+
+
+class TemplateVarExtractor(ast.NodeVisitor):
+
+    def __init__(self, fname, mapping):
+        self.var_func_def = (0, 0)
+        self.variables = set()
+        self.str_variables = set()
+        self.replacements = {}
+        self.fname = fname
+        self.mapping = mapping
+
+    def visit_Constant(self, node):
+        if isinstance(node.value, str):
+            self._process_string(node.value, node.lineno, node.col_offset,
+                                 node.end_lineno, node.end_col_offset)
+
+    def visit_JoinedStr(self, node):
+        for value in node.values:
+            if isinstance(value, ast.Constant) and isinstance(
+                    value.value, str):
+                self._process_string(value.value, value.lineno,
+                                     value.col_offset, value.end_lineno,
+                                     value.end_col_offset)
+        self.generic_visit(node)
+
+    def visit_FunctionDef(self, node):
+        if node.name == 'VAR':
+            self.var_func_def = (node.lineno, node.end_lineno)
+        self.generic_visit(node)
+
+    def visit_Call(self, node):
+        if isinstance(node.func, ast.Name) and node.func.id == 'VAR':
+            arg = node.args[0]
+            if isinstance(arg, ast.Constant) and isinstance(arg.value, str):
+                if arg.value not in self.mapping:
+                    raise TemplateKeyError(
+                        f"The variable '{arg.value}' is not provided in mapping. {self.fname}:{node.lineno}"
+                    )
+                self.variables.add(arg.value)
+                # new_node = ast.Subscript(value=ast.Name(id="__VAR",
+                #                                         ctx=ast.Load()),
+                #                          slice=ast.Constant(value=arg.value),
+                #                          ctx=ast.Load())
+                # ast.fix_missing_locations(new_node)
+                # new_source = ast.unparse(new_node)
+                self.replacements[(node.lineno, node.end_lineno,
+                                   node.col_offset,
+                                   node.end_col_offset)] = ('VAR', arg.value,
+                                                            None, None)
+            else:
+                raise SyntaxError(
+                    f"Argument of VAR function must be a string. {self.fname}:{node.lineno}"
+                )
+        self.generic_visit(node)
+
+    def _process_string(self, s: str, lineno: int, col_offset: int,
+                        end_lineno: int, end_col_offset: int):
+        """解析字符串内容,提取模板变量"""
+        lines = s.split('\n')
+        for offset, line in enumerate(lines):
+            current_lineno = lineno + offset
+            template = string.Template(line)
+            for var_name in template.get_identifiers():
+                if var_name not in self.mapping:
+                    raise TemplateKeyError(
+                        f"The variable '{var_name}' is not provided in mapping. {self.fname}:{current_lineno}"
+                    )
+                if not isinstance(self.mapping[var_name], str):
+                    raise TemplateTypeError(
+                        f"Mapping value for '{var_name}' must be a string. {self.fname}:{current_lineno}"
+                    )
+                self.str_variables.add(var_name)
+                start, stop = 0, len(line)
+                if current_lineno == lineno:
+                    start = col_offset
+                if current_lineno == end_lineno:
+                    stop = end_col_offset
+                self.replacements[(current_lineno, current_lineno, start,
+                                   stop)] = ('STR', var_name, None, None)
+
+
+def inject_mapping(source: str, mapping: dict[str, Any],
+                   fname: str) -> list[tuple[str, int]]:
+    hash_str, mapping_code = encode_mapping(mapping)
+
+    tree = ast.parse(source)
+
+    lines = source.splitlines()
+    lines_offset = [0 for _ in range(len(lines))]
+
+    extractor = TemplateVarExtractor(fname, mapping)
+    extractor.visit(tree)
+
+    # remove VAR function definition
+    if extractor.var_func_def != (0, 0):
+        for i in range(extractor.var_func_def[0] - 1,
+                       extractor.var_func_def[1]):
+            lines[i] = ''
+
+    for (lineno, end_lineno, col_offset,
+         end_col_offset), (kind, name, old_source,
+                           new_source) in extractor.replacements.items():
+        head = lines[lineno - 1][:col_offset + lines_offset[lineno - 1]]
+        tail = lines[end_lineno - 1][end_col_offset +
+                                     lines_offset[end_lineno - 1]:]
+        length_of_last_line = len(lines[end_lineno - 1])
+        content = lines[lineno - 1:end_lineno]
+        content[0] = content[0].removeprefix(head)
+        content[-1] = content[-1].removesuffix(tail)
+        content = '\n'.join(content)
+
+        if kind == 'STR':
+            template = string.Template(content)
+            formated_lines = template.substitute(mapping).splitlines()
+            formated_lines[0] = head + formated_lines[0]
+            formated_lines[-1] = formated_lines[-1] + tail
+            if len(formated_lines) == 1:
+                lines[lineno - 1] = formated_lines[0]
+                lines_offset[lineno -
+                             1] += len(lines[lineno - 1]) - length_of_last_line
+            else:
+                lines[lineno - 1:end_lineno] = formated_lines
+                lines_offset[end_lineno - 1] += len(
+                    lines[end_lineno - 1]) - length_of_last_line
+        else:
+            pattern = re.compile(r'VAR\s*\(\s*(["\'])(\w+)\1\s*\)')
+            replacement = f'__VAR_{hash_str}' + r'[\1\2\1]'
+            new_content = re.sub(pattern, replacement, content)
+
+            if lineno == end_lineno:
+                lines[lineno - 1] = head + new_content + tail
+                lines_offset[lineno -
+                             1] += len(lines[lineno - 1]) - length_of_last_line
+            else:
+                lines[lineno - 1] = head + new_content[:-1]
+                for i in range(lineno, end_lineno - 1):
+                    lines[i] = ''
+                lines[end_lineno - 1] = ']' + tail
+                lines_offset[end_lineno - 1] += len(
+                    lines[end_lineno - 1]) - length_of_last_line
+
+    injected_code = '\n'.join([
+        f"__QULAB_TEMPLATE__ = \"{fname}\"",
+        f"from qulab.executor.template import decode_mapping as __decode_{hash_str}",
+        f"__VAR_{hash_str} = __decode_{hash_str}(\"{hash_str}\", \"\"\"",
+        mapping_code, " \"\"\")", *lines
+    ])
+
+    return injected_code, hash_str
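A small self-contained sketch of how these helpers fit together; the template text and file name below are made up, and the behaviour described follows the code above.

from qulab.executor.template import (decode_mapping, encode_mapping,
                                     inject_mapping)

# encode_mapping pickles and lzma-compresses the mapping, wraps it in base64
# and returns a short md5 prefix as an integrity check; decode_mapping
# verifies that prefix and restores the original object.
mapping = {"qubit": "Q1"}
hash_str, code = encode_mapping(mapping)
assert decode_mapping(hash_str, code) == {"qubit": "Q1"}

# inject_mapping rewrites a template source: the stub VAR() definition is
# blanked out, VAR("qubit") becomes a lookup into an injected __VAR_<hash>
# dict, and ${qubit} inside string literals is substituted directly. A
# __QULAB_TEMPLATE__ marker and the encoded mapping are prepended so the
# generated module can be traced back to its template.
template_source = '''def VAR(name):
    ...

qubit = VAR("qubit")
message = "calibrating ${qubit}"
'''
new_source, h = inject_mapping(template_source, mapping, "rabi_template.py")
print(new_source)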
qulab/executor/transform.py
CHANGED
@@ -79,6 +79,8 @@ def set_config_api(query_method, update_method, export_method):
             the method should take a key and return the value.
         update_method: The update method.
             the method should take a dict of updates.
+        export_method: The export method.
+            the method should return a dict of the config.
     """
     global query_config, update_config, export_config
 
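For context, a minimal sketch of wiring a custom config backend through set_config_api; the in-memory dict used as the backing store is an illustrative assumption, not part of QuLab.

from qulab.executor.transform import set_config_api

_store = {}  # hypothetical in-memory config table


def query(key):
    # query_method: takes a key and returns its value
    return _store.get(key)


def update(updates: dict):
    # update_method: takes a dict of updates
    _store.update(updates)


def export() -> dict:
    # export_method (now documented): returns the whole config as a dict
    return dict(_store)


set_config_api(query, update, export)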
qulab/executor/utils.py
CHANGED
@@ -169,6 +169,18 @@ def debug_analyze(
     wf = load_workflow(workflow, code_path)
     if wf is None:
         raise ValueError(f'Invalid workflow: {workflow}')
+    if hasattr(wf, '__QULAB_TEMPLATE__'):
+        template_mtime = (Path(code_path) / wf.__QULAB_TEMPLATE__).stat().st_mtime
+        if template_mtime > wf.__mtime__:
+            for k in dir(wf):
+                if k.startswith('__VAR_') and len(k) == len('__VAR_17fb4dde'):
+                    var_dict = getattr(wf, k)
+                    break
+            else:
+                var_dict = {}
+            wf = load_workflow((wf.__QULAB_TEMPLATE__, workflow, var_dict),
+                               code_path)
+
     report = wf.analyze(report, report.previous)
     if hasattr(wf, 'plot'):
         wf.plot(report)
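In short: when debug_analyze loads a workflow that carries a __QULAB_TEMPLATE__ attribute (i.e. it was generated from a template) and the template file has been modified since the module was built, the workflow is regenerated from the template with the previously injected __VAR_<hash> mapping before analyze and plot are re-run.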
qulab/fun.cp311-win_amd64.pyd
CHANGED
Binary file
qulab/tools/__init__.py
CHANGED
@@ -1,39 +0,0 @@
-class _Missing():
-
-    def __repr__(self):
-        return "Missing"
-
-
-Missing = _Missing()
-
-
-def connect_trace(*mappings):
-    if not mappings:
-        return {}
-    result = {}
-    first_mapping = mappings[0]
-    for key in first_mapping:
-        current_value = key
-        trajectory = []
-        for mapping in mappings:
-            if current_value is Missing:
-                trajectory.append(Missing)
-                continue
-            if current_value in mapping:
-                next_value = mapping[current_value]
-                trajectory.append(next_value)
-                current_value = next_value
-            else:
-                trajectory.append(Missing)
-                current_value = Missing
-        result[key] = trajectory
-    return result
-
-
-def connect(*mappings):
-    if not mappings:
-        return {}
-    return {
-        k: v[-1]
-        for k, v in connect_trace(*mappings).items() if v[-1] is not Missing
-    }
qulab/tools/connection_helper.py
ADDED
@@ -0,0 +1,39 @@
+class _Missing():
+
+    def __repr__(self):
+        return "Missing"
+
+
+Missing = _Missing()
+
+
+def connect_trace(*mappings):
+    if not mappings:
+        return {}
+    result = {}
+    first_mapping = mappings[0]
+    for key in first_mapping:
+        current_value = key
+        trajectory = []
+        for mapping in mappings:
+            if current_value is Missing:
+                trajectory.append(Missing)
+                continue
+            if current_value in mapping:
+                next_value = mapping[current_value]
+                trajectory.append(next_value)
+                current_value = next_value
+            else:
+                trajectory.append(Missing)
+                current_value = Missing
+        result[key] = trajectory
+    return result
+
+
+def connect(*mappings):
+    if not mappings:
+        return {}
+    return {
+        k: v[-1]
+        for k, v in connect_trace(*mappings).items() if v[-1] is not Missing
+    }
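The helpers keep their behaviour; only the module path changes, so callers importing them from qulab.tools will need to import from qulab.tools.connection_helper instead. A short usage sketch with made-up mappings:

from qulab.tools.connection_helper import connect, connect_trace

qubit_to_channel = {"Q1": "ch1", "Q2": "ch2"}
channel_to_port = {"ch1": "awg.port3"}

# connect_trace keeps the full lookup chain, padding with Missing where a
# key cannot be resolved by the next mapping.
connect_trace(qubit_to_channel, channel_to_port)
# -> {'Q1': ['ch1', 'awg.port3'], 'Q2': ['ch2', Missing]}

# connect keeps only the keys whose chain resolves all the way through.
connect(qubit_to_channel, channel_to_port)
# -> {'Q1': 'awg.port3'}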
qulab/version.py
CHANGED
@@ -1 +1 @@
-__version__ = "2.7.13"
+__version__ = "2.7.15"
{qulab-2.7.13.dist-info → qulab-2.7.15.dist-info}/RECORD
CHANGED
@@ -1,20 +1,21 @@
 qulab/__init__.py,sha256=RZme5maBSMZpP6ckXymqZpo2sRYttwEpTYCIzIvys1c,292
 qulab/__main__.py,sha256=FL4YsGZL1jEtmcPc5WbleArzhOHLMsWl7OH3O-1d1ss,72
 qulab/dicttree.py,sha256=ZoSJVWK4VMqfzj42gPb_n5RqLlM6K1Me0WmLIfLEYf8,14195
-qulab/fun.cp311-win_amd64.pyd,sha256=
+qulab/fun.cp311-win_amd64.pyd,sha256=NUeXOv9kFzCXtKkc2trgV2vWKS68E9IrAM-FjAuOM38,31744
 qulab/typing.py,sha256=PRtwbCHWY2ROKK8GHq4Bo8llXrIGo6xC73DrQf7S9os,71
 qulab/utils.py,sha256=UyZNPIyvis5t2MJBkXXLO5EmYP3mQZbt87zmYAHgoyk,1291
-qulab/version.py,sha256
+qulab/version.py,sha256=-hKvJfKSrJwPY8gXyRBQzkjU0b1wKGMX2qCqb_6ZsJU,22
 qulab/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 qulab/cli/commands.py,sha256=6xd2eYw32k1NmfAuYSu__1kaP12Oz1QVqwbkYXdWno4,588
 qulab/cli/config.py,sha256=7h3k0K8FYHhI6LVWt8BoDdKrX2ApFDBAUAUuXhHwst4,3799
 qulab/executor/__init__.py,sha256=LosPzOMaljSZY1thy_Fxtbrgq7uubJszMABEB7oM7tU,101
-qulab/executor/cli.py,sha256=
-qulab/executor/load.py,sha256=
+qulab/executor/cli.py,sha256=z8W1RivKdABQSOGy2viNUvG73QvOBpE9gSKjw45vSVA,9794
+qulab/executor/load.py,sha256=_Zn4wtGGD8pBZwOfbJaaFVAr8riY2Bkhi67lX9Jy-hA,17427
 qulab/executor/schedule.py,sha256=9pTOVWzKiDc7ip8iuB_47poJcYOvoBI9eQwTrDTA3p0,19044
 qulab/executor/storage.py,sha256=gI6g28BmKKEZ_Pl-hFwvpiOj3mF8Su-yjj3hfMXs1VY,11630
-qulab/executor/
-qulab/executor/
+qulab/executor/template.py,sha256=bKMoOBPfa3XMgTfGHQK6pDTswH1vcIjnopaWE3UKpP0,7726
+qulab/executor/transform.py,sha256=BDx0c4nqTHMAOLVqju0Ydd91uxNm6EpVIfssjZse0bI,2284
+qulab/executor/utils.py,sha256=l_b0y2kMwYKyyXeFtoblPYwKNU-wiFQ9PMo9QlWl9wE,6213
 qulab/monitor/__init__.py,sha256=xEVDkJF8issrsDeLqQmDsvtRmrf-UiViFcGTWuzdlFU,43
 qulab/monitor/__main__.py,sha256=k2H1H5Zf9LLXTDLISJkbikLH-z0f1e5i5i6wXXYPOrE,105
 qulab/monitor/config.py,sha256=y_5StMkdrbZO1ziyKBrvIkB7Jclp9RCPK1QbsOhCxnY,785
@@ -85,7 +86,8 @@ qulab/sys/rpc/socket.py,sha256=W3bPwe8um1IeR_3HLx-ad6iCcbeuUQcSg11Ze4w6DJg,742
 qulab/sys/rpc/utils.py,sha256=BurIcqh8CS-Hsk1dYP6IiefK4qHivaEqD9_rBY083SA,619
 qulab/sys/rpc/worker.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 qulab/sys/rpc/zmq_socket.py,sha256=aoIm-C-IdJjm9_PQXckvbqTxc9kCeJrT4PyYytoDIHo,8492
-qulab/tools/__init__.py,sha256
+qulab/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+qulab/tools/connection_helper.py,sha256=-qZJcLsfueyHMihcbMp3HU3VRwz8zB0wnlosSjKp6R0,985
 qulab/visualization/__init__.py,sha256=Bkt9AK5c45d6HFLlT-f8cIppywXziHtJqhDtVxOoKKo,6317
 qulab/visualization/__main__.py,sha256=WduINFl21B-XMsi2rg2cVhIyU11hKKX3zOsjc56QLiQ,1710
 qulab/visualization/_autoplot.py,sha256=gK3m5STiUigcQdJ3NzqD5jEITkPAsTsWMnmw6nUJfvE,14629
@@ -95,9 +97,9 @@ qulab/visualization/plot_seq.py,sha256=Uo1-dB1YE9IN_A9tuaOs9ZG3S5dKDQ_l98iD2Wbxp
 qulab/visualization/qdat.py,sha256=HubXFu4nfcA7iUzghJGle1C86G6221hicLR0b-GqhKQ,5887
 qulab/visualization/rot3d.py,sha256=jGHJcqj1lEWBUV-W4GUGONGacqjrYvuFoFCwPse5h1Y,757
 qulab/visualization/widgets.py,sha256=HcYwdhDtLreJiYaZuN3LfofjJmZcLwjMfP5aasebgDo,3266
-qulab-2.7.
-qulab-2.7.
-qulab-2.7.
-qulab-2.7.
-qulab-2.7.
-qulab-2.7.
+qulab-2.7.15.dist-info/LICENSE,sha256=b4NRQ-GFVpJMT7RuExW3NwhfbrYsX7AcdB7Gudok-fs,1086
+qulab-2.7.15.dist-info/METADATA,sha256=TX9LtH1PNfK8hz2Efdf2smnPtC0l2U4YHZxB_QCs3xA,3804
+qulab-2.7.15.dist-info/WHEEL,sha256=lPxm9M09dVYWZ0wAh0Zu0ADFguuSXLRXmaW8X9Lg2rA,101
+qulab-2.7.15.dist-info/entry_points.txt,sha256=b0v1GXOwmxY-nCCsPN_rHZZvY9CtTbWqrGj8u1m8yHo,45
+qulab-2.7.15.dist-info/top_level.txt,sha256=3T886LbAsbvjonu_TDdmgxKYUn939BVTRPxPl9r4cEg,6
+qulab-2.7.15.dist-info/RECORD,,
{qulab-2.7.13.dist-info → qulab-2.7.15.dist-info}/LICENSE
File without changes
{qulab-2.7.13.dist-info → qulab-2.7.15.dist-info}/WHEEL
File without changes
{qulab-2.7.13.dist-info → qulab-2.7.15.dist-info}/entry_points.txt
File without changes
{qulab-2.7.13.dist-info → qulab-2.7.15.dist-info}/top_level.txt
File without changes