opencos-eda 0.2.32__tar.gz → 0.2.33__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {opencos_eda-0.2.32/opencos_eda.egg-info → opencos_eda-0.2.33}/PKG-INFO +1 -1
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/commands/multi.py +26 -37
- opencos_eda-0.2.33/opencos/commands/sweep.py +230 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/deps_schema.py +40 -11
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/eda_base.py +57 -8
- {opencos_eda-0.2.32 → opencos_eda-0.2.33/opencos_eda.egg-info}/PKG-INFO +1 -1
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/pyproject.toml +1 -1
- opencos_eda-0.2.32/opencos/commands/sweep.py +0 -170
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/LICENSE +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/LICENSE.spdx +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/README.md +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/__init__.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/_version.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/_waves_pkg.sv +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/commands/__init__.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/commands/build.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/commands/elab.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/commands/export.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/commands/flist.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/commands/open.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/commands/proj.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/commands/sim.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/commands/synth.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/commands/upload.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/commands/waves.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/deps_helpers.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/eda.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/eda_config.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/eda_config_defaults.yml +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/eda_config_max_verilator_waivers.yml +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/eda_config_reduced.yml +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/eda_deps_bash_completion.bash +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/eda_extract_deps_keys.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/eda_tool_helper.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/export_helper.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/export_json_convert.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/files.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/names.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/oc_cli.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/pcie.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/peakrdl_cleanup.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/seed.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tests/__init__.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tests/custom_config.yml +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tests/deps_files/command_order/DEPS.yml +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tests/deps_files/error_msgs/DEPS.yml +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tests/deps_files/iverilog_test/DEPS.yml +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tests/deps_files/no_deps_here/DEPS.yml +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tests/deps_files/non_sv_reqs/DEPS.yml +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tests/deps_files/tags_with_tools/DEPS.yml +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tests/deps_files/test_err_fatal/DEPS.yml +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tests/helpers.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tests/test_build.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tests/test_deps_helpers.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tests/test_deps_schema.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tests/test_eda.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tests/test_eda_elab.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tests/test_eda_synth.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tests/test_oc_cli.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tests/test_tools.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tools/__init__.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tools/invio.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tools/invio_helpers.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tools/invio_yosys.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tools/iverilog.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tools/modelsim_ase.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tools/questa.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tools/slang.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tools/slang_yosys.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tools/surelog.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tools/tabbycad_yosys.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tools/verilator.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tools/vivado.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/tools/yosys.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos/util.py +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos_eda.egg-info/SOURCES.txt +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos_eda.egg-info/dependency_links.txt +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos_eda.egg-info/entry_points.txt +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos_eda.egg-info/requires.txt +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/opencos_eda.egg-info/top_level.txt +0 -0
- {opencos_eda-0.2.32 → opencos_eda-0.2.33}/setup.cfg +0 -0

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: opencos-eda
-Version: 0.2.32
+Version: 0.2.33
 Summary: A simple Python package for wrapping RTL simuliatons and synthesis
 Author-email: Simon Sabato <simon@cognichip.ai>, Drew Ranck <drew@cognichip.ai>
 Project-URL: Homepage, https://github.com/cognichip/opencos

@@ -268,26 +268,23 @@ class CommandMulti(CommandParallel):
         if parsed.parallel < 1 or parsed.parallel > 256:
             self.error("Arg 'parallel' must be between 1 and 256")

-
-        if value in self.config['command_handler'].keys():
-            command = value
-            unparsed.remove(value)
-            break
+        command = self.get_command_from_unparsed_args(tokens=unparsed)

         # Need to know the tool for this command, either it was set correctly via --tool and/or
         # the command (class) will tell us.
         all_multi_tools = self.multi_which_tools(command)

+        single_cmd_unparsed = self.get_unparsed_args_on_single_command(
+            command=command, tokens=unparsed
+        )
+
         util.debug(f"Multi: {unparsed=}, looking for target_globs")
         for token in unparsed:
-            if token
-                # save all --arg, -arg, or +plusarg for the job target:
-                arg_tokens.append(token)
-            else:
+            if token in single_cmd_unparsed:
                 target_globs.append(token)
+            else:
+                arg_tokens.append(token)

-        if command == "":
-            self.error("Didn't get a command after 'multi'!")

         # now we need to expand the target list
         self.single_command = command
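
As a point of reference, here is a small standalone sketch of what the rewritten loop above does with the leftover tokens. The token values and the single_cmd_unparsed list are hypothetical stand-ins for illustration; in the package the list comes from get_unparsed_args_on_single_command (added to eda_base.py in this release).

    # Hypothetical leftover tokens after 'eda multi <command>' argparse (illustration only).
    unparsed = ['--seed=5', 'tests/oc_*', '+define+FOO=1']
    # Hypothetical result of get_unparsed_args_on_single_command: only tokens the single
    # command's parser could not claim, with +plusargs already filtered out.
    single_cmd_unparsed = ['tests/oc_*']

    target_globs, arg_tokens = [], []
    for token in unparsed:
        if token in single_cmd_unparsed:
            target_globs.append(token)   # later expanded into one job per matching target
        else:
            arg_tokens.append(token)     # forwarded verbatim on each child job's command line

    print(target_globs)   # ['tests/oc_*']
    print(arg_tokens)     # ['--seed=5', '+define+FOO=1']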

@@ -379,27 +376,23 @@ class CommandMulti(CommandParallel):
         # Do not use for CommandMulti, b/c we support list of tools.
         raise NotImplementedError

-    def multi_which_tools(self, command):
+    def multi_which_tools(self, command) -> list:
         '''returns a list, or None, of the tool that was already determined to run the command

         CommandToolsMulti will override and return its own list'''
         return [eda_base.which_tool(command, config=self.config)]

-    def _append_job_command_args(
-
-
-
-
-
-
-
-
-        if
-
-            if cfg_yml_fname:
-                command_list.append(f'--config-yml={cfg_yml_fname}')
-        if '--eda-safe' in self.config['eda_original_args']:
-            command_list.append('--eda-safe')
+    def _append_job_command_args( # pylint: disable=R0913,R0917 # too-many-arguments
+            self, command_list: list, tool: str, all_multi_tools: list, short_target: str,
+            command: str
+    ) -> None:
+
+        super().update_args_list(args=command_list, tool=tool)
+        if self.args.get('export-jsonl', False):
+            # Special case for 'multi' --export-jsonl, run reach child with --export-json
+            command_list.append('--export-json')
+        if tool and len(all_multi_tools) > 1:
+            command_list.append(f'--sub-work-dir={short_target}.{command}.{tool}')

     def append_jobs_from_targets(self, args:list):
         '''Helper method in CommandMulti to apply 'args' (list) to all self.targets,

@@ -422,17 +415,11 @@ class CommandMulti(CommandParallel):

             _, short_target = os.path.split(target) # trim path info on left

-            self._append_job_command_args(
+            self._append_job_command_args(
+                command_list=command_list, tool=tool, all_multi_tools=all_multi_tools,
+                short_target=short_target, command=command
+            )

-            if tool:
-                # tool can be None, we won't add it to the command (assumes default from config-yml)
-                command_list.append('--tool=' + tool)
-                if len(all_multi_tools) > 1:
-                    command_list += [f'--sub-work-dir={short_target}.{command}.{tool}']
-
-            if self.args.get('export-jsonl', False):
-                # Special case for 'multi' --export-jsonl, run reach child with --export-json
-                command_list += [ '--export-json']
             # if self.args['parallel']: command_list += ['--quiet']
             command_list += args # put the args prior to the target.
             command_list += [target]

@@ -540,6 +527,8 @@ class CommandMulti(CommandParallel):
             output_json_path=output_json_path)


+
+
 class CommandToolsMulti(CommandMulti):
     '''eda.py command handler for: eda tools-multi <args,targets,target-globs,...>


@@ -0,0 +1,230 @@
+'''opencos.commands.sweep - command handler for: eda sweep ...
+
+These are not intended to be overriden by child classes. They do not inherit Tool classes.
+'''
+
+import os
+import re
+
+from opencos import util
+from opencos.eda_base import CommandDesign, CommandParallel, get_eda_exec, which_tool
+
+class CommandSweep(CommandDesign, CommandParallel):
+    '''Command handler for: eda sweep ...'''
+
+    command_name = 'sweep'
+
+    def __init__(self, config:dict):
+        CommandDesign.__init__(self, config=config)
+        CommandParallel.__init__(self, config=config)
+        self.sweep_target = ''
+        self.single_command = ''
+        self.args.update({
+            'sweep': []
+        })
+        self.args_help.update({
+            "sweep": ("List append arg, where range or value expansion is peformed on the RHS:"
+                      " --sweep='--arg0=(start,last,iter=1)' "
+                      " --sweep='--arg1=[val0,val1,val2,...]' "
+                      " --sweep='+define+NAME0=(start,last,iter=1)' "
+                      " --sweep='+define+NAME1=[val0,val1,val2,...]' "
+                      " --sweep='+define+[NAME0,NAME1,NAME2,...]' ."
+                      " Note that range expansion of (1,4) will expand to values [1,2,3,4]."
+            ),
+        })
+
+
+    def check_args(self) -> None:
+        '''Returns None, checks self.args (use after args parsed)'''
+        if self.args['parallel'] < 1 or self.args['parallel'] > 256:
+            self.error(f"Arg {self.args['parallel']=} must be between 1 and 256")
+
+    def _append_sweep_args(self, arg_tokens: list) -> None:
+        '''Modifies list arg_tokens, using known top-level args'''
+        tool = which_tool(command=self.single_command, config=self.config)
+        super().update_args_list(args=arg_tokens, tool=tool)
+
+
+    def process_tokens(
+            self, tokens: list, process_all: bool = True,
+            pwd: str = os.getcwd()
+    ) -> list:
+        '''CommandSweep.process_tokens(..) is likely the entry point for: eda sweep <command> ...
+
+        - handles remaining CLI arguments (tokens list)
+        - builds sweep_axis_list to run multiple jobs for the target
+        '''
+
+        # 'sweep' is special in the way it handles tokens, due to most of them being processed by
+        # a sub instance
+        sweep_axis_list = []
+        arg_tokens = []
+
+        _, unparsed = self.run_argparser_on_list(
+            tokens=tokens,
+            parser_arg_list=[
+                'parallel',
+                'sweep',
+            ],
+            apply_parsed_args=True
+        )
+
+        self.check_args()
+
+        self.single_command = self.get_command_from_unparsed_args(tokens=unparsed)
+
+        self._append_sweep_args(arg_tokens=arg_tokens)
+
+        for sweep_arg_value in self.args['sweep']:
+            # Deal with --sweep= args we already parsed, but haven't expanded yet.
+            sweep_arg_value = util.strip_outer_quotes(sweep_arg_value)
+            sweep_axis_list_entry = self._process_sweep_arg(sweep_arg_value=sweep_arg_value)
+            if sweep_axis_list_entry:
+                sweep_axis_list.append(sweep_axis_list_entry)
+
+
+        # command, --parallel, and --sweep already processed by argparse,
+        # let's tentatively parse what the child jobs cannot consume.
+        # Whatever is leftover is either an eda target, or another unparsed token.
+        single_cmd_unparsed = self.get_unparsed_args_on_single_command(
+            command=self.single_command, tokens=unparsed
+        )
+
+        for token in unparsed:
+            if token in single_cmd_unparsed:
+
+                if self.resolve_target(token, no_recursion=True):
+                    if self.sweep_target:
+                        self.error("Sweep can only take one target, already got",
+                                   f"{self.sweep_target} now getting {token}")
+                    self.sweep_target = token
+                else:
+                    # If we don't know what to do with it, pass it to downstream
+                    arg_tokens.append(token)
+
+            else:
+                # If it wasn't in single_cmd_unparsed, then it can definitely be
+                # consumed downstream
+                arg_tokens.append(token)
+
+
+        # now we need to expand the target list
+        util.debug(f"Sweep: command: '{self.single_command}'")
+        util.debug(f"Sweep: arg_tokens: '{arg_tokens}'")
+        util.debug(f"Sweep: target: '{self.sweep_target}'")
+
+        # now create the list of jobs, support one axis
+        self.jobs = []
+
+        self.expand_sweep_axis(arg_tokens=arg_tokens, sweep_axis_list=sweep_axis_list)
+        self.run_jobs(command=self.single_command)
+        return [] # we used all of unparsed.
+
+
+    @staticmethod
+    def _process_sweep_arg(sweep_arg_value: str) -> dict:
+        '''If processed, returns a non-empty dict. Handles processing of --sweep=VALUE
+
+        Where VALUE could be one of:
+          --arg0=(1,4)
+          --arg1=[val0,val1]
+          +define+NAME0=[val0,val1]
+
+        Return value is {} or {'lhs': str, 'operator': str (+ or =), 'values': list}
+        '''
+
+        sweep_arg_value = util.strip_outer_quotes(sweep_arg_value)
+
+        util.debug(f'{sweep_arg_value=}')
+        # Try to match a sweep range expansion:
+        #   --sweep='--arg0=(1,4)'
+        #   --sweep='--arg0=(1,4,1)'
+        #   --sweep='+define+NAME0=(1,4,1)'
+        m = re.match(
+            r'(.*)(\=) \( ([\d\.]+) \, ([\d\.]+) (\, ([\d\.]+) )? \)'.replace(' ',''),
+            sweep_arg_value
+        )
+        if m:
+            lhs = m.group(1)
+            operator = m.group(2)
+            sweep_axis_values = []
+            if m.group(5):
+                rhs_range_iter = int(m.group(6))
+            else:
+                rhs_range_iter = 1
+            for v in range(int(m.group(3)), int(m.group(4)) + 1, rhs_range_iter):
+                sweep_axis_values.append(v)
+            util.debug(f"Sweep axis: {lhs} {operator} {sweep_axis_values}")
+            return {
+                'lhs': lhs,
+                'operator': operator,
+                'values': sweep_axis_values,
+            }
+
+        # Try to match a sweep list value expansion:
+        #   --sweep='--arg0=[1,2]'
+        #   --sweep='--arg1=[3,4,5,6]'
+        #   --sweep='+define+NAME=[8,9]'
+        #   --sweep='+define+[NAME0,NAME1,NAME2]'
+        #   --sweep='+define+[NAME0=1,NAME1=22,NAME2=46]'
+        m = re.match(
+            r'(.*)([\=\+]) \[ ([^\]]+) \] '.replace(' ',''),
+            sweep_arg_value
+        )
+        if m:
+            sweep_axis_values = []
+            lhs = m.group(1)
+            operator = m.group(2)
+            for v in m.group(3).split(','):
+                sweep_axis_values.append(v)
+            util.debug(f"Sweep axis: {lhs} {operator} {sweep_axis_values}")
+            return {
+                'lhs': lhs,
+                'operator': operator,
+                'values': sweep_axis_values,
+            }
+
+        util.warning(f'Ignored unprocessed expansion for --sweep={sweep_arg_value}')
+        return {}
+
+
+    def expand_sweep_axis(
+            self, arg_tokens: list, sweep_axis_list: list, sweep_string: str = ""
+    ) -> None:
+        '''Returns None, appends jobs to self.jobs to be run by CommandParallel.run_jobs(..)'''
+
+        util.debug(f"Entering expand_sweep_axis: command={self.single_command},",
+                   f"target={self.sweep_target}, arg_tokens={arg_tokens},",
+                   f"sweep_axis_list={sweep_axis_list}")
+        if not sweep_axis_list:
+            # we aren't sweeping anything, create one job
+            snapshot_name = self.sweep_target.replace('../','').replace('/','_') + sweep_string
+            eda_path = get_eda_exec('sweep')
+            self.jobs.append({
+                'name' : snapshot_name,
+                'index' : len(self.jobs),
+                'command_list' : (
+                    [eda_path, self.single_command, self.sweep_target,
+                     '--job_name', snapshot_name] + arg_tokens
+                )
+            })
+            return
+
+        sweep_axis = sweep_axis_list.pop(0)
+        lhs = sweep_axis['lhs']
+        operator = sweep_axis['operator']
+
+        lhs_trimmed = lhs.replace('-', '').replace('+', '').replace('=', '')
+
+        for v in sweep_axis['values']:
+            this_arg_tokens = arg_tokens.copy()
+            this_arg_tokens.append(f'{lhs}{operator}{v}')
+
+            v_string = f"{v}".replace('.','p')
+            this_sweep_string = sweep_string + f"_{lhs_trimmed}_{v_string}"
+
+            self.expand_sweep_axis(
+                arg_tokens=this_arg_tokens,
+                sweep_axis_list=sweep_axis_list,
+                sweep_string=this_sweep_string
+            )
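
For reference, a minimal standalone sketch of the two expansion rules the new _process_sweep_arg implements, reusing the same regexes as the file above. The --sweep values fed in below are hypothetical examples, not package defaults.

    import re

    def expand_sweep_value(sweep_arg_value: str) -> dict:
        # Range form: lhs=(start,last[,iter]) expands to every value start..last inclusive.
        m = re.match(r'(.*)(\=) \( ([\d\.]+) \, ([\d\.]+) (\, ([\d\.]+) )? \)'.replace(' ', ''),
                     sweep_arg_value)
        if m:
            step = int(m.group(6)) if m.group(5) else 1
            return {'lhs': m.group(1), 'operator': m.group(2),
                    'values': list(range(int(m.group(3)), int(m.group(4)) + 1, step))}
        # List form: lhs=[v0,v1,...] or +define+[NAME0,NAME1,...] enumerates the listed values.
        m = re.match(r'(.*)([\=\+]) \[ ([^\]]+) \] '.replace(' ', ''), sweep_arg_value)
        if m:
            return {'lhs': m.group(1), 'operator': m.group(2), 'values': m.group(3).split(',')}
        return {}

    print(expand_sweep_value('--depth=(1,4)'))
    # {'lhs': '--depth', 'operator': '=', 'values': [1, 2, 3, 4]}
    print(expand_sweep_value('+define+WIDTH=[8,16,32]'))
    # {'lhs': '+define+WIDTH', 'operator': '=', 'values': ['8', '16', '32']}

Each axis returned this way is then applied recursively by expand_sweep_axis, which appends f'{lhs}{operator}{value}' to the child job's arguments and creates one job per combination of swept values.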

@@ -125,7 +125,7 @@ import sys

 from schema import Schema, Or, Optional, SchemaError

-from opencos
+from opencos import util, deps_helpers

 # Because we deal with YAML, where a Table Key with dangling/empty value is allowed
 # and we have things like SystemVerilog defines where there's a Table key with no Value,

@@ -299,8 +299,10 @@ FILE_SIMPLIFIED = Schema(



-def check(data: dict, schema_obj=FILE):
+def check(data: dict, schema_obj=FILE) -> (bool, str):
     '''Returns (bool, str) for checking dict against FILE schema'''
+
+
     try:
         schema_obj.validate(data)
         return True, None

@@ -310,6 +312,37 @@ def check(data: dict, schema_obj=FILE):
         return False, str(e)


+def deps_markup_safe_load(deps_filepath: str) -> (bool, dict):
+    '''Returns tuple (bool False if took errors, dict of markp data)'''
+    current_errors = util.args['errors']
+    data = deps_helpers.deps_markup_safe_load(deps_filepath)
+    if util.args['errors'] > current_errors:
+        return False, data
+    return True, data
+
+
+def check_file(filepath: str, schema_obj=FILE) -> (bool, str, str):
+    '''Returns tuple (bool pass/fail, str error retdata, str deps_filepath)'''
+
+    deps_filepath = filepath
+    if os.path.isdir(filepath):
+        deps_filepath = deps_helpers.get_deps_markup_file(base_path=filepath)
+
+    # get deps file
+    if not os.path.exists(deps_filepath):
+        print(f'ERROR: internal error(s) no DEPS.[yml|..] found in {filepath=}')
+        return False, '', deps_filepath
+
+    passes, data = deps_markup_safe_load(deps_filepath)
+    if not passes:
+        print(f'ERROR: internal error(s) from deps_markup_safe_load({deps_filepath=})')
+        return False, '', deps_filepath
+
+    passes, retdata = check(data, schema_obj)
+    return passes, retdata, deps_filepath
+
+
+
 def check_files(files, schema_obj=FILE) -> bool:
     '''Returns True if files lint cleanly in the FILE schema.'''

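
For reference, a minimal sketch of the check()/check_file() validation pattern using the same schema-library calls the module imports (Schema, Or, Optional, SchemaError). The toy DEPS layout below is a stand-in for illustration, not the package's real FILE schema.

    from schema import Schema, Or, Optional, SchemaError

    # Toy stand-in schema: target names map to either nothing or a {'deps': [...]} table.
    TOY_FILE = Schema({str: Or(None, {Optional('deps'): [str]})})

    def toy_check(data: dict):
        '''Returns (bool, str-or-None), mirroring deps_schema.check().'''
        try:
            TOY_FILE.validate(data)
            return True, None
        except SchemaError as e:
            return False, str(e)

    print(toy_check({'my_target': {'deps': ['other_target']}}))  # (True, None)
    print(toy_check({'my_target': {'deps': 'not-a-list'}}))      # (False, '<schema error text>')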

@@ -319,20 +352,16 @@ def check_files(files, schema_obj=FILE) -> bool:
     passes_list = []
     error_files = []
     for filepath in files:
-
-
-        # get deps file
-        deps_filepath = get_deps_markup_file(base_path=filepath)
-        assert os.path.exists(deps_filepath)
-        data = deps_markup_safe_load(deps_filepath)
-        passes, retdata = check(data, schema_obj)
+
+        passes, retdata, deps_filepath = check_file(filepath, schema_obj)
         passes_list.append(passes)
         if passes:
             print(f'{deps_filepath}: [PASS]')
         if not passes:
             print(f'ERROR: {deps_filepath}:')
-
-
+            if retdata:
+                print('-- retdata --')
+                print(retdata)
             print(f' previous error on: {deps_filepath}\n')
             error_files.append(deps_filepath)


@@ -398,12 +398,6 @@ class Command:
         else:
             assert False, f'{key=} {value=} how do we do argparse for this type of value?'

-        # TODO(drew): it might be nice to support positional args here as a list
-        # self.target_args (files/targets/patterns), something like:
-        # parser.add_argument(
-        #     'targets', nargs='+', help='positional arg for targets/files/pattern'
-        # )
-
         return parser



@@ -425,7 +419,7 @@ class Command:
             parsed, unparsed = parser.parse_known_args(tokens + [''])
             unparsed = list(filter(None, unparsed))
         except argparse.ArgumentError:
-            self.error(f'problem attempting to parse_known_args for {tokens=}')
+            self.error(f'problem {command_name=} attempting to parse_known_args for {tokens=}')

         parsed_as_dict = vars(parsed)


@@ -506,7 +500,8 @@ class Command:
                 break

         if not ret and error_if_no_command:
-            self.error(f"Looking for a valid eda <command>
+            self.error(f"Looking for a valid eda {self.command_name} <command>",
+                       f"but didn't find one in {tokens=}")
         return ret



@@ -1518,3 +1513,57 @@ class CommandParallel(Command):
         if self.status == 0:
             self.status = 0 if len(self.jobs_status) == 0 else max(self.jobs_status)
         util.fancy_stop()
+
+
+    def update_args_list(self, args: list, tool: str) -> None:
+        '''Modfies list args, using allow-listed known top-level args:
+
+        --config-yml
+        --eda-safe
+        --tool
+
+        Many args were consumed by eda before CommandParallel saw them
+        (for commands like 'multi' or 'sweep'). Some are in self.config.
+        We need to apply those eda level args to each single exec-command
+        '''
+        if any(a.startswith('--config-yml') for a in self.config['eda_original_args']):
+            cfg_yml_fname = self.config.get('config-yml', None)
+            if cfg_yml_fname:
+                args.append(f'--config-yml={cfg_yml_fname}')
+        if '--eda-safe' in self.config['eda_original_args']:
+            args.append('--eda-safe')
+        if tool:
+            # tool can be None, if so we won't add it to the command (assumes default from
+            # config-yml auto load order)
+            args.append('--tool=' + tool)
+
+
+    def get_unparsed_args_on_single_command(self, command: str, tokens: list) -> list:
+        '''Returns a list of args that the single (non-multi) command cannot parse
+
+        This will error on bad --args or -arg, such as:
+            eda multi sim --seeeed=1
+            is not a valid arg in CommandSim
+
+        +arg=value, +arg+value will not be included in the return list, those are
+        intended to be consumed by the single/job command downstream (anything starting
+        with '+')
+
+        Used by CommandMulti and CommandSweep.
+        '''
+        single_cmd_handler = self.config['command_handler'][command](config=self.config)
+        single_cmd_parsed, single_cmd_unparsed = single_cmd_handler.run_argparser_on_list(
+            tokens=tokens.copy(),
+            apply_parsed_args=False,
+        )
+        util.debug(f'{self.command_name}: {single_cmd_unparsed=}')
+
+        # There should not be any single_cmd_unparsed args starting with '-'
+        bad_remaining_args = [x for x in single_cmd_unparsed if x.startswith('-')]
+        if bad_remaining_args:
+            self.error(f'for {self.command_name} {command=} the following args are unknown',
+                       f'{bad_remaining_args}')
+
+        # Remove unparsed args starting with '+', since those are commonly sent downstream to
+        # single job (example, CommandSim plusargs).
+        return [x for x in single_cmd_unparsed if not x.startswith('+')]
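
For reference, a standalone sketch of the allow-listed top-level args that update_args_list re-applies to each child job's command line. The config dict below is hypothetical; in the package the method reads self.config, which eda's own argument parsing populated.

    def rebuild_top_level_args(config: dict, tool: str) -> list:
        '''Mirrors the allow-list logic of CommandParallel.update_args_list (sketch only).'''
        args = []
        if any(a.startswith('--config-yml') for a in config['eda_original_args']):
            cfg_yml_fname = config.get('config-yml', None)
            if cfg_yml_fname:
                args.append(f'--config-yml={cfg_yml_fname}')
        if '--eda-safe' in config['eda_original_args']:
            args.append('--eda-safe')
        if tool:  # tool may be None, in which case the child falls back to the config-yml default
            args.append('--tool=' + tool)
        return args

    cfg = {'eda_original_args': ['--config-yml=my.yml', '--eda-safe'], 'config-yml': 'my.yml'}
    print(rebuild_top_level_args(cfg, tool='verilator'))
    # ['--config-yml=my.yml', '--eda-safe', '--tool=verilator']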

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: opencos-eda
-Version: 0.2.32
+Version: 0.2.33
 Summary: A simple Python package for wrapping RTL simuliatons and synthesis
 Author-email: Simon Sabato <simon@cognichip.ai>, Drew Ranck <drew@cognichip.ai>
 Project-URL: Homepage, https://github.com/cognichip/opencos

@@ -1,170 +0,0 @@
-'''opencos.commands.sweep - command handler for: eda sweep ...
-
-These are not intended to be overriden by child classes. They do not inherit Tool classes.
-'''
-
-import os
-import re
-
-from opencos import util
-from opencos.eda_base import CommandDesign, CommandParallel, get_eda_exec
-
-class CommandSweep(CommandDesign, CommandParallel):
-    '''Command handler for: eda sweep ...'''
-
-    command_name = 'sweep'
-
-    def __init__(self, config:dict):
-        CommandDesign.__init__(self, config=config)
-        CommandParallel.__init__(self, config=config)
-        self.sweep_target = ''
-        self.single_command = ''
-
-
-    def check_args(self) -> None:
-        '''Returns None, checks self.args (use after args parsed)'''
-        if self.args['parallel'] < 1 or self.args['parallel'] > 256:
-            self.error(f"Arg {self.args['parallel']=} must be between 1 and 256")
-
-    def _append_sweep_args(self, arg_tokens: list) -> None:
-        '''Modifies list arg_tokens, bit of a hack'''
-
-        # TODO(drew): similar clunky behavior with self.config['eda_orignal_args'] that
-        # CommandMulti has we need to pass global args to each sweep job, which we can
-        # do via arg_tokens (list)
-        # TODO(drew): fix this, for now it works but --color and other args do not work.
-        if any(a.startswith('--config-yml') for a in self.config['eda_original_args']):
-            cfg_yml_fname = self.config.get('config-yml', None)
-            if cfg_yml_fname:
-                arg_tokens.append(f'--config-yml={cfg_yml_fname}')
-        if '--eda-safe' in self.config['eda_original_args']:
-            arg_tokens.append('--eda-safe')
-        if any(a.startswith('--tool') for a in self.config['eda_original_args']):
-            tool = self.config.get('tool', None)
-            if tool:
-                arg_tokens.append('--tool=' + tool)
-
-
-    def process_tokens(
-            self, tokens: list, process_all: bool = True,
-            pwd: str = os.getcwd()
-    ) -> list:
-        '''CommandSweep.process_tokens(..) is likely the entry point for: eda sweep <command> ...
-
-        - handles remaining CLI arguments (tokens list)
-        - builds sweep_axis_list to run multiple jobs for the target
-        '''
-
-        # multi is special in the way it handles tokens, due to most of them being processed by
-        # a sub instance
-        sweep_axis_list = []
-        arg_tokens = []
-
-        _, unparsed = self.run_argparser_on_list(
-            tokens=tokens,
-            parser_arg_list=[
-                'parallel',
-            ],
-            apply_parsed_args=True
-        )
-
-        self.check_args()
-
-        tokens = unparsed
-
-        self.single_command = self.get_command_from_unparsed_args(tokens=tokens)
-
-        self._append_sweep_args(arg_tokens=arg_tokens)
-
-        while tokens:
-            token = tokens.pop(0)
-
-            # command and --parallel already processed by argparse
-
-            m = re.match(r'(\S+)\=\(([\d\.]+)\,([\d\.]+)(,([\d\.]+))?\)', token)
-            if m:
-                # Form --arg=CUST "CUST=(range-start,range-stop,range-step)"
-                sweep_axis = { 'key' : m.group(1),
-                               'values' : [ ] }
-                for v in range(
-                    int(m.group(2)),
-                    int(m.group(3)) + 1,
-                    int(m.group(5)) if m.group(4) else 1
-                ):
-                    sweep_axis['values'].append(v)
-                util.debug(f"Sweep axis: {sweep_axis['key']} : {sweep_axis['values']}")
-                sweep_axis_list.append(sweep_axis)
-                continue
-            m = re.match(r'(\S+)\=\[([^\]]+)\]', token)
-            if m:
-                # Form --arg=CUST "CUST=[val0,val1,val2,...]"
-                sweep_axis = { 'key' : m.group(1), 'values' : [] }
-                for v in m.group(2).split(','):
-                    v = v.replace(' ','')
-                    sweep_axis['values'].append(v)
-                util.debug(f"Sweep axis: {sweep_axis['key']} : {sweep_axis['values']}")
-                sweep_axis_list.append(sweep_axis)
-                continue
-            if token.startswith('--') or token.startswith('+'):
-                arg_tokens.append(token)
-                continue
-            if self.resolve_target(token, no_recursion=True):
-                if self.sweep_target != "":
-                    self.error(f"Sweep can only take one target, already got {self.sweep_target},"
-                               f"now getting {token}")
-                self.sweep_target = token
-                continue
-            self.error(f"Sweep doesn't know what to do with arg '{token}'")
-        if self.single_command == "":
-            self.error("Didn't get a command after 'sweep'!")
-
-        # now we need to expand the target list
-        util.debug(f"Sweep: command: '{self.single_command}'")
-        util.debug(f"Sweep: arg_tokens: '{arg_tokens}'")
-        util.debug(f"Sweep: target: '{self.sweep_target}'")
-
-        # now create the list of jobs, support one axis
-        self.jobs = []
-
-        self.expand_sweep_axis(arg_tokens=arg_tokens, sweep_axis_list=sweep_axis_list)
-        self.run_jobs(command=self.single_command)
-        return tokens
-
-
-    def expand_sweep_axis(
-            self, arg_tokens: list, sweep_axis_list: list, sweep_string: str = ""
-    ) -> None:
-        '''Returns None, appends jobs to self.jobs to be run by CommandParallel.run_jobs(..)'''
-
-        command = self.single_command
-        target = self.sweep_target
-
-        util.debug(f"Entering expand_sweep_axis: command={command}, target={target},",
-                   f"arg_tokens={arg_tokens}, sweep_axis_list={sweep_axis_list}")
-        if len(sweep_axis_list) == 0:
-            # we aren't sweeping anything, create one job
-            snapshot_name = target.replace('../','').replace('/','_') + sweep_string
-            eda_path = get_eda_exec('sweep')
-            self.jobs.append({
-                'name' : snapshot_name,
-                'index' : len(self.jobs),
-                'command_list' : (
-                    [eda_path, command, target, '--job_name', snapshot_name] + arg_tokens
-                )
-            })
-            return
-        sweep_axis = sweep_axis_list[0]
-        for v in sweep_axis['values']:
-            this_arg_tokens = []
-            for a in arg_tokens:
-                a_swept = re.sub(rf'\b{sweep_axis["key"]}\b', f"{v}", a)
-                this_arg_tokens.append(a_swept)
-            next_sweep_axis_list = []
-            if len(sweep_axis_list)>1:
-                next_sweep_axis_list = sweep_axis_list[1:]
-            v_string = f"{v}".replace('.','p')
-            self.expand_sweep_axis(
-                arg_tokens=this_arg_tokens,
-                sweep_axis_list=next_sweep_axis_list,
-                sweep_string = sweep_string + f"_{sweep_axis['key']}_{v_string}"
-            )

The remaining files listed above with +0 -0 are unchanged between 0.2.32 and 0.2.33.