opencos-eda 0.2.48__py3-none-any.whl → 0.2.49__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. opencos/__init__.py +4 -2
  2. opencos/_version.py +10 -7
  3. opencos/commands/flist.py +8 -7
  4. opencos/commands/multi.py +13 -14
  5. opencos/commands/sweep.py +3 -2
  6. opencos/deps/__init__.py +0 -0
  7. opencos/deps/defaults.py +69 -0
  8. opencos/deps/deps_commands.py +419 -0
  9. opencos/deps/deps_file.py +326 -0
  10. opencos/deps/deps_processor.py +670 -0
  11. opencos/deps_schema.py +7 -8
  12. opencos/eda.py +84 -64
  13. opencos/eda_base.py +572 -316
  14. opencos/eda_config.py +80 -14
  15. opencos/eda_extract_targets.py +22 -14
  16. opencos/eda_tool_helper.py +33 -7
  17. opencos/export_helper.py +166 -86
  18. opencos/export_json_convert.py +31 -23
  19. opencos/files.py +2 -1
  20. opencos/hw/__init__.py +0 -0
  21. opencos/{oc_cli.py → hw/oc_cli.py} +9 -4
  22. opencos/names.py +0 -4
  23. opencos/peakrdl_cleanup.py +13 -7
  24. opencos/seed.py +19 -11
  25. opencos/tests/helpers.py +3 -2
  26. opencos/tests/test_deps_helpers.py +35 -32
  27. opencos/tests/test_eda.py +36 -29
  28. opencos/tests/test_eda_elab.py +5 -3
  29. opencos/tests/test_eda_synth.py +1 -1
  30. opencos/tests/test_oc_cli.py +1 -1
  31. opencos/tests/test_tools.py +3 -2
  32. opencos/tools/iverilog.py +2 -2
  33. opencos/tools/modelsim_ase.py +2 -2
  34. opencos/tools/riviera.py +1 -1
  35. opencos/tools/slang.py +1 -1
  36. opencos/tools/surelog.py +1 -1
  37. opencos/tools/verilator.py +1 -1
  38. opencos/tools/vivado.py +1 -1
  39. opencos/tools/yosys.py +4 -3
  40. opencos/util.py +374 -468
  41. opencos/utils/__init__.py +0 -0
  42. opencos/utils/markup_helpers.py +98 -0
  43. opencos/utils/str_helpers.py +111 -0
  44. opencos/utils/subprocess_helpers.py +108 -0
  45. {opencos_eda-0.2.48.dist-info → opencos_eda-0.2.49.dist-info}/METADATA +1 -1
  46. opencos_eda-0.2.49.dist-info/RECORD +88 -0
  47. {opencos_eda-0.2.48.dist-info → opencos_eda-0.2.49.dist-info}/entry_points.txt +1 -1
  48. opencos/deps_helpers.py +0 -1346
  49. opencos_eda-0.2.48.dist-info/RECORD +0 -79
  50. /opencos/{pcie.py → hw/pcie.py} +0 -0
  51. {opencos_eda-0.2.48.dist-info → opencos_eda-0.2.49.dist-info}/WHEEL +0 -0
  52. {opencos_eda-0.2.48.dist-info → opencos_eda-0.2.49.dist-info}/licenses/LICENSE +0 -0
  53. {opencos_eda-0.2.48.dist-info → opencos_eda-0.2.49.dist-info}/licenses/LICENSE.spdx +0 -0
  54. {opencos_eda-0.2.48.dist-info → opencos_eda-0.2.49.dist-info}/top_level.txt +0 -0
opencos/deps_helpers.py DELETED
@@ -1,1346 +0,0 @@
1
-
2
- import fnmatch
3
- import os
4
- from pathlib import Path
5
- import sys
6
- import re
7
- import shutil
8
- import toml, json
9
-
10
- from opencos import files
11
- from opencos import eda_config
12
- from opencos.util import debug, info, warning, error, ShellCommandList, \
13
- yaml_safe_load, toml_load_only_root_line_numbers, \
14
- subprocess_run_background
15
-
16
-
17
- class Defaults:
18
- deps_file_exts = set([
19
- '.yml', '.yaml', '.toml', '.json',
20
- # Treat no extension DEPS as YAML.
21
- ''
22
- ])
23
- root_table_keys_not_targets = set([
24
- "DEFAULTS",
25
- "METADATA",
26
- ])
27
- known_eda_commands = set([
28
- "sim",
29
- "elab",
30
- "synth",
31
- "flist",
32
- "proj",
33
- "multi",
34
- "tools-multi",
35
- "sweep",
36
- "build",
37
- "waves",
38
- "upload",
39
- "open",
40
- "export",
41
- ])
42
- supported_target_table_keys = set([
43
- 'args',
44
- 'defines',
45
- 'incdirs',
46
- 'top',
47
- 'deps',
48
- 'reqs',
49
- 'multi',
50
- 'tags',
51
- 'commands'] + list(known_eda_commands))
52
- supported_dep_keys_by_type = {
53
- dict: set(['commands']),
54
- str: set(['*']),
55
- }
56
- supported_tag_keys = set([
57
- 'with-tools',
58
- 'with-args',
59
- 'args',
60
- 'deps',
61
- 'reqs',
62
- 'defines',
63
- 'incdirs',
64
- 'replace-config-tools',
65
- 'additive-config-tools',
66
- ])
67
- supported_command_keys = set([
68
- 'shell',
69
- 'work-dir-add-srcs', 'work-dir-add-sources',
70
- 'peakrdl',
71
- 'run-from-work-dir', # default True
72
- 'filepath-subst-target-dir', # default True
73
- 'dirpath-subst-target-dir', # default False
74
- 'var-subst-args',
75
- 'var-subst-os-env',
76
- 'tee',
77
- ])
78
-
79
-
80
- def get_deps_markup_file(base_path:str):
81
- '''Returns one of DEPS.yml, DEPS.yaml, DEPS.toml, DEPS.json'''
82
- for suffix in Defaults.deps_file_exts:
83
- deps_file = os.path.join(base_path, 'DEPS' + suffix)
84
- if os.path.isfile(deps_file):
85
- return deps_file
86
- return None
87
-
88
-
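As a reference point, a standalone sketch of the lookup order above, using only the standard library; note that the real Defaults.deps_file_exts is a set (unordered), while this sketch uses a fixed tuple, and the directory name is made up:

    import os

    def find_deps_file(base_path):
        # Mirror of get_deps_markup_file: return the first DEPS.* markup file found,
        # treating a bare 'DEPS' (no extension) as YAML.
        for suffix in ('.yml', '.yaml', '.toml', '.json', ''):
            candidate = os.path.join(base_path, 'DEPS' + suffix)
            if os.path.isfile(candidate):
                return candidate
        return None

    print(find_deps_file('some_block_dir'))  # path to DEPS.* if present, else None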
89
- def deps_markup_safe_load(filepath:str, assert_return_types:list=[type(None), dict],
90
- only_root_line_numbers=False):
91
- '''Returns dict or None from filepath (str), errors if return type not in assert_return_types.
92
-
93
- (assert_return_types can be empty list to avoid check.)
94
-
95
- only_root_line_numbers -- if True, will return a dict of {key: line number (int)} for
96
- all the root level keys. Used for debugging DEPS.yml in
97
- eda.CommandDesign.resolve_target_core
98
- '''
99
- data = None
100
- _, file_ext = os.path.splitext(filepath)
101
- if file_ext in ['', '.yml', '.yaml']:
102
- # treat DEPS as YAML.
103
- data = yaml_safe_load(filepath=filepath, only_root_line_numbers=only_root_line_numbers)
104
- elif file_ext == '.toml':
105
- if only_root_line_numbers:
106
- data = toml_load_only_root_line_numbers(filepath)
107
- else:
108
- data = toml.load(filepath)
109
- elif file_ext == '.json':
110
- if only_root_line_numbers:
111
- data = None
112
- else:
113
- with open(filepath) as f:
114
- data = json.load(f)
115
-
116
- if len(assert_return_types) > 0 and type(data) not in assert_return_types:
117
- error(f'deps_markup_safe_load: {filepath=} loaded type {type(data)=} is not in',
118
- f'{assert_return_types=}')
119
-
120
- return data
121
-
122
-
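A minimal sketch of the return-type contract above, restricted to JSON so it runs with only the standard library; the target name and file contents are made up:

    import json, os, tempfile

    def load_deps_json(filepath, assert_return_types=(type(None), dict)):
        # Same idea as deps_markup_safe_load: the parsed root must be a mapping of
        # target names to target tables (or None), otherwise it is an error.
        with open(filepath) as f:
            data = json.load(f)
        if assert_return_types and not isinstance(data, assert_return_types):
            raise TypeError(f'{filepath}: root is {type(data)}, expected {assert_return_types}')
        return data

    with tempfile.TemporaryDirectory() as tmp:
        path = os.path.join(tmp, 'DEPS.json')
        with open(path, 'w') as f:
            json.dump({'my_target': {'deps': ['foo.sv', 'bar.sv']}}, f)
        print(load_deps_json(path))  # {'my_target': {'deps': ['foo.sv', 'bar.sv']}}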
123
- class DepsFile:
124
- '''A Container for a DEPS.yml or other Markup file
125
-
126
- References the original CommandDesign object and its cache
127
-
128
- Used for looking up a target, getting its line number in the original file, and
129
- merging contents with a DEFAULTS key if present.
130
- '''
131
-
132
- def __init__(self, command_design_ref:object, target_path:str, cache:dict={}):
133
- self.target_path = target_path
134
- self.deps_file = get_deps_markup_file(target_path)
135
- self.rel_deps_file = self.deps_file
136
-
137
- if not self.deps_file:
138
- # didn't find it, file doesn't exist.
139
- self.data = {}
140
- self.line_numbers = {}
141
- elif self.deps_file in cache:
142
- self.data = cache[self.deps_file].get('data', {})
143
- self.line_numbers = cache[self.deps_file].get('line_numbers', {})
144
- else:
145
- self.data = deps_markup_safe_load(self.deps_file)
146
- self.line_numbers = deps_markup_safe_load(self.deps_file, only_root_line_numbers=True)
147
- cache[self.deps_file] = {
148
- 'data': self.data,
149
- 'line_numbers': self.line_numbers,
150
- }
151
-
152
- if self.deps_file:
153
- deps_path, deps_leaf = os.path.split(self.deps_file)
154
- if deps_path and os.path.exists(deps_path):
155
- self.rel_deps_file = os.path.join(os.path.relpath(deps_path), deps_leaf)
156
-
157
- self.error = command_design_ref.error # method.
158
-
159
- def found(self):
160
- return bool(self.deps_file) and bool(self.data)
161
-
162
- def get_approx_line_number_str(self, target) -> str:
163
- _, target_node = os.path.split(target)
164
- if not self.line_numbers:
165
- return ''
166
- else:
167
- return f'line={self.line_numbers.get(target_node, "")}'
168
-
169
- def gen_caller_info(self, target) -> str:
170
- return '::'.join([
171
- self.rel_deps_file,
172
- target,
173
- self.get_approx_line_number_str(target)
174
- ])
175
-
176
- def lookup(self, target_node:str, caller_info:str) -> bool:
177
- if target_node not in self.data:
178
- found_target = False
179
- # For error printing, prefer relative paths:
180
- t_path = os.path.relpath(self.target_path)
181
- t_node = target_node
182
- t_full = os.path.join(t_path, t_node)
183
- if not caller_info:
184
- # If we don't have caller_info, likely came from command line.
185
- if '.' in target_node:
186
- # Likely a filename:
187
- self.error(f'Trying to resolve command-line target={t_full} (file?):',
188
- f'File={t_node} not found in directory={t_path}')
189
- else:
190
- self.error(f'Trying to resolve command-line target={t_full}:',
191
- f'was not found in deps_file={self.rel_deps_file},',
192
- f'possible targets in deps file = {list(self.data.keys())}')
193
- else:
194
- # If we have caller_info, then this was a recursive call from another
195
- # DEPS file. It should already have the useful error messaging:
196
-
197
- if '.' in target_node:
198
- # Likely a filename:
199
- self.error(f'Trying to resolve target={t_full} (file?):',
200
- f'called from {caller_info},',
201
- f'File={t_node} not found in directory={t_path}')
202
- else:
203
- self.error(f'Trying to resolve target={t_full}:',
204
- f'called from {caller_info},',
205
- f'Target not found in deps_file={self.rel_deps_file}')
206
- else:
207
- debug(f'Found {target_node=} in deps_file={self.rel_deps_file}')
208
- found_target = True
209
-
210
- return found_target
211
-
212
-
213
- def get_entry(self, target_node):
214
- # Start with the defaults
215
- entry = self.data.get('DEFAULTS', dict()).copy()
216
-
217
- # Lookup the entry from the DEPS dict:
218
- entry_raw = self.data[target_node]
219
-
220
- entry_sanitized = deps_list_target_sanitize(
221
- entry_raw, target_node=target_node, deps_file=self.deps_file
222
- )
223
-
224
- # Finally update entry (defaults) with what we looked up:
225
- entry.update(entry_sanitized)
226
-
227
- return entry
228
-
229
-
230
-
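The DEFAULTS handling in get_entry() reduces to a copy-then-update of two dicts; a minimal sketch with a made-up DEPS mapping (in the real code the raw entry is first run through deps_list_target_sanitize so bare strings and lists become {'deps': [...]}):

    deps_data = {
        'DEFAULTS': {'incdirs': ['.'], 'args': ['--waves']},
        'my_tb':    {'deps': ['my_tb.sv'], 'args': ['--seed=1']},
    }

    def get_entry(data, target_node):
        entry = data.get('DEFAULTS', {}).copy()  # start from file-wide defaults
        entry.update(data[target_node])          # the target's own keys win on conflict
        return entry

    print(get_entry(deps_data, 'my_tb'))
    # -> {'incdirs': ['.'], 'args': ['--seed=1'], 'deps': ['my_tb.sv']}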
231
- # Conditional imports, where someone may not have 'peakrdl' package installed.
232
- # attempt to gracefully handle this instead of dying on a missing module/package:
233
- try:
234
- import peakrdl
235
- except:
236
- pass
237
-
238
- thispath = os.path.dirname(__file__)
239
- peakrdl_cleanup_py = os.path.join(thispath, 'peakrdl_cleanup.py')
240
-
241
-
242
- def deps_data_get_all_targets(data:dict) -> list:
243
- return [x for x in data.keys() if x not in Defaults.root_table_keys_not_targets]
244
-
245
-
246
- def fnmatch_or_re(pattern: str, string: str) -> bool:
247
- '''Returns True if pattern/string matches in re.match or fnmatch'''
248
- matches = []
249
- # fnmatch check, aka: ./*test
250
- matches.append(
251
- bool(fnmatch.fnmatch(name=string, pat=pattern))
252
- )
253
- # regex check, aka: ./.*test
254
- try:
255
- matches.append(
256
- bool(re.match(pattern=pattern, string=string))
257
- )
258
- except: # pylint: disable=bare-except
259
- # could have been an illegal/unsupported regex, so don't match.
260
- pass
261
- return any(matches)
262
-
263
-
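A quick illustration of the glob-or-regex matching above, standard library only; the patterns and target name are made up:

    import fnmatch
    import re

    def glob_or_regex_match(pattern, string):
        ok = fnmatch.fnmatch(string, pattern)           # shell-style glob, e.g. '*test'
        try:
            ok = ok or bool(re.match(pattern, string))  # regex anchored at the start, e.g. '.*test'
        except re.error:
            pass                                        # invalid regex: keep the glob result
        return ok

    print(glob_or_regex_match('*_test', 'oc_fifo_test'))   # True (glob)
    print(glob_or_regex_match('.*_test', 'oc_fifo_test'))  # True (regex)
    print(glob_or_regex_match('oc_[', 'oc_fifo_test'))     # False (bad regex ignored)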
264
- def get_all_targets(
265
- dirs: list = [os.getcwd()],
266
- base_path: str = os.getcwd(),
267
- filter_str: str = '',
268
- filter_using_multi: str = '',
269
- error_on_empty_return: bool = True,
270
- lstrip_path: bool = True
271
- ) -> list:
272
- '''Returns a list of [dir/target, ... ] using relpath from base_path
273
-
274
- If using filter_using_multi (str), dirs (list) is not required. Example:
275
- filter_using_multi='sim --tool vivado path/to/*test'
276
- and filter_str is applied to all resulting targets.
277
-
278
- If not using filter_using_multi, dirs is required, and filter_str is applied
279
- to all targets from dirs.
280
- '''
281
-
282
- _path_lprefix = str(Path('.')) + os.path.sep
283
-
284
- if filter_using_multi:
285
- targets = []
286
- orig_dir = os.path.abspath(os.getcwd())
287
- os.chdir(base_path)
288
- cmd_str = 'eda multi --quiet --print-targets ' + filter_using_multi
289
- stdout, stderr, rc = subprocess_run_background(
290
- work_dir='.', command_list=cmd_str.split()
291
- )
292
- os.chdir(orig_dir)
293
- if rc != 0:
294
- error(f'get_all_targets: {base_path=} {filter_using_multi=} {cmd_str=} returned:',
295
- f'{rc=}, {stdout=}')
296
-
297
- multi_filtered_targets = stdout.split()
298
- if not filter_str:
299
- targets = multi_filtered_targets
300
- else:
301
- targets = set()
302
- for target in multi_filtered_targets:
303
- this_dir, leaf_target = os.path.split(target)
304
- if fnmatch_or_re(pattern=filter_str,
305
- string=leaf_target):
306
- t = os.path.join(os.path.relpath(this_dir, start=base_path), leaf_target)
307
- if lstrip_path:
308
- t = t.removeprefix(_path_lprefix)
309
- targets.add(t)
310
- targets = list(targets)
311
- if not targets and error_on_empty_return:
312
- error(f'get_all_targets: {base_path=} {filter_using_multi=} returned no targets')
313
- return targets
314
-
315
- targets = set()
316
- for this_dir in dirs:
317
- this_dir = os.path.join(base_path, this_dir)
318
- deps_file = get_deps_markup_file(this_dir)
319
- if not deps_file:
320
- continue
321
- data = deps_markup_safe_load(filepath=deps_file)
322
-
323
- for leaf_target in deps_data_get_all_targets(data):
324
- if not filter_str or fnmatch_or_re(pattern=filter_str,
325
- string=leaf_target):
326
- t = os.path.join(os.path.relpath(this_dir, start=base_path), leaf_target)
327
- if lstrip_path:
328
- t = t.removeprefix(_path_lprefix)
329
- targets.add(t)
330
-
331
- if not targets and error_on_empty_return:
332
- error(f'get_all_targets: {base_path=} {dirs=} {filter_str=} returned no targets')
333
- return list(targets)
334
-
335
-
336
- def dep_str2list(value) -> list:
337
- if value is None:
338
- return []
339
- if type(value) is str:
340
- return re.split('\n+| +', value) # convert \n separated to list, also split on spaces
341
- else:
342
- return value
343
-
344
-
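The string-to-list normalization above can be exercised in isolation; a small sketch with made-up dependency names (blank entries from adjacent separators are stripped later, in deps_target_get_deps_list):

    import re

    def to_dep_list(value):
        # Same behavior as dep_str2list: None -> [], str -> split on newlines and
        # spaces, anything else passes through unchanged.
        if value is None:
            return []
        if isinstance(value, str):
            return re.split(r'\n+| +', value)
        return value

    print(to_dep_list('lib/oc_fifo.sv\noc_uart_top\n  +define+FOO'))
    # -> ['lib/oc_fifo.sv', 'oc_uart_top', '', '+define+FOO']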
345
- def deps_target_get_deps_list(entry, default_key:str='deps', target_node:str='',
346
- deps_file:str='', entry_must_have_default_key:bool=False) -> list:
347
- # For convenience, if key 'deps' is not in an entry, and entry is a list or string, then
348
- # assume it's a list of deps
349
- debug(f'{deps_file=} {target_node=}: {entry=} {default_key=}')
350
- deps = list()
351
- if type(entry) is str:
352
- deps = dep_str2list(entry)
353
- elif type(entry) is list:
354
- deps = entry # already a list
355
- elif type(entry) is dict:
356
-
357
- if entry_must_have_default_key:
358
- assert default_key in entry, f'{target_node=} in {deps_file=} does not have a key for {default_key=} in {entry=}'
359
- deps = entry.get(default_key, list())
360
- deps = dep_str2list(deps)
361
-
362
- # Strip commented out list entries, strip blank strings, preserve non-strings
363
- ret = list()
364
- for dep in deps:
365
- if type(dep) is str:
366
- if dep.startswith('#') or dep == '':
367
- continue
368
- ret.append(dep)
369
- return ret
370
-
371
-
372
- def deps_list_target_sanitize(entry, default_key:str='deps', target_node:str='', deps_file:str='') -> dict:
373
- # Since we support target entries that can be dict(), list(), or str(), sanitize
374
- # them so they are a dict(), with a key named 'deps' that has a list of deps.
375
- if type(entry) is dict:
376
- return entry
377
-
378
- if type(entry) is str:
379
- mylist = dep_str2list(entry) # convert str to list()
380
- return {default_key: mylist}
381
-
382
- if type(entry) is list:
383
- # it's already a list
384
- return {default_key: entry}
385
-
386
- assert False, f"Can't convert to list {entry=} {default_key=} {target_node=} {deps_file=}"
387
-
388
-
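The sanitizer above normalizes the three accepted target shapes into one dict form; a compact sketch with made-up target contents:

    import re

    def sanitize_target(entry, default_key='deps'):
        # dict targets pass through; str and list targets become {'deps': [...]}.
        if isinstance(entry, dict):
            return entry
        if isinstance(entry, str):
            return {default_key: re.split(r'\n+| +', entry)}
        if isinstance(entry, list):
            return {default_key: entry}
        raise TypeError(f'unsupported target entry type: {type(entry)}')

    print(sanitize_target('oc_fifo.sv oc_uart_top'))      # {'deps': ['oc_fifo.sv', 'oc_uart_top']}
    print(sanitize_target(['oc_fifo.sv', 'oc_uart_top'])) # {'deps': ['oc_fifo.sv', 'oc_uart_top']}
    print(sanitize_target({'deps': ['oc_fifo.sv'], 'top': 'oc_fifo'}))  # unchanged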
389
- def path_substitutions_relative_to_work_dir(
390
- exec_list: list, info_str: str, target_path: str,
391
- enable_filepath_subst: bool, enable_dirpath_subst: bool
392
- ) -> list:
393
-
394
- if not enable_filepath_subst and not enable_dirpath_subst:
395
- return exec_list
396
-
397
- # Look for path substitutions, b/c we later "work" in self.args['work-dir'], but
398
- # files should be relative to our target_path.
399
- for i,word in enumerate(exec_list):
400
- m = re.search(r'(\.+\/+[^"\;\:\|\<\>\*]*)$', word)
401
- if m:
402
- # ./, ../, file=./../whatever It might be a filepath.
403
- # [^"\;\:\|\<\>\*] is looking for non-path like characters, so we dont' have a trailing
404
- # " : ; < > |
405
- # try and see if this file or dir exists. Note that files in the self.args['work-dir'] don't
406
- # need this, and we can't assume dir levels in the work-dir.
407
- try:
408
- try_path = os.path.abspath(os.path.join(os.path.abspath(target_path), m.group(1)))
409
- if enable_filepath_subst and os.path.isfile(try_path):
410
- # make the substitution
411
- exec_list[i] = word.replace(m.group(1), try_path)
412
- debug(f'file path substitution {info_str=} {target_path=}: replaced - {word=}'
413
- f' is now {exec_list[i]}. This can be disabled in DEPS with:',
414
- '"filepath-subst-target-dir: false"')
415
- elif enable_dirpath_subst and os.path.isdir(try_path):
416
- # make the substitution
417
- exec_list[i] = word.replace(m.group(1), try_path)
418
- debug(f'dir path substitution {info_str=} {target_path=}: replaced - {word=}'
419
- f' is now {exec_list[i]}. This can be disabled in DEPS with:',
420
- '"dirpath-subst-target-dir: false"')
421
- except:
422
- pass
423
-
424
- return exec_list
425
-
426
-
427
- def line_with_var_subst(line : str, replace_vars_dict=dict(), replace_vars_os_env=False,
428
- target_node='', target_path='') -> str:
429
- # We can try for replacing any formatted strings, using self.args, and os.environ?
430
- # We have to do this per-word, so that missing replacements or tcl-like things, such
431
- # as '{}' wouldn't bail if trying to do line.format(**dict)
432
- if '{' not in line:
433
- return line
434
-
435
- if replace_vars_os_env:
436
- replace_dict = dict()
437
- replace_dict.update(os.environ)
438
- replace_dict.update(replace_vars_dict)
439
- else:
440
- replace_dict = replace_vars_dict
441
-
442
- words = line.split()
443
- for iter,word in enumerate(words):
444
- try:
445
- words[iter] = word.format(**replace_dict)
446
- except:
447
- pass
448
-
449
- new_line = ' '.join(words)
450
- if new_line != line:
451
- debug(f'{target_node=} {target_path=} performed string format replacement, {line=} {new_line=}')
452
- return new_line
453
- else:
454
- debug(f'{target_node=} {target_path=} string format replacement attempted, no replacement. {line=}')
455
- return line
456
-
457
-
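The per-word substitution above exists so that words that are not valid Python format strings (for example Tcl braces) are left untouched; a small sketch with made-up variable names:

    def subst_words(line, replace_vars):
        if '{' not in line:
            return line
        words = line.split()
        for i, word in enumerate(words):
            try:
                words[i] = word.format(**replace_vars)   # substitute where possible
            except (KeyError, IndexError, ValueError):
                pass                                     # leave Tcl-like '{...}' words as-is
        return ' '.join(words)

    print(subst_words('run.py --seed {seed} -tclargs {prop value}', {'seed': 42}))
    # -> 'run.py --seed 42 -tclargs {prop value}'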
458
- class DepsProcessor:
459
- def __init__(self, command_design_ref, deps_entry:dict, target:str,
460
- target_path:str, target_node:str, deps_file:str, caller_info:str):
461
- '''
462
- command_design_ref (eda.CommandDesign),
463
- deps_entry (dict, target in DEPS.yml file)
464
- target_node (str) -- key in DEPS.yml that got us the deps_entry, used for debug
465
- deps_file (str) -- file, used for debug
466
- caller_info (str) -- used for debug
467
- '''
468
-
469
- self.command_design_ref = command_design_ref
470
- self.deps_entry = deps_entry
471
- self.target = target
472
- self.target_path = target_path
473
- self.target_node = target_node # for debug
474
- self.deps_file = deps_file # for debug
475
- self.caller_info = caller_info
476
-
477
- assert type(deps_entry) is dict, \
478
- f'{deps_entry=} for {target_node=} in {deps_file=} must be a dict()'
479
- assert command_design_ref is not None, \
480
- f'called DepsProcessor.__init__, but no ref to CommandDesign object (is None)'
481
-
482
- # named eda commands in the target:
483
- # If this deps_entry has a 'sim', 'build', etc command entry for this target, grab that because it
484
- # can set defines or other things specific to an eda command ('sim', for example)
485
- self.entry_eda_command = self.deps_entry.get(command_design_ref.command_name, dict())
486
-
487
- # alias some of the self.command_design_ref values
488
- self.command_name = self.command_design_ref.command_name # str, for debug
489
- self.args = self.command_design_ref.args # dict
490
- self.config = self.command_design_ref.config # dict
491
- self.set_arg = self.command_design_ref.set_arg # method
492
- self.error = self.command_design_ref.error # method.
493
-
494
- # If there are expanded eda commands in self.command_design_ref.config['command_handler'].keys(),
495
- # then make note of that now.
496
- self.known_eda_commands = getattr(self.command_design_ref, 'config', {}).get('command_handler', {}).keys()
497
-
498
- def apply_defines(self, defines_dict:dict):
499
- if type(defines_dict) is not dict:
500
- self.error(f"{defines_dict=} is not type dict, can't apply defines,",
501
- f"in {self.caller_info}")
502
- for k,v in defines_dict.items():
503
- if v is None or v == '':
504
- self.command_design_ref.process_plusarg(f'+define+{k}')
505
- else:
506
- # %PWD% and %SEED% substitutions:
507
- if v and type(v) is str:
508
- if v.startswith('%PWD%/') or v.startswith('"%PWD%/'):
509
- v = v.replace('%PWD%', os.path.abspath(self.target_path))
510
- if v.startswith('%SEED%') or v.startswith('"%SEED%'):
511
- v = v.replace('%SEED%', str(self.args.get('seed', 1)))
512
- self.command_design_ref.process_plusarg(f'+define+{k}={v}')
513
-
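The %PWD% and %SEED% handling above only rewrites the leading part of a define's value; a minimal sketch of that substitution step with made-up paths and seed (the real method then forwards the result to process_plusarg):

    import os

    def expand_define_value(value, target_path, seed=1):
        # Mirrors apply_defines: expand %PWD% to the target's absolute path and
        # %SEED% to the current seed, only when they lead the (possibly quoted) value.
        if value.startswith('%PWD%/') or value.startswith('"%PWD%/'):
            value = value.replace('%PWD%', os.path.abspath(target_path))
        if value.startswith('%SEED%') or value.startswith('"%SEED%'):
            value = value.replace('%SEED%', str(seed))
        return value

    print(expand_define_value('%PWD%/data/mem_init.hex', 'lib/oc_fifo'))
    print(expand_define_value('%SEED%', '.', seed=1234))   # -> '1234'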
514
- def apply_incdirs(self, incdirs_list:list):
515
- if type(incdirs_list) not in [str, list]:
516
- self.error(f"{incdirs_list=} is not type str/list, can't apply incdirs",
517
- f"in {self.caller_info}")
518
- incdirs_list = dep_str2list(incdirs_list)
519
- for x in incdirs_list:
520
- abspath = os.path.abspath(os.path.join(self.target_path, x))
521
- if abspath not in self.command_design_ref.incdirs:
522
- self.command_design_ref.incdirs.append(abspath)
523
- debug(f'Added include dir {abspath} from {self.caller_info}')
524
-
525
- def apply_args(self, args_list:list) -> list:
526
- if type(args_list) not in [str, list]:
527
- self.error(f"{args_list=} is not type str/list, can't apply args",
528
- f"in {self.caller_info}")
529
- tokens = dep_str2list(args_list)
530
- # We're going to run an ArgumentParser here, which is not the most efficient
531
- # thing to do b/c it runs on all of self.command_design_ref.args (dict) even
532
- # if we're applying a single token.
533
- debug(f'deps_helpers - custom apply_args with {tokens=}',
534
- f'from {self.caller_info}')
535
- _, unparsed = self.command_design_ref.run_argparser_on_list(
536
- tokens=tokens
537
- )
538
- # Annoying, but check for plusargs in unparsed, and have referenced CommandDesign
539
- # or CommandSim class handle it with process_plusarg.
540
- for arg in unparsed:
541
- if arg.startswith('+'):
542
- self.command_design_ref.process_plusarg(plusarg=arg, pwd=self.target_path)
543
-
544
- if len(unparsed) > 0:
545
- # This is only a warning - because things like CommandFlist may not have every
546
- # one of their self.args.keys() set for a given target, such as a 'sim' target that
547
- # has --optimize, which is not an arg for CommandFlist. But we'd still like to get an flist
548
- # from that target.
549
- warning(f'For {self.command_design_ref.command_name}:' \
550
- + f' in {self.caller_info} has unknown args {unparsed=}')
551
- return unparsed
552
-
553
- def apply_reqs(self, reqs_list:list) -> None:
554
- for req in reqs_list:
555
- req_abspath = os.path.abspath(os.path.join(self.target_path, req))
556
- self.command_design_ref.add_file(req_abspath, use_abspath=False, add_to_non_sources=True,
557
- caller_info=self.caller_info)
558
-
559
- def process_deps_entry(self):
560
-
561
- # Supported target keys:
562
- # -- tags (or equivalent, to support multiple define/incdir/deps for a target)
563
- # -- supports tag-name, with-tools, with-args, args, defines, incdirs, deps
564
- # ** to be applied if a tool matches.
565
- # -- TODO(drew): other features in docs/DEPS.md not yet implemented.
566
- # -- multi: ignore-this-target: - commands (handled in eda.py CommandMulti.resolve_target)
567
- # -- Named eda commands
568
- # -- (partially done) sim or other eda commands (eda.py command specific things)
569
- # basically, check the command, and apply/merge values to 'entry'?
570
- # -- args
571
- # -- defines
572
- # -- incdirs
573
- # -- top.
574
- # -- commands (not in deps)
575
- # -- deps
576
-
577
- # TODO(drew): This does not yet support conditional inclusions based on defines,
578
- # like the old DEPS files did with pattern:
579
- # SOME_DEFINE ? dep_if_define_present : dep_if_define_not_present
580
- # I would like to deprecate that in favor of 'tags'. However, likely will need
581
- # to walk the entire DEPS.yml once to populate all args/defines, and then re-
582
- # walk them to add/prune the correct tag based dependencies, or rely on it being
583
- # entirely top-down.
584
-
585
- # DEPS.yml entries have ordered keys, and process these in-order
586
- # with how the <target> defined it.
587
- remaining_deps_list = list() # deps items we find that are not yet processed.
588
- for key in self.deps_entry.keys():
589
-
590
- # Make sure DEPS target table keys are legal:
591
- if key not in Defaults.supported_target_table_keys and \
592
- key not in self.known_eda_commands:
593
- error(f'Unknown target {key=} in {self.caller_info},',
594
- f' must be one of deps_helpers.{Defaults.supported_target_table_keys=}',
595
- f' or an eda command: {self.known_eda_commands}')
596
-
597
- if key == 'tags':
598
- remaining_deps_list += self.process_tags()
599
- elif key == 'defines':
600
- self.process_defines()
601
- elif key == 'incdirs':
602
- self.process_incdirs()
603
- elif key == 'top':
604
- self.process_top()
605
- elif key == 'args':
606
- self.process_args()
607
- elif key == 'commands':
608
- self.process_commands()
609
- elif key == 'reqs':
610
- self.process_reqs()
611
- elif key == 'deps':
612
- remaining_deps_list += self.process_deps_return_discovered_deps()
613
-
614
- # We return the list of deps that still need to be resolved (['full_path/some_target', ...])
615
- return remaining_deps_list
616
-
617
- def process_tags(self) -> list:
618
- '''Returns List of added deps, applies tags (dict w/ details, if any) to self.command_design_ref.
619
-
620
- Tags are only supported as a Table within a target. Currently we only support:
621
- 'args', 'replace-config-tools', 'additive-config-tools', 'with-tools', 'with-args'.
622
- '''
623
-
624
- deps_tags_enables = self.config.get('dep_tags_enables', {})
625
- ret_deps_added_from_tags = list()
626
-
627
- entry_tags = dict() # from yml table
628
- entry_tags.update(self.deps_entry.get('tags', dict()))
629
- for tagname,value in entry_tags.items():
630
- debug(f'process_tags(): {tagname=} in {self.caller_info}' \
631
- + f' observed: {value=}')
632
- assert type(value) is dict, \
633
- f'{tagname=} {value=} value must be a dict for in {self.caller_info}'
634
- tags_dict_to_apply = value.copy()
635
-
636
- for key in value.keys():
637
- if key not in Defaults.supported_tag_keys:
638
- self.error(f'{tagname=} in {self.caller_info}:',
639
- f'has unsupported {key=} {Defaults.supported_tag_keys=}')
640
-
641
- enable_tags_matched = False
642
- disable_tags_matched = False
643
- if tagname in self.command_design_ref.args['enable-tags']:
644
- # tagname was force enabled by --enable-tags=tagname.
645
- debug(f'process_tags(): {tagname=} in {self.caller_info=}',
646
- 'will be enabled, matched in --enable-tags:',
647
- f'{self.command_design_ref.args["enable-tags"]}')
648
- enable_tags_matched = True
649
- if tagname in self.command_design_ref.args['disable-tags']:
650
- # tagname was force disabled by --disable-tags=tagname.
651
- debug(f'process_tags(): {tagname=} in {self.caller_info=}',
652
- 'will be disabled, matched in disable-tags:',
653
- f'{self.command_design_ref.args["disable-tags"]}')
654
- disable_tags_matched = True
655
-
656
-
657
- apply_tag_items_tools = False
658
- apply_tag_items_with_args = False
659
-
660
- tool = self.args.get('tool', None)
661
-
662
- if disable_tags_matched or enable_tags_matched:
663
- # skip checking with-tools or with-args, b/c we are already
664
- # force matched by tagname from --enable-tags or --disable-tags.
665
- pass
666
- else:
667
-
668
- with_tools = dep_str2list(value.get('with-tools', list()))
669
- if with_tools and not deps_tags_enables.get('with-tools', None):
670
- with_tools = []
671
- warning(f'{tagname=} in {self.caller_info}:',
672
- f' skipped due to with-tools disabled.')
673
-
674
- with_args = value.get('with-args', dict())
675
- if type(with_args) is not dict:
676
- error(f'{tagname=} in {self.caller_info}:',
677
- f' with-args must be a table (dict) of key-value pairs')
678
- if with_args and not deps_tags_enables.get('with-args', None):
679
- with_args = {}
680
- warning(f'{tagname=} in {self.caller_info}:',
681
- f' skipped due to with-args disabled.')
682
-
683
- # check with-tools?
684
- if not with_tools:
685
- apply_tag_items_tools = True # no with-tools present
686
- elif tool in with_tools:
687
- apply_tag_items_tools = True # with-tools present and we matched.
688
- else:
689
- # Each item of with-tools can also be in the form {tool (str)}:{TOOL.tool_version (str)}
690
- # this matches Tool.get_full_tool_and_versions()
691
- if getattr(self.command_design_ref, 'get_full_tool_and_versions', None):
692
- tool_full_version = self.command_design_ref.get_full_tool_and_versions()
693
- if tool_full_version and tool_full_version in with_tools:
694
- apply_tag_items_tools = True
695
-
696
- # check with-args?
697
- with_args_matched_list = []
698
- for k,v in with_args.items():
699
- with_args_matched_list.append(False)
700
- if not apply_tag_items_tools:
701
- # If we didn't previously match with-tools (if with-tools was present),
702
- # then we may not match the args, b/c those are tool dependent in the
703
- # Command handling class.
704
- pass
705
- elif k not in self.command_design_ref.args:
706
- warning(f'{tagname=} in {self.caller_info}:',
707
- f'with-args key {k} is not a valid arg for {tool=}')
708
- elif type(v) != type(self.command_design_ref.args[k]):
709
- warning(f'{tagname=} in {self.caller_info}:',
710
- f' with-args table key {k} value {v} (type {type(v)}) does not match type in args',
711
- f' (type {self.command_design_ref.args[k]})')
712
- elif self.command_design_ref.args[k] == v:
713
- # set it as matched:
714
- with_args_matched_list[-1] = True
715
- debug(f'{tagname=} in {self.caller_info}:',
716
- f' with-args table key {k} value {v} matched')
717
- else:
718
- debug(f'{tagname=} in {self.caller_info}:',
719
- f'with-args table key {k} value {v} did not match args value: ',
720
- f'{self.command_design_ref.args[k]}')
721
-
722
- if not with_args_matched_list:
723
- apply_tag_items_with_args = True # no with-args set
724
- else:
725
- apply_tag_items_with_args = all(with_args_matched_list)
726
-
727
- # Did we match all with-tools and with-args?
728
- if disable_tags_matched:
729
- apply_tag_items = False
730
- elif enable_tags_matched:
731
- apply_tag_items = True
732
- else:
733
- apply_tag_items = apply_tag_items_tools and apply_tag_items_with_args
734
-
735
- if not apply_tag_items:
736
- debug(f'process_tags(): {tagname=} in {self.caller_info}',
737
- f'skipped for {tool=}, {with_args=}, {with_args_matched_list=}')
738
- elif apply_tag_items_tools or apply_tag_items_with_args:
739
- debug(f'process_tags(): {tagname=} in {self.caller_info=}',
740
- f'applying tags for {tool=}, {with_args=}, {with_args_matched_list=},',
741
- f'{tags_dict_to_apply.keys()=}')
742
-
743
-
744
- if apply_tag_items:
745
- # We have matched something (with-tools, etc).
746
- # apply these in the original order of the keys:
747
- for key in tags_dict_to_apply.keys():
748
-
749
- if key == 'defines':
750
- # apply defines:
751
- self.apply_defines(value.get('defines', {}))
752
-
753
- elif key == 'incdirs':
754
- # apply incdirs:
755
- self.apply_incdirs(value.get('incdirs', []))
756
-
757
- elif key == 'args':
758
- # apply args
759
- args_list = dep_str2list(value.get('args', list()))
760
- if len(args_list) > 0 and not deps_tags_enables.get('args', None):
761
- args_list = []
762
- warning(f'{tagname=} in {self.caller_info=}:' \
763
- + f' skipped args due to args disabled.')
764
- if len(args_list) > 0:
765
- # This will apply knowns args to the target dep:
766
- info(f'{tagname=} in {self.caller_info=}:' \
767
- + f'applying args b/c {with_tools=} for {args_list=}')
768
- unparsed = self.apply_args(args_list)
769
-
770
- elif key == 'reqs':
771
- reqs_list = deps_target_get_deps_list(entry=value,
772
- default_key='reqs',
773
- target_node=self.target_node,
774
- deps_file=self.deps_file)
775
- self.apply_reqs(reqs_list)
776
-
777
- elif key == 'deps':
778
-
779
- # apply deps (includes commands, stray +define+ +incdir+)
780
- # treat the same way we treat self.process_deps_return_discovered_deps
781
- deps_list = deps_target_get_deps_list(entry=value,
782
- default_key='deps',
783
- target_node=self.target_node,
784
- deps_file=self.deps_file)
785
- ret_deps_added_from_tags += self.get_remaining_and_apply_deps(deps_list)
786
-
787
- # for replace-config-tools or additive-config-tools from tags, these don't need to
788
- # handle in order of tags keys:
789
-
790
- # apply replace-config-tools
791
- # This will replace lists (compile-waivers).
792
- tool_config = value.get('replace-config-tools', {}).get(tool, None)
793
- if tool_config and not deps_tags_enables.get('replace-config-tools', None):
794
- tool_config = None
795
- warning(f'{tagname=} in {self.caller_info}:' \
796
- + f' skipped replace-config-tools b/c it is disabled.')
797
- if tool_config and type(tool_config) is dict:
798
- # apply it to self.tool_config:
799
- info(f'{tagname=} in {self.caller_info}:' \
800
- + f'applying replace-config-tools for {tool=}: {tool_config}')
801
- eda_config.merge_config(self.command_design_ref.tool_config, tool_config)
802
- # Since we altered command_design_ref.tool_config, need to call update on it:
803
- self.command_design_ref.update_tool_config()
804
- debug(f'{tagname=} in {self.caller_info}:' \
805
- + f'Updated {self.command_design_ref.tool_config=}')
806
-
807
- # apply additive-config-tools
808
- # This will append to lists (compile-waivers)
809
- tool_config = value.get('additive-config-tools', {}).get(tool, None)
810
- if tool_config and not deps_tags_enables.get('additive-config-tools', None):
811
- tool_config = None
812
- warning(f'{tagname=} in {self.caller_info}:' \
813
- + f' skipped additive-config-tools b/c it is disabled.')
814
- if tool_config and type(tool_config) is dict:
815
- # apply it to self.tool_config:
816
- info(f'{tagname=} in {self.caller_info}:' \
817
- + f'applying additive-config-tools for {tool=}: {tool_config}')
818
- eda_config.merge_config(self.command_design_ref.tool_config, tool_config,
819
- additive_strategy=True)
820
- # Since we altered command_design_ref.tool_config, need to call update on it:
821
- self.command_design_ref.update_tool_config()
822
- debug(f'{tagname=} in {self.caller_info}:' \
823
- + f'Updated {self.command_design_ref.tool_config=}')
824
-
825
- return ret_deps_added_from_tags
826
-
827
-
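The gating in process_tags() comes down to: an explicit --disable-tags/--enable-tags match wins, otherwise the tool must appear in with-tools (when given) and every with-args pair must equal the current arg value; a simplified sketch with made-up tag contents (tool-version matching and the config enables are omitted):

    def tag_applies(tag, tool, args, enable_tags=(), disable_tags=(), name=''):
        if name in disable_tags:
            return False
        if name in enable_tags:
            return True
        with_tools = tag.get('with-tools', [])
        if with_tools and tool not in with_tools:
            return False
        # every with-args key must equal the current arg value
        return all(args.get(k) == v for k, v in tag.get('with-args', {}).items())

    tag = {'with-tools': ['verilator'], 'with-args': {'waves': True},
           'defines': {'SIM_FAST': 1}}
    print(tag_applies(tag, tool='verilator', args={'waves': True}, name='fast'))  # True
    print(tag_applies(tag, tool='vivado',    args={'waves': True}, name='fast'))  # False
    print(tag_applies(tag, tool='verilator', args={'waves': True},
                      disable_tags=('fast',), name='fast'))                       # False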
828
- def process_defines(self):
829
- '''Returns None, applies defines (dict, if any) from self.deps_entry to self.command_design_ref.'''
830
-
831
- # Defines:
832
- # apply command specific defines, with higher priority than a deps_entry['sim']['defines'] entry,
833
- # do this with dict1.update(dict2):
834
- entry_defines = dict()
835
- entry_defines.update(self.deps_entry.get('defines', dict()))
836
- entry_defines.update(self.entry_eda_command.get('defines', dict()))
837
- assert type(entry_defines) is dict, \
838
- f'{entry_defines=} for in {self.caller_info} must be a dict()'
839
-
840
- self.apply_defines(entry_defines)
841
-
842
- def process_incdirs(self):
843
- '''Returns None, applies incdirs (dict, if any) from self.deps_entry to self.command_design_ref.'''
844
-
845
- entry_incdirs = list()
846
- # apply command specific incdirs, higher in the incdir list:
847
- entry_incdirs = dep_str2list(self.entry_eda_command.get('incdirs', list()))
848
- entry_incdirs += dep_str2list(self.deps_entry.get('incdirs', list()))
849
- assert type(entry_incdirs) is list, \
850
- f'{entry_incdirs=} for in {self.caller_info} must be a list()'
851
- self.apply_incdirs(entry_incdirs)
852
-
853
- def process_top(self):
854
- '''Returns None, applies top (str, if any) from self.deps_entry to self.command_design_ref.'''
855
-
856
- if self.args['top'] != '':
857
- return # already set
858
-
859
- # For 'top', we overwrite it if not yet set.
860
- # the command specific 'top' has higher priority.
861
- entry_top = self.entry_eda_command.get('top', str()) # if someone set target['sim']['top']
862
- if entry_top == '':
863
- entry_top = self.deps_entry.get('top', str()) # if this target has target['top'] set
864
-
865
- if entry_top != '':
866
- if self.args['top'] == '':
867
- # overwrite only if unset - we don't want other deps overriding the topmost
868
- # target's setting for 'top'.
869
- self.set_arg('top', str(entry_top))
870
-
871
- def process_args(self):
872
- '''Returns None, applies args (list or str, if any) from self.deps_entry to self.command_design_ref.'''
873
-
874
- # for 'args', process each. command specific args take higher priority than target args.
875
- # run_argparser_on_list: uses argparse, which takes precedence on the last arg that is set,
876
- # so put the command specific args last.
877
- # Note that if an arg is already set, we do NOT update it
878
- args_list = dep_str2list(self.deps_entry.get('args', list()))
879
- args_list += dep_str2list(self.entry_eda_command.get('args', list()))
880
-
881
- # for args_list, re-parse these args to apply them to self.args.
882
- unparsed = list()
883
- if len(args_list) == 0:
884
- return
885
-
886
- debug(f'in {self.caller_info}: {args_list=}')
887
- unparsed = self.apply_args(args_list)
888
-
889
- # TODO(drew): Currently, I can't support changing the 'config' via an arg encountered in
890
- # DEPS.yml. This is prevented b/c --config-yml appears as a modified arg no matter what
891
- # (and we don't let DEPS.yml override modified args, otherwise a target would override the
892
- # user command line).
893
-
894
-
895
- def get_commands(self, commands=list(), dep=None):
896
- '''Returns tuple of (shell_commands_list, work_dir_add_srcs_list).
897
-
898
- Does not have side effects on self.command_design_ref.
899
- '''
900
-
901
- default_ret = list(), list()
902
-
903
- if len(commands) == 0:
904
- # if we weren't passed commands, then get them from our target (self.deps_entry)
905
- commands = self.deps_entry.get('commands', list())
906
-
907
- assert type(commands) is list, f'{self.deps_entry=} has {commands=} type is not list'
908
-
909
- if len(commands) == 0: # No commands in this target
910
- return default_ret
911
-
912
- debug(f"Got {self.deps_entry=} for in {self.caller_info}, has {commands=}")
913
- shell_commands_list = list() # list of dict()s
914
- work_dir_add_srcs_list = list() # list of dict()s
915
-
916
- if dep is None:
917
- # if we weren't passed a dep, then use our target_node (str key for our self.deps_entry)
918
- dep = self.target_node
919
-
920
- # Run handler for this to convert to shell commands in self.command_design_ref
921
- shell_commands_list, work_dir_add_srcs_list = deps_commands_handler(
922
- config=self.command_design_ref.config,
923
- eda_args=self.command_design_ref.args,
924
- dep=dep,
925
- deps_file=self.deps_file,
926
- target_node=self.target_node,
927
- target_path=self.target_path,
928
- commands=commands
929
- )
930
-
931
- return shell_commands_list, work_dir_add_srcs_list
932
-
933
- def process_commands(self, commands=list(), dep=None):
934
- '''Returns None, handles commands (shell, etc) in the target that aren't in the 'deps' list.
935
-
936
- Applies these to self.command_design_ref.
937
-
938
- You can optionally call this with a commands list and a single dep, which we support for
939
- commands lists that exist within the 'deps' entry of a target.
940
- '''
941
-
942
- shell_commands_list, work_dir_add_srcs_list = self.get_commands(commands=commands, dep=dep)
943
-
944
- # add these commands lists to self.command_design_ref:
945
- # Process all shell_commands_list:
946
- # This will track each shell command with its target_node and target_path
947
- self.command_design_ref.append_shell_commands( cmds=shell_commands_list )
948
- # Process all work_dir_add_srcs_list:
949
- # This will track each added filename with its target_node and target_path
950
- self.command_design_ref.append_work_dir_add_srcs( add_srcs=work_dir_add_srcs_list,
951
- caller_info=self.caller_info )
952
-
953
-
954
- def process_reqs(self) -> None:
955
- reqs_list = deps_target_get_deps_list(entry=self.deps_entry,
956
- default_key='reqs',
957
- target_node=self.target_node,
958
- deps_file=self.deps_file)
959
- self.apply_reqs(reqs_list)
960
-
961
-
962
- def process_deps_return_discovered_deps(self) -> list:
963
- '''Returns list of deps targets to continue processing,
964
-
965
- -- iterates through 'deps' for this target (self.deps_entry['deps'])
966
- -- applies to self.command_design_ref
967
- '''
968
-
969
- # Get the list of deps from this entry (entry is a target in our DEPS.yml):
970
- deps = deps_target_get_deps_list(
971
- self.deps_entry,
972
- target_node=self.target_node,
973
- deps_file=self.deps_file
974
- )
975
- return self.get_remaining_and_apply_deps(deps)
976
-
977
- def get_remaining_and_apply_deps(self, deps:list) -> list:
978
-
979
- deps_targets_to_resolve = list()
980
-
981
- # Process deps (list)
982
- for dep in deps:
983
-
984
- typ = type(dep)
985
- if typ not in Defaults.supported_dep_keys_by_type:
986
- self.error(f'{self.target_node=} {dep=} in {self.deps_file=}:' \
987
- + f'has unsupported {type(dep)=} {Defaults.supported_dep_keys_by_type=}')
988
-
989
- for supported_type, supported_values in Defaults.supported_dep_keys_by_type.items():
990
- if '*' in supported_values:
991
- continue
992
- if typ in [dict,list] and any([k not in supported_values for k in dep]):
993
- self.error(f'{self.target_node=} {dep=} in {self.deps_file=}:' \
994
- + f'has dict-key or list-item not in {Defaults.supported_dep_keys_by_type[typ]=}')
995
-
996
- # In-line commands in the deps list, in case the results need to be in strict file
997
- # order for other deps
998
- if type(dep) is dict and 'commands' in dep:
999
-
1000
- commands = dep['commands']
1001
- debug(f"Got commands {dep=} for in {self.caller_info}, {commands=}")
1002
-
1003
- assert type(commands) is list, \
1004
- f'dep commands must be a list: {dep=} in {self.caller_info}'
1005
-
1006
- # For this, we need to get the returned commands (to keep strict order w/ other deps)
1007
- command_tuple = self.get_commands( commands=commands, dep=dep )
1008
- # TODO(drew): it might be cleaner to return a dict instead of list, b/c those are also ordered
1009
- # and we can pass type information, something like:
1010
- deps_targets_to_resolve.append(command_tuple)
1011
-
1012
-
1013
- elif type(dep) is str and any(dep.startswith(x) for x in ['+define+', '+incdir']):
1014
- # Note: we still support +define+ and +incdir in the deps list.
1015
- # check for compile-time Verilog style plusarg, which are supported under targets
1016
- # These are not run-time Verilog style plusargs consumable from within the .sv:
1017
- debug(f"Got plusarg (define, incdir) {dep=} for {self.caller_info}")
1018
- self.command_design_ref.process_plusarg(plusarg=dep, pwd=self.target_path)
1019
-
1020
- else:
1021
- # If we made it this far, dep better be a str type.
1022
- assert type(dep) is str, f'{dep=} {type(dep)=} must be str'
1023
- dep_path = os.path.join(self.target_path, dep)
1024
- debug(f"Got dep {dep_path=} for in {self.caller_info}")
1025
-
1026
- if dep_path in self.command_design_ref.targets_dict or \
1027
- dep_path in deps_targets_to_resolve:
1028
- debug(" - already processed, skipping")
1029
- else:
1030
- file_exists, _, _ = files.get_source_file(dep_path)
1031
- if file_exists:
1032
- debug(" - raw file, adding to return list...")
1033
- deps_targets_to_resolve.append(dep_path) # append to list, keeping file order.
1034
- else:
1035
- debug(f" - a target (not a file) needing to be resolved, adding to return list...")
1036
- deps_targets_to_resolve.append(dep_path) # append to list, keeping file order.
1037
-
1038
- # We return the list of deps or files that still need to be resolved (['full_path/some_target', ...])
1039
- # items in this list are either:
1040
- # -- string (dep or file)
1041
- # -- tuple (unprocessed commands, in form: (shell_commands_list, work_dir_add_srcs_list))
1042
- # TODO(drew): it might be cleaner to return a dict instead of list, b/c those are also ordered
1043
- # and we can pass type information, something like:
1044
- # { dep1: 'file',
1045
- # dep2: 'target',
1046
- # dep3: 'command_tuple',
1047
- # }
1048
- return deps_targets_to_resolve
1049
-
1050
-
1051
-
1052
-
1053
- def parse_deps_shell_str(line: str, target_path: str, target_node: str,
1054
- enable_filepath_subst_target_dir: bool = True,
1055
- enable_dirpath_subst_target_dir: bool = False,
1056
- enable: bool = True) -> dict:
1057
- '''Returns an empty dict or a dict describing a shell command parsed from line (str)
1058
-
1059
- Examples of 'line' str:
1060
- shell@echo "hello world" > hello.txt
1061
- shell@ generate_something.sh
1062
- shell@ generate_this.py --input=some_data.json
1063
- shell@ cp ./some_file.txt some_file_COPY.txt
1064
- shell@ vivado -mode tcl -script ./some.tcl -tclargs foo_ip 1.2 foo_part foo_our_name {property value}
1065
-
1066
- Returns an empty dict if no parsing was performed, or if enable is False
1067
-
1068
- target_path (str) -- from dependency parsing (relative path of the DEPS file)
1069
- target_node (str) -- from dependency parsing, the target containing this 'line' str.
1070
- '''
1071
- if not enable:
1072
- return {}
1073
-
1074
- m = re.match(r'^\s*shell\@(.*)\s*$', line)
1075
- if not m:
1076
- return {}
1077
-
1078
- exec_str = m.group(1)
1079
- exec_list = exec_str.split()
1080
-
1081
- # Look for path substitutions, b/c we later "work" in self.args['work-dir'], but
1082
- # files should be relative to our target_path.
1083
- # Note this can be disabled in DEPS via filepath-subst-target-dir / dirpath-subst-target-dir = False
1084
- exec_list = path_substitutions_relative_to_work_dir(
1085
- exec_list=exec_list, info_str='shell@', target_path=target_path,
1086
- enable_filepath_subst=enable_filepath_subst_target_dir,
1087
- enable_dirpath_subst=enable_dirpath_subst_target_dir
1088
- )
1089
-
1090
- return {
1091
- 'target_path': os.path.abspath(target_path),
1092
- 'target_node': target_node,
1093
- 'run_from_work_dir': True, # may be overridden later.
1094
- 'exec_list': exec_list,
1095
- }
1096
-
1097
-
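The shell@ recognizer above is one anchored regex plus a split; a standalone sketch with a made-up command (the work-dir path substitution step is skipped here):

    import os
    import re

    def parse_shell_line(line, target_path, target_node):
        m = re.match(r'^\s*shell\@(.*)\s*$', line)
        if not m:
            return {}
        return {
            'target_path': os.path.abspath(target_path),
            'target_node': target_node,
            'run_from_work_dir': True,
            'exec_list': m.group(1).split(),
        }

    d = parse_shell_line('shell@ generate_this.py --input=some_data.json', '.', 'my_target')
    print(d['exec_list'])  # ['generate_this.py', '--input=some_data.json']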
1098
- def parse_deps_work_dir_add_srcs(line : str, target_path : str, target_node : str, enable : bool = True):
1099
- '''Returns None or a dict describing source files to add from the work-dir path
1100
-
1101
- Examples of 'line' str:
1102
- work_dir_add_srcs@ my_csrs.sv
1103
- work_dir_add_srcs@ some_generated_file.sv some_dir/some_other.v ./gen-vhd-dir/even_more.vhd
1104
-
1105
- Returns None if no parsing was performed, or if enable is False
1106
-
1107
- target_path (str) -- from dependency parsing (relative path of the DEPS file)
1108
- target_node (str) -- from dependency parsing, the target containing this 'line' str.
1109
- '''
1110
- if not enable:
1111
- return None
1112
-
1113
- m = re.match(r'^\s*work_dir_add_srcs\@(.*)\s*$', line)
1114
- if not m:
1115
- return None
1116
-
1117
- files_str = m.group(1)
1118
- file_list = files_str.split()
1119
-
1120
- d = {'target_path': os.path.abspath(target_path),
1121
- 'target_node': target_node,
1122
- 'file_list': file_list,
1123
- }
1124
- return d
1125
-
1126
-
1127
- def parse_deps_peakrdl(
1128
- line: str, target_path: str, target_node: str, enable: bool = True,
1129
- enable_filepath_subst_target_dir: bool = True,
1130
- enable_dirpath_subst_target_dir: bool = False,
1131
- tool: str = ''
1132
- ) -> dict:
1133
- '''Returns None or a dict describing a PeakRDL CSR register generator dependency
1134
-
1135
- Examples of 'line' str:
1136
- peakrdl@ --cpuif axi4-lite-flat --top oc_eth_10g_1port_csrs ./oc_eth_10g_csrs.rdl
1137
-
1138
- Returns None if no parsing was performed, or if enable=False
1139
-
1140
- target_path (str) -- from dependency parsing (relative path of the DEPS file)
1141
- target_node (str) -- from dependency parsing, the target containing this 'line' str.
1142
- '''
1143
-
1144
- m = re.match(r'^\s*peakrdl\@(.*)\s*$', line)
1145
- if not m:
1146
- return None
1147
-
1148
- if not enable:
1149
- warning(f'peakrdl: encountered peakrdl command in {target_path=} {target_node=},' \
1150
- + ' however it is not enabled in eda.py - eda.config[dep_command_enables]')
1151
- return None
1152
-
1153
- if not shutil.which('peakrdl') or \
1154
- 'peakrdl' not in globals().keys():
1155
-
1156
- error('peakrdl: is not present in shell path, or the python package is not available,' \
1157
- + f' yet we encountered a peakrdl command in {target_path=} {target_node=}')
1158
- return None
1159
-
1160
-
1161
- args_str = m.group(1)
1162
- args_list = args_str.split()
1163
-
1164
- # Fish out the .rdl name
1165
- # If there is --top=value or --top value, then fish out that value (that will be the
1166
- # value.sv and value_pkg.sv generated names.
1167
-
1168
- sv_files = list()
1169
- top = ''
1170
- for i,str_value in enumerate(args_list):
1171
- if '--top=' in str_value:
1172
- _, top = str_value.split('=')
1173
- elif '--top' in str_value:
1174
- if i + 1 < len(args_list):
1175
- top = args_list[i + 1]
1176
-
1177
- for str_item in args_list:
1178
- if str_item[-4:] == '.rdl':
1179
- _, rdl_fileonly = os.path.split(str_item) # strip all path info
1180
- rdl_filebase, rdl_ext = os.path.splitext(rdl_fileonly) # strip .rdl
1181
- if top == '':
1182
- top = rdl_filebase
1183
-
1184
- assert top != '', f'peakrdl@ DEP, could not determine value for {top=}: {line=}, {target_path=}, {target_node=}'
1185
-
1186
- sv_files += [ f'peakrdl/{top}_pkg.sv', f'peakrdl/{top}.sv' ]
1187
-
1188
-
1189
- shell_commands = [
1190
- [ 'peakrdl', 'regblock', '-o', str(Path('peakrdl/'))] + args_list,
1191
- # Edit file to apply some verilator waivers, etc, from peakrdl_cleanup.py:
1192
- [ 'python3', peakrdl_cleanup_py, str(Path(f'peakrdl/{top}.sv')), str(Path(f'peakrdl/{top}.sv')) ],
1193
- ]
1194
-
1195
- ret_dict = {
1196
- 'shell_commands_list': list(), # Entry needs target_path, target_node, exec_list
1197
- 'work_dir_add_srcs': dict(), # Single dict needs target_path, target_node, file_list
1198
- }
1199
-
1200
- # Make these look like a dep_shell_command:
1201
- for one_cmd_as_list in shell_commands:
1202
- ret_dict['shell_commands_list'].append(
1203
- parse_deps_shell_str(
1204
- line=(' shell@ ' + ' '.join(one_cmd_as_list)),
1205
- target_path=target_path,
1206
- target_node=target_node,
1207
- enable_filepath_subst_target_dir=enable_filepath_subst_target_dir,
1208
- enable_dirpath_subst_target_dir=enable_dirpath_subst_target_dir
1209
- )
1210
- )
1211
-
1212
- # Make the work_dir_add_srcs dict:
1213
- ret_dict['work_dir_add_srcs'] = parse_deps_work_dir_add_srcs(line = ' work_dir_add_srcs@ ' + ' '.join(sv_files),
1214
- target_path = target_path,
1215
- target_node = target_node
1216
- )
1217
-
1218
- return ret_dict
1219
-
1220
-
1221
-
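The peakrdl@ handler mostly has to work out the generated file names; a sketch of just that naming step, with a made-up argument list:

    import os

    def peakrdl_generated_sources(args_list):
        # Mirrors parse_deps_peakrdl's naming: --top wins, otherwise the .rdl file's
        # basename; the generated files land in the work dir under peakrdl/.
        top = ''
        for i, word in enumerate(args_list):
            if word.startswith('--top='):
                top = word.split('=', 1)[1]
            elif word == '--top' and i + 1 < len(args_list):
                top = args_list[i + 1]
        for word in args_list:
            if word.endswith('.rdl') and not top:
                top = os.path.splitext(os.path.basename(word))[0]
        return [f'peakrdl/{top}_pkg.sv', f'peakrdl/{top}.sv']

    print(peakrdl_generated_sources(['--cpuif', 'axi4-lite-flat',
                                     '--top', 'oc_eth_10g_1port_csrs',
                                     './oc_eth_10g_csrs.rdl']))
    # -> ['peakrdl/oc_eth_10g_1port_csrs_pkg.sv', 'peakrdl/oc_eth_10g_1port_csrs.sv']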
1222
- def deps_commands_handler(config: dict, eda_args: dict,
1223
- dep : str, deps_file : str, target_node : str, target_path : str,
1224
- commands : list):
1225
- ''' Returns a tuple of (shell_commands_list, work_dir_add_srcs_list), from processing
1226
- a DEPS.yml entry for something like:
1227
-
1228
- target_foo:
1229
- deps:
1230
- - some_file
1231
- - commands: # (list of dicts) These are directly in a 'deps' list.
1232
- - shell: ...
1233
- - peakrdl: ...
1234
- - work-dir-add-sources: ...
1235
- - shell: ...
1236
-
1237
- target_foo:
1238
- commands: # (list of dicts) These are in a target, but not ordered with other deps
1239
- - shell: ...
1240
- - peakrdl: ...
1241
- - work-dir-add-sources: ...
1242
- - shell: ...
1243
-
1244
- We'd like to handle the list in a 'commands' entry, supporting it in a few places in a DEPS.yml, so this
1245
- is a generic way to do that. Currently these are broken down into Shell commands and Files
1246
- that will be later added to our sources (b/c we haven't run the Shell commands yet, and the Files
1247
- aren't present yet, but we'd like them in our eda.py filelist in order).
1248
-
1249
- '''
1250
-
1251
- shell_commands_list = list()
1252
- work_dir_add_srcs_list = list()
1253
-
1254
- for command in commands:
1255
- assert type(command) is dict, \
1256
- f'{type(command)=} must be dict, for {deps_file=} {target_node=} {target_path=} with {commands=}'
1257
-
1258
- for key in command.keys():
1259
- if key not in Defaults.supported_command_keys:
1260
- error(f'deps_helpers.process_commands - command {key=} not in {Defaults.supported_command_keys=}')
1261
-
1262
- var_subst_dict = dict() # this is per-command.
1263
- if config['dep_command_enables'].get('var_subst_os_env', False) and \
1264
- command.get('var-subst-os-env', False):
1265
- var_subst_dict.update(os.environ)
1266
- if config['dep_command_enables'].get('var_subst_args', False) and \
1267
- command.get('var-subst-args', False):
1268
- var_subst_dict = eda_args
1269
-
1270
- tee_fpath = command.get('tee', None)
1271
-
1272
- # These are both optional bools, default True, would have to explicitly be set to False
1273
- # to take effect:
1274
- run_from_work_dir = command.get('run-from-work-dir', True)
1275
- filepath_subst_target_dir = command.get('filepath-subst-target-dir', True)
1276
- dirpath_subst_target_dir = command.get('dirpath-subst-target-dir', False)
1277
-
1278
- for key,item in command.items():
1279
-
1280
- # skip the tee and var-subst-* keys, since these types are bools and not commands.
1281
- if key in ['tee',
1282
- 'var-subst-os-env',
1283
- 'var-subst-args',
1284
- 'run-from-work-dir',
1285
- 'filepath-subst-target-dir',
1286
- 'dirpath-subst-target-dir']:
1287
- continue
1288
-
1289
- # Optional variable substitution in commands
1290
- if type(item) is str:
1291
- item = line_with_var_subst(item, replace_vars_dict=var_subst_dict,
1292
- target_node=target_node, target_path=deps_file)
1293
-
1294
- if key == 'shell':
1295
- # For now, piggyback on parse_deps_shell_str:
1296
- ret_dict = parse_deps_shell_str(
1297
- line=('shell@ ' + item),
1298
- target_path=target_path,
1299
- target_node=target_node,
1300
- enable_filepath_subst_target_dir=filepath_subst_target_dir,
1301
- enable_dirpath_subst_target_dir=dirpath_subst_target_dir,
1302
- enable=config['dep_command_enables']['shell'],
1303
- )
1304
- # To support 'tee: <some-file>' need to append it to last
1305
- # list item in ret_dict['exec_list'], and make it a util.ShellCommandList.
1306
- if tee_fpath:
1307
- ret_dict['exec_list'] = ShellCommandList(ret_dict['exec_list'], tee_fpath=tee_fpath)
1308
- ret_dict['run_from_work_dir'] = run_from_work_dir
1309
- assert ret_dict, f'shell command failed in {dep=} {target_node=} in {deps_file=}'
1310
- shell_commands_list.append(ret_dict) # process this later, append to our to-be-returned tuple
1311
-
1312
- elif key in ['work-dir-add-srcs', 'work-dir-add-sources']:
1313
- # For now, piggyback on parse_deps_work_dir_add_srcs:
1314
- ret_dict = parse_deps_work_dir_add_srcs(
1315
- line = 'work_dir_add_srcs@ ' + item,
1316
- target_path = target_path,
1317
- target_node = target_node,
1318
- enable=config['dep_command_enables']['work_dir_add_srcs'],
1319
- )
1320
- assert ret_dict, f'work-dir-add-srcs command failed in {dep=} {target_node=} in {deps_file=}'
1321
-
1322
- work_dir_add_srcs_list.append(ret_dict) # process this later, append to our to-be-returned tuple
1323
-
1324
- elif key == 'peakrdl':
1325
- # for now, piggyback on parse_deps_peakrdl:
1326
- ret_dict = parse_deps_peakrdl(
1327
- line = 'peakrdl@ ' + item,
1328
- target_path = target_path,
1329
- target_node = target_node,
1330
- enable_filepath_subst_target_dir=filepath_subst_target_dir,
1331
- enable_dirpath_subst_target_dir=dirpath_subst_target_dir,
1332
- enable=config['dep_command_enables']['peakrdl'],
1333
- tool=eda_args.get('tool', '')
1334
- )
1335
- assert ret_dict, f'peakrdl command failed in {dep=} {target_node=} in {deps_file=}'
1336
-
1337
- # add all the shell commands:
1338
- shell_commands_list += ret_dict['shell_commands_list'] # several entries.
1339
- # all the work_dir_add_srcs:
1340
- work_dir_add_srcs_list += [ ret_dict['work_dir_add_srcs'] ] # single entry append
1341
-
1342
-
1343
- else:
1344
- assert False, f'unknown {key=} in {command=}, {item=} {dep=} {target_node=} in {deps_file=}'
1345
-
1346
- return (shell_commands_list, work_dir_add_srcs_list)
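To tie the handler above together, a toy dispatch that sorts a DEPS-style 'commands' list into the two buckets the real code returns; the commands are made up, and the real implementation routes each entry through parse_deps_shell_str, parse_deps_work_dir_add_srcs, or parse_deps_peakrdl with the config enables applied:

    def split_commands(commands):
        shell_cmds, work_dir_add_srcs = [], []
        for command in commands:
            for key, item in command.items():
                if key == 'shell':
                    shell_cmds.append(item.split())
                elif key in ('work-dir-add-srcs', 'work-dir-add-sources'):
                    work_dir_add_srcs.append(item.split())
        return shell_cmds, work_dir_add_srcs

    cmds = [
        {'shell': 'generate_something.sh --out my_csrs.sv'},
        {'work-dir-add-sources': 'my_csrs.sv'},
    ]
    print(split_commands(cmds))
    # -> ([['generate_something.sh', '--out', 'my_csrs.sv']], [['my_csrs.sv']])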