looper 1.7.0a1__py3-none-any.whl → 2.0.0__py3-none-any.whl
Sign up to get free protection for your applications and to get access to all the features.
- looper/__main__.py +1 -1
- looper/_version.py +2 -1
- looper/cli_divvy.py +10 -6
- looper/cli_pydantic.py +413 -0
- looper/command_models/DEVELOPER.md +85 -0
- looper/command_models/README.md +4 -0
- looper/command_models/__init__.py +6 -0
- looper/command_models/arguments.py +293 -0
- looper/command_models/commands.py +335 -0
- looper/conductor.py +161 -28
- looper/const.py +9 -0
- looper/divvy.py +56 -47
- looper/exceptions.py +9 -1
- looper/looper.py +196 -168
- looper/pipeline_interface.py +2 -12
- looper/project.py +154 -176
- looper/schemas/pipeline_interface_schema_generic.yaml +14 -6
- looper/utils.py +450 -78
- {looper-1.7.0a1.dist-info → looper-2.0.0.dist-info}/METADATA +24 -14
- {looper-1.7.0a1.dist-info → looper-2.0.0.dist-info}/RECORD +24 -19
- {looper-1.7.0a1.dist-info → looper-2.0.0.dist-info}/WHEEL +1 -1
- {looper-1.7.0a1.dist-info → looper-2.0.0.dist-info}/entry_points.txt +1 -1
- looper/cli_looper.py +0 -788
- {looper-1.7.0a1.dist-info → looper-2.0.0.dist-info}/LICENSE.txt +0 -0
- {looper-1.7.0a1.dist-info → looper-2.0.0.dist-info}/top_level.txt +0 -0
looper/__main__.py
CHANGED
looper/_version.py
CHANGED
@@ -1 +1,2 @@
|
|
1
|
-
__version__ = "
|
1
|
+
__version__ = "2.0.0"
|
2
|
+
# You must change the version in parser = pydantic_argparse.ArgumentParser in cli_pydantic.py!!!
|
looper/cli_divvy.py
CHANGED
@@ -53,10 +53,10 @@ def build_argparser():
|
|
53
53
|
|
54
54
|
for sp in [sps["list"], sps["write"], sps["submit"], sps["inspect"]]:
|
55
55
|
sp.add_argument(
|
56
|
-
"config", nargs="?", default=None, help="Divvy configuration file."
|
56
|
+
"--config", nargs="?", default=None, help="Divvy configuration file."
|
57
57
|
)
|
58
58
|
|
59
|
-
sps["init"].add_argument("config", default=None, help="Divvy configuration file.")
|
59
|
+
sps["init"].add_argument("--config", default=None, help="Divvy configuration file.")
|
60
60
|
|
61
61
|
for sp in [sps["inspect"]]:
|
62
62
|
sp.add_argument(
|
@@ -124,9 +124,11 @@ def main():
|
|
124
124
|
sys.exit(0)
|
125
125
|
|
126
126
|
_LOGGER.debug("Divvy config: {}".format(args.config))
|
127
|
+
|
127
128
|
divcfg = select_divvy_config(args.config)
|
129
|
+
|
128
130
|
_LOGGER.info("Using divvy config: {}".format(divcfg))
|
129
|
-
dcc = ComputingConfiguration(filepath=divcfg)
|
131
|
+
dcc = ComputingConfiguration.from_yaml_file(filepath=divcfg)
|
130
132
|
|
131
133
|
if args.command == "list":
|
132
134
|
# Output header via logger and content via print so the user can
|
@@ -142,11 +144,13 @@ def main():
|
|
142
144
|
for pkg_name, pkg in dcc.compute_packages.items():
|
143
145
|
if pkg_name == args.package:
|
144
146
|
found = True
|
145
|
-
with open(pkg
|
147
|
+
with open(pkg["submission_template"], "r") as f:
|
146
148
|
print(f.read())
|
147
|
-
_LOGGER.info(
|
149
|
+
_LOGGER.info(
|
150
|
+
"Submission command is: " + pkg["submission_command"] + "\n"
|
151
|
+
)
|
148
152
|
if pkg_name == "docker":
|
149
|
-
print("Docker args are: " + pkg
|
153
|
+
print("Docker args are: " + pkg["docker_args"])
|
150
154
|
|
151
155
|
if not found:
|
152
156
|
_LOGGER.info("Package not found. Use 'divvy list' to see list of packages.")
|
looper/cli_pydantic.py
ADDED
@@ -0,0 +1,413 @@
|
|
1
|
+
"""
|
2
|
+
CLI script using `pydantic-argparse` for parsing of arguments
|
3
|
+
|
4
|
+
Arguments / commands are defined in `command_models/` and are given, eventually, as
|
5
|
+
`pydantic` models, allowing for type-checking and validation of arguments.
|
6
|
+
|
7
|
+
Note: this module is the `pydantic`-model-based CLI for looper. The previous
`argparse`-based CLI (`cli_looper.py`) has been removed in this release, so
this is now the primary command-line entry point. Whether the implementation
stays on `pydantic-argparse` or moves to another (possibly self-written)
library is not yet decided.
|
13
|
+
"""
|
14
|
+
|
15
|
+
# Note: The following import is used for forward annotations (Python 3.8)
|
16
|
+
# to prevent potential 'TypeError' related to the use of the '|' operator
|
17
|
+
# with types.
|
18
|
+
from __future__ import annotations
|
19
|
+
|
20
|
+
import sys
|
21
|
+
|
22
|
+
import logmuse
|
23
|
+
import pydantic_argparse
|
24
|
+
import yaml
|
25
|
+
from eido import inspect_project
|
26
|
+
from pephubclient import PEPHubClient
|
27
|
+
from pydantic_argparse.argparse.parser import ArgumentParser
|
28
|
+
|
29
|
+
from . import __version__
|
30
|
+
|
31
|
+
from .command_models.arguments import ArgumentEnum
|
32
|
+
|
33
|
+
from .command_models.commands import (
|
34
|
+
SUPPORTED_COMMANDS,
|
35
|
+
TopLevelParser,
|
36
|
+
add_short_arguments,
|
37
|
+
)
|
38
|
+
from .const import *
|
39
|
+
from .divvy import DEFAULT_COMPUTE_RESOURCES_NAME, select_divvy_config
|
40
|
+
from .exceptions import *
|
41
|
+
from .looper import *
|
42
|
+
from .parser_types import *
|
43
|
+
from .project import Project, ProjectContext
|
44
|
+
from .utils import (
|
45
|
+
dotfile_path,
|
46
|
+
enrich_args_via_cfg,
|
47
|
+
is_pephub_registry_path,
|
48
|
+
read_looper_config_file,
|
49
|
+
read_looper_dotfile,
|
50
|
+
initiate_looper_config,
|
51
|
+
init_generic_pipeline,
|
52
|
+
read_yaml_file,
|
53
|
+
inspect_looper_config_file,
|
54
|
+
is_PEP_file_type,
|
55
|
+
looper_config_tutorial,
|
56
|
+
)
|
57
|
+
|
58
|
+
from typing import List, Tuple
|
59
|
+
from rich.console import Console
|
60
|
+
|
61
|
+
|
62
|
+
def opt_attr_pair(name: str) -> Tuple[str, str]:
    """
    Map an option name to its CLI-flag form and its namespace-attribute form.

    :param str name: option name, possibly containing dashes
    :return Tuple[str, str]: the ``--``-prefixed flag and the
        underscore-normalized attribute name
    """
    flag = "--" + name
    attribute = name.replace("-", "_")
    return flag, attribute
|
65
|
+
|
66
|
+
|
67
|
+
def validate_post_parse(args: argparse.Namespace) -> List[str]:
    """
    Check whether the user combined mutually exclusive options.

    :param argparse.Namespace args: parsed (sub)command namespace
    :return List[str]: human-readable problem descriptions (empty when OK)
    """
    problems = []
    exclusive_option_names = [
        "skip",
        "limit",
        SAMPLE_EXCLUSION_OPTNAME,
        SAMPLE_INCLUSION_OPTNAME,
    ]
    used_exclusives = []
    for opt, attr in map(opt_attr_pair, exclusive_option_names):
        # Depending on the subcommand used, the above options might either be in
        # the top-level namespace or in the subcommand namespace, hence the
        # defaulted attribute lookup.
        if getattr(args, attr, None):
            used_exclusives.append(opt)
    if len(used_exclusives) > 1:
        problems.append(
            f"Used multiple mutually exclusive options: {', '.join(used_exclusives)}"
        )
    return problems
|
93
|
+
|
94
|
+
|
95
|
+
# TODO rename to run_looper_via_cli for running looper as a python library:
# https://github.com/pepkit/looper/pull/472#discussion_r1521970763
def run_looper(args: TopLevelParser, parser: ArgumentParser, test_args=None):
    """
    Dispatch a parsed looper invocation to the matching subcommand executor.

    :param TopLevelParser args: parsed, type-validated top-level namespace
    :param ArgumentParser parser: the pydantic-argparse parser, used here for
        help output and CLI-use error reporting
    :param test_args: raw argument list when invoked from tests; forwarded to
        `enrich_args_via_cfg`
    :return: the selected executor's result (e.g. `collate.debug` for runp,
        an int exit status for `init`); may also terminate via `sys.exit`
    """
    # here comes adapted `cli_looper.py` code
    global _LOGGER

    _LOGGER = logmuse.logger_via_cli(args, make_root=True)

    # Find out which subcommand was used: exactly one subcommand field of the
    # top-level namespace is expected to be non-None after parsing.
    supported_command_names = [cmd.name for cmd in SUPPORTED_COMMANDS]
    subcommand_valued_args = [
        (arg, value)
        for arg, value in vars(args).items()
        if arg and arg in supported_command_names and value is not None
    ]
    # Only one subcommand argument will be not `None`, else we found a bug in `pydantic-argparse`
    [(subcommand_name, subcommand_args)] = subcommand_valued_args

    cli_use_errors = validate_post_parse(subcommand_args)
    if cli_use_errors:
        parser.print_help(sys.stderr)
        parser.error(
            f"{len(cli_use_errors)} CLI use problem(s): {', '.join(cli_use_errors)}"
        )

    # NOTE(review): the destructuring above raises ValueError when no
    # subcommand was given, so this fallback appears unreachable — confirm.
    if subcommand_name is None:
        parser.print_help(sys.stderr)
        sys.exit(1)

    # `init` and `init_piface` are handled before any config/project loading.
    if subcommand_name == "init":

        console = Console()
        console.clear()
        console.rule(f"\n[magenta]Looper initialization[/magenta]")
        selection = subcommand_args.generic
        if selection is True:
            console.clear()
            # initiate_looper_config returns truthy on success; invert to get
            # a shell-style exit status (0 == success).
            return int(
                not initiate_looper_config(
                    dotfile_path(),
                    subcommand_args.pep_config,
                    subcommand_args.output_dir,
                    subcommand_args.sample_pipeline_interfaces,
                    subcommand_args.project_pipeline_interfaces,
                    subcommand_args.force_yes,
                )
            )
        else:
            console.clear()
            return int(looper_config_tutorial())

    if subcommand_name == "init_piface":
        sys.exit(int(not init_generic_pipeline()))

    _LOGGER.info("Looper version: {}\nCommand: {}".format(__version__, subcommand_name))

    # Load the looper config, either from an explicit --config path or from
    # the dotfile; non-CLI keys are copied onto the subcommand namespace.
    looper_cfg_path = os.path.relpath(dotfile_path(), start=os.curdir)
    try:
        if subcommand_args.config:
            looper_config_dict = read_looper_config_file(subcommand_args.config)
        else:
            looper_config_dict = read_looper_dotfile()
            _LOGGER.info(f"Using looper config ({looper_cfg_path}).")

        cli_modifiers_dict = None
        for looper_config_key, looper_config_item in looper_config_dict.items():
            if looper_config_key == CLI_KEY:
                cli_modifiers_dict = looper_config_item
            else:
                setattr(subcommand_args, looper_config_key, looper_config_item)

    except OSError as e:
        if subcommand_args.config:
            _LOGGER.warning(
                f"\nLooper config file does not exist at given path {subcommand_args.config}. Use looper init to create one at {looper_cfg_path}."
            )
        else:
            _LOGGER.warning(e)

        sys.exit(1)

    # Merge config-file values and CLI modifiers into the namespace.
    subcommand_args = enrich_args_via_cfg(
        subcommand_name,
        subcommand_args,
        parser,
        test_args=test_args,
        cli_modifiers=cli_modifiers_dict,
    )

    # If project pipeline interface defined in the cli, change name to: "pipeline_interface"
    if vars(subcommand_args)[PROJECT_PL_ARG]:
        subcommand_args.pipeline_interfaces = vars(subcommand_args)[PROJECT_PL_ARG]

    divcfg = (
        select_divvy_config(filepath=subcommand_args.divvy)
        if hasattr(subcommand_args, "divvy")
        else None
    )
    # Ignore flags if user is selecting or excluding on flags:
    if subcommand_args.sel_flag or subcommand_args.exc_flag:
        subcommand_args.ignore_flags = True

    # Initialize project: from a PEP file on disk, or from a PEPhub registry
    # path; anything else is a misconfiguration.
    if is_PEP_file_type(subcommand_args.pep_config) and os.path.exists(
        subcommand_args.pep_config
    ):
        try:
            p = Project(
                cfg=subcommand_args.pep_config,
                amendments=subcommand_args.amend,
                divcfg_path=divcfg,
                runp=subcommand_name == "runp",
                **{
                    attr: getattr(subcommand_args, attr)
                    for attr in CLI_PROJ_ATTRS
                    if attr in subcommand_args
                },
            )
        except yaml.parser.ParserError as e:
            _LOGGER.error(f"Project config parse failed -- {e}")
            sys.exit(1)
    elif is_pephub_registry_path(subcommand_args.pep_config):
        if vars(subcommand_args)[SAMPLE_PL_ARG]:
            p = Project(
                amendments=subcommand_args.amend,
                divcfg_path=divcfg,
                runp=subcommand_name == "runp",
                project_dict=PEPHubClient().load_raw_pep(
                    registry_path=subcommand_args.pep_config
                ),
                **{
                    attr: getattr(subcommand_args, attr)
                    for attr in CLI_PROJ_ATTRS
                    if attr in subcommand_args
                },
            )
        else:
            raise MisconfigurationException(
                f"`sample_pipeline_interface` is missing. Provide it in the parameters."
            )
    else:
        raise MisconfigurationException(
            f"Cannot load PEP. Check file path or registry path to pep."
        )

    # Activate the requested compute package, falling back to the default.
    selected_compute_pkg = p.selected_compute_package or DEFAULT_COMPUTE_RESOURCES_NAME
    if p.dcc is not None and not p.dcc.activate_package(selected_compute_pkg):
        _LOGGER.info(
            "Failed to activate '{}' computing package. "
            "Using the default one".format(selected_compute_pkg)
        )

    with ProjectContext(
        prj=p,
        selector_attribute=subcommand_args.sel_attr,
        selector_include=subcommand_args.sel_incl,
        selector_exclude=subcommand_args.sel_excl,
        selector_flag=subcommand_args.sel_flag,
        exclusion_flag=subcommand_args.exc_flag,
    ) as prj:

        # Check at the beginning if user wants to use pipestat and pipestat is configurable
        is_pipestat_configured = (
            prj._check_if_pipestat_configured(pipeline_type=PipelineLevel.PROJECT.value)
            if getattr(subcommand_args, "project", None) or subcommand_name == "runp"
            else prj._check_if_pipestat_configured()
        )

        if subcommand_name in ["run", "rerun"]:
            if getattr(subcommand_args, "project", None):
                _LOGGER.warning(
                    "Project flag set but 'run' command was used. Please use 'runp' to run at project-level."
                )
            rerun = subcommand_name == "rerun"
            run = Runner(prj)
            try:
                # compute_kwargs = _proc_resources_spec(args)
                compute_kwargs = _proc_resources_spec(subcommand_args)

                # TODO Shouldn't top level args and subcommand args be accessible on the same object?
                return run(
                    subcommand_args, top_level_args=args, rerun=rerun, **compute_kwargs
                )
            except SampleFailedException:
                sys.exit(1)
            except IOError:
                _LOGGER.error(
                    "{} pipeline_interfaces: '{}'".format(
                        prj.__class__.__name__, prj.pipeline_interface_sources
                    )
                )
                raise

        if subcommand_name == "runp":
            compute_kwargs = _proc_resources_spec(subcommand_args)
            collate = Collator(prj)
            collate(subcommand_args, **compute_kwargs)
            return collate.debug

        if subcommand_name == "destroy":
            return Destroyer(prj)(subcommand_args)

        # table/report/link/check require a configured pipestat backend.
        if subcommand_name == "table":
            if is_pipestat_configured:
                return Tabulator(prj)(subcommand_args)
            else:
                raise PipestatConfigurationException("table")

        if subcommand_name == "report":
            if is_pipestat_configured:
                return Reporter(prj)(subcommand_args)
            else:
                raise PipestatConfigurationException("report")

        if subcommand_name == "link":
            if is_pipestat_configured:
                Linker(prj)(subcommand_args)
            else:
                raise PipestatConfigurationException("link")

        if subcommand_name == "check":
            if is_pipestat_configured:
                return Checker(prj)(subcommand_args)
            else:
                raise PipestatConfigurationException("check")

        if subcommand_name == "clean":
            return Cleaner(prj)(subcommand_args)

        if subcommand_name == "inspect":
            # Inspect PEP from Eido
            sample_names = []
            for sample in p.samples:
                sample_names.append(sample["sample_name"])
            inspect_project(p, sample_names)
            # Inspect looper config file
            if looper_config_dict:
                inspect_looper_config_file(looper_config_dict)
            else:
                _LOGGER.warning("No looper configuration was supplied.")
|
335
|
+
|
336
|
+
|
337
|
+
def main(test_args=None) -> dict:
    """
    Build the pydantic-argparse CLI, parse arguments, and run looper.

    :param test_args: optional raw argument list (used by tests); when absent,
        arguments are taken from `sys.argv`
    :return dict: result of `run_looper` for the selected subcommand
    """
    parser = pydantic_argparse.ArgumentParser(
        model=TopLevelParser,
        prog="looper",
        description="Looper: A job submitter for Portable Encapsulated Projects",
        add_help=True,
        # Use the package version directly instead of a hard-coded literal so
        # `--version` cannot drift from `looper/_version.py`.
        version=__version__,
    )

    # Register the short (single-dash) aliases defined alongside the models.
    parser = add_short_arguments(parser, ArgumentEnum)

    if test_args:
        args = parser.parse_typed_args(args=test_args)
    else:
        args = parser.parse_typed_args()

    return run_looper(args, parser, test_args=test_args)
|
354
|
+
|
355
|
+
|
356
|
+
def main_cli() -> None:
    """
    Console-script entry point.

    Invokes :func:`main` and deliberately discards its return value, matching
    the declared ``None`` return type.
    """
    main()
|
358
|
+
|
359
|
+
|
360
|
+
def _proc_resources_spec(args):
    """
    Process CLI-sourced compute setting specification. There are two sources
    of compute settings in the CLI alone:
        * YAML file (--settings argument)
        * itemized compute settings (--compute argument)

    The itemized compute specification is given priority

    :param argparse.Namespace args: arguments namespace
    :return Mapping[str, str]: binding between resource setting name and value
    :raise ValueError: if interpretation of the given specification as encoding
        of key-value pairs fails
    """
    spec = getattr(args, "compute", None)
    # Not every subcommand namespace defines `settings`; default to None like
    # the `compute` lookup above instead of risking an AttributeError.
    settings = getattr(args, "settings", None)
    try:
        settings_data = read_yaml_file(settings) or {}
    except yaml.YAMLError:
        _LOGGER.warning(
            "Settings file ({}) does not follow YAML format,"
            " disregarding".format(settings)
        )
        settings_data = {}
    if not spec:
        return settings_data
    if isinstance(
        spec, str
    ):  # compute: "partition=standard time='01-00:00:00' cores='32' mem='32000'"
        spec = spec.split(sep=" ")
    if isinstance(spec, list):
        # Each item must be a single key=value pair; collect malformed ones.
        pairs = [(kv, kv.split("=")) for kv in spec]
        bads = []
        for orig, pair in pairs:
            try:
                k, v = pair
            except ValueError:
                bads.append(orig)
            else:
                # Itemized settings override anything from the YAML file.
                settings_data[k] = v
        if bads:
            raise ValueError(
                "Could not correctly parse itemized compute specification. "
                "Correct format: " + EXAMPLE_COMPUTE_SPEC_FMT
            )
    elif isinstance(spec, dict):
        for key, value in spec.items():
            settings_data[key] = value

    return settings_data
|
410
|
+
|
411
|
+
|
412
|
+
if __name__ == "__main__":
|
413
|
+
main()
|
@@ -0,0 +1,85 @@
|
|
1
|
+
# Developer documentation
|
2
|
+
|
3
|
+
## Adding new command models
|
4
|
+
|
5
|
+
To add a new model (command) to the project, follow these steps:
|
6
|
+
|
7
|
+
1. Add new arguments in `looper/command_models/arguments.py` if necessary.
|
8
|
+
|
9
|
+
- Add a new entry for the `ArgumentEnum` class.
|
10
|
+
- For example:
|
11
|
+
|
12
|
+
```python
|
13
|
+
# arguments.py
|
14
|
+
|
15
|
+
class ArgumentEnum(enum.Enum):
|
16
|
+
...
|
17
|
+
|
18
|
+
NEW_ARGUMENT = Argument(
|
19
|
+
name="new_argument",
|
20
|
+
default=(new_argument_type, "default_value"),
|
21
|
+
description="Description of the new argument",
|
22
|
+
)
|
23
|
+
|
24
|
+
```
|
25
|
+
|
26
|
+
2. Create a new command in the existing command creation logic in `looper/command_models/commands.py`.
|
27
|
+
|
28
|
+
- Create a new `Command` instance.
|
29
|
+
- Create a `pydantic` model for this new command.
|
30
|
+
- Add the new `Command` instance to `SUPPORTED_COMMANDS`.
|
31
|
+
- For example:
|
32
|
+
|
33
|
+
```python
|
34
|
+
NewCommandParser = Command(
|
35
|
+
"new_command",
|
36
|
+
MESSAGE_BY_SUBCOMMAND["new_command"],
|
37
|
+
[
|
38
|
+
...
|
39
|
+
ArgumentEnum.NEW_ARGUMENT.value,
|
40
|
+
# Add more arguments as needed for the new command
|
41
|
+
],
|
42
|
+
)
|
43
|
+
NewCommandParserModel = NewCommandParser.create_model()
|
44
|
+
|
45
|
+
SUPPORTED_COMMANDS = [..., NewCommandParser]
|
46
|
+
```
|
47
|
+
|
48
|
+
3. Update the new argument(s) and command in `TopLevelParser` from `looper/command_models/commands.py`.
|
49
|
+
|
50
|
+
- Add a new field for the new command.
|
51
|
+
- Add a new field for the new argument(s).
|
52
|
+
- For example:
|
53
|
+
|
54
|
+
```python
|
55
|
+
class TopLevelParser(pydantic.BaseModel):
|
56
|
+
|
57
|
+
# commands
|
58
|
+
...
|
59
|
+
new_command: Optional[NewCommandParserModel] = pydantic.Field(description=NewCommandParser.description)
|
60
|
+
|
61
|
+
# arguments
|
62
|
+
...
|
63
|
+
new_argument: Optional[new_argument_type] = ArgumentEnum.NEW_ARGUMENT.value.with_reduced_default()
|
64
|
+
```
|
65
|
+
|
66
|
+
## Special treatment for the `run` command
|
67
|
+
|
68
|
+
The `run` command in our project requires special treatment to accommodate hierarchical namespaces
|
69
|
+
and properly handle its unique characteristics. Several functions have been adapted to ensure the
|
70
|
+
correct behavior of the run command, and similar adaptations may be necessary for other commands.
|
71
|
+
|
72
|
+
For developers looking to understand the details of the special treatment given to the `run`
|
73
|
+
command and its associated changes, we recommend to inspect the following functions / part of the
|
74
|
+
code:
|
75
|
+
- `looper/cli_pydantic.py` (which replaces the removed `looper/cli_looper.py`):
|
76
|
+
- `make_hierarchical_if_needed()`
|
77
|
+
- assignment of the `divcfg` variable
|
78
|
+
- assignment of the `project_args` variable
|
79
|
+
- `_proc_resources_spec()`
|
80
|
+
- `validate_post_parse()`
|
81
|
+
- `looper/utils.py`:
|
82
|
+
- `enrich_args_via_cfg()`
|
83
|
+
|
84
|
+
If you are adding new commands to the project / migrate existing commands to a `pydantic` model-based definition, adapt these parts of the codes with equivalent behavior for your new command.
|
85
|
+
Likewise, adapt argument accessions in the corresponding executor in `looper/looper.py` to take into account the hierarchical organization of the command's arguments.
|
@@ -0,0 +1,4 @@
|
|
1
|
+
# `pydantic`-based definitions of `looper` commands and their arguments
|
2
|
+
|
3
|
+
With the goal of writing an HTTP API that is in sync with the `looper` CLI, this module defines `looper` commands as `pydantic` models and arguments as fields in there.
|
4
|
+
These can then be used by the [`pydantic-argparse`](https://pydantic-argparse.supimdos.com/) library to create a type-validated CLI (see `../cli_pydantic.py`), and by the future HTTP API for validating `POST`ed JSON data. Eventually, the `pydantic-argparse`-based CLI will replace the existing `argparse`-based CLI defined in `../cli_looper.py`.
|