looper 1.7.0a1__py3-none-any.whl → 2.0.0__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these package versions as they appear in their respective public registries.
looper/cli_looper.py DELETED
@@ -1,788 +0,0 @@
-import argparse
-import logmuse
-import os
-import sys
-import yaml
-
-from eido import inspect_project
-from pephubclient import PEPHubClient
-from typing import Tuple, List
-from ubiquerg import VersionInHelpParser
-
-from . import __version__
-from .const import *
-from .divvy import DEFAULT_COMPUTE_RESOURCES_NAME, select_divvy_config
-from .exceptions import *
-from .looper import *
-from .parser_types import *
-from .project import Project, ProjectContext
-from .utils import (
-    dotfile_path,
-    enrich_args_via_cfg,
-    is_registry_path,
-    read_looper_dotfile,
-    read_looper_config_file,
-    read_yaml_file,
-    initiate_looper_config,
-    init_generic_pipeline,
-)
-
-
-class _StoreBoolActionType(argparse.Action):
-    """
-    Enables the storage of a boolean const and custom type definition needed
-    for systematic html interface generation. To get the _StoreTrueAction
-    output use default=False in the add_argument function
-    and default=True to get _StoreFalseAction output.
-    """
-
-    def __init__(self, option_strings, dest, type, default, required=False, help=None):
-        super(_StoreBoolActionType, self).__init__(
-            option_strings=option_strings,
-            dest=dest,
-            nargs=0,
-            const=not default,
-            default=default,
-            type=type,
-            required=required,
-            help=help,
-        )
-
-    def __call__(self, parser, namespace, values, option_string=None):
-        setattr(namespace, self.dest, self.const)
-
-
-def build_parser():
-    """
-    Building argument parser.
-
-    :return argparse.ArgumentParser
-    """
-    # Main looper program help text messages
-    banner = "%(prog)s - A project job submission engine and project manager."
-    additional_description = (
-        "For subcommand-specific options, " "type: '%(prog)s <subcommand> -h'"
-    )
-    additional_description += "\nhttps://github.com/pepkit/looper"
-
-    parser = VersionInHelpParser(
-        prog="looper",
-        description=banner,
-        epilog=additional_description,
-        version=__version__,
-    )
-
-    aux_parser = VersionInHelpParser(
-        prog="looper",
-        description=banner,
-        epilog=additional_description,
-        version=__version__,
-    )
-    result = []
-    for parser in [parser, aux_parser]:
-        # Logging control
-        parser.add_argument(
-            "--logfile",
-            help="Optional output file for looper logs " "(default: %(default)s)",
-        )
-        parser.add_argument("--logging-level", help=argparse.SUPPRESS)
-        parser.add_argument(
-            "--dbg",
-            action="store_true",
-            help="Turn on debug mode (default: %(default)s)",
-        )
-
-        parser = logmuse.add_logging_options(parser)
-        subparsers = parser.add_subparsers(dest="command")
-
-        def add_subparser(cmd):
-            message = MESSAGE_BY_SUBCOMMAND[cmd]
-            return subparsers.add_parser(
-                cmd,
-                description=message,
-                help=message,
-                formatter_class=lambda prog: argparse.HelpFormatter(
-                    prog, max_help_position=37, width=90
-                ),
-            )
-
-        # Run and rerun command
-        run_subparser = add_subparser("run")
-        rerun_subparser = add_subparser("rerun")
-        collate_subparser = add_subparser("runp")
-        table_subparser = add_subparser("table")
-        report_subparser = add_subparser("report")
-        destroy_subparser = add_subparser("destroy")
-        check_subparser = add_subparser("check")
-        clean_subparser = add_subparser("clean")
-        inspect_subparser = add_subparser("inspect")
-        init_subparser = add_subparser("init")
-        init_piface = add_subparser("init-piface")
-        link_subparser = add_subparser("link")
-
-        # Flag arguments
-        ####################################################################
-        for subparser in [run_subparser, rerun_subparser, collate_subparser]:
-            subparser.add_argument(
-                "-i",
-                "--ignore-flags",
-                default=False,
-                action=_StoreBoolActionType,
-                type=html_checkbox(checked=False),
-                help="Ignore run status flags? Default=False",
-            )
-
-        for subparser in [
-            run_subparser,
-            rerun_subparser,
-            destroy_subparser,
-            clean_subparser,
-            collate_subparser,
-        ]:
-            subparser.add_argument(
-                "-d",
-                "--dry-run",
-                action=_StoreBoolActionType,
-                default=False,
-                type=html_checkbox(checked=False),
-                help="Don't actually submit the jobs. Default=False",
-            )
-
-        # Parameter arguments
-        ####################################################################
-        for subparser in [run_subparser, rerun_subparser, collate_subparser]:
-            subparser.add_argument(
-                "-t",
-                "--time-delay",
-                metavar="S",
-                type=html_range(min_val=0, max_val=30, value=0),
-                default=0,
-                help="Time delay in seconds between job submissions",
-            )
-
-            subparser.add_argument(
-                "-x",
-                "--command-extra",
-                default="",
-                metavar="S",
-                help="String to append to every command",
-            )
-            subparser.add_argument(
-                "-y",
-                "--command-extra-override",
-                metavar="S",
-                default="",
-                help="Same as command-extra, but overrides values in PEP",
-            )
-            subparser.add_argument(
-                "-f",
-                "--skip-file-checks",
-                action=_StoreBoolActionType,
-                default=False,
-                type=html_checkbox(checked=False),
-                help="Do not perform input file checks",
-            )
-
-            divvy_group = subparser.add_argument_group(
-                "divvy arguments", "Configure divvy to change computing settings"
-            )
-            divvy_group.add_argument(
-                "--divvy",
-                default=None,
-                metavar="DIVCFG",
-                help="Path to divvy configuration file. Default=$DIVCFG env "
-                "variable. Currently: {}".format(
-                    os.getenv("DIVCFG", None) or "not set"
-                ),
-            )
-            divvy_group.add_argument(
-                "-p",
-                "--package",
-                metavar="P",
-                help="Name of computing resource package to use",
-            )
-            divvy_group.add_argument(
-                "-s",
-                "--settings",
-                default="",
-                metavar="S",
-                help="Path to a YAML settings file with compute settings",
-            )
-            divvy_group.add_argument(
-                "-c",
-                "--compute",
-                metavar="K",
-                nargs="+",
-                help="List of key-value pairs (k1=v1)",
-            )
-
-        for subparser in [run_subparser, rerun_subparser]:
-            subparser.add_argument(
-                "-u",
-                "--lump",
-                default=None,
-                metavar="X",
-                type=html_range(min_val=0, max_val=100, step=0.1, value=0),
-                help="Total input file size (GB) to batch into one job",
-            )
-            subparser.add_argument(
-                "-n",
-                "--lumpn",
-                default=None,
-                metavar="N",
-                type=html_range(min_val=1, max_val="num_samples", value=1),
-                help="Number of commands to batch into one job",
-            )
-
-        check_subparser.add_argument(
-            "--describe-codes",
-            help="Show status codes description",
-            action="store_true",
-            default=False,
-        )
-
-        check_subparser.add_argument(
-            "--itemized",
-            help="Show a detailed, by sample statuses",
-            action="store_true",
-            default=False,
-        )
-
-        check_subparser.add_argument(
-            "-f",
-            "--flags",
-            nargs="*",
-            default=FLAGS,
-            type=html_select(choices=FLAGS),
-            metavar="F",
-            help="Check on only these flags/status values",
-        )
-
-        for subparser in [destroy_subparser, clean_subparser]:
-            subparser.add_argument(
-                "--force-yes",
-                action=_StoreBoolActionType,
-                default=False,
-                type=html_checkbox(checked=False),
-                help="Provide upfront confirmation of destruction intent, "
-                "to skip console query. Default=False",
-            )
-
-        init_subparser.add_argument(
-            "pep_config", help="Project configuration file (PEP)"
-        )
-
-        init_subparser.add_argument(
-            "-f", "--force", help="Force overwrite", action="store_true", default=False
-        )
-
-        init_subparser.add_argument(
-            "-o",
-            "--output-dir",
-            dest="output_dir",
-            metavar="DIR",
-            default=None,
-            type=str,
-        )
-
-        init_subparser.add_argument(
-            "-S",
-            "--sample-pipeline-interfaces",
-            dest=SAMPLE_PL_ARG,
-            metavar="YAML",
-            default=None,
-            nargs="+",
-            type=str,
-            help="Path to looper sample config file",
-        )
-        init_subparser.add_argument(
-            "-P",
-            "--project-pipeline-interfaces",
-            dest=PROJECT_PL_ARG,
-            metavar="YAML",
-            default=None,
-            nargs="+",
-            type=str,
-            help="Path to looper project config file",
-        )
-
-        # TODO: add ouput dir, sample, project pifaces
-
-        init_subparser.add_argument(
-            "-p",
-            "--piface",
-            help="Generates generic pipeline interface",
-            action="store_true",
-            default=False,
-        )
-
-        # Common arguments
-        for subparser in [
-            run_subparser,
-            rerun_subparser,
-            table_subparser,
-            report_subparser,
-            destroy_subparser,
-            check_subparser,
-            clean_subparser,
-            collate_subparser,
-            inspect_subparser,
-            link_subparser,
-        ]:
-            subparser.add_argument(
-                "config_file",
-                nargs="?",
-                default=None,
-                help="Project configuration file (YAML) or pephub registry path.",
-            )
-            subparser.add_argument(
-                "--looper-config",
-                required=False,
-                default=None,
-                type=str,
-                help="Looper configuration file (YAML)",
-            )
-            # help="Path to the looper config file"
-            subparser.add_argument(
-                "-S",
-                "--sample-pipeline-interfaces",
-                dest=SAMPLE_PL_ARG,
-                metavar="YAML",
-                default=None,
-                nargs="+",
-                type=str,
-                help="Path to looper sample config file",
-            )
-            subparser.add_argument(
-                "-P",
-                "--project-pipeline-interfaces",
-                dest=PROJECT_PL_ARG,
-                metavar="YAML",
-                default=None,
-                nargs="+",
-                type=str,
-                help="Path to looper project config file",
-            )
-            # help="Path to the output directory"
-            subparser.add_argument(
-                "-o",
-                "--output-dir",
-                dest="output_dir",
-                metavar="DIR",
-                default=None,
-                type=str,
-                help=argparse.SUPPRESS,
-            )
-            # "Submission subdirectory name"
-            subparser.add_argument(
-                "--submission-subdir", metavar="DIR", help=argparse.SUPPRESS
-            )
-            # "Results subdirectory name"
-            subparser.add_argument(
-                "--results-subdir", metavar="DIR", help=argparse.SUPPRESS
-            )
-            # "Sample attribute for pipeline interface sources"
-            subparser.add_argument(
-                "--pipeline-interfaces-key", metavar="K", help=argparse.SUPPRESS
-            )
-            # "Paths to pipeline interface files"
-            subparser.add_argument(
-                "--pipeline-interfaces",
-                metavar="P",
-                nargs="+",
-                action="append",
-                help=argparse.SUPPRESS,
-            )
-
-        for subparser in [
-            run_subparser,
-            rerun_subparser,
-            table_subparser,
-            report_subparser,
-            destroy_subparser,
-            check_subparser,
-            clean_subparser,
-            collate_subparser,
-            inspect_subparser,
-            link_subparser,
-        ]:
-            fetch_samples_group = subparser.add_argument_group(
-                "sample selection arguments",
-                "Specify samples to include or exclude based on sample attribute values",
-            )
-            fetch_samples_group.add_argument(
-                "-l",
-                "--limit",
-                default=None,
-                metavar="N",
-                type=html_range(min_val=1, max_val="num_samples", value="num_samples"),
-                help="Limit to n samples",
-            )
-            fetch_samples_group.add_argument(
-                "-k",
-                "--skip",
-                default=None,
-                metavar="N",
-                type=html_range(min_val=1, max_val="num_samples", value="num_samples"),
-                help="Skip samples by numerical index",
-            )
-
-            fetch_samples_group.add_argument(
-                f"--{SAMPLE_SELECTION_ATTRIBUTE_OPTNAME}",
-                default="toggle",
-                metavar="ATTR",
-                help="Attribute for sample exclusion OR inclusion",
-            )
-
-            protocols = fetch_samples_group.add_mutually_exclusive_group()
-            protocols.add_argument(
-                f"--{SAMPLE_EXCLUSION_OPTNAME}",
-                nargs="*",
-                metavar="E",
-                help="Exclude samples with these values",
-            )
-            protocols.add_argument(
-                f"--{SAMPLE_INCLUSION_OPTNAME}",
-                nargs="*",
-                metavar="I",
-                help="Include only samples with these values",
-            )
-            fetch_samples_group.add_argument(
-                f"--{SAMPLE_SELECTION_FLAG_OPTNAME}",
-                default=None,
-                nargs="*",
-                metavar="SELFLAG",
-                help="Include samples with this flag status, e.g. completed",
-            )
-
-            fetch_samples_group.add_argument(
-                f"--{SAMPLE_EXCLUSION_FLAG_OPTNAME}",
-                default=None,
-                nargs="*",
-                metavar="EXCFLAG",
-                help="Exclude samples with this flag status, e.g. completed",
-            )
-
-            subparser.add_argument(
-                "-a",
-                "--amend",
-                nargs="+",
-                metavar="A",
-                help="List of amendments to activate",
-            )
-        for subparser in [
-            report_subparser,
-            table_subparser,
-            check_subparser,
-            destroy_subparser,
-            link_subparser,
-        ]:
-            subparser.add_argument(
-                "--project",
-                help="Process project-level pipelines",
-                action="store_true",
-                default=False,
-            )
-        inspect_subparser.add_argument(
-            "--sample-names",
-            help="Names of the samples to inspect",
-            nargs="*",
-            default=None,
-        )
-
-        inspect_subparser.add_argument(
-            "--attr-limit",
-            help="Number of attributes to display",
-            type=int,
-        )
-        parser.add_argument(
-            "--commands",
-            action="version",
-            version="{}".format(" ".join(subparsers.choices.keys())),
-        )
-
-        report_subparser.add_argument(
-            "--portable",
-            help="Makes html report portable.",
-            action="store_true",
-        )
-
-        result.append(parser)
-    return result
-
-
-def opt_attr_pair(name: str) -> Tuple[str, str]:
-    return f"--{name}", name.replace("-", "_")
-
-
-def validate_post_parse(args: argparse.Namespace) -> List[str]:
-    problems = []
-    used_exclusives = [
-        opt
-        for opt, attr in map(
-            opt_attr_pair,
-            [
-                "skip",
-                "limit",
-                SAMPLE_EXCLUSION_OPTNAME,
-                SAMPLE_INCLUSION_OPTNAME,
-            ],
-        )
-        if getattr(args, attr, None)
-    ]
-    if len(used_exclusives) > 1:
-        problems.append(
-            f"Used multiple mutually exclusive options: {', '.join(used_exclusives)}"
-        )
-    return problems
-
-
-def _proc_resources_spec(args):
-    """
-    Process CLI-sources compute setting specification. There are two sources
-    of compute settings in the CLI alone:
-    * YAML file (--settings argument)
-    * itemized compute settings (--compute argument)
-
-    The itemized compute specification is given priority
-
-    :param argparse.Namespace: arguments namespace
-    :return Mapping[str, str]: binding between resource setting name and value
-    :raise ValueError: if interpretation of the given specification as encoding
-        of key-value pairs fails
-    """
-    spec = getattr(args, "compute", None)
-    try:
-        settings_data = read_yaml_file(args.settings) or {}
-    except yaml.YAMLError:
-        _LOGGER.warning(
-            "Settings file ({}) does not follow YAML format,"
-            " disregarding".format(args.settings)
-        )
-        settings_data = {}
-    if not spec:
-        return settings_data
-    pairs = [(kv, kv.split("=")) for kv in spec]
-    bads = []
-    for orig, pair in pairs:
-        try:
-            k, v = pair
-        except ValueError:
-            bads.append(orig)
-        else:
-            settings_data[k] = v
-    if bads:
-        raise ValueError(
-            "Could not correctly parse itemized compute specification. "
-            "Correct format: " + EXAMPLE_COMPUTE_SPEC_FMT
-        )
-    return settings_data
-
-
-def main(test_args=None):
-    """Primary workflow"""
-    global _LOGGER
-
-    parser, aux_parser = build_parser()
-    aux_parser.suppress_defaults()
-
-    if test_args:
-        args, remaining_args = parser.parse_known_args(args=test_args)
-    else:
-        args, remaining_args = parser.parse_known_args()
-
-    cli_use_errors = validate_post_parse(args)
-    if cli_use_errors:
-        parser.print_help(sys.stderr)
-        parser.error(
-            f"{len(cli_use_errors)} CLI use problem(s): {', '.join(cli_use_errors)}"
-        )
-    if args.command is None:
-        parser.print_help(sys.stderr)
-        sys.exit(1)
-
-    if args.command == "init":
-        return int(
-            not initiate_looper_config(
-                dotfile_path(),
-                args.pep_config,
-                args.output_dir,
-                args.sample_pipeline_interfaces,
-                args.project_pipeline_interfaces,
-                args.force,
-            )
-        )
-
-    if args.command == "init-piface":
-        sys.exit(int(not init_generic_pipeline()))
-
-    _LOGGER = logmuse.logger_via_cli(args, make_root=True)
-    _LOGGER.info("Looper version: {}\nCommand: {}".format(__version__, args.command))
-
-    if "config_file" in vars(args):
-        if args.config_file is None:
-            looper_cfg_path = os.path.relpath(dotfile_path(), start=os.curdir)
-            try:
-                if args.looper_config:
-                    looper_config_dict = read_looper_config_file(args.looper_config)
-                else:
-                    looper_config_dict = read_looper_dotfile()
-                    _LOGGER.info(f"Using looper config ({looper_cfg_path}).")
-
-                for looper_config_key, looper_config_item in looper_config_dict.items():
-                    setattr(args, looper_config_key, looper_config_item)
-
-            except OSError:
-                parser.print_help(sys.stderr)
-                _LOGGER.warning(
-                    f"Looper config file does not exist. Use looper init to create one at {looper_cfg_path}."
-                )
-                sys.exit(1)
-        else:
-            _LOGGER.warning(
-                "This PEP configures looper through the project config. This approach is deprecated and will "
-                "be removed in future versions. Please use a looper config file. For more information see "
-                "looper.databio.org/en/latest/looper-config"
-            )
-
-    args = enrich_args_via_cfg(args, aux_parser, test_args)
-
-    # If project pipeline interface defined in the cli, change name to: "pipeline_interface"
-    if vars(args)[PROJECT_PL_ARG]:
-        args.pipeline_interfaces = vars(args)[PROJECT_PL_ARG]
-
-    if len(remaining_args) > 0:
-        _LOGGER.warning(
-            "Unrecognized arguments: {}".format(
-                " ".join([str(x) for x in remaining_args])
-            )
-        )
-
-    divcfg = (
-        select_divvy_config(filepath=args.divvy) if hasattr(args, "divvy") else None
-    )
-
-    # Ignore flags if user is selecting or excluding on flags:
-    if args.sel_flag or args.exc_flag:
-        args.ignore_flags = True
-
-    # Initialize project
-    if is_registry_path(args.config_file):
-        if vars(args)[SAMPLE_PL_ARG]:
-            p = Project(
-                amendments=args.amend,
-                divcfg_path=divcfg,
-                runp=args.command == "runp",
-                project_dict=PEPHubClient()._load_raw_pep(
-                    registry_path=args.config_file
-                ),
-                **{
-                    attr: getattr(args, attr) for attr in CLI_PROJ_ATTRS if attr in args
-                },
-            )
-        else:
-            raise MisconfigurationException(
-                f"`sample_pipeline_interface` is missing. Provide it in the parameters."
-            )
-    else:
-        try:
-            p = Project(
-                cfg=args.config_file,
-                amendments=args.amend,
-                divcfg_path=divcfg,
-                runp=args.command == "runp",
-                **{
-                    attr: getattr(args, attr) for attr in CLI_PROJ_ATTRS if attr in args
-                },
-            )
-        except yaml.parser.ParserError as e:
-            _LOGGER.error(f"Project config parse failed -- {e}")
-            sys.exit(1)
-
-    selected_compute_pkg = p.selected_compute_package or DEFAULT_COMPUTE_RESOURCES_NAME
-    if p.dcc is not None and not p.dcc.activate_package(selected_compute_pkg):
-        _LOGGER.info(
-            "Failed to activate '{}' computing package. "
-            "Using the default one".format(selected_compute_pkg)
-        )
-
-    with ProjectContext(
-        prj=p,
-        selector_attribute=args.sel_attr,
-        selector_include=args.sel_incl,
-        selector_exclude=args.sel_excl,
-        selector_flag=args.sel_flag,
-        exclusion_flag=args.exc_flag,
-    ) as prj:
-        if args.command in ["run", "rerun"]:
-            run = Runner(prj)
-            try:
-                compute_kwargs = _proc_resources_spec(args)
-                return run(args, rerun=(args.command == "rerun"), **compute_kwargs)
-            except SampleFailedException:
-                sys.exit(1)
-            except IOError:
-                _LOGGER.error(
-                    "{} pipeline_interfaces: '{}'".format(
-                        prj.__class__.__name__, prj.pipeline_interface_sources
-                    )
-                )
-                raise
-
-        if args.command == "runp":
-            compute_kwargs = _proc_resources_spec(args)
-            collate = Collator(prj)
-            collate(args, **compute_kwargs)
-            return collate.debug
-
-        if args.command == "destroy":
-            return Destroyer(prj)(args)
-
-        # pipestat support introduces breaking changes and pipelines run
-        # with no pipestat reporting would not be compatible with
-        # commands: table, report and check. Therefore we plan maintain
-        # the old implementations for a couple of releases.
-        # if hasattr(args, "project"):
-        #     use_pipestat = (
-        #         prj.pipestat_configured_project
-        #         if args.project
-        #         else prj.pipestat_configured
-        #     )
-        use_pipestat = (
-            prj.pipestat_configured_project if args.project else prj.pipestat_configured
-        )
-        if args.command == "table":
-            if use_pipestat:
-                Tabulator(prj)(args)
-            else:
-                raise PipestatConfigurationException("table")
-
-        if args.command == "report":
-            if use_pipestat:
-                Reporter(prj)(args)
-            else:
-                raise PipestatConfigurationException("report")
-
-        if args.command == "link":
-            if use_pipestat:
-                Linker(prj)(args)
-            else:
-                raise PipestatConfigurationException("link")
-
-        if args.command == "check":
-            if use_pipestat:
-                return Checker(prj)(args)
-            else:
-                raise PipestatConfigurationException("check")
-
-        if args.command == "clean":
-            return Cleaner(prj)(args)
-
-        if args.command == "inspect":
-            inspect_project(p, args.sample_names, args.attr_limit)
-            from warnings import warn
-
-            warn(
-                "The inspect feature has moved to eido and will be removed in the future release of looper. "
-                "Use `eido inspect` from now on.",
-            )
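
Note on the removed file above: its `_proc_resources_spec` helper merged compute settings from two CLI sources, a YAML file passed via `--settings` and itemized `--compute key=value` pairs, with the itemized pairs taking priority. The following standalone sketch mirrors that merge logic for readers tracing the removal; it is illustrative only, and the function and variable names here are hypothetical, not part of either package version.

    # Illustrative sketch (not looper code): key=value parsing and precedence
    # as implemented by the removed _proc_resources_spec helper.
    from typing import Dict, List, Optional


    def merge_compute_settings(
        yaml_settings: Dict[str, str], compute_pairs: Optional[List[str]]
    ) -> Dict[str, str]:
        merged = dict(yaml_settings)  # values read from the --settings YAML file
        bad = []
        for item in compute_pairs or []:
            try:
                key, value = item.split("=")  # expects exactly one '=' per pair
            except ValueError:
                bad.append(item)
            else:
                merged[key] = value  # itemized --compute values take priority
        if bad:
            raise ValueError(f"Malformed compute specification(s): {', '.join(bad)}")
        return merged


    # Example: merge_compute_settings({"mem": "4G"}, ["mem=8G", "cores=4"])
    # returns {"mem": "8G", "cores": "4"}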