looper 1.7.0__py3-none-any.whl → 2.0.0__py3-none-any.whl

Sign up to get free protection for your applications and to get access to all the features.
looper/cli_looper.py DELETED
@@ -1,796 +0,0 @@
1
- import argparse
2
- import logmuse
3
- import os
4
- import sys
5
- import yaml
6
-
7
- from eido import inspect_project
8
- from pephubclient import PEPHubClient
9
- from typing import Tuple, List
10
- from ubiquerg import VersionInHelpParser
11
-
12
- from . import __version__
13
- from .const import *
14
- from .divvy import DEFAULT_COMPUTE_RESOURCES_NAME, select_divvy_config
15
- from .exceptions import *
16
- from .looper import *
17
- from .parser_types import *
18
- from .project import Project, ProjectContext
19
- from .utils import (
20
- dotfile_path,
21
- enrich_args_via_cfg,
22
- is_registry_path,
23
- read_looper_dotfile,
24
- read_looper_config_file,
25
- read_yaml_file,
26
- initiate_looper_config,
27
- init_generic_pipeline,
28
- )
29
-
30
-
31
class _StoreBoolActionType(argparse.Action):
    """
    Argparse action that stores a boolean constant while also carrying an
    explicit ``type`` callable, which is needed for systematic HTML
    interface generation. Use ``default=False`` to get behavior analogous
    to ``_StoreTrueAction``, and ``default=True`` for ``_StoreFalseAction``.
    """

    def __init__(self, option_strings, dest, type, default, required=False, help=None):
        # The stored const is the negation of the default: supplying the
        # flag on the command line always flips the default value.
        super().__init__(
            option_strings=option_strings,
            dest=dest,
            default=default,
            const=not default,
            nargs=0,
            type=type,
            required=required,
            help=help,
        )

    def __call__(self, parser, namespace, values, option_string=None):
        # Consume no values; just record the pre-computed constant.
        setattr(namespace, self.dest, self.const)
53
-
54
-
55
def build_parser():
    """
    Build the looper CLI argument parsers.

    Two structurally identical parsers are configured in lockstep: the
    primary parser used to parse the actual command line, and an auxiliary
    copy (whose defaults are suppressed by the caller via
    ``suppress_defaults()``) used to tell apart explicitly-passed options
    from defaults when merging CLI args with config-file values.

    :return List[argparse.ArgumentParser]: [primary parser, auxiliary parser]
    """
    # Main looper program help text messages
    banner = "%(prog)s - A project job submission engine and project manager."
    additional_description = (
        "For subcommand-specific options, " "type: '%(prog)s <subcommand> -h'"
    )
    additional_description += "\nhttps://github.com/pepkit/looper"

    parser = VersionInHelpParser(
        prog="looper",
        description=banner,
        epilog=additional_description,
        version=__version__,
    )

    aux_parser = VersionInHelpParser(
        prog="looper",
        description=banner,
        epilog=additional_description,
        version=__version__,
    )
    result = []
    # NOTE: the loop deliberately rebinds the name `parser`; both parsers
    # receive the identical argument/subcommand layout, and each is appended
    # to `result` at the end of the loop body.
    for parser in [parser, aux_parser]:
        # Logging control
        parser.add_argument(
            "--logfile",
            help="Optional output file for looper logs " "(default: %(default)s)",
        )
        parser.add_argument("--logging-level", help=argparse.SUPPRESS)
        parser.add_argument(
            "--dbg",
            action="store_true",
            help="Turn on debug mode (default: %(default)s)",
        )

        parser = logmuse.add_logging_options(parser)
        subparsers = parser.add_subparsers(dest="command")

        def add_subparser(cmd):
            # Create one subcommand parser; help text comes from the
            # shared message table keyed by subcommand name.
            message = MESSAGE_BY_SUBCOMMAND[cmd]
            return subparsers.add_parser(
                cmd,
                description=message,
                help=message,
                formatter_class=lambda prog: argparse.HelpFormatter(
                    prog, max_help_position=37, width=90
                ),
            )

        # Run and rerun command
        run_subparser = add_subparser("run")
        rerun_subparser = add_subparser("rerun")
        collate_subparser = add_subparser("runp")
        table_subparser = add_subparser("table")
        report_subparser = add_subparser("report")
        destroy_subparser = add_subparser("destroy")
        check_subparser = add_subparser("check")
        clean_subparser = add_subparser("clean")
        inspect_subparser = add_subparser("inspect")
        init_subparser = add_subparser("init")
        init_piface = add_subparser("init-piface")
        link_subparser = add_subparser("link")

        # Flag arguments
        ####################################################################
        for subparser in [run_subparser, rerun_subparser, collate_subparser]:
            subparser.add_argument(
                "-i",
                "--ignore-flags",
                default=False,
                action=_StoreBoolActionType,
                type=html_checkbox(checked=False),
                help="Ignore run status flags? Default=False",
            )

        for subparser in [
            run_subparser,
            rerun_subparser,
            destroy_subparser,
            clean_subparser,
            collate_subparser,
        ]:
            subparser.add_argument(
                "-d",
                "--dry-run",
                action=_StoreBoolActionType,
                default=False,
                type=html_checkbox(checked=False),
                help="Don't actually submit the jobs. Default=False",
            )

        # Parameter arguments
        ####################################################################
        for subparser in [run_subparser, rerun_subparser, collate_subparser]:
            subparser.add_argument(
                "-t",
                "--time-delay",
                metavar="S",
                type=html_range(min_val=0, max_val=30, value=0),
                default=0,
                help="Time delay in seconds between job submissions",
            )

            subparser.add_argument(
                "-x",
                "--command-extra",
                default="",
                metavar="S",
                help="String to append to every command",
            )
            subparser.add_argument(
                "-y",
                "--command-extra-override",
                metavar="S",
                default="",
                help="Same as command-extra, but overrides values in PEP",
            )
            subparser.add_argument(
                "-f",
                "--skip-file-checks",
                action=_StoreBoolActionType,
                default=False,
                type=html_checkbox(checked=False),
                help="Do not perform input file checks",
            )

            # Compute-settings (divvy) options, shown per submission command.
            divvy_group = subparser.add_argument_group(
                "divvy arguments", "Configure divvy to change computing settings"
            )
            divvy_group.add_argument(
                "--divvy",
                default=None,
                metavar="DIVCFG",
                help="Path to divvy configuration file. Default=$DIVCFG env "
                "variable. Currently: {}".format(
                    os.getenv("DIVCFG", None) or "not set"
                ),
            )
            divvy_group.add_argument(
                "-p",
                "--package",
                metavar="P",
                help="Name of computing resource package to use",
            )
            divvy_group.add_argument(
                "-s",
                "--settings",
                default="",
                metavar="S",
                help="Path to a YAML settings file with compute settings",
            )
            divvy_group.add_argument(
                "-c",
                "--compute",
                metavar="K",
                nargs="+",
                help="List of key-value pairs (k1=v1)",
            )

        # Job-lumping options: batch multiple samples into single jobs.
        for subparser in [run_subparser, rerun_subparser]:
            subparser.add_argument(
                "-u",
                "--lump-s",
                default=None,
                metavar="X",
                type=html_range(min_val=0, max_val=100, step=0.1, value=0),
                help="Lump by size: total input file size (GB) to batch into one job",
            )
            subparser.add_argument(
                "-n",
                "--lump-n",
                default=None,
                metavar="N",
                type=html_range(min_val=1, max_val="num_samples", value=1),
                help="Lump by number: number of samples to batch into one job",
            )
            subparser.add_argument(
                "-j",
                "--lump-j",
                default=None,
                metavar="J",
                type=int,
                help="Lump samples into number of jobs.",
            )

        check_subparser.add_argument(
            "--describe-codes",
            help="Show status codes description",
            action="store_true",
            default=False,
        )

        check_subparser.add_argument(
            "--itemized",
            help="Show a detailed, by sample statuses",
            action="store_true",
            default=False,
        )

        check_subparser.add_argument(
            "-f",
            "--flags",
            nargs="*",
            default=FLAGS,
            type=html_select(choices=FLAGS),
            metavar="F",
            help="Check on only these flags/status values",
        )

        for subparser in [destroy_subparser, clean_subparser]:
            subparser.add_argument(
                "--force-yes",
                action=_StoreBoolActionType,
                default=False,
                type=html_checkbox(checked=False),
                help="Provide upfront confirmation of destruction intent, "
                "to skip console query. Default=False",
            )

        # `init` subcommand options: bootstrap a looper config file.
        init_subparser.add_argument(
            "pep_config", help="Project configuration file (PEP)"
        )

        init_subparser.add_argument(
            "-f", "--force", help="Force overwrite", action="store_true", default=False
        )

        init_subparser.add_argument(
            "-o",
            "--output-dir",
            dest="output_dir",
            metavar="DIR",
            default=None,
            type=str,
        )

        init_subparser.add_argument(
            "-S",
            "--sample-pipeline-interfaces",
            dest=SAMPLE_PL_ARG,
            metavar="YAML",
            default=None,
            nargs="+",
            type=str,
            help="Path to looper sample config file",
        )
        init_subparser.add_argument(
            "-P",
            "--project-pipeline-interfaces",
            dest=PROJECT_PL_ARG,
            metavar="YAML",
            default=None,
            nargs="+",
            type=str,
            help="Path to looper project config file",
        )

        # TODO: add ouput dir, sample, project pifaces

        init_subparser.add_argument(
            "-p",
            "--piface",
            help="Generates generic pipeline interface",
            action="store_true",
            default=False,
        )

        # Common arguments
        for subparser in [
            run_subparser,
            rerun_subparser,
            table_subparser,
            report_subparser,
            destroy_subparser,
            check_subparser,
            clean_subparser,
            collate_subparser,
            inspect_subparser,
            link_subparser,
        ]:
            subparser.add_argument(
                "config_file",
                nargs="?",
                default=None,
                help="Project configuration file (YAML) or pephub registry path.",
            )
            subparser.add_argument(
                "--looper-config",
                required=False,
                default=None,
                type=str,
                help="Looper configuration file (YAML)",
            )
            # help="Path to the looper config file"
            subparser.add_argument(
                "-S",
                "--sample-pipeline-interfaces",
                dest=SAMPLE_PL_ARG,
                metavar="YAML",
                default=None,
                nargs="+",
                type=str,
                help="Path to looper sample config file",
            )
            subparser.add_argument(
                "-P",
                "--project-pipeline-interfaces",
                dest=PROJECT_PL_ARG,
                metavar="YAML",
                default=None,
                nargs="+",
                type=str,
                help="Path to looper project config file",
            )
            # help="Path to the output directory"
            subparser.add_argument(
                "-o",
                "--output-dir",
                dest="output_dir",
                metavar="DIR",
                default=None,
                type=str,
                help=argparse.SUPPRESS,
            )
            # "Submission subdirectory name"
            subparser.add_argument(
                "--submission-subdir", metavar="DIR", help=argparse.SUPPRESS
            )
            # "Results subdirectory name"
            subparser.add_argument(
                "--results-subdir", metavar="DIR", help=argparse.SUPPRESS
            )
            # "Sample attribute for pipeline interface sources"
            subparser.add_argument(
                "--pipeline-interfaces-key", metavar="K", help=argparse.SUPPRESS
            )
            # "Paths to pipeline interface files"
            subparser.add_argument(
                "--pipeline-interfaces",
                metavar="P",
                nargs="+",
                action="append",
                help=argparse.SUPPRESS,
            )

        # Sample selection arguments, shared by most subcommands.
        for subparser in [
            run_subparser,
            rerun_subparser,
            table_subparser,
            report_subparser,
            destroy_subparser,
            check_subparser,
            clean_subparser,
            collate_subparser,
            inspect_subparser,
            link_subparser,
        ]:
            fetch_samples_group = subparser.add_argument_group(
                "sample selection arguments",
                "Specify samples to include or exclude based on sample attribute values",
            )
            fetch_samples_group.add_argument(
                "-l",
                "--limit",
                default=None,
                metavar="N",
                type=html_range(min_val=1, max_val="num_samples", value="num_samples"),
                help="Limit to n samples",
            )
            fetch_samples_group.add_argument(
                "-k",
                "--skip",
                default=None,
                metavar="N",
                type=html_range(min_val=1, max_val="num_samples", value="num_samples"),
                help="Skip samples by numerical index",
            )

            fetch_samples_group.add_argument(
                f"--{SAMPLE_SELECTION_ATTRIBUTE_OPTNAME}",
                default="toggle",
                metavar="ATTR",
                help="Attribute for sample exclusion OR inclusion",
            )

            # Exclusion and inclusion are mutually exclusive by construction.
            protocols = fetch_samples_group.add_mutually_exclusive_group()
            protocols.add_argument(
                f"--{SAMPLE_EXCLUSION_OPTNAME}",
                nargs="*",
                metavar="E",
                help="Exclude samples with these values",
            )
            protocols.add_argument(
                f"--{SAMPLE_INCLUSION_OPTNAME}",
                nargs="*",
                metavar="I",
                help="Include only samples with these values",
            )
            fetch_samples_group.add_argument(
                f"--{SAMPLE_SELECTION_FLAG_OPTNAME}",
                default=None,
                nargs="*",
                metavar="SELFLAG",
                help="Include samples with this flag status, e.g. completed",
            )

            fetch_samples_group.add_argument(
                f"--{SAMPLE_EXCLUSION_FLAG_OPTNAME}",
                default=None,
                nargs="*",
                metavar="EXCFLAG",
                help="Exclude samples with this flag status, e.g. completed",
            )

            subparser.add_argument(
                "-a",
                "--amend",
                nargs="+",
                metavar="A",
                help="List of amendments to activate",
            )
        for subparser in [
            report_subparser,
            table_subparser,
            check_subparser,
            destroy_subparser,
            link_subparser,
        ]:
            subparser.add_argument(
                "--project",
                help="Process project-level pipelines",
                action="store_true",
                default=False,
            )
        inspect_subparser.add_argument(
            "--sample-names",
            help="Names of the samples to inspect",
            nargs="*",
            default=None,
        )

        inspect_subparser.add_argument(
            "--attr-limit",
            help="Number of attributes to display",
            type=int,
        )
        # `--commands` prints the space-separated list of subcommand names
        # (reuses the "version" action machinery for print-and-exit).
        parser.add_argument(
            "--commands",
            action="version",
            version="{}".format(" ".join(subparsers.choices.keys())),
        )

        report_subparser.add_argument(
            "--portable",
            help="Makes html report portable.",
            action="store_true",
        )

        result.append(parser)
    return result
520
-
521
-
522
def opt_attr_pair(name: str) -> Tuple[str, str]:
    """Map a long-option name to its (CLI flag, namespace attribute) pair."""
    attr = name.replace("-", "_")
    return "--" + name, attr
524
-
525
-
526
def validate_post_parse(args: argparse.Namespace) -> List[str]:
    """
    Check for CLI option combinations that argparse alone cannot enforce.

    :param argparse.Namespace args: parsed CLI arguments
    :return List[str]: messages describing any usage problems found
    """
    # These sample-selection options must not be combined with one another.
    exclusive_names = [
        "skip",
        "limit",
        SAMPLE_EXCLUSION_OPTNAME,
        SAMPLE_INCLUSION_OPTNAME,
    ]
    used_exclusives = []
    for opt, attr in map(opt_attr_pair, exclusive_names):
        # Count an option as "used" only when it carries a truthy value.
        if getattr(args, attr, None):
            used_exclusives.append(opt)
    problems = []
    if len(used_exclusives) > 1:
        problems.append(
            f"Used multiple mutually exclusive options: {', '.join(used_exclusives)}"
        )
    return problems
546
-
547
-
548
def _proc_resources_spec(args):
    """
    Process CLI-sources compute setting specification. There are two sources
    of compute settings in the CLI alone:
    * YAML file (--settings argument)
    * itemized compute settings (--compute argument)

    The itemized compute specification is given priority

    :param argparse.Namespace: arguments namespace
    :return Mapping[str, str]: binding between resource setting name and value
    :raise ValueError: if interpretation of the given specification as encoding
        of key-value pairs fails
    """
    # Start from the YAML settings file; a malformed file is tolerated
    # (warn and fall back to empty) rather than aborting.
    try:
        settings_data = read_yaml_file(args.settings) or {}
    except yaml.YAMLError:
        _LOGGER.warning(
            "Settings file ({}) does not follow YAML format,"
            " disregarding".format(args.settings)
        )
        settings_data = {}
    spec = getattr(args, "compute", None)
    if not spec:
        return settings_data
    # Overlay itemized k=v pairs on top of the file-based settings.
    bads = []
    for item in spec:
        pieces = item.split("=")
        if len(pieces) == 2:
            settings_data[pieces[0]] = pieces[1]
        else:
            bads.append(item)
    if bads:
        raise ValueError(
            "Could not correctly parse itemized compute specification. "
            "Correct format: " + EXAMPLE_COMPUTE_SPEC_FMT
        )
    return settings_data
588
-
589
-
590
def main(test_args=None):
    """
    Primary workflow: parse the CLI, load project configuration, and
    dispatch to the requested subcommand.

    :param test_args: optional argument list to parse instead of the real
        command line (used for testing)
    :return: subcommand-specific result or return code, where applicable
    """
    global _LOGGER

    parser, aux_parser = build_parser()
    # The auxiliary parser has defaults suppressed so that later merging can
    # distinguish explicitly-given CLI options from argparse defaults.
    aux_parser.suppress_defaults()

    if test_args:
        args, remaining_args = parser.parse_known_args(args=test_args)
    else:
        args, remaining_args = parser.parse_known_args()

    # Enforce cross-option constraints that argparse cannot express.
    cli_use_errors = validate_post_parse(args)
    if cli_use_errors:
        parser.print_help(sys.stderr)
        parser.error(
            f"{len(cli_use_errors)} CLI use problem(s): {', '.join(cli_use_errors)}"
        )
    if args.command is None:
        parser.print_help(sys.stderr)
        sys.exit(1)

    # `init` and `init-piface` run before logger/project setup; both convert
    # the helper's boolean success into a 0/1 exit status.
    if args.command == "init":
        return int(
            not initiate_looper_config(
                dotfile_path(),
                args.pep_config,
                args.output_dir,
                args.sample_pipeline_interfaces,
                args.project_pipeline_interfaces,
                args.force,
            )
        )

    if args.command == "init-piface":
        sys.exit(int(not init_generic_pipeline()))

    _LOGGER = logmuse.logger_via_cli(args, make_root=True)
    _LOGGER.info("Looper version: {}\nCommand: {}".format(__version__, args.command))

    if "config_file" in vars(args):
        if args.config_file is None:
            # No positional config given: fall back to a looper config file
            # (either --looper-config or the dotfile) and graft its entries
            # onto the args namespace.
            looper_cfg_path = os.path.relpath(dotfile_path(), start=os.curdir)
            try:
                if args.looper_config:
                    looper_config_dict = read_looper_config_file(args.looper_config)
                else:
                    looper_config_dict = read_looper_dotfile()
                _LOGGER.info(f"Using looper config ({looper_cfg_path}).")

                for looper_config_key, looper_config_item in looper_config_dict.items():
                    setattr(args, looper_config_key, looper_config_item)

            except OSError:
                parser.print_help(sys.stderr)
                _LOGGER.warning(
                    f"Looper config file does not exist. Use looper init to create one at {looper_cfg_path}."
                )
                sys.exit(1)
        else:
            # Deprecated path: looper configured via the PEP project config.
            _LOGGER.warning(
                "This PEP configures looper through the project config. This approach is deprecated and will "
                "be removed in future versions. Please use a looper config file. For more information see "
                "looper.databio.org/en/latest/looper-config"
            )

    args = enrich_args_via_cfg(args, aux_parser, test_args)

    # If project pipeline interface defined in the cli, change name to: "pipeline_interface"
    if vars(args)[PROJECT_PL_ARG]:
        args.pipeline_interfaces = vars(args)[PROJECT_PL_ARG]

    if len(remaining_args) > 0:
        _LOGGER.warning(
            "Unrecognized arguments: {}".format(
                " ".join([str(x) for x in remaining_args])
            )
        )

    divcfg = (
        select_divvy_config(filepath=args.divvy) if hasattr(args, "divvy") else None
    )

    # Ignore flags if user is selecting or excluding on flags:
    if args.sel_flag or args.exc_flag:
        args.ignore_flags = True

    # Initialize project
    if is_registry_path(args.config_file):
        # PEPhub registry path: the raw PEP is fetched remotely, so a sample
        # pipeline interface must be supplied on the CLI.
        if vars(args)[SAMPLE_PL_ARG]:
            p = Project(
                amendments=args.amend,
                divcfg_path=divcfg,
                runp=args.command == "runp",
                project_dict=PEPHubClient()._load_raw_pep(
                    registry_path=args.config_file
                ),
                **{
                    attr: getattr(args, attr) for attr in CLI_PROJ_ATTRS if attr in args
                },
            )
        else:
            raise MisconfigurationException(
                f"`sample_pipeline_interface` is missing. Provide it in the parameters."
            )
    else:
        try:
            p = Project(
                cfg=args.config_file,
                amendments=args.amend,
                divcfg_path=divcfg,
                runp=args.command == "runp",
                **{
                    attr: getattr(args, attr) for attr in CLI_PROJ_ATTRS if attr in args
                },
            )
        except yaml.parser.ParserError as e:
            _LOGGER.error(f"Project config parse failed -- {e}")
            sys.exit(1)

    # Activate the selected compute package, falling back to the default.
    selected_compute_pkg = p.selected_compute_package or DEFAULT_COMPUTE_RESOURCES_NAME
    if p.dcc is not None and not p.dcc.activate_package(selected_compute_pkg):
        _LOGGER.info(
            "Failed to activate '{}' computing package. "
            "Using the default one".format(selected_compute_pkg)
        )

    # Dispatch to the subcommand inside a sample-selection context.
    with ProjectContext(
        prj=p,
        selector_attribute=args.sel_attr,
        selector_include=args.sel_incl,
        selector_exclude=args.sel_excl,
        selector_flag=args.sel_flag,
        exclusion_flag=args.exc_flag,
    ) as prj:
        if args.command in ["run", "rerun"]:
            run = Runner(prj)
            try:
                compute_kwargs = _proc_resources_spec(args)
                return run(args, rerun=(args.command == "rerun"), **compute_kwargs)
            except SampleFailedException:
                sys.exit(1)
            except IOError:
                _LOGGER.error(
                    "{} pipeline_interfaces: '{}'".format(
                        prj.__class__.__name__, prj.pipeline_interface_sources
                    )
                )
                raise

        if args.command == "runp":
            compute_kwargs = _proc_resources_spec(args)
            collate = Collator(prj)
            collate(args, **compute_kwargs)
            return collate.debug

        if args.command == "destroy":
            return Destroyer(prj)(args)

        # pipestat support introduces breaking changes and pipelines run
        # with no pipestat reporting would not be compatible with
        # commands: table, report and check. Therefore we plan maintain
        # the old implementations for a couple of releases.
        # if hasattr(args, "project"):
        #     use_pipestat = (
        #         prj.pipestat_configured_project
        #         if args.project
        #         else prj.pipestat_configured
        #     )
        use_pipestat = (
            prj.pipestat_configured_project if args.project else prj.pipestat_configured
        )
        # table/report/link/check all require pipestat configuration.
        if args.command == "table":
            if use_pipestat:
                Tabulator(prj)(args)
            else:
                raise PipestatConfigurationException("table")

        if args.command == "report":
            if use_pipestat:
                Reporter(prj)(args)
            else:
                raise PipestatConfigurationException("report")

        if args.command == "link":
            if use_pipestat:
                Linker(prj)(args)
            else:
                raise PipestatConfigurationException("link")

        if args.command == "check":
            if use_pipestat:
                return Checker(prj)(args)
            else:
                raise PipestatConfigurationException("check")

        if args.command == "clean":
            return Cleaner(prj)(args)

        if args.command == "inspect":
            # Delegated to eido; kept here with a deprecation warning.
            inspect_project(p, args.sample_names, args.attr_limit)
            from warnings import warn

            warn(
                "The inspect feature has moved to eido and will be removed in the future release of looper. "
                "Use `eido inspect` from now on.",
            )