looper 1.5.1__py3-none-any.whl → 1.6.0a1__py3-none-any.whl

looper/cli_looper.py ADDED
@@ -0,0 +1,776 @@
+import argparse
+import logmuse
+import os
+import sys
+import yaml
+
+from eido import inspect_project
+from pephubclient import PEPHubClient
+from typing import Tuple, List
+from ubiquerg import VersionInHelpParser
+
+from . import __version__
+from .const import *
+from .divvy import DEFAULT_COMPUTE_RESOURCES_NAME, select_divvy_config
+from .exceptions import *
+from .looper import *
+from .parser_types import *
+from .project import Project, ProjectContext
+from .utils import (
+    dotfile_path,
+    enrich_args_via_cfg,
+    is_registry_path,
+    read_looper_dotfile,
+    read_looper_config_file,
+    read_yaml_file,
+    initiate_looper_config,
+    init_generic_pipeline,
+)
+
+
+class _StoreBoolActionType(argparse.Action):
+    """
+    Enables the storage of a boolean const and custom type definition needed
+    for systematic html interface generation. To get the _StoreTrueAction
+    output use default=False in the add_argument function
+    and default=True to get _StoreFalseAction output.
+    """
+
+    def __init__(self, option_strings, dest, type, default, required=False, help=None):
+        super(_StoreBoolActionType, self).__init__(
+            option_strings=option_strings,
+            dest=dest,
+            nargs=0,
+            const=not default,
+            default=default,
+            type=type,
+            required=required,
+            help=help,
+        )
+
+    def __call__(self, parser, namespace, values, option_string=None):
+        setattr(namespace, self.dest, self.const)
+
+
+def build_parser():
+    """
+    Building argument parser.
+
+    :return argparse.ArgumentParser
+    """
+    # Main looper program help text messages
+    banner = "%(prog)s - A project job submission engine and project manager."
+    additional_description = (
+        "For subcommand-specific options, " "type: '%(prog)s <subcommand> -h'"
+    )
+    additional_description += "\nhttps://github.com/pepkit/looper"
+
+    parser = VersionInHelpParser(
+        prog="looper",
+        description=banner,
+        epilog=additional_description,
+        version=__version__,
+    )
+
+    aux_parser = VersionInHelpParser(
+        prog="looper",
+        description=banner,
+        epilog=additional_description,
+        version=__version__,
+    )
+    result = []
+    for parser in [parser, aux_parser]:
+        # Logging control
+        parser.add_argument(
+            "--logfile",
+            help="Optional output file for looper logs " "(default: %(default)s)",
+        )
+        parser.add_argument("--logging-level", help=argparse.SUPPRESS)
+        parser.add_argument(
+            "--dbg",
+            action="store_true",
+            help="Turn on debug mode (default: %(default)s)",
+        )
+
+        parser = logmuse.add_logging_options(parser)
+        subparsers = parser.add_subparsers(dest="command")
+
+        def add_subparser(cmd):
+            message = MESSAGE_BY_SUBCOMMAND[cmd]
+            return subparsers.add_parser(
+                cmd,
+                description=message,
+                help=message,
+                formatter_class=lambda prog: argparse.HelpFormatter(
+                    prog, max_help_position=37, width=90
+                ),
+            )
+
+        # Run and rerun command
+        run_subparser = add_subparser("run")
+        rerun_subparser = add_subparser("rerun")
+        collate_subparser = add_subparser("runp")
+        table_subparser = add_subparser("table")
+        report_subparser = add_subparser("report")
+        destroy_subparser = add_subparser("destroy")
+        check_subparser = add_subparser("check")
+        clean_subparser = add_subparser("clean")
+        inspect_subparser = add_subparser("inspect")
+        init_subparser = add_subparser("init")
+        init_piface = add_subparser("init-piface")
+        link_subparser = add_subparser("link")
+
+        # Flag arguments
+        ####################################################################
+        for subparser in [run_subparser, rerun_subparser, collate_subparser]:
+            subparser.add_argument(
+                "-i",
+                "--ignore-flags",
+                default=False,
+                action=_StoreBoolActionType,
+                type=html_checkbox(checked=False),
+                help="Ignore run status flags? Default=False",
+            )
+
+        for subparser in [
+            run_subparser,
+            rerun_subparser,
+            destroy_subparser,
+            clean_subparser,
+            collate_subparser,
+        ]:
+            subparser.add_argument(
+                "-d",
+                "--dry-run",
+                action=_StoreBoolActionType,
+                default=False,
+                type=html_checkbox(checked=False),
+                help="Don't actually submit the jobs. Default=False",
+            )
+
+        # Parameter arguments
+        ####################################################################
+        for subparser in [run_subparser, rerun_subparser, collate_subparser]:
+            subparser.add_argument(
+                "-t",
+                "--time-delay",
+                metavar="S",
+                type=html_range(min_val=0, max_val=30, value=0),
+                default=0,
+                help="Time delay in seconds between job submissions",
+            )
+
+            subparser.add_argument(
+                "-x",
+                "--command-extra",
+                default="",
+                metavar="S",
+                help="String to append to every command",
+            )
+            subparser.add_argument(
+                "-y",
+                "--command-extra-override",
+                metavar="S",
+                default="",
+                help="Same as command-extra, but overrides values in PEP",
+            )
+            subparser.add_argument(
+                "-f",
+                "--skip-file-checks",
+                action=_StoreBoolActionType,
+                default=False,
+                type=html_checkbox(checked=False),
+                help="Do not perform input file checks",
+            )
+
+            divvy_group = subparser.add_argument_group(
+                "divvy arguments", "Configure divvy to change computing settings"
+            )
+            divvy_group.add_argument(
+                "--divvy",
+                default=None,
+                metavar="DIVCFG",
+                help="Path to divvy configuration file. Default=$DIVCFG env "
+                "variable. Currently: {}".format(
+                    os.getenv("DIVCFG", None) or "not set"
+                ),
+            )
+            divvy_group.add_argument(
+                "-p",
+                "--package",
+                metavar="P",
+                help="Name of computing resource package to use",
+            )
+            divvy_group.add_argument(
+                "-s",
+                "--settings",
+                default="",
+                metavar="S",
+                help="Path to a YAML settings file with compute settings",
+            )
+            divvy_group.add_argument(
+                "-c",
+                "--compute",
+                metavar="K",
+                nargs="+",
+                help="List of key-value pairs (k1=v1)",
+            )
+
+        for subparser in [run_subparser, rerun_subparser]:
+            subparser.add_argument(
+                "-u",
+                "--lump",
+                default=None,
+                metavar="X",
+                type=html_range(min_val=0, max_val=100, step=0.1, value=0),
+                help="Total input file size (GB) to batch into one job",
+            )
+            subparser.add_argument(
+                "-n",
+                "--lumpn",
+                default=None,
+                metavar="N",
+                type=html_range(min_val=1, max_val="num_samples", value=1),
+                help="Number of commands to batch into one job",
+            )
+
+        check_subparser.add_argument(
+            "--describe-codes",
+            help="Show status codes description",
+            action="store_true",
+            default=False,
+        )
+
+        check_subparser.add_argument(
+            "--itemized",
+            help="Show a detailed, by sample statuses",
+            action="store_true",
+            default=False,
+        )
+
+        check_subparser.add_argument(
+            "-f",
+            "--flags",
+            nargs="*",
+            default=FLAGS,
+            type=html_select(choices=FLAGS),
+            metavar="F",
+            help="Check on only these flags/status values",
+        )
+
+        for subparser in [destroy_subparser, clean_subparser]:
+            subparser.add_argument(
+                "--force-yes",
+                action=_StoreBoolActionType,
+                default=False,
+                type=html_checkbox(checked=False),
+                help="Provide upfront confirmation of destruction intent, "
+                "to skip console query. Default=False",
+            )
+
+        init_subparser.add_argument(
+            "pep_config", help="Project configuration file (PEP)"
+        )
+
+        init_subparser.add_argument(
+            "-f", "--force", help="Force overwrite", action="store_true", default=False
+        )
+
+        init_subparser.add_argument(
+            "-o",
+            "--output-dir",
+            dest="output_dir",
+            metavar="DIR",
+            default=None,
+            type=str,
+        )
+
+        init_subparser.add_argument(
+            "-S",
+            "--sample-pipeline-interfaces",
+            dest=SAMPLE_PL_ARG,
+            metavar="YAML",
+            default=None,
+            nargs="+",
+            type=str,
+            help="Path to looper sample config file",
+        )
+        init_subparser.add_argument(
+            "-P",
+            "--project-pipeline-interfaces",
+            dest=PROJECT_PL_ARG,
+            metavar="YAML",
+            default=None,
+            nargs="+",
+            type=str,
+            help="Path to looper project config file",
+        )
+
+        # TODO: add ouput dir, sample, project pifaces
+
+        init_subparser.add_argument(
+            "-p",
+            "--piface",
+            help="Generates generic pipeline interface",
+            action="store_true",
+            default=False,
+        )
+
+        # Common arguments
+        for subparser in [
+            run_subparser,
+            rerun_subparser,
+            table_subparser,
+            report_subparser,
+            destroy_subparser,
+            check_subparser,
+            clean_subparser,
+            collate_subparser,
+            inspect_subparser,
+            link_subparser,
+        ]:
+            subparser.add_argument(
+                "config_file",
+                nargs="?",
+                default=None,
+                help="Project configuration file (YAML) or pephub registry path.",
+            )
+            subparser.add_argument(
+                "--looper-config",
+                required=False,
+                default=None,
+                type=str,
+                help="Looper configuration file (YAML)",
+            )
+            # help="Path to the looper config file"
+            subparser.add_argument(
+                "-S",
+                "--sample-pipeline-interfaces",
+                dest=SAMPLE_PL_ARG,
+                metavar="YAML",
+                default=None,
+                nargs="+",
+                type=str,
+                help="Path to looper sample config file",
+            )
+            subparser.add_argument(
+                "-P",
+                "--project-pipeline-interfaces",
+                dest=PROJECT_PL_ARG,
+                metavar="YAML",
+                default=None,
+                nargs="+",
+                type=str,
+                help="Path to looper project config file",
+            )
+            # help="Path to the output directory"
+            subparser.add_argument(
+                "-o",
+                "--output-dir",
+                dest="output_dir",
+                metavar="DIR",
+                default=None,
+                type=str,
+                help=argparse.SUPPRESS,
+            )
+            # "Submission subdirectory name"
+            subparser.add_argument(
+                "--submission-subdir", metavar="DIR", help=argparse.SUPPRESS
+            )
+            # "Results subdirectory name"
+            subparser.add_argument(
+                "--results-subdir", metavar="DIR", help=argparse.SUPPRESS
+            )
+            # "Sample attribute for pipeline interface sources"
+            subparser.add_argument(
+                "--pipeline-interfaces-key", metavar="K", help=argparse.SUPPRESS
+            )
+            # "Paths to pipeline interface files"
+            subparser.add_argument(
+                "--pipeline-interfaces",
+                metavar="P",
+                nargs="+",
+                action="append",
+                help=argparse.SUPPRESS,
+            )
+
+        for subparser in [
+            run_subparser,
+            rerun_subparser,
+            table_subparser,
+            report_subparser,
+            destroy_subparser,
+            check_subparser,
+            clean_subparser,
+            collate_subparser,
+            inspect_subparser,
+            link_subparser,
+        ]:
+            fetch_samples_group = subparser.add_argument_group(
+                "sample selection arguments",
+                "Specify samples to include or exclude based on sample attribute values",
+            )
+            fetch_samples_group.add_argument(
+                "-l",
+                "--limit",
+                default=None,
+                metavar="N",
+                type=html_range(min_val=1, max_val="num_samples", value="num_samples"),
+                help="Limit to n samples",
+            )
+            fetch_samples_group.add_argument(
+                "-k",
+                "--skip",
+                default=None,
+                metavar="N",
+                type=html_range(min_val=1, max_val="num_samples", value="num_samples"),
+                help="Skip samples by numerical index",
+            )
+
+            fetch_samples_group.add_argument(
+                f"--{SAMPLE_SELECTION_ATTRIBUTE_OPTNAME}",
+                default="toggle",
+                metavar="ATTR",
+                help="Attribute for sample exclusion OR inclusion",
+            )
+
+            protocols = fetch_samples_group.add_mutually_exclusive_group()
+            protocols.add_argument(
+                f"--{SAMPLE_EXCLUSION_OPTNAME}",
+                nargs="*",
+                metavar="E",
+                help="Exclude samples with these values",
+            )
+            protocols.add_argument(
+                f"--{SAMPLE_INCLUSION_OPTNAME}",
+                nargs="*",
+                metavar="I",
+                help="Include only samples with these values",
+            )
+            fetch_samples_group.add_argument(
+                f"--{SAMPLE_SELECTION_FLAG_OPTNAME}",
+                default=None,
+                nargs="*",
+                metavar="SELFLAG",
+                help="Include samples with this flag status, e.g. completed",
+            )
+
+            fetch_samples_group.add_argument(
+                f"--{SAMPLE_EXCLUSION_FLAG_OPTNAME}",
+                default=None,
+                nargs="*",
+                metavar="EXCFLAG",
+                help="Exclude samples with this flag status, e.g. completed",
+            )
+
+            subparser.add_argument(
+                "-a",
+                "--amend",
+                nargs="+",
+                metavar="A",
+                help="List of amendments to activate",
+            )
+        for subparser in [
+            report_subparser,
+            table_subparser,
+            check_subparser,
+            destroy_subparser,
+            link_subparser,
+        ]:
+            subparser.add_argument(
+                "--project",
+                help="Process project-level pipelines",
+                action="store_true",
+                default=False,
+            )
+        inspect_subparser.add_argument(
+            "--sample-names",
+            help="Names of the samples to inspect",
+            nargs="*",
+            default=None,
+        )
+
+        inspect_subparser.add_argument(
+            "--attr-limit",
+            help="Number of attributes to display",
+            type=int,
+        )
+        result.append(parser)
+    return result
+
+
+def opt_attr_pair(name: str) -> Tuple[str, str]:
+    return f"--{name}", name.replace("-", "_")
+
+
+def validate_post_parse(args: argparse.Namespace) -> List[str]:
+    problems = []
+    used_exclusives = [
+        opt
+        for opt, attr in map(
+            opt_attr_pair,
+            [
+                "skip",
+                "limit",
+                SAMPLE_EXCLUSION_OPTNAME,
+                SAMPLE_INCLUSION_OPTNAME,
+            ],
+        )
+        if getattr(args, attr, None)
+    ]
+    if len(used_exclusives) > 1:
+        problems.append(
+            f"Used multiple mutually exclusive options: {', '.join(used_exclusives)}"
+        )
+    return problems
+
+
+def _proc_resources_spec(args):
+    """
+    Process CLI-sources compute setting specification. There are two sources
+    of compute settings in the CLI alone:
+    * YAML file (--settings argument)
+    * itemized compute settings (--compute argument)
+
+    The itemized compute specification is given priority
+
+    :param argparse.Namespace: arguments namespace
+    :return Mapping[str, str]: binding between resource setting name and value
+    :raise ValueError: if interpretation of the given specification as encoding
+        of key-value pairs fails
+    """
+    spec = getattr(args, "compute", None)
+    try:
+        settings_data = read_yaml_file(args.settings) or {}
+    except yaml.YAMLError:
+        _LOGGER.warning(
+            "Settings file ({}) does not follow YAML format,"
+            " disregarding".format(args.settings)
+        )
+        settings_data = {}
+    if not spec:
+        return settings_data
+    pairs = [(kv, kv.split("=")) for kv in spec]
+    bads = []
+    for orig, pair in pairs:
+        try:
+            k, v = pair
+        except ValueError:
+            bads.append(orig)
+        else:
+            settings_data[k] = v
+    if bads:
+        raise ValueError(
+            "Could not correctly parse itemized compute specification. "
+            "Correct format: " + EXAMPLE_COMPUTE_SPEC_FMT
+        )
+    return settings_data
+
+
+def main(test_args=None):
+    """Primary workflow"""
+    global _LOGGER
+
+    parser, aux_parser = build_parser()
+    aux_parser.suppress_defaults()
+
+    if test_args:
+        args, remaining_args = parser.parse_known_args(args=test_args)
+    else:
+        args, remaining_args = parser.parse_known_args()
+
+    cli_use_errors = validate_post_parse(args)
+    if cli_use_errors:
+        parser.print_help(sys.stderr)
+        parser.error(
+            f"{len(cli_use_errors)} CLI use problem(s): {', '.join(cli_use_errors)}"
+        )
+    if args.command is None:
+        parser.print_help(sys.stderr)
+        sys.exit(1)
+
+    if args.command == "init":
+        return int(
+            not initiate_looper_config(
+                dotfile_path(),
+                args.pep_config,
+                args.output_dir,
+                args.sample_pipeline_interfaces,
+                args.project_pipeline_interfaces,
+                args.force,
+            )
+        )
+
+    if args.command == "init-piface":
+        sys.exit(int(not init_generic_pipeline()))
+
+    _LOGGER = logmuse.logger_via_cli(args, make_root=True)
+    _LOGGER.info("Looper version: {}\nCommand: {}".format(__version__, args.command))
+
+    if "config_file" in vars(args):
+        if args.config_file is None:
+            looper_cfg_path = os.path.relpath(dotfile_path(), start=os.curdir)
+            try:
+                if args.looper_config:
+                    looper_config_dict = read_looper_config_file(args.looper_config)
+                else:
+                    looper_config_dict = read_looper_dotfile()
+                    _LOGGER.info(f"Using looper config ({looper_cfg_path}).")
+
+                for looper_config_key, looper_config_item in looper_config_dict.items():
+                    setattr(args, looper_config_key, looper_config_item)
+
+            except OSError:
+                parser.print_help(sys.stderr)
+                _LOGGER.warning(
+                    f"Looper config file does not exist. Use looper init to create one at {looper_cfg_path}."
+                )
+                sys.exit(1)
+        else:
+            _LOGGER.warning(
+                "This PEP configures looper through the project config. This approach is deprecated and will "
+                "be removed in future versions. Please use a looper config file. For more information see "
+                "looper.databio.org/en/latest/looper-config"
+            )
+
+    args = enrich_args_via_cfg(args, aux_parser, test_args)
+
+    # If project pipeline interface defined in the cli, change name to: "pipeline_interface"
+    if vars(args)[PROJECT_PL_ARG]:
+        args.pipeline_interfaces = vars(args)[PROJECT_PL_ARG]
+
+    if len(remaining_args) > 0:
+        _LOGGER.warning(
+            "Unrecognized arguments: {}".format(
+                " ".join([str(x) for x in remaining_args])
+            )
+        )
+
+    divcfg = (
+        select_divvy_config(filepath=args.divvy) if hasattr(args, "divvy") else None
+    )
+
+    # Ignore flags if user is selecting or excluding on flags:
+    if args.sel_flag or args.exc_flag:
+        args.ignore_flags = True
+
+    # Initialize project
+    if is_registry_path(args.config_file):
+        if vars(args)[SAMPLE_PL_ARG]:
+            p = Project(
+                amendments=args.amend,
+                divcfg_path=divcfg,
+                runp=args.command == "runp",
+                project_dict=PEPHubClient()._load_raw_pep(
+                    registry_path=args.config_file
+                ),
+                **{
+                    attr: getattr(args, attr) for attr in CLI_PROJ_ATTRS if attr in args
+                },
+            )
+        else:
+            raise MisconfigurationException(
+                f"`sample_pipeline_interface` is missing. Provide it in the parameters."
+            )
+    else:
+        try:
+            p = Project(
+                cfg=args.config_file,
+                amendments=args.amend,
+                divcfg_path=divcfg,
+                runp=args.command == "runp",
+                **{
+                    attr: getattr(args, attr) for attr in CLI_PROJ_ATTRS if attr in args
+                },
+            )
+        except yaml.parser.ParserError as e:
+            _LOGGER.error(f"Project config parse failed -- {e}")
+            sys.exit(1)
+
+    selected_compute_pkg = p.selected_compute_package or DEFAULT_COMPUTE_RESOURCES_NAME
+    if p.dcc is not None and not p.dcc.activate_package(selected_compute_pkg):
+        _LOGGER.info(
+            "Failed to activate '{}' computing package. "
+            "Using the default one".format(selected_compute_pkg)
+        )
+
+    with ProjectContext(
+        prj=p,
+        selector_attribute=args.sel_attr,
+        selector_include=args.sel_incl,
+        selector_exclude=args.sel_excl,
+        selector_flag=args.sel_flag,
+        exclusion_flag=args.exc_flag,
+    ) as prj:
+        if args.command in ["run", "rerun"]:
+            run = Runner(prj)
+            try:
+                compute_kwargs = _proc_resources_spec(args)
+                return run(args, rerun=(args.command == "rerun"), **compute_kwargs)
+            except SampleFailedException:
+                sys.exit(1)
+            except IOError:
+                _LOGGER.error(
+                    "{} pipeline_interfaces: '{}'".format(
+                        prj.__class__.__name__, prj.pipeline_interface_sources
+                    )
+                )
+                raise
+
+        if args.command == "runp":
+            compute_kwargs = _proc_resources_spec(args)
+            collate = Collator(prj)
+            collate(args, **compute_kwargs)
+            return collate.debug
+
+        if args.command == "destroy":
+            return Destroyer(prj)(args)
+
+        # pipestat support introduces breaking changes and pipelines run
+        # with no pipestat reporting would not be compatible with
+        # commands: table, report and check. Therefore we plan maintain
+        # the old implementations for a couple of releases.
+        # if hasattr(args, "project"):
+        #     use_pipestat = (
+        #         prj.pipestat_configured_project
+        #         if args.project
+        #         else prj.pipestat_configured
+        #     )
+        use_pipestat = (
+            prj.pipestat_configured_project if args.project else prj.pipestat_configured
+        )
+        if args.command == "table":
+            if use_pipestat:
+                Tabulator(prj)(args)
+            else:
+                raise PipestatConfigurationException("table")
+
+        if args.command == "report":
+            if use_pipestat:
+                Reporter(prj)(args)
+            else:
+                raise PipestatConfigurationException("report")
+
+        if args.command == "link":
+            if use_pipestat:
+                Linker(prj)(args)
+            else:
+                raise PipestatConfigurationException("link")
+
+        if args.command == "check":
+            if use_pipestat:
+                return Checker(prj)(args)
+            else:
+                raise PipestatConfigurationException("check")
+
+        if args.command == "clean":
+            return Cleaner(prj)(args)
+
+        if args.command == "inspect":
+            inspect_project(p, args.sample_names, args.attr_limit)
+            from warnings import warn
+
+            warn(
+                "The inspect feature has moved to eido and will be removed in the future release of looper. "
+                "Use `eido inspect` from now on.",
+            )