runnable-0.50.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. extensions/README.md +0 -0
  2. extensions/__init__.py +0 -0
  3. extensions/catalog/README.md +0 -0
  4. extensions/catalog/any_path.py +214 -0
  5. extensions/catalog/file_system.py +52 -0
  6. extensions/catalog/minio.py +72 -0
  7. extensions/catalog/pyproject.toml +14 -0
  8. extensions/catalog/s3.py +11 -0
  9. extensions/job_executor/README.md +0 -0
  10. extensions/job_executor/__init__.py +236 -0
  11. extensions/job_executor/emulate.py +70 -0
  12. extensions/job_executor/k8s.py +553 -0
  13. extensions/job_executor/k8s_job_spec.yaml +37 -0
  14. extensions/job_executor/local.py +35 -0
  15. extensions/job_executor/local_container.py +161 -0
  16. extensions/job_executor/pyproject.toml +16 -0
  17. extensions/nodes/README.md +0 -0
  18. extensions/nodes/__init__.py +0 -0
  19. extensions/nodes/conditional.py +301 -0
  20. extensions/nodes/fail.py +78 -0
  21. extensions/nodes/loop.py +394 -0
  22. extensions/nodes/map.py +477 -0
  23. extensions/nodes/parallel.py +281 -0
  24. extensions/nodes/pyproject.toml +15 -0
  25. extensions/nodes/stub.py +93 -0
  26. extensions/nodes/success.py +78 -0
  27. extensions/nodes/task.py +156 -0
  28. extensions/pipeline_executor/README.md +0 -0
  29. extensions/pipeline_executor/__init__.py +871 -0
  30. extensions/pipeline_executor/argo.py +1266 -0
  31. extensions/pipeline_executor/emulate.py +119 -0
  32. extensions/pipeline_executor/local.py +226 -0
  33. extensions/pipeline_executor/local_container.py +369 -0
  34. extensions/pipeline_executor/mocked.py +159 -0
  35. extensions/pipeline_executor/pyproject.toml +16 -0
  36. extensions/run_log_store/README.md +0 -0
  37. extensions/run_log_store/__init__.py +0 -0
  38. extensions/run_log_store/any_path.py +100 -0
  39. extensions/run_log_store/chunked_fs.py +122 -0
  40. extensions/run_log_store/chunked_minio.py +141 -0
  41. extensions/run_log_store/file_system.py +91 -0
  42. extensions/run_log_store/generic_chunked.py +549 -0
  43. extensions/run_log_store/minio.py +114 -0
  44. extensions/run_log_store/pyproject.toml +15 -0
  45. extensions/secrets/README.md +0 -0
  46. extensions/secrets/dotenv.py +62 -0
  47. extensions/secrets/pyproject.toml +15 -0
  48. runnable/__init__.py +108 -0
  49. runnable/catalog.py +141 -0
  50. runnable/cli.py +484 -0
  51. runnable/context.py +730 -0
  52. runnable/datastore.py +1058 -0
  53. runnable/defaults.py +159 -0
  54. runnable/entrypoints.py +390 -0
  55. runnable/exceptions.py +137 -0
  56. runnable/executor.py +561 -0
  57. runnable/gantt.py +1646 -0
  58. runnable/graph.py +501 -0
  59. runnable/names.py +546 -0
  60. runnable/nodes.py +593 -0
  61. runnable/parameters.py +217 -0
  62. runnable/pickler.py +96 -0
  63. runnable/sdk.py +1277 -0
  64. runnable/secrets.py +92 -0
  65. runnable/tasks.py +1268 -0
  66. runnable/telemetry.py +142 -0
  67. runnable/utils.py +423 -0
  68. runnable-0.50.0.dist-info/METADATA +189 -0
  69. runnable-0.50.0.dist-info/RECORD +72 -0
  70. runnable-0.50.0.dist-info/WHEEL +4 -0
  71. runnable-0.50.0.dist-info/entry_points.txt +53 -0
  72. runnable-0.50.0.dist-info/licenses/LICENSE +201 -0
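The dist-info metadata above (entry_points.txt, item 71) suggests that the extensions listed here — catalogs, job executors, nodes, pipeline executors, run log stores, and secrets handlers — are registered as plugins via Python entry points. As a minimal, hypothetical sketch (the group names are not confirmed by this diff; only the distribution name "runnable" is), an installed copy of the wheel could be inspected like this:

    from importlib.metadata import distribution

    # List every entry point the installed "runnable" distribution declares.
    # The authoritative source is runnable-0.50.0.dist-info/entry_points.txt;
    # which groups exist and what they are named is an assumption here.
    dist = distribution("runnable")
    for ep in sorted(dist.entry_points, key=lambda e: (e.group, e.name)):
        print(f"{ep.group}: {ep.name} -> {ep.value}")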
runnable/cli.py ADDED
@@ -0,0 +1,484 @@
+ import logging
+ import os
+ from enum import Enum
+ from pathlib import Path
+ from typing import Annotated
+
+ import typer
+
+ from runnable import defaults, entrypoints
+ from runnable.gantt import SimpleVisualizer, generate_html_timeline, visualize_simple
+
+ logger = logging.getLogger(defaults.LOGGER_NAME)
+
+
+ app = typer.Typer(
+     help=(
+         "Welcome to runnable. Please provide the command that you want to use. "
+         "All commands have options that you can see by running runnable <command> --help"
+     ),
+ )
+
+
+ class LogLevel(str, Enum):
+     INFO = "INFO"
+     DEBUG = "DEBUG"
+     WARNING = "WARNING"
+     ERROR = "ERROR"
+     FATAL = "FATAL"
+
+
+ class ExecutionMode(str, Enum):
+     YAML = "yaml"
+     PYTHON = "python"
+
+
+ class FanMode(str, Enum):
+     IN = "in"
+     OUT = "out"
+
+
+ @app.command(hidden=True)
+ def execute(
+     yaml_file: Annotated[str, typer.Argument(help="The pipeline definition file")],
+     config_file: Annotated[
+         str,
+         typer.Option(
+             "--config", "-c", help="The configuration file specifying the services"
+         ),
+     ] = "",
+     parameters_file: Annotated[
+         str,
+         typer.Option(
+             "--parameters",
+             "-p",
+             help="Parameters, in yaml, accessible by the application",
+         ),
+     ] = "",
+     log_level: Annotated[
+         LogLevel,
+         typer.Option(
+             "--log-level",
+             help="The log level",
+             show_default=True,
+             case_sensitive=False,
+         ),
+     ] = LogLevel.WARNING,
+     tag: Annotated[str, typer.Option(help="A tag attached to the run")] = "",
+     run_id: Annotated[
+         str,
+         typer.Argument(
+             envvar=defaults.ENV_RUN_ID,
+             help="An optional run_id; one will be generated if it's not provided",
+         ),
+     ] = "",
+ ):
+     """
+     Execute a pipeline defined by a yaml file.
+
+     The executor is defined by the executor block of the configuration file.
+
+     The behavior of this command depends on the executor type:
+
+     -- For local executors (local, local-container), the pipeline is executed in the current environment.
+
+     -- For remote executors (argo, airflow), the pipeline is translated to the executor's specification.
+     """
+     logger.setLevel(log_level.value)
+
+     entrypoints.execute_pipeline_yaml_spec(
+         configuration_file=config_file,
+         pipeline_file=yaml_file,
+         tag=tag,
+         run_id=run_id,
+         parameters_file=parameters_file,
+     )
+
+
+ @app.command(hidden=True)
+ def execute_single_node(
+     run_id: Annotated[
+         str,
+         typer.Argument(
+             help="An optional run_id; one will be generated if it's not provided"
+         ),
+     ],
+     yaml_or_python_file: Annotated[
+         str, typer.Argument(help="The pipeline definition file")
+     ],
+     step_name: Annotated[str, typer.Argument(help="The step name to execute")],
+     config_file: Annotated[
+         str,
+         typer.Option(
+             "--config", "-c", help="The configuration file specifying the services"
+         ),
+     ] = "",
+     parameters_file: Annotated[
+         str,
+         typer.Option(
+             "--parameters-file",
+             "-p",
+             help="Parameters, in yaml, accessible by the application",
+         ),
+     ] = "",
+     log_level: Annotated[
+         LogLevel,
+         typer.Option(
+             "--log-level",
+             help="The log level",
+             show_default=True,
+             case_sensitive=False,
+         ),
+     ] = LogLevel.INFO,
+     tag: Annotated[str, typer.Option(help="A tag attached to the run")] = "",
+     mode: Annotated[
+         ExecutionMode,
+         typer.Option(
+             "--mode",
+             "-m",
+             help="Whether the spec is in yaml or the python sdk",
+         ),
+     ] = ExecutionMode.YAML,
+     iter_variable: Annotated[
+         str,
+         typer.Option(
+             "--iter-variable",
+             help="The iterable variable dictionary as a string",
+             show_default=True,
+         ),
+     ] = "",
+ ):
+     logger.setLevel(log_level.value)
+     entrypoints.execute_single_node(
+         configuration_file=config_file,
+         pipeline_file=yaml_or_python_file,
+         mode=mode,
+         step_name=step_name,
+         iter_variable=iter_variable,
+         run_id=run_id,
+         tag=tag,
+         parameters_file=parameters_file,
+     )
+
+
+ @app.command(hidden=True)
+ def fan(
+     run_id: Annotated[str, typer.Argument(help="The run id")],
+     step_name: Annotated[str, typer.Argument(help="The step name")],
+     python_or_yaml_file: Annotated[
+         str, typer.Argument(help="The pipeline definition file")
+     ],
+     in_or_out: Annotated[str, typer.Argument(help="The fan mode")],
+     iter_variable: Annotated[
+         str,
+         typer.Option(
+             "--iter-variable",
+             help="The iterable variable dictionary as a string",
+             show_default=True,
+         ),
+     ] = "",
+     config_file: Annotated[
+         str,
+         typer.Option(
+             "--config-file", "-c", help="The configuration file specifying the services"
+         ),
+     ] = "",
+     parameters_file: Annotated[
+         str,
+         typer.Option(
+             "--parameters-file",
+             "-p",
+             help="Parameters, in yaml, accessible by the application",
+         ),
+     ] = "",
+     log_level: Annotated[
+         LogLevel,
+         typer.Option(
+             "--log-level",
+             help="The log level",
+             show_default=True,
+             case_sensitive=False,
+         ),
+     ] = LogLevel.INFO,
+     tag: Annotated[str, typer.Option(help="A tag attached to the run")] = "",
+     mode: Annotated[
+         ExecutionMode,
+         typer.Option(
+             "--mode",
+             "-m",
+             help="Whether the spec is in yaml or the python sdk",
+         ),
+     ] = ExecutionMode.YAML,
+ ):
+     logger.setLevel(log_level.value)
+
+     # Fan in or out
+     entrypoints.fan(
+         configuration_file=config_file,
+         pipeline_file=python_or_yaml_file,
+         step_name=step_name,
+         mode=mode,
+         in_or_out=in_or_out,
+         iter_variable=iter_variable,
+         run_id=run_id,
+         tag=tag,
+         parameters_file=parameters_file,
+     )
+
+
+ @app.command(hidden=True)
+ def execute_job(
+     job_definition_file: Annotated[
+         str,
+         typer.Argument(
+             help=("The yaml file containing the job definition"),
+         ),
+     ],
+     run_id: Annotated[
+         str,
+         typer.Argument(
+             envvar="RUNNABLE_RUN_ID",
+             help="An optional run_id; one will be generated if it's not provided",
+         ),
+     ] = "",
+     config_file: Annotated[
+         str,
+         typer.Option(
+             "--config", "-c", help="The configuration file specifying the services"
+         ),
+     ] = "",
+     parameters_file: Annotated[
+         str,
+         typer.Option(
+             "--parameters",
+             "-p",
+             help="Parameters, in yaml, accessible by the application",
+         ),
+     ] = "",
+     log_level: Annotated[
+         LogLevel,
+         typer.Option(
+             "--log-level",
+             help="The log level",
+             show_default=True,
+             case_sensitive=False,
+         ),
+     ] = LogLevel.WARNING,
+     tag: Annotated[str, typer.Option(help="A tag attached to the run")] = "",
+ ):
+     logger.setLevel(log_level.value)
+
+     entrypoints.execute_job_non_local(
+         configuration_file=config_file,
+         job_definition_file=job_definition_file,
+         tag=tag,
+         run_id=run_id,
+         parameters_file=parameters_file,
+     )
+
+
+ @app.command()
+ def retry(
+     run_id: Annotated[
+         str,
+         typer.Argument(help="The run_id of the failed run to retry"),
+     ],
+     config_file: Annotated[
+         str,
+         typer.Option(
+             "--config",
+             "-c",
+             help="The configuration file (defaults to original run's config)",
+         ),
+     ] = "",
+     log_level: Annotated[
+         LogLevel,
+         typer.Option(
+             "--log-level",
+             help="The log level",
+             show_default=True,
+             case_sensitive=False,
+         ),
+     ] = LogLevel.WARNING,
+     tag: Annotated[str, typer.Option(help="A tag attached to the retry run")] = "",
+ ):
+     """
+     Retry a failed pipeline run from the point of failure.
+
+     This command re-executes a pipeline while preserving successful steps.
+     Only failed and subsequent steps will re-execute.
+
+     The pipeline definition and parameters are loaded from the original run log.
+
+     Examples:
+         runnable retry forgiving-joliot-0645
+         runnable retry abc123 --config local.yaml
+         runnable retry abc123 --log-level DEBUG
+     """
+     logger.setLevel(log_level.value)
+
+     try:
+         entrypoints.retry_pipeline(
+             run_id=run_id,
+             configuration_file=config_file,
+             tag=tag,
+         )
+     except Exception as e:
+         logger.error(f"Retry failed: {e}")
+
+
+ @app.command()
+ def timeline(
+     run_id_or_path: Annotated[
+         str, typer.Argument(help="Run ID to visualize, or path to JSON run log file")
+     ],
+     output: Annotated[
+         str,
+         typer.Option("--output", "-o", help="Output HTML file path"),
+     ] = "",
+     console: Annotated[
+         bool,
+         typer.Option(
+             "--console/--no-console",
+             help="Show console timeline output (default: true)",
+         ),
+     ] = True,
+     open_browser: Annotated[
+         bool,
+         typer.Option(
+             "--open/--no-open",
+             help="Automatically open the generated file in the default browser",
+         ),
+     ] = True,
+     log_level: Annotated[
+         LogLevel,
+         typer.Option(
+             "--log-level",
+             help="The log level",
+             show_default=True,
+             case_sensitive=False,
+         ),
+     ] = LogLevel.WARNING,
+ ):
+     """
+     Visualize a pipeline execution as an interactive timeline.
+
+     This command creates lightweight timeline visualizations that effectively
+     show composite nodes (parallel, map, conditional) with hierarchical structure,
+     timing information, and execution metadata.
+
+     The visualization system provides:
+     - Clean console output with hierarchical display
+     - Interactive HTML with hover tooltips and expandable sections
+     - Proper support for all composite pipeline types
+     - Rich metadata including commands, parameters, and catalog operations
+
+     By default, shows console output AND generates an HTML file that opens in the browser.
+
+     Input options:
+     - Run ID: looks up the JSON file in the .run_log_store/ directory
+     - JSON path: direct path to a run log JSON file (flexible for any config)
+
+     Examples:
+         # Using a run ID (looks in .run_log_store/)
+         runnable timeline forgiving-joliot-0645                  # Console + HTML + browser
+         runnable timeline parallel-run --output custom.html      # Console + custom HTML + browser
+
+         # Using a JSON file path (any location)
+         runnable timeline /path/to/my-run.json                   # Console + HTML + browser
+         runnable timeline ../logs/pipeline-run.json --no-open    # Console + HTML, no browser
+         runnable timeline ~/experiments/run.json --no-console    # HTML + browser only
+
+         # Other options
+         runnable timeline complex-pipeline --no-open             # Console + HTML, no browser
+         runnable timeline simple-run --no-console --no-open      # HTML only, no browser
+     """
+     logger.setLevel(log_level.value)
+
+     # Determine if the input is a file path or a run ID
+     if os.path.exists(run_id_or_path) or run_id_or_path.endswith(".json"):
+         # Input is a file path
+         json_file_path = Path(run_id_or_path)
+         if not json_file_path.exists():
+             print(f"āŒ JSON file not found: {json_file_path}")
+             return
+
+         # Extract the run ID from the file name for default naming
+         run_id = json_file_path.stem
+         mode = "file"
+     else:
+         # Input is a run ID - use the existing behavior
+         run_id = run_id_or_path
+         json_file_path = None
+         mode = "run_id"
+
+     # Default console behavior: always show console output
+     show_console = console if console is not None else True
+
+     if output:
+         # Generate the HTML file with console output
+         output_file = output
+         print(f"🌐 Generating timeline: {output_file}")
+
+         if show_console:
+             # Show console output first, then generate the HTML
+             if mode == "file":
+                 _visualize_simple_from_file(json_file_path, show_summary=False)
+             else:
+                 visualize_simple(run_id, show_summary=False)
+             print(f"\n🌐 Generating HTML timeline: {output_file}")
+
+         if mode == "file":
+             _generate_html_timeline_from_file(json_file_path, output_file, open_browser)
+         else:
+             generate_html_timeline(run_id, output_file, open_browser)
+     else:
+         # Default behavior: show console + generate HTML and open the browser
+         if show_console:
+             if mode == "file":
+                 _visualize_simple_from_file(json_file_path, show_summary=False)
+             else:
+                 visualize_simple(run_id, show_summary=False)
+
+         # Always generate the HTML file and open the browser by default
+         output_file = f"{run_id}_timeline.html"
+         print(f"\n🌐 Generating HTML timeline: {output_file}")
+         if mode == "file":
+             _generate_html_timeline_from_file(json_file_path, output_file, open_browser)
+         else:
+             generate_html_timeline(run_id, output_file, open_browser)
+
+
+ def _visualize_simple_from_file(json_file_path, show_summary: bool = False) -> None:
+     """Visualize the timeline from a JSON file path."""
+
+     try:
+         viz = SimpleVisualizer(json_file_path)
+         viz.print_simple_timeline()
+         if show_summary:
+             viz.print_execution_summary()
+     except Exception as e:
+         print(f"āŒ Error reading JSON file: {e}")
+
+
+ def _generate_html_timeline_from_file(
+     json_file_path, output_file: str, open_browser: bool = True
+ ) -> None:
+     """Generate an HTML timeline from a JSON file path."""
+
+     try:
+         viz = SimpleVisualizer(json_file_path)
+         viz.generate_html_timeline(output_file)
+
+         if open_browser:
+             import webbrowser
+
+             file_path = Path(output_file).absolute()
+             print(f"🌐 Opening timeline in browser: {file_path.name}")
+             webbrowser.open(file_path.as_uri())
+     except Exception as e:
+         print(f"āŒ Error generating HTML: {e}")
+
+
+ if __name__ == "__main__":
+     app()
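As a quick, hedged illustration of how the CLI above can be exercised, the following sketch is not part of the package; it only relies on the commands defined in cli.py and typer's standard test runner. The commented-out invocation uses an illustrative path, assuming a run log exists under .run_log_store/ as described in the timeline docstring.

    from typer.testing import CliRunner

    from runnable.cli import app

    runner = CliRunner()

    # Inspect a user-facing command; `retry` and `timeline` are the
    # non-hidden commands defined in cli.py.
    result = runner.invoke(app, ["timeline", "--help"])
    print(result.stdout)

    # A real invocation needs an existing run log (path is illustrative):
    # runner.invoke(app, ["timeline", ".run_log_store/forgiving-joliot-0645.json", "--no-open"])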