runnable 0.34.0a1__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of runnable might be problematic.

Files changed (49)
  1. extensions/catalog/any_path.py +13 -2
  2. extensions/job_executor/__init__.py +7 -5
  3. extensions/job_executor/emulate.py +106 -0
  4. extensions/job_executor/k8s.py +8 -8
  5. extensions/job_executor/local_container.py +13 -14
  6. extensions/nodes/__init__.py +0 -0
  7. extensions/nodes/conditional.py +243 -0
  8. extensions/nodes/fail.py +72 -0
  9. extensions/nodes/map.py +350 -0
  10. extensions/nodes/parallel.py +159 -0
  11. extensions/nodes/stub.py +89 -0
  12. extensions/nodes/success.py +72 -0
  13. extensions/nodes/task.py +92 -0
  14. extensions/pipeline_executor/__init__.py +27 -27
  15. extensions/pipeline_executor/argo.py +52 -46
  16. extensions/pipeline_executor/emulate.py +112 -0
  17. extensions/pipeline_executor/local.py +4 -4
  18. extensions/pipeline_executor/local_container.py +19 -79
  19. extensions/pipeline_executor/mocked.py +5 -9
  20. extensions/pipeline_executor/retry.py +6 -10
  21. runnable/__init__.py +2 -11
  22. runnable/catalog.py +6 -23
  23. runnable/cli.py +145 -48
  24. runnable/context.py +520 -28
  25. runnable/datastore.py +51 -54
  26. runnable/defaults.py +12 -34
  27. runnable/entrypoints.py +82 -440
  28. runnable/exceptions.py +35 -34
  29. runnable/executor.py +13 -20
  30. runnable/gantt.py +1141 -0
  31. runnable/graph.py +1 -1
  32. runnable/names.py +1 -1
  33. runnable/nodes.py +20 -16
  34. runnable/parameters.py +108 -51
  35. runnable/sdk.py +125 -204
  36. runnable/tasks.py +62 -85
  37. runnable/utils.py +6 -268
  38. runnable-1.0.0.dist-info/METADATA +122 -0
  39. runnable-1.0.0.dist-info/RECORD +73 -0
  40. {runnable-0.34.0a1.dist-info → runnable-1.0.0.dist-info}/entry_points.txt +9 -8
  41. extensions/nodes/nodes.py +0 -778
  42. extensions/nodes/torch.py +0 -273
  43. extensions/nodes/torch_config.py +0 -76
  44. extensions/tasks/torch.py +0 -286
  45. extensions/tasks/torch_config.py +0 -76
  46. runnable-0.34.0a1.dist-info/METADATA +0 -267
  47. runnable-0.34.0a1.dist-info/RECORD +0 -67
  48. {runnable-0.34.0a1.dist-info → runnable-1.0.0.dist-info}/WHEEL +0 -0
  49. {runnable-0.34.0a1.dist-info → runnable-1.0.0.dist-info}/licenses/LICENSE +0 -0
extensions/pipeline_executor/local.py CHANGED
@@ -4,7 +4,7 @@ from pydantic import Field, PrivateAttr
 
 from extensions.pipeline_executor import GenericPipelineExecutor
 from runnable import defaults
-from runnable.defaults import TypeMapVariable
+from runnable.defaults import MapVariableType
 from runnable.nodes import BaseNode
 
 logger = logging.getLogger(defaults.LOGGER_NAME)
@@ -32,14 +32,14 @@ class LocalExecutor(GenericPipelineExecutor):
 
     _is_local: bool = PrivateAttr(default=True)
 
-    def execute_from_graph(self, node: BaseNode, map_variable: TypeMapVariable = None):
+    def execute_from_graph(self, node: BaseNode, map_variable: MapVariableType = None):
         if not self.object_serialisation:
             self._context.object_serialisation = False
 
         super().execute_from_graph(node=node, map_variable=map_variable)
 
     def trigger_node_execution(
-        self, node: BaseNode, map_variable: TypeMapVariable = None
+        self, node: BaseNode, map_variable: MapVariableType = None
     ):
         """
         In this mode of execution, we prepare for the node execution and execute the node
@@ -50,7 +50,7 @@ class LocalExecutor(GenericPipelineExecutor):
         """
         self.execute_node(node=node, map_variable=map_variable)
 
-    def execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None):
+    def execute_node(self, node: BaseNode, map_variable: MapVariableType = None):
         """
         For local execution, we just execute the node.
 
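Note: across all four pipeline executors in this release, the `map_variable` annotation is renamed from `TypeMapVariable` to `MapVariableType`. A minimal, hypothetical sketch of code that extends these executors after the rename; only the import path and the method signature are taken from the diff, the mixin class and its body are illustrative:

```python
# Hypothetical sketch of the 1.0.0 annotation; only the import and the
# signature come from the diff above.
from runnable.defaults import MapVariableType  # 0.34.x exposed this as TypeMapVariable
from runnable.nodes import BaseNode


class LoggingExecutorMixin:
    """Illustrative mixin; real code would subclass GenericPipelineExecutor."""

    def execute_node(self, node: BaseNode, map_variable: MapVariableType = None) -> None:
        # Behaviour is unchanged by the release; only the alias name differs.
        print(f"executing {node.internal_name} with map_variable={map_variable}")
```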
extensions/pipeline_executor/local_container.py CHANGED
@@ -2,12 +2,12 @@ import logging
 from pathlib import Path
 from typing import Dict
 
-from pydantic import Field
+from pydantic import Field, PrivateAttr
 
 from extensions.pipeline_executor import GenericPipelineExecutor
-from runnable import console, defaults, task_console, utils
+from runnable import defaults, utils
 from runnable.datastore import StepLog
-from runnable.defaults import TypeMapVariable
+from runnable.defaults import MapVariableType
 from runnable.nodes import BaseNode
 
 logger = logging.getLogger(defaults.LOGGER_NAME)
@@ -70,7 +70,7 @@ class LocalContainerExecutor(GenericPipelineExecutor):
     auto_remove_container: bool = True
     environment: Dict[str, str] = Field(default_factory=dict)
 
-    _is_local: bool = False
+    _should_setup_run_log_at_traversal: bool = PrivateAttr(default=True)
 
     _container_log_location = "/tmp/run_logs/"
     _container_catalog_location = "/tmp/catalog/"
@@ -104,7 +104,7 @@ class LocalContainerExecutor(GenericPipelineExecutor):
         code_id.code_identifier_url = "local docker host"
         step_log.code_identities.append(code_id)
 
-    def execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None):
+    def execute_node(self, node: BaseNode, map_variable: MapVariableType = None):
         """
         We are already in the container, we just execute the node.
         The node is already prepared for execution.
@@ -112,69 +112,8 @@ class LocalContainerExecutor(GenericPipelineExecutor):
         self._use_volumes()
         return self._execute_node(node, map_variable)
 
-    def execute_from_graph(
-        self,
-        node: BaseNode,
-        map_variable: TypeMapVariable = None,
-    ):
-        """
-        This is the entry point to from the graph execution.
-
-        While the self.execute_graph is responsible for traversing the graph, this function is responsible for
-        actual execution of the node.
-
-        If the node type is:
-            * task : We can delegate to _execute_node after checking the eligibility for re-run in cases of a re-run
-            * success: We can delegate to _execute_node
-            * fail: We can delegate to _execute_node
-
-        For nodes that are internally graphs:
-            * parallel: Delegate the responsibility of execution to the node.execute_as_graph()
-            * dag: Delegate the responsibility of execution to the node.execute_as_graph()
-            * map: Delegate the responsibility of execution to the node.execute_as_graph()
-
-        Transpilers will NEVER use this method and will NEVER call ths method.
-        This method should only be used by interactive executors.
-
-        Args:
-            node (Node): The node to execute
-            map_variable (dict, optional): If the node if of a map state, this corresponds to the value of iterable.
-                    Defaults to None.
-        """
-        step_log = self._context.run_log_store.create_step_log(
-            node.name, node._get_step_log_name(map_variable)
-        )
-
-        self.add_code_identities(node=node, step_log=step_log)
-
-        step_log.step_type = node.node_type
-        step_log.status = defaults.PROCESSING
-
-        self._context.run_log_store.add_step_log(step_log, self._context.run_id)
-
-        logger.info(f"Executing node: {node.get_summary()}")
-
-        # Add the step log to the database as per the situation.
-        # If its a terminal node, complete it now
-        if node.node_type in ["success", "fail"]:
-            self._execute_node(node, map_variable=map_variable)
-            return
-
-        # We call an internal function to iterate the sub graphs and execute them
-        if node.is_composite:
-            node.execute_as_graph(map_variable=map_variable)
-            return
-
-        task_console.export_text(clear=True)
-
-        task_name = node._resolve_map_placeholders(node.internal_name, map_variable)
-        console.print(
-            f":runner: Executing the node {task_name} ... ", style="bold color(208)"
-        )
-        self.trigger_node_execution(node=node, map_variable=map_variable)
-
     def trigger_node_execution(
-        self, node: BaseNode, map_variable: TypeMapVariable = None
+        self, node: BaseNode, map_variable: MapVariableType = None
     ):
         """
         We come into this step via execute from graph, use trigger job to spin up the container.
@@ -192,7 +131,9 @@ class LocalContainerExecutor(GenericPipelineExecutor):
         logger.debug("Here is the resolved executor config")
         logger.debug(executor_config)
 
-        command = utils.get_node_execution_command(node, map_variable=map_variable)
+        command = self._context.get_node_callable_command(
+            node, map_variable=map_variable
+        )
 
         self._spin_container(
             node=node,
@@ -218,7 +159,7 @@ class LocalContainerExecutor(GenericPipelineExecutor):
         self,
         node: BaseNode,
         command: str,
-        map_variable: TypeMapVariable = None,
+        map_variable: MapVariableType = None,
         auto_remove_container: bool = True,
    ):
         """
@@ -294,6 +235,7 @@ class LocalContainerExecutor(GenericPipelineExecutor):
         """
         Mount the volumes for the container
         """
+        # TODO: There should be an abstraction on top of service providers
         match self._context.run_log_store.service_name:
             case "file-system":
                 write_to = self._context.run_log_store.log_folder
@@ -308,17 +250,17 @@ class LocalContainerExecutor(GenericPipelineExecutor):
                     "mode": "rw",
                 }
 
-        match self._context.catalog_handler.service_name:
+        match self._context.catalog.service_name:
             case "file-system":
-                catalog_location = self._context.catalog_handler.catalog_location
+                catalog_location = self._context.catalog.catalog_location
                 self._volumes[str(Path(catalog_location).resolve())] = {
                     "bind": f"{self._container_catalog_location}",
                     "mode": "rw",
                 }
 
-        match self._context.secrets_handler.service_name:
+        match self._context.secrets.service_name:
             case "dotenv":
-                secrets_location = self._context.secrets_handler.location
+                secrets_location = self._context.secrets.location
                 self._volumes[str(Path(secrets_location).resolve())] = {
                     "bind": f"{self._container_secrets_location}",
                     "mode": "ro",
@@ -331,14 +273,12 @@ class LocalContainerExecutor(GenericPipelineExecutor):
             case "chunked-fs":
                 self._context.run_log_store.log_folder = self._container_log_location
 
-        match self._context.catalog_handler.service_name:
+        match self._context.catalog.service_name:
             case "file-system":
-                self._context.catalog_handler.catalog_location = (
+                self._context.catalog.catalog_location = (
                     self._container_catalog_location
                 )
 
-        match self._context.secrets_handler.service_name:
+        match self._context.secrets.service_name:
             case "dotenv":
-                self._context.secrets_handler.location = (
-                    self._container_secrets_location
-                )
+                self._context.secrets.location = self._container_secrets_location
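Note: the container executor drops its bespoke `execute_from_graph` (graph traversal now lives in the base `GenericPipelineExecutor`), swaps `_is_local` for `_should_setup_run_log_at_traversal`, builds the container command via `self._context.get_node_callable_command(...)`, and reaches services through renamed context attributes (`catalog_handler` → `catalog`, `secrets_handler` → `secrets`). A rough sketch of the renamed accessors, using a stand-in object rather than runnable's real run context:

```python
# Illustrative only: `ctx` stands in for runnable's run context. The attribute
# names (run_log_store, catalog, secrets) and service names come from the diff;
# the bind paths are placeholders, not the executor's actual constants.
from pathlib import Path
from typing import Any, Dict


def container_volumes(ctx: Any) -> Dict[str, Dict[str, str]]:
    volumes: Dict[str, Dict[str, str]] = {}

    if ctx.run_log_store.service_name == "file-system":
        volumes[str(Path(ctx.run_log_store.log_folder).resolve())] = {
            "bind": "/tmp/run_logs/",
            "mode": "rw",
        }

    # 0.34.x spelled these ctx.catalog_handler / ctx.secrets_handler
    if ctx.catalog.service_name == "file-system":
        volumes[str(Path(ctx.catalog.catalog_location).resolve())] = {
            "bind": "/tmp/catalog/",
            "mode": "rw",
        }

    if ctx.secrets.service_name == "dotenv":
        volumes[str(Path(ctx.secrets.location).resolve())] = {
            "bind": "/tmp/secrets/",  # placeholder path
            "mode": "ro",
        }

    return volumes
```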
extensions/pipeline_executor/mocked.py CHANGED
@@ -4,10 +4,10 @@ from typing import Any, Dict, Type, cast
 
 from pydantic import ConfigDict, Field
 
-from extensions.nodes.nodes import TaskNode
+from extensions.nodes.task import TaskNode
 from extensions.pipeline_executor import GenericPipelineExecutor
-from runnable import context, defaults
-from runnable.defaults import TypeMapVariable
+from runnable import defaults
+from runnable.defaults import MapVariableType
 from runnable.nodes import BaseNode
 from runnable.tasks import BaseTaskType
 
@@ -32,11 +32,7 @@ class MockedExecutor(GenericPipelineExecutor):
 
     patches: Dict[str, Any] = Field(default_factory=dict)
 
-    @property
-    def _context(self):
-        return context.run_context
-
-    def execute_from_graph(self, node: BaseNode, map_variable: TypeMapVariable = None):
+    def execute_from_graph(self, node: BaseNode, map_variable: MapVariableType = None):
         """
         This is the entry point to from the graph execution.
 
@@ -140,7 +136,7 @@ class MockedExecutor(GenericPipelineExecutor):
 
         return effective_node_config
 
-    def execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None):
+    def execute_node(self, node: BaseNode, map_variable: MapVariableType = None):
         """
         The entry point for all executors apart from local.
         We have already prepared for node execution.
extensions/pipeline_executor/retry.py CHANGED
@@ -3,9 +3,9 @@ from functools import cached_property
 from typing import Any, Dict, Optional
 
 from extensions.pipeline_executor import GenericPipelineExecutor
-from runnable import context, defaults, exceptions
+from runnable import defaults, exceptions
 from runnable.datastore import RunLog
-from runnable.defaults import TypeMapVariable
+from runnable.defaults import MapVariableType
 from runnable.nodes import BaseNode
 
 logger = logging.getLogger(defaults.LOGGER_NAME)
@@ -33,10 +33,6 @@ class RetryExecutor(GenericPipelineExecutor):
     _original_run_log: Optional[RunLog] = None
     _restart_initiated: bool = False
 
-    @property
-    def _context(self):
-        return context.run_context
-
     @cached_property
     def original_run_log(self):
         return self._context.run_log_store.get_run_log_by_id(
@@ -46,7 +42,7 @@ class RetryExecutor(GenericPipelineExecutor):
 
     def _set_up_for_re_run(self, params: Dict[str, Any]) -> None:
         # Sync the previous run log catalog to this one.
-        self._context.catalog_handler.sync_between_runs(
+        self._context.catalog.sync_between_runs(
             previous_run_id=self.run_id, run_id=self._context.run_id
         )
 
@@ -63,7 +59,7 @@ class RetryExecutor(GenericPipelineExecutor):
         # Should the parameters be copied from previous execution
         # self._set_up_for_re_run(params=params)
 
-    def execute_from_graph(self, node: BaseNode, map_variable: TypeMapVariable = None):
+    def execute_from_graph(self, node: BaseNode, map_variable: MapVariableType = None):
         """
         This is the entry point to from the graph execution.
 
@@ -124,7 +120,7 @@ class RetryExecutor(GenericPipelineExecutor):
         self.execute_node(node=node, map_variable=map_variable)
 
     def _is_step_eligible_for_rerun(
-        self, node: BaseNode, map_variable: TypeMapVariable = None
+        self, node: BaseNode, map_variable: MapVariableType = None
    ):
         """
         In case of a re-run, this method checks to see if the previous run step status to determine if a re-run is
@@ -172,5 +168,5 @@ class RetryExecutor(GenericPipelineExecutor):
         self._restart_initiated = True
         return True
 
-    def execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None):
+    def execute_node(self, node: BaseNode, map_variable: MapVariableType = None):
         self._execute_node(node, map_variable=map_variable)
runnable/__init__.py CHANGED
@@ -1,24 +1,17 @@
 # ruff: noqa
 
-
-import logging
 import os
-from logging.config import dictConfig
 
 from rich.console import Console
 
-from runnable import defaults
-
-dictConfig(defaults.LOGGING_CONFIG)
-logger = logging.getLogger(defaults.LOGGER_NAME)
-
 console = Console(record=True)
 console.print(":runner: Lets go!!")
 
 task_console = Console(record=True)
 
-from runnable.sdk import (  # noqa
+from runnable.sdk import (  # noqa;
     Catalog,
+    Conditional,
     Fail,
     Map,
     NotebookJob,
@@ -31,8 +24,6 @@ from runnable.sdk import ( # noqa
     ShellTask,
     Stub,
     Success,
-    TorchJob,
-    TorchTask,
     metric,
     pickled,
 )
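Note: two behavioural points fall out of this hunk: importing `runnable` no longer configures logging (the `dictConfig(defaults.LOGGING_CONFIG)` call and module-level logger are gone), and the SDK surface exports `Conditional` while dropping `TorchJob`/`TorchTask`. A minimal sketch of an application configuring its own logging before using the SDK; the level and format are illustrative, and only `Conditional` and `Stub` are taken from the export list above:

```python
# Since 1.0.0 no longer calls dictConfig() at import time, configure logging
# yourself; the level/format below are illustrative, not runnable's defaults.
import logging

logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(name)s %(levelname)s %(message)s",
)

from runnable import Conditional, Stub  # noqa: E402  # both appear in the 1.0.0 export list
```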
runnable/catalog.py CHANGED
@@ -1,6 +1,6 @@
 import logging
 from abc import ABC, abstractmethod
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List
 
 from pydantic import BaseModel, ConfigDict, Field
 
@@ -11,26 +11,6 @@ from runnable.datastore import DataCatalog
 logger = logging.getLogger(defaults.LOGGER_NAME)
 
 
-def is_catalog_out_of_sync(
-    catalog, synced_catalogs=Optional[List[DataCatalog]]
-) -> bool:
-    """
-    Check if the catalog items are out of sync from already cataloged objects.
-    If they are, return False.
-    If the object does not exist or synced catalog does not exist, return True
-    """
-    if not synced_catalogs:
-        return True  # If nothing has been synced in the past
-
-    for synced_catalog in synced_catalogs:
-        if synced_catalog.catalog_relative_path == catalog.catalog_relative_path:
-            if synced_catalog.data_hash == catalog.data_hash:
-                return False
-            return True
-
-    return True  # The object does not exist, sync it
-
-
 # --8<-- [start:docs]
 
 
@@ -77,7 +57,7 @@ class BaseCatalog(ABC, BaseModel):
 
     @abstractmethod
     def put(
-        self, name: str, allow_file_not_found_exc: bool = False
+        self, name: str, allow_file_not_found_exc: bool = False, store_copy: bool = True
     ) -> List[DataCatalog]:
         """
         Put the file by 'name' from the 'compute_data_folder' in the catalog for the run_id.
@@ -140,7 +120,10 @@ class DoNothingCatalog(BaseCatalog):
         return []
 
     def put(
-        self, name: str, allow_file_not_found_exc: bool = False
+        self,
+        name: str,
+        allow_file_not_found_exc: bool = False,
+        store_copy: bool = True,
     ) -> List[DataCatalog]:
         """
         Does nothing
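Note: both `put` signatures gain a `store_copy: bool = True` keyword, so third-party catalog implementations have to accept it even if they ignore it. A rough sketch of a conforming subclass; only the `put` signature comes from the diff, while `service_name` and the `get`/`sync_between_runs` stubs are assumptions about the rest of the abstract interface:

```python
from typing import List

from runnable.catalog import BaseCatalog
from runnable.datastore import DataCatalog


class NullCatalog(BaseCatalog):
    """Hypothetical catalog sketching the 1.0.0 interface."""

    service_name: str = "null"

    def get(self, name: str) -> List[DataCatalog]:  # assumed abstract method
        return []

    def put(
        self, name: str, allow_file_not_found_exc: bool = False, store_copy: bool = True
    ) -> List[DataCatalog]:
        # store_copy is new in 1.0.0; accept it to match the base signature.
        return []

    def sync_between_runs(self, previous_run_id: str, run_id: str) -> None:
        ...  # assumed abstract method; seen used via context.catalog in retry.py above
```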
runnable/cli.py CHANGED
@@ -1,10 +1,13 @@
 import logging
+import os
 from enum import Enum
+from pathlib import Path
 from typing import Annotated
 
 import typer
 
 from runnable import defaults, entrypoints
+from runnable.gantt import SimpleVisualizer, generate_html_timeline, visualize_simple
 
 logger = logging.getLogger(defaults.LOGGER_NAME)
 
@@ -223,14 +226,21 @@ def fan(
     )
 
 
-@app.command()
-def submit_job(
+@app.command(hidden=True)
+def execute_job(
     job_definition_file: Annotated[
         str,
         typer.Argument(
             help=("The yaml file containing the job definition"),
         ),
     ],
+    run_id: Annotated[
+        str,
+        typer.Argument(
+            envvar="RUNNABLE_RUN_ID",
+            help="An optional run_id, one would be generated if its not provided",
+        ),
+    ] = "",
     config_file: Annotated[
         str,
         typer.Option(
@@ -255,16 +265,10 @@ def submit_job(
         ),
     ] = LogLevel.WARNING,
     tag: Annotated[str, typer.Option(help="A tag attached to the run")] = "",
-    run_id: Annotated[
-        str,
-        typer.Option(
-            help="An optional run_id, one would be generated if its not provided"
-        ),
-    ] = "",
 ):
     logger.setLevel(log_level.value)
 
-    entrypoints.execute_job_yaml_spec(
+    entrypoints.execute_job_non_local(
         configuration_file=config_file,
         job_definition_file=job_definition_file,
         tag=tag,
@@ -273,43 +277,29 @@ def submit_job(
     )
 
 
-@app.command(hidden=True)
-def execute_job(
-    job_definition_file: Annotated[
-        str,
-        typer.Argument(
-            help=("The yaml file containing the job definition"),
-        ),
+@app.command()
+def timeline(
+    run_id_or_path: Annotated[
+        str, typer.Argument(help="Run ID to visualize, or path to JSON run log file")
     ],
-    run_id: Annotated[
-        str,
-        typer.Argument(
-            envvar="RUNNABLE_RUN_ID",
-            help="An optional run_id, one would be generated if its not provided",
-        ),
-    ] = "",
-    config_file: Annotated[
+    output: Annotated[
         str,
-        typer.Option(
-            "--config", "-c", help="The configuration file specifying the services"
-        ),
+        typer.Option("--output", "-o", help="Output HTML file path"),
     ] = "",
-    parameters_file: Annotated[
-        str,
+    console: Annotated[
+        bool,
         typer.Option(
-            "--parameters",
-            "-p",
-            help="Parameters, in yaml, accessible by the application",
+            "--console/--no-console",
+            help="Show console timeline output (default: true)",
         ),
-    ] = "",
-    mode: Annotated[
-        ExecutionMode,
+    ] = True,
+    open_browser: Annotated[
+        bool,
         typer.Option(
-            "--mode",
-            "-m",
-            help="spec in yaml or python sdk",
+            "--open/--no-open",
+            help="Automatically open the generated file in default browser",
         ),
-    ] = ExecutionMode.YAML,
+    ] = True,
     log_level: Annotated[
         LogLevel,
         typer.Option(
@@ -319,18 +309,125 @@ def execute_job(
             case_sensitive=False,
         ),
     ] = LogLevel.WARNING,
-    tag: Annotated[str, typer.Option(help="A tag attached to the run")] = "",
 ):
+    """
+    Visualize pipeline execution as an interactive timeline.
+
+    This command creates lightweight timeline visualizations that effectively
+    show composite nodes (parallel, map, conditional) with hierarchical structure,
+    timing information, and execution metadata.
+
+    The new visualization system provides:
+    - Clean console output with hierarchical display
+    - Interactive HTML with hover tooltips and expandable sections
+    - Proper support for all composite pipeline types
+    - Rich metadata including commands, parameters, and catalog operations
+
+    By default, shows console output AND generates HTML file with browser opening.
+
+    Input Options:
+    - Run ID: Looks up JSON file in .run_log_store/ directory
+    - JSON Path: Direct path to run log JSON file (flexible for any config)
+
+    Examples:
+        # Using Run ID (looks in .run_log_store/)
+        runnable timeline forgiving-joliot-0645              # Console + HTML + browser
+        runnable timeline parallel-run --output custom.html  # Console + custom HTML + browser
+
+        # Using JSON file path (any location)
+        runnable timeline /path/to/my-run.json                # Console + HTML + browser
+        runnable timeline ../logs/pipeline-run.json --no-open # Console + HTML, no browser
+        runnable timeline ~/experiments/run.json --no-console # HTML + browser only
+
+        # Other options
+        runnable timeline complex-pipeline --no-open          # Console + HTML, no browser
+        runnable timeline simple-run --no-console --no-open   # HTML only, no browser
+    """
     logger.setLevel(log_level.value)
 
-    entrypoints.execute_job_non_local(
-        configuration_file=config_file,
-        job_definition_file=job_definition_file,
-        mode=mode,
-        tag=tag,
-        run_id=run_id,
-        parameters_file=parameters_file,
-    )
+    # Determine if input is a file path or run ID
+    if os.path.exists(run_id_or_path) or run_id_or_path.endswith(".json"):
+        # Input is a file path
+        json_file_path = Path(run_id_or_path)
+        if not json_file_path.exists():
+            print(f"❌ JSON file not found: {json_file_path}")
+            return
+
+        # Extract run ID from the file for default naming
+        run_id = json_file_path.stem
+        mode = "file"
+    else:
+        # Input is a run ID - use existing behavior
+        run_id = run_id_or_path
+        json_file_path = None
+        mode = "run_id"
+
+    # Default console behavior: always show console output
+    show_console = console if console is not None else True
+
+    if output:
+        # Generate HTML file with console output
+        output_file = output
+        print(f"🌐 Generating timeline: {output_file}")
+
+        if show_console:
+            # Show console output first, then generate HTML
+            if mode == "file":
+                _visualize_simple_from_file(json_file_path, show_summary=False)
+            else:
+                visualize_simple(run_id, show_summary=False)
+            print(f"\n🌐 Generating HTML timeline: {output_file}")
+
+        if mode == "file":
+            _generate_html_timeline_from_file(json_file_path, output_file, open_browser)
+        else:
+            generate_html_timeline(run_id, output_file, open_browser)
+    else:
+        # Default behavior: show console + generate HTML with browser
+        if show_console:
+            if mode == "file":
+                _visualize_simple_from_file(json_file_path, show_summary=False)
+            else:
+                visualize_simple(run_id, show_summary=False)
+
+        # Always generate HTML file and open browser by default
+        output_file = f"{run_id}_timeline.html"
+        print(f"\n🌐 Generating HTML timeline: {output_file}")
+        if mode == "file":
+            _generate_html_timeline_from_file(json_file_path, output_file, open_browser)
+        else:
+            generate_html_timeline(run_id, output_file, open_browser)
+
+
+def _visualize_simple_from_file(json_file_path, show_summary: bool = False) -> None:
+    """Visualize timeline from JSON file path."""
+
+    try:
+        viz = SimpleVisualizer(json_file_path)
+        viz.print_simple_timeline()
+        if show_summary:
+            viz.print_execution_summary()
+    except Exception as e:
+        print(f"❌ Error reading JSON file: {e}")
+
+
+def _generate_html_timeline_from_file(
+    json_file_path, output_file: str, open_browser: bool = True
+) -> None:
+    """Generate HTML timeline from JSON file path."""
+
+    try:
+        viz = SimpleVisualizer(json_file_path)
+        viz.generate_html_timeline(output_file)
+
+        if open_browser:
+            import webbrowser
+
+            file_path = Path(output_file).absolute()
+            print(f"🌐 Opening timeline in browser: {file_path.name}")
+            webbrowser.open(file_path.as_uri())
+    except Exception as e:
+        print(f"❌ Error generating HTML: {e}")
 
 
 if __name__ == "__main__":
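Note: the new `timeline` command wraps helpers from the new `runnable/gantt.py` module, so the same visualizations can presumably be driven programmatically. A sketch using only the names and call shapes visible in the CLI code above; the run id and file paths reuse the docstring's examples, and since the keyword name of `generate_html_timeline`'s third argument is not shown, it is passed positionally:

```python
# Sketch of driving the timeline visualizer without the CLI; call shapes are
# copied from runnable/cli.py above, the run id and paths are illustrative.
from runnable.gantt import SimpleVisualizer, generate_html_timeline, visualize_simple

# From a run id (resolved against .run_log_store/, as the CLI does):
visualize_simple("forgiving-joliot-0645", show_summary=False)
generate_html_timeline("forgiving-joliot-0645", "timeline.html", False)  # False: do not open a browser

# From a run-log JSON file:
viz = SimpleVisualizer(".run_log_store/forgiving-joliot-0645.json")
viz.print_simple_timeline()
viz.generate_html_timeline("timeline.html")
```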