runnable 0.12.1-py3-none-any.whl → 0.12.3-py3-none-any.whl

runnable/__init__.py CHANGED
@@ -15,6 +15,8 @@ logger = logging.getLogger(defaults.LOGGER_NAME)
  console = Console(record=True)
  console.print(":runner: Lets go!!")

+ task_console = Console(record=True)
+
  from runnable.sdk import ( # noqa
      Catalog,
      Fail,
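Why the second console matters (a minimal sketch, assuming only the public Rich API): `task_console` is a separate recording `Console`, so output produced while a single task runs can be exported to its own log file without mixing with the run-level `console`. The file name below is illustrative.

```python
from rich.console import Console

console = Console(record=True)       # run-level messages
task_console = Console(record=True)  # per-task output, exported and cleared around each task

task_console.print("output produced while one task runs")
task_console.save_text("task_output.log", clear=True)  # illustrative file name
```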
runnable/defaults.py CHANGED
@@ -77,7 +77,7 @@ DEFAULT_CONTAINER_OUTPUT_PARAMETERS = "parameters.json"
  DEFAULT_EXECUTOR = ServiceConfig(type="local", config={})
  DEFAULT_RUN_LOG_STORE = ServiceConfig(type="file-system", config={})
  DEFAULT_CATALOG = ServiceConfig(type="file-system", config={})
- DEFAULT_SECRETS = ServiceConfig(type="do-nothing", config={})
+ DEFAULT_SECRETS = ServiceConfig(type="env-secrets", config={})
  DEFAULT_EXPERIMENT_TRACKER = ServiceConfig(type="do-nothing", config={})
  DEFAULT_PICKLER = ServiceConfig(type="pickle", config={})

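The default secrets handler moves from `do-nothing` to `env-secrets`. A hedged sketch of the user-facing effect, assuming the `env-secrets` type simply resolves secrets from the process environment (the variable name is illustrative):

```python
import os

# With an environment-variable based default, a secret named in a task's
# `secrets` list is expected to exist in the environment of the run...
os.environ["MY_API_TOKEN"] = "dummy-value"  # normally exported outside the program

# ...and, per the sdk docstrings, it is exposed to the task as an environment
# variable and removed again after the task finishes.
print(os.environ.get("MY_API_TOKEN"))
```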
runnable/entrypoints.py CHANGED
@@ -9,7 +9,7 @@ from rich.progress import BarColumn, Progress, TextColumn, TimeElapsedColumn
  from rich.table import Column

  import runnable.context as context
- from runnable import console, defaults, graph, utils
+ from runnable import console, defaults, graph, task_console, utils
  from runnable.defaults import RunnableConfig, ServiceConfig

  logger = logging.getLogger(defaults.LOGGER_NAME)
@@ -165,6 +165,7 @@ def execute(
      tag=tag,
      parameters_file=parameters_file,
  )
+
  console.print("Working with context:")
  console.print(run_context)
  console.rule(style="[dark orange]")
@@ -239,7 +240,7 @@ def execute_single_node(
  """
  from runnable import nodes

- console.print(f"Executing the single node: {step_name} with map variable: {map_variable}")
+ task_console.print(f"Executing the single node: {step_name} with map variable: {map_variable}")

  configuration_file = os.environ.get("RUNNABLE_CONFIGURATION_FILE", configuration_file)

@@ -250,9 +251,9 @@ def execute_single_node(
      tag=tag,
      parameters_file=parameters_file,
  )
- console.print("Working with context:")
- console.print(run_context)
- console.rule(style="[dark orange]")
+ task_console.print("Working with context:")
+ task_console.print(run_context)
+ task_console.rule(style="[dark orange]")

  executor = run_context.executor
  run_context.execution_plan = defaults.EXECUTION_PLAN.CHAINED.value
@@ -281,7 +282,7 @@ def execute_single_node(
      node=node_to_execute,
      map_variable=map_variable_dict,
  )
- console.save_text(log_file_name)
+ task_console.save_text(log_file_name)

  # Put the log file in the catalog
  run_context.catalog_handler.put(name=log_file_name, run_id=run_context.run_id)
@@ -11,6 +11,7 @@ from runnable import (
      exceptions,
      integration,
      parameters,
+     task_console,
      utils,
  )
  from runnable.datastore import DataCatalog, JsonParameter, RunLog, StepLog
@@ -340,10 +341,18 @@ class GenericExecutor(BaseExecutor):
          node.execute_as_graph(map_variable=map_variable, **kwargs)
          return

+     task_console.export_text(clear=True)
+
      task_name = node._resolve_map_placeholders(node.internal_name, map_variable)
      console.print(f":runner: Executing the node {task_name} ... ", style="bold color(208)")
      self.trigger_job(node=node, map_variable=map_variable, **kwargs)

+     log_file_name = utils.make_log_file_name(node=node, map_variable=map_variable)
+     task_console.save_text(log_file_name, clear=True)
+
+     self._context.catalog_handler.put(name=log_file_name, run_id=self._context.run_id)
+     os.remove(log_file_name)
+
  def trigger_job(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs):
      """
      Call this method only if we are responsible for traversing the graph via
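Read together, the added lines give each node's log a lifecycle: clear the recorded buffer before the node runs, save the captured text to a file once it finishes, push that file to the catalog, and delete the local copy. A self-contained sketch of the same flow using only Rich and the standard library; the catalog call is replaced by a placeholder and the file name is illustrative.

```python
import os

from rich.console import Console

task_console = Console(record=True)


def put_in_catalog(path: str) -> None:
    """Stand-in for run_context.catalog_handler.put(name=..., run_id=...)."""
    print(f"would upload {path} to the catalog")


task_console.export_text(clear=True)               # drop anything recorded for earlier nodes
task_console.print("log lines emitted while the node executes")
log_file_name = "example_step.execution.log"       # utils.make_log_file_name(...) in the real code
task_console.save_text(log_file_name, clear=True)  # write the captured text and reset the buffer
put_in_catalog(log_file_name)
os.remove(log_file_name)                           # keep only the catalog copy
```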
@@ -493,6 +502,7 @@ class GenericExecutor(BaseExecutor):

      logger.info(f"Finished execution of the {branch} with status {run_log.status}")

+     # We are in the root dag
      if dag == self._context.dag:
          run_log = cast(RunLog, run_log)
          console.print("Completed Execution, Summary:", style="bold color(208)")
@@ -5,7 +5,7 @@ from typing import Dict, cast
  from pydantic import Field
  from rich import print

- from runnable import defaults, utils
+ from runnable import console, defaults, task_console, utils
  from runnable.datastore import StepLog
  from runnable.defaults import TypeMapVariable
  from runnable.extensions.executor import GenericExecutor
@@ -96,6 +96,59 @@ class LocalContainerExecutor(GenericExecutor):
      """
      return self._execute_node(node, map_variable, **kwargs)

+ def execute_from_graph(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs):
+     """
+     This is the entry point to from the graph execution.
+
+     While the self.execute_graph is responsible for traversing the graph, this function is responsible for
+     actual execution of the node.
+
+     If the node type is:
+         * task : We can delegate to _execute_node after checking the eligibility for re-run in cases of a re-run
+         * success: We can delegate to _execute_node
+         * fail: We can delegate to _execute_node
+
+     For nodes that are internally graphs:
+         * parallel: Delegate the responsibility of execution to the node.execute_as_graph()
+         * dag: Delegate the responsibility of execution to the node.execute_as_graph()
+         * map: Delegate the responsibility of execution to the node.execute_as_graph()
+
+     Transpilers will NEVER use this method and will NEVER call ths method.
+     This method should only be used by interactive executors.
+
+     Args:
+         node (Node): The node to execute
+         map_variable (dict, optional): If the node if of a map state, this corresponds to the value of iterable.
+             Defaults to None.
+     """
+     step_log = self._context.run_log_store.create_step_log(node.name, node._get_step_log_name(map_variable))
+
+     self.add_code_identities(node=node, step_log=step_log)
+
+     step_log.step_type = node.node_type
+     step_log.status = defaults.PROCESSING
+
+     self._context.run_log_store.add_step_log(step_log, self._context.run_id)
+
+     logger.info(f"Executing node: {node.get_summary()}")
+
+     # Add the step log to the database as per the situation.
+     # If its a terminal node, complete it now
+     if node.node_type in ["success", "fail"]:
+         self._execute_node(node, map_variable=map_variable, **kwargs)
+         return
+
+     # We call an internal function to iterate the sub graphs and execute them
+     if node.is_composite:
+         node.execute_as_graph(map_variable=map_variable, **kwargs)
+         return
+
+     task_console.export_text(clear=True)
+
+     task_name = node._resolve_map_placeholders(node.internal_name, map_variable)
+     console.print(f":runner: Executing the node {task_name} ... ", style="bold color(208)")
+     self.trigger_job(node=node, map_variable=map_variable, **kwargs)
+
  def execute_job(self, node: TaskNode):
      """
      Set up the step log and call the execute node
runnable/sdk.py CHANGED
@@ -61,11 +61,9 @@ class Catalog(BaseModel):
  put (List[str]): List of glob patterns to put into central catalog from the compute data folder.

  Examples:
- >>> from runnable import Catalog, Task
+ >>> from runnable import Catalog
  >>> catalog = Catalog(compute_data_folder="/path/to/data", get=["*.csv"], put=["*.csv"])

- >>> task = Task(name="task", catalog=catalog, command="echo 'hello'")
-
  """

  model_config = ConfigDict(extra="forbid") # Need to be for command, would be validated later
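Since the removed example referenced the old `Task` class, here is a hedged sketch of the same idea against the current task types (per the updated docstrings below, each task accepts a `catalog`); the command and glob patterns are illustrative.

```python
from runnable import Catalog, ShellTask

catalog = Catalog(get=["*.csv"], put=["*.csv"])

generate = ShellTask(
    name="generate",
    command="echo 'col' > data.csv",  # illustrative: produces a file matching the put pattern
    catalog=catalog,
    terminate_with_success=True,
)
```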
@@ -143,50 +141,7 @@ class BaseTraversal(ABC, BaseModel):

  class BaseTask(BaseTraversal):
  """
- An execution node of the pipeline.
- Please refer to [concepts](concepts/task.md) for more information.
-
- Attributes:
- name (str): The name of the node.
- command (str): The command to execute.
-
-     - For python functions, [dotted path](concepts/task.md/#python_functions) to the function.
-     - For shell commands: command to execute in the shell.
-     - For notebooks: path to the notebook.
- command_type (str): The type of command to execute.
-     Can be one of "shell", "python", or "notebook".
- catalog (Optional[Catalog]): The catalog to sync data from/to.
-     Please see Catalog about the structure of the catalog.
- overrides (Dict[str, Any]): Any overrides to the command.
-     Individual tasks can override the global configuration config by referring to the
-     specific override.
-
-     For example,
-     ### Global configuration
-     ```yaml
-     executor:
-       type: local-container
-       config:
-         docker_image: "runnable/runnable:latest"
-         overrides:
-           custom_docker_image:
-             docker_image: "runnable/runnable:custom"
-     ```
-     ### Task specific configuration
-     ```python
-     task = Task(name="task", command="echo 'hello'", command_type="shell",
-             overrides={'local-container': custom_docker_image})
-     ```
- notebook_output_path (Optional[str]): The path to save the notebook output.
-     Only used when command_type is 'notebook', defaults to command+_out.ipynb
- optional_ploomber_args (Optional[Dict[str, Any]]): Any optional ploomber args.
-     Only used when command_type is 'notebook', defaults to {}
- output_cell_tag (Optional[str]): The tag of the output cell.
-     Only used when command_type is 'notebook', defaults to "runnable_output"
- terminate_with_failure (bool): Whether to terminate the pipeline with a failure after this node.
- terminate_with_success (bool): Whether to terminate the pipeline with a success after this node.
- on_failure (str): The name of the node to execute if the step fails.
-
+ Base task type which has catalog, overrides, returns and secrets.
  """

  catalog: Optional[Catalog] = Field(default=None, alias="catalog")
@@ -220,12 +175,50 @@ class BaseTask(BaseTraversal):
  class PythonTask(BaseTask):
  """
  An execution node of the pipeline of python functions.
+ Please refer to [concepts](concepts/task.md/#python_functions) for more information.

  Attributes:
  name (str): The name of the node.
  function (callable): The function to execute.
- catalog (Optional[Catalog]): The catalog to sync data from/to.
-     Please see Catalog about the structure of the catalog.
+
+ terminate_with_success (bool): Whether to terminate the pipeline with a success after this node.
+     Defaults to False.
+ terminate_with_failure (bool): Whether to terminate the pipeline with a failure after this node.
+     Defaults to False.
+
+ on_failure (str): The name of the node to execute if the step fails.
+
+ returns List[Union[str, TaskReturns]] : A list of the names of variables to return from the task.
+     The names should match the order of the variables returned by the function.
+
+     ```TaskReturns```: can be JSON friendly variables, objects or metrics.
+
+     By default, all variables are assumed to be JSON friendly and will be serialized to JSON.
+     Pydantic models are readily supported and will be serialized to JSON.
+
+     To return a python object, please use ```pickled(<name>)```.
+     It is advised to use ```pickled(<name>)``` for big JSON friendly variables.
+
+     For example,
+     ```python
+     from runnable import pickled
+
+     def f():
+         ...
+         x = 1
+         return x, df # A simple JSON friendly variable and a python object.
+
+     task = PythonTask(name="task", function=f, returns=["x", pickled(df)]))
+     ```
+
+     To mark any JSON friendly variable as a ```metric```, please use ```metric(x)```.
+     Metric variables should be JSON friendly and can be treated just like any other parameter.
+
+ catalog Optional[Catalog]: The files sync data from/to, refer to Catalog.
+
+ secrets List[str]: List of secrets to pass to the task. They are exposed as environment variables
+     and removed after execution.
+
  overrides (Dict[str, Any]): Any overrides to the command.
      Individual tasks can override the global configuration config by referring to the
      specific override.
@@ -246,11 +239,6 @@ class PythonTask(BaseTask):
      task = PythonTask(name="task", function="function'",
              overrides={'local-container': custom_docker_image})
      ```
-
- terminate_with_failure (bool): Whether to terminate the pipeline with a failure after this node.
- terminate_with_success (bool): Whether to terminate the pipeline with a success after this node.
- on_failure (str): The name of the node to execute if the step fails.
-
  """

  function: Callable = Field(exclude=True)
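A short, self-contained sketch tying the new docstring together: plain names in `returns` are treated as JSON friendly, `pickled(...)` keeps a python object, and `metric(...)` tracks a JSON friendly value as a metric. The function body and names are assumptions, and the import location of `pickled`/`metric` is taken from the docstring rather than verified here.

```python
from runnable import PythonTask, metric, pickled


def train():
    model = {"weights": [0.1, 0.2]}  # stands in for a non JSON friendly object
    accuracy = 0.92                  # JSON friendly value tracked as a metric
    return model, accuracy


task = PythonTask(
    name="train",
    function=train,
    returns=[pickled("model"), metric("accuracy")],
    terminate_with_success=True,
)
```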
@@ -269,15 +257,52 @@ class PythonTask(BaseTask):

  class NotebookTask(BaseTask):
  """
- An execution node of the pipeline of type notebook.
- Please refer to [concepts](concepts/task.md) for more information.
+ An execution node of the pipeline of notebook.
+ Please refer to [concepts](concepts/task.md/#notebooks) for more information.
+
+ We internally use [Ploomber engine](https://github.com/ploomber/ploomber-engine) to execute the notebook.

  Attributes:
  name (str): The name of the node.
- notebook: The path to the notebook
- catalog (Optional[Catalog]): The catalog to sync data from/to.
-     Please see Catalog about the structure of the catalog.
- returns: A list of the names of variables to return from the notebook.
+ notebook (str): The path to the notebook relative the project root.
+ optional_ploomber_args (Dict[str, Any]): Any optional ploomber args, please refer to
+     [Ploomber engine](https://github.com/ploomber/ploomber-engine) for more information.
+
+ terminate_with_success (bool): Whether to terminate the pipeline with a success after this node.
+     Defaults to False.
+ terminate_with_failure (bool): Whether to terminate the pipeline with a failure after this node.
+     Defaults to False.
+
+ on_failure (str): The name of the node to execute if the step fails.
+
+ returns List[Union[str, TaskReturns]] : A list of the names of variables to return from the task.
+     The names should match the order of the variables returned by the function.
+
+     ```TaskReturns```: can be JSON friendly variables, objects or metrics.
+
+     By default, all variables are assumed to be JSON friendly and will be serialized to JSON.
+     Pydantic models are readily supported and will be serialized to JSON.
+
+     To return a python object, please use ```pickled(<name>)```.
+     It is advised to use ```pickled(<name>)``` for big JSON friendly variables.
+
+     For example,
+     ```python
+     from runnable import pickled
+
+     # assume, example.ipynb is the notebook with df and x as variables in some cells.
+
+     task = Notebook(name="task", notebook="example.ipynb", returns=["x", pickled(df)]))
+     ```
+
+     To mark any JSON friendly variable as a ```metric```, please use ```metric(x)```.
+     Metric variables should be JSON friendly and can be treated just like any other parameter.
+
+ catalog Optional[Catalog]: The files sync data from/to, refer to Catalog.
+
+ secrets List[str]: List of secrets to pass to the task. They are exposed as environment variables
+     and removed after execution.
+
  overrides (Dict[str, Any]): Any overrides to the command.
      Individual tasks can override the global configuration config by referring to the
      specific override.
@@ -295,18 +320,9 @@ class NotebookTask(BaseTask):
      ```
      ### Task specific configuration
      ```python
-     task = NotebookTask(name="task", notebook="evaluation.ipynb",
+     task = NotebookTask(name="task", notebook="example.ipynb",
              overrides={'local-container': custom_docker_image})
      ```
- notebook_output_path (Optional[str]): The path to save the notebook output.
-     Only used when command_type is 'notebook', defaults to command+_out.ipynb
- optional_ploomber_args (Optional[Dict[str, Any]]): Any optional ploomber args.
-     Only used when command_type is 'notebook', defaults to {}
-
- terminate_with_failure (bool): Whether to terminate the pipeline with a failure after this node.
- terminate_with_success (bool): Whether to terminate the pipeline with a success after this node.
- on_failure (str): The name of the node to execute if the step fails.
-
  """

  notebook: str = Field(serialization_alias="command")
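A hedged, self-contained counterpart to the docstring fragment above (the notebook path and the variable names it is assumed to define are illustrative):

```python
from runnable import NotebookTask, pickled

# assume example.ipynb assigns x (JSON friendly) and df (a python object) in its cells
task = NotebookTask(
    name="evaluate",
    notebook="example.ipynb",
    returns=["x", pickled("df")],
    terminate_with_success=True,
)
```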
@@ -319,15 +335,33 @@ class NotebookTask(BaseTask):

  class ShellTask(BaseTask):
  """
- An execution node of the pipeline of type shell.
- Please refer to [concepts](concepts/task.md) for more information.
+ An execution node of the pipeline of shell script.
+ Please refer to [concepts](concepts/task.md/#shell) for more information.
+

  Attributes:
  name (str): The name of the node.
- command: The shell command to execute.
- catalog (Optional[Catalog]): The catalog to sync data from/to.
-     Please see Catalog about the structure of the catalog.
- returns: A list of the names of variables to capture from environment variables of shell.
+ command (str): The path to the notebook relative the project root.
+ terminate_with_success (bool): Whether to terminate the pipeline with a success after this node.
+     Defaults to False.
+ terminate_with_failure (bool): Whether to terminate the pipeline with a failure after this node.
+     Defaults to False.
+
+ on_failure (str): The name of the node to execute if the step fails.
+
+ returns List[str] : A list of the names of environment variables to collect from the task.
+
+     The names should match the order of the variables returned by the function.
+     Shell based tasks can only return JSON friendly variables.
+
+     To mark any JSON friendly variable as a ```metric```, please use ```metric(x)```.
+     Metric variables should be JSON friendly and can be treated just like any other parameter.
+
+ catalog Optional[Catalog]: The files sync data from/to, refer to Catalog.
+
+ secrets List[str]: List of secrets to pass to the task. They are exposed as environment variables
+     and removed after execution.
+
  overrides (Dict[str, Any]): Any overrides to the command.
      Individual tasks can override the global configuration config by referring to the
      specific override.
@@ -345,14 +379,10 @@ class ShellTask(BaseTask):
      ```
      ### Task specific configuration
      ```python
-     task = ShellTask(name="task", command="exit 0",
+     task = ShellTask(name="task", command="export x=1",
              overrides={'local-container': custom_docker_image})
      ```

- terminate_with_failure (bool): Whether to terminate the pipeline with a failure after this node.
- terminate_with_success (bool): Whether to terminate the pipeline with a success after this node.
- on_failure (str): The name of the node to execute if the step fails.
-
  """

  command: str = Field(alias="command")
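Per the new docstring, a shell task's `returns` are collected from environment variables exported by the command; a minimal sketch with an illustrative variable name:

```python
from runnable import ShellTask

# "x" is exported by the command and listed in returns, so it is collected
# from the shell's environment as a JSON friendly parameter.
task = ShellTask(
    name="set_x",
    command="export x=1",
    returns=["x"],
    terminate_with_success=True,
)
```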
@@ -364,16 +394,20 @@ class ShellTask(BaseTask):

  class Stub(BaseTraversal):
  """
- A node that does nothing.
+ A node that passes through the pipeline with no action. Just like ```pass``` in Python.
+ Please refer to [concepts](concepts/task.md/#stub) for more information.

  A stub node can tak arbitrary number of arguments.
- Please refer to [concepts](concepts/stub.md) for more information.

  Attributes:
  name (str): The name of the node.
- terminate_with_failure (bool): Whether to terminate the pipeline with a failure after this node.
+ command (str): The path to the notebook relative the project root.
  terminate_with_success (bool): Whether to terminate the pipeline with a success after this node.
+     Defaults to False.
+ terminate_with_failure (bool): Whether to terminate the pipeline with a failure after this node.
+     Defaults to False.

+ on_failure (str): The name of the node to execute if the step fails.
  """

  model_config = ConfigDict(extra="ignore")
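Because the model ignores extra fields, a stub can stand in for a step that is not written yet without changing the surrounding pipeline; a minimal sketch with assumed names:

```python
from runnable import Stub

# Arbitrary keyword arguments are accepted and ignored, so the real task can be
# swapped in later without touching the call site.
placeholder = Stub(name="to_be_implemented", command="anything goes", terminate_with_success=True)
```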
@@ -422,12 +456,13 @@ class Map(BaseTraversal):
  Please refer to [concepts](concepts/map.md) for more information.

  Attributes:
- branch: The pipeline to execute for each item.
+ branch (Pipeline): The pipeline to execute for each item.

- iterate_on: The name of the parameter to iterate over.
+ iterate_on (str): The name of the parameter to iterate over.
      The parameter should be defined either by previous steps or statically at the start of execution.

- iterate_as: The name of the iterable to be passed to functions.
+ iterate_as (str): The name of the iterable to be passed to functions.
+ reducer (Callable): The function to reduce the results of the branches.


  overrides (Dict[str, Any]): Any overrides to the command.
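A hedged sketch of the documented attributes: the branch pipeline runs once per value of the parameter named by `iterate_on`, and each value is handed to the branch under the name given by `iterate_as` (the parameter, function, and step names are assumptions):

```python
from runnable import Map, Pipeline, PythonTask


def process(chunk: int):
    print(f"processing chunk {chunk}")


branch = Pipeline(
    steps=[PythonTask(name="process", function=process, terminate_with_success=True)],
)

map_step = Map(
    name="process_all",
    branch=branch,
    iterate_on="chunks",  # a parameter set by an earlier step or the parameters file
    iterate_as="chunk",   # the name under which each value reaches the branch
    terminate_with_success=True,
)
```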
@@ -510,29 +545,44 @@ class Fail(BaseModel):

  class Pipeline(BaseModel):
  """
- A Pipeline is a directed acyclic graph of Steps that define a workflow.
+ A Pipeline is a sequence of Steps.

  Attributes:
- steps (List[Stub | PythonTask | NotebookTask | ShellTask | Parallel | Map | Success | Fail]):
+ steps (List[Stub | PythonTask | NotebookTask | ShellTask | Parallel | Map]]):
      A list of Steps that make up the Pipeline.
- start_at (Stub | Task | Parallel | Map): The name of the first Step in the Pipeline.
+
+     The order of steps is important as it determines the order of execution.
+     Any on failure behavior should the first step in ```on_failure``` pipelines.
+
+
+
+ on_failure (List[List[Pipeline], optional): A list of Pipelines to execute in case of failure.
+
+     For example, for the below pipeline:
+         step1 >> step2
+     and step1 to reach step3 in case of failure.
+
+     failure_pipeline = Pipeline(steps=[step1, step3])
+
+     pipeline = Pipeline(steps=[step1, step2, on_failure=[failure_pipeline])
+
  name (str, optional): The name of the Pipeline. Defaults to "".
  description (str, optional): A description of the Pipeline. Defaults to "".
- add_terminal_nodes (bool, optional): Whether to add terminal nodes to the Pipeline. Defaults to True.

- The default behavior is to add "success" and "fail" nodes to the Pipeline.
- To add custom success and fail nodes, set add_terminal_nodes=False and create success
- and fail nodes manually.
+ The pipeline implicitly add success and fail nodes.

  """

- steps: List[Union[StepType, List[StepType]]]
+ steps: List[Union[StepType, List["Pipeline"]]]
  name: str = ""
  description: str = ""
- add_terminal_nodes: bool = True # Adds "success" and "fail" nodes

  internal_branch_name: str = ""

+ @property
+ def add_terminal_nodes(self) -> bool:
+     return True
+
  _dag: graph.Graph = PrivateAttr()
  model_config = ConfigDict(extra="forbid")

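The inline example in the docstring above is terse (and its brackets are unbalanced), so here is a hedged reconstruction of the apparent intent, with step names and commands of my own choosing: the success path is step1 then step2, and a failure of step1 is meant to divert to a pipeline that starts at step3.

```python
from runnable import Pipeline, ShellTask

step1 = ShellTask(name="step1", command="exit 1", on_failure="step3")
step2 = ShellTask(name="step2", command="echo 'happy path'", terminate_with_success=True)
step3 = ShellTask(name="step3", command="echo 'failure path'", terminate_with_success=True)

failure_pipeline = Pipeline(steps=[step1, step3])

# Assumed reading of the docstring: the failure pipeline is supplied alongside
# the main steps as a nested list, matching the new `steps` type annotation;
# the exact wiring is not fully settled by this diff alone.
pipeline = Pipeline(steps=[step1, step2, [failure_pipeline]])
```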
@@ -590,6 +640,7 @@ class Pipeline(BaseModel):
  Any definition of pipeline should have one node that terminates with success.
  """
  # TODO: Bug with repeat names
+ # TODO: https://github.com/AstraZeneca/runnable/issues/156

  success_path: List[StepType] = []
  on_failure_paths: List[List[StepType]] = []
@@ -598,7 +649,7 @@ class Pipeline(BaseModel):
  if isinstance(step, (Stub, PythonTask, NotebookTask, ShellTask, Parallel, Map)):
      success_path.append(step)
      continue
- on_failure_paths.append(step)
+ # on_failure_paths.append(step)

  if not success_path:
      raise Exception("There should be some success path")
@@ -654,21 +705,19 @@ class Pipeline(BaseModel):

  Traverse and execute all the steps of the pipeline, eg. [local execution](configurations/executors/local.md).

- Or create the ```yaml``` representation of the pipeline for other executors.
+ Or create the representation of the pipeline for other executors.

  Please refer to [concepts](concepts/executor.md) for more information.

  Args:
  configuration_file (str, optional): The path to the configuration file. Defaults to "".
-     The configuration file can be overridden by the environment variable runnable_CONFIGURATION_FILE.
+     The configuration file can be overridden by the environment variable RUNNABLE_CONFIGURATION_FILE.

  run_id (str, optional): The ID of the run. Defaults to "".
  tag (str, optional): The tag of the run. Defaults to "".
      Use to group multiple runs.

  parameters_file (str, optional): The path to the parameters file. Defaults to "".
- use_cached (str, optional): Whether to use cached results. Defaults to "".
-     Provide the run_id of the older execution to recover.

  log_level (str, optional): The log level. Defaults to defaults.LOG_LEVEL.
  """
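A short usage sketch of the arguments documented above; the file names and tag are placeholders, and `pipeline` is assumed to have been built with the SDK as in the examples earlier.

```python
# Either pass the configuration explicitly or set RUNNABLE_CONFIGURATION_FILE.
pipeline.execute(
    configuration_file="config.yaml",
    parameters_file="parameters.yaml",
    tag="nightly",
    log_level="WARNING",
)
```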