runnable 0.2.0-py3-none-any.whl → 0.3.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -4,10 +4,10 @@
 
 # from pydantic import BaseModel
 
-# from magnus import defaults, integration, utils
-# from magnus.executor import BaseExecutor
-# from magnus.graph import Graph
-# from magnus.nodes import BaseNode
+# from runnable import defaults, integration, utils
+# from runnable.executor import BaseExecutor
+# from runnable.graph import Graph
+# from runnable.nodes import BaseNode
 
 # logger = logging.getLogger(defaults.NAME)
 
@@ -25,6 +25,7 @@ class LocalExecutor(GenericExecutor):
     """
 
     service_name: str = "local"
+    _local: bool = True
 
     def trigger_job(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs):
         """
@@ -55,6 +55,8 @@ class LocalContainerExecutor(GenericExecutor):
     run_in_local: bool = False
     environment: Dict[str, str] = Field(default_factory=dict)
 
+    _local: bool = False
+
     _container_log_location = "/tmp/run_logs/"
     _container_catalog_location = "/tmp/catalog/"
     _container_secrets_location = "/tmp/dotenv"
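
Both executors gain a private `_local` flag; later in this diff, `runnable/sdk.py` branches on `executor._local` instead of `isinstance` checks against concrete executor classes. A minimal sketch of the pattern, assuming pydantic v2 private-attribute semantics (everything here besides the `_local` name is illustrative):

```python
# Sketch of the `_local` flag pattern; assumed semantics, not package source.
# In pydantic v2, underscore-prefixed class attributes become private attributes.
from pydantic import BaseModel


class GenericExecutor(BaseModel):
    service_name: str = "base"
    _local: bool = False


class LocalExecutor(GenericExecutor):
    service_name: str = "local"
    _local: bool = True


executor = LocalExecutor()
if executor._local:
    print("in-process execution: run log is directly accessible")
else:
    print("remote/container execution: hand the pipeline off")
```

This replaces the `isinstance(..., LocalExecutor) or isinstance(..., MockedExecutor)` check removed from `sdk.py` further below.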
@@ -131,7 +133,7 @@ class LocalContainerExecutor(GenericExecutor):
 
 
         If the config has "run_in_local: True", we compute it on local system instead of container.
-        In local container execution, we just spin the container to execute magnus execute_single_node.
+        In local container execution, we just spin the container to execute runnable execute_single_node.
 
         Args:
             node (BaseNode): The node we are currently executing
@@ -198,6 +200,7 @@ class LocalContainerExecutor(GenericExecutor):
 
         try:
             logger.info(f"Running the command {command}")
+            print(command)
             #  Overrides global config with local
             executor_config = self._resolve_executor_config(node)
 
@@ -256,15 +259,6 @@ class LocalContainerComputeFileSystemRunLogstore(BaseIntegration):
     service_type = "run_log_store"  # One of secret, catalog, datastore
     service_provider = "file-system"  # The actual implementation of the service
 
-    def validate(self, **kwargs):
-        if self.executor._is_parallel_execution():  # pragma: no branch
-            msg = (
-                "Run log generated by file-system run log store are not thread safe. "
-                "Inconsistent results are possible because of race conditions to write to the same file.\n"
-                "Consider using partitioned run log store like database for consistent results."
-            )
-            logger.warning(msg)
-
     def configure_for_traversal(self, **kwargs):
         from runnable.extensions.run_log_store.file_system.implementation import FileSystemRunLogstore
 
@@ -8,7 +8,6 @@ from runnable import context, defaults
 from runnable.defaults import TypeMapVariable
 from runnable.extensions.executor import GenericExecutor
 from runnable.extensions.nodes import TaskNode
-from runnable.integration import BaseIntegration
 from runnable.nodes import BaseNode
 from runnable.tasks import BaseTaskType
 
@@ -25,8 +24,7 @@ def create_executable(params: Dict[str, Any], model: Type[BaseTaskType], node_na
 
 class MockedExecutor(GenericExecutor):
     service_name: str = "mocked"
-
-    enable_parallel: bool = defaults.ENABLE_PARALLEL
+    _local_executor: bool = True
 
     patches: Dict[str, Any] = Field(default_factory=dict)
 
@@ -119,6 +117,7 @@ class MockedExecutor(GenericExecutor):
         self.prepare_for_node_execution()
         self.execute_node(node=node, map_variable=map_variable, **kwargs)
 
+    # TODO: This needs to go away
     def _is_step_eligible_for_rerun(self, node: BaseNode, map_variable: TypeMapVariable = None):
         """
         In case of a re-run, this method checks to see if the previous run step status to determine if a re-run is
@@ -188,33 +187,3 @@ class MockedExecutor(GenericExecutor):
             map_variable (dict[str, str], optional): _description_. Defaults to None.
         """
         self._execute_node(node=node, map_variable=map_variable, **kwargs)
-
-
-class LocalContainerComputeFileSystemRunLogstore(BaseIntegration):
-    """
-    Integration between local container and file system run log store
-    """
-
-    executor_type = "local-container"
-    service_type = "run_log_store"  # One of secret, catalog, datastore
-    service_provider = "file-system"  # The actual implementation of the service
-
-    def validate(self, **kwargs):
-        if self.executor._is_parallel_execution():  # pragma: no branch
-            msg = "Mocked executor does not support parallel execution. "
-            logger.warning(msg)
-
-
-class LocalContainerComputeChunkedFSRunLogstore(BaseIntegration):
-    """
-    Integration between local container and file system run log store
-    """
-
-    executor_type = "local-container"
-    service_type = "run_log_store"  # One of secret, catalog, datastore
-    service_provider = "chunked-fs"  # The actual implementation of the service
-
-    def validate(self, **kwargs):
-        if self.executor._is_parallel_execution():  # pragma: no branch
-            msg = "Mocked executor does not support parallel execution. "
-            logger.warning(msg)
@@ -1,10 +1,9 @@
-import json
+import copy
 import logging
-import multiprocessing
 from collections import OrderedDict
 from copy import deepcopy
 from datetime import datetime
-from typing import Any, Dict, cast
+from typing import Any, Dict, Optional, cast
 
 from pydantic import ConfigDict, Field, ValidationInfo, field_serializer, field_validator
 from typing_extensions import Annotated
@@ -44,9 +43,15 @@ class TaskNode(ExecutableNode):
         executable = create_task(task_config)
         return cls(executable=executable, **node_config, **task_config)
 
-    def execute(self, mock=False, map_variable: TypeMapVariable = None, **kwargs) -> StepAttempt:
+    def execute(
+        self,
+        mock=False,
+        params: Optional[Dict[str, Any]] = None,
+        map_variable: TypeMapVariable = None,
+        **kwargs,
+    ) -> StepAttempt:
         """
-        All that we do in magnus is to come to this point where we actually execute the command.
+        All that we do in runnable is to come to this point where we actually execute the command.
 
         Args:
             executor (_type_): The executor class
@@ -62,9 +67,11 @@ class TaskNode(ExecutableNode):
         try:
             attempt_log.start_time = str(datetime.now())
             attempt_log.status = defaults.SUCCESS
+            attempt_log.input_parameters = copy.deepcopy(params)
             if not mock:
                 # Do not run if we are mocking the execution, could be useful for caching and dry runs
-                self.executable.execute_command(map_variable=map_variable)
+                output_parameters = self.executable.execute_command(map_variable=map_variable, params=params)
+                attempt_log.output_parameters = output_parameters
         except Exception as _e:  # pylint: disable=W0703
             logger.exception("Task failed")
             attempt_log.status = defaults.FAIL
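
`execute` now threads a `params` dict through the node and records both inputs and outputs on the attempt log; inputs are deep-copied so mutation by the task does not rewrite what was logged. A rough sketch of that bookkeeping (the `AttemptLog` shape and `run_task` helper are illustrative stand-ins, not the package's models):

```python
# Illustrative sketch of the parameter bookkeeping added above.
import copy
from typing import Any, Dict, Optional


class AttemptLog:
    input_parameters: Optional[Dict[str, Any]] = None
    output_parameters: Optional[Dict[str, Any]] = None


def run_task(params: Optional[Dict[str, Any]] = None) -> AttemptLog:
    attempt_log = AttemptLog()
    # Deep-copy so the log keeps the values as they were at call time.
    attempt_log.input_parameters = copy.deepcopy(params)
    # Stand-in for executable.execute_command(...), which now returns outputs.
    attempt_log.output_parameters = {**(params or {}), "result": 42}
    return attempt_log


log = run_task({"x": 1})
print(log.input_parameters, log.output_parameters)
```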
@@ -88,7 +95,13 @@ class FailNode(TerminalNode):
     def parse_from_config(cls, config: Dict[str, Any]) -> "FailNode":
         return cast("FailNode", super().parse_from_config(config))
 
-    def execute(self, mock=False, map_variable: TypeMapVariable = None, **kwargs) -> StepAttempt:
+    def execute(
+        self,
+        mock=False,
+        params: Optional[Dict[str, Any]] = None,
+        map_variable: TypeMapVariable = None,
+        **kwargs,
+    ) -> StepAttempt:
         """
         Execute the failure node.
         Set the run or branch log status to failure.
@@ -105,6 +118,7 @@ class FailNode(TerminalNode):
         try:
             attempt_log.start_time = str(datetime.now())
             attempt_log.status = defaults.SUCCESS
+            attempt_log.input_parameters = params
             #  could be a branch or run log
             run_or_branch_log = self._context.run_log_store.get_branch_log(
                 self._get_branch_log_name(map_variable), self._context.run_id
@@ -133,7 +147,13 @@ class SuccessNode(TerminalNode):
     def parse_from_config(cls, config: Dict[str, Any]) -> "SuccessNode":
         return cast("SuccessNode", super().parse_from_config(config))
 
-    def execute(self, mock=False, map_variable: TypeMapVariable = None, **kwargs) -> StepAttempt:
+    def execute(
+        self,
+        mock=False,
+        params: Optional[Dict[str, Any]] = None,
+        map_variable: TypeMapVariable = None,
+        **kwargs,
+    ) -> StepAttempt:
         """
         Execute the success node.
         Set the run or branch log status to success.
@@ -150,6 +170,7 @@ class SuccessNode(TerminalNode):
         try:
             attempt_log.start_time = str(datetime.now())
             attempt_log.status = defaults.SUCCESS
+            attempt_log.input_parameters = params
             #  could be a branch or run log
             run_or_branch_log = self._context.run_log_store.get_branch_log(
                 self._get_branch_log_name(map_variable), self._context.run_id
@@ -257,35 +278,11 @@ class ParallelNode(CompositeNode):
             executor (Executor): The Executor as per the use config
             **kwargs: Optional kwargs passed around
         """
-        from runnable import entrypoints
 
         self.fan_out(map_variable=map_variable, **kwargs)
 
-        jobs = []
-        # Given that we can have nesting and complex graphs, controlling the number of processes is hard.
-        # A better way is to actually submit the job to some process scheduler which does resource management
-        for internal_branch_name, branch in self.branches.items():
-            if self._context.executor._is_parallel_execution():
-                # Trigger parallel jobs
-                action = entrypoints.execute_single_brach
-                kwargs = {
-                    "configuration_file": self._context.configuration_file,
-                    "pipeline_file": self._context.pipeline_file,
-                    "branch_name": internal_branch_name.replace(" ", defaults.COMMAND_FRIENDLY_CHARACTER),
-                    "run_id": self._context.run_id,
-                    "map_variable": json.dumps(map_variable),
-                    "tag": self._context.tag,
-                }
-                process = multiprocessing.Process(target=action, kwargs=kwargs)
-                jobs.append(process)
-                process.start()
-
-            else:
-                # If parallel is not enabled, execute them sequentially
-                self._context.executor.execute_graph(branch, map_variable=map_variable, **kwargs)
-
-        for job in jobs:
-            job.join()  # Find status of the branches
+        for _, branch in self.branches.items():
+            self._context.executor.execute_graph(branch, map_variable=map_variable, **kwargs)
 
         self.fan_in(map_variable=map_variable, **kwargs)
 
@@ -418,7 +415,6 @@ class MapNode(CompositeNode):
             map_variable (dict): The map variables the graph belongs to
             **kwargs: Optional kwargs passed around
         """
-        from runnable import entrypoints
 
         iterate_on = None
         try:
@@ -433,34 +429,11 @@ class MapNode(CompositeNode):
 
         self.fan_out(map_variable=map_variable, **kwargs)
 
-        jobs = []
-        # Given that we can have nesting and complex graphs, controlling the number of processess is hard.
-        # A better way is to actually submit the job to some process scheduler which does resource management
         for iter_variable in iterate_on:
             effective_map_variable = map_variable or OrderedDict()
             effective_map_variable[self.iterate_as] = iter_variable
 
-            if self._context.executor._is_parallel_execution():
-                # Trigger parallel jobs
-                action = entrypoints.execute_single_brach
-                kwargs = {
-                    "configuration_file": self._context.configuration_file,
-                    "pipeline_file": self._context.pipeline_file,
-                    "branch_name": self.branch.internal_branch_name.replace(" ", defaults.COMMAND_FRIENDLY_CHARACTER),
-                    "run_id": self._context.run_id,
-                    "map_variable": json.dumps(effective_map_variable),
-                    "tag": self._context.tag,
-                }
-                process = multiprocessing.Process(target=action, kwargs=kwargs)
-                jobs.append(process)
-                process.start()
-
-            else:
-                # If parallel is not enabled, execute them sequentially
-                self._context.executor.execute_graph(self.branch, map_variable=effective_map_variable, **kwargs)
-
-        for job in jobs:
-            job.join()
+            self._context.executor.execute_graph(self.branch, map_variable=effective_map_variable, **kwargs)
 
         self.fan_in(map_variable=map_variable, **kwargs)
 
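
Both `ParallelNode` and `MapNode` drop their `multiprocessing` fan-out: branches now always go through `executor.execute_graph` in order, and any real parallelism is left to the executor or an external scheduler (the concern the removed comments raised). A stripped-down sketch of the resulting control flow (only `execute_graph`, `fan_out`, and `fan_in` are names from the diff):

```python
# Sketch: the simplified, sequential traversal of composite nodes.
from collections import OrderedDict


def execute_graph(branch, map_variable=None):
    print(f"executing {branch} with map_variable={map_variable}")


# ParallelNode.execute_as_graph: fan_out, then each branch in order, then fan_in.
branches = OrderedDict(branch_a="graph-a", branch_b="graph-b")
for _, branch in branches.items():
    execute_graph(branch)

# MapNode.execute_as_graph: one sequential run of the same branch per value.
for iter_variable in ["a", "b", "c"]:
    effective_map_variable = OrderedDict(chunk=iter_variable)
    execute_graph("map-branch", map_variable=effective_map_variable)
```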
@@ -652,7 +625,13 @@ class StubNode(ExecutableNode):
     def parse_from_config(cls, config: Dict[str, Any]) -> "StubNode":
         return cls(**config)
 
-    def execute(self, mock=False, map_variable: TypeMapVariable = None, **kwargs) -> StepAttempt:
+    def execute(
+        self,
+        mock=False,
+        params: Optional[Dict[str, Any]] = None,
+        map_variable: TypeMapVariable = None,
+        **kwargs,
+    ) -> StepAttempt:
         """
         Do Nothing node.
         We just send an success attempt log back to the caller
@@ -666,6 +645,7 @@ class StubNode(ExecutableNode):
             [type]: [description]
         """
         attempt_log = self._context.run_log_store.create_attempt_log()
+        attempt_log.input_parameters = params
 
         attempt_log.start_time = str(datetime.now())
         attempt_log.status = defaults.SUCCESS  # This is a dummy node and always will be success
runnable/integration.py CHANGED
@@ -84,7 +84,7 @@ def get_integration_handler(executor: "BaseExecutor", service: object) -> BaseIn
             logger.info(f"Identified an integration pattern {kls.obj}")
             integrations.append(kls.obj)
 
-    # Get all the implementations defined by the magnus package
+    # Get all the implementations defined by the runnable package
     for kls in BaseIntegration.__subclasses__():
         # Match the exact service type
         if kls.service_type == service_type and kls.service_provider == service_name:
@@ -95,7 +95,7 @@ def get_integration_handler(executor: "BaseExecutor", service: object) -> BaseIn
     if len(integrations) > 1:
         msg = (
             f"Multiple integrations between {executor.service_name} and {service_name} of type {service_type} found. "
-            "If you defined an integration pattern, please ensure it is specific and does not conflict with magnus "
+            "If you defined an integration pattern, please ensure it is specific and does not conflict with runnable "
            " implementations."
        )
        logger.exception(msg)
runnable/interaction.py CHANGED
@@ -58,6 +58,11 @@ def track_this(step: int = 0, **kwargs):
         os.environ[prefix + key + f"{defaults.STEP_INDICATOR}{step}"] = json.dumps(value)
 
 
+# TODO: Do we need the API for parameters?
+# If we still want them, what takes precedence? API or returns?
+# Once we decide that, collect the parameters and update them in tasks
+
+
 @check_context
 def set_parameter(**kwargs) -> None:
     """
@@ -279,7 +284,7 @@ def get_run_id() -> str:
     """
     Returns the run_id of the current run.
 
-    You can also access this from the environment variable `MAGNUS_RUN_ID`.
+    You can also access this from the environment variable `runnable_RUN_ID`.
     """
     return context.run_context.run_id
 
@@ -321,14 +326,14 @@ def get_experiment_tracker_context() -> ContextManager:
 
 def start_interactive_session(run_id: str = "", config_file: str = "", tag: str = "", parameters_file: str = ""):
     """
-    During interactive python coding, either via notebooks or ipython, you can start a magnus session by calling
+    During interactive python coding, either via notebooks or ipython, you can start a runnable session by calling
     this function. The executor would always be local executor as its interactive.
 
     If this was called during a pipeline/function/notebook execution, it will be ignored.
 
     Args:
         run_id (str, optional): The run id to use. Defaults to "" and would be created if not provided.
-        config_file (str, optional): The configuration file to use. Defaults to "" and magnus defaults.
+        config_file (str, optional): The configuration file to use. Defaults to "" and runnable defaults.
         tag (str, optional): The tag to attach to the run. Defaults to "".
         parameters_file (str, optional): The parameters file to use. Defaults to "".
     """
@@ -350,7 +355,7 @@ def start_interactive_session(run_id: str = "", config_file: str = "", tag: str
 
     executor = context.run_context.executor
 
-    utils.set_magnus_environment_variables(run_id=run_id, configuration_file=config_file, tag=tag)
+    utils.set_runnable_environment_variables(run_id=run_id, configuration_file=config_file, tag=tag)
 
     context.run_context.execution_plan = defaults.EXECUTION_PLAN.INTERACTIVE.value
     executor.prepare_for_graph_execution()
runnable/nodes.py CHANGED
@@ -64,7 +64,7 @@ class BaseNode(ABC, BaseModel):
     @classmethod
     def _get_internal_name_from_command_name(cls, command_name: str) -> str:
         """
-        Replace Magnus specific character (%) with whitespace.
+        Replace runnable specific character (%) with whitespace.
         The opposite of _command_friendly_name.
 
         Args:
@@ -274,7 +274,13 @@ class BaseNode(ABC, BaseModel):
         ...
 
     @abstractmethod
-    def execute(self, mock=False, map_variable: TypeMapVariable = None, **kwargs) -> StepAttempt:
+    def execute(
+        self,
+        mock=False,
+        params: Optional[Dict[str, Any]] = None,
+        map_variable: TypeMapVariable = None,
+        **kwargs,
+    ) -> StepAttempt:
         """
         The actual function that does the execution of the command in the config.
 
@@ -282,7 +288,7 @@ class BaseNode(ABC, BaseModel):
         composite nodes.
 
         Args:
-            executor (magnus.executor.BaseExecutor): The executor class
+            executor (runnable.executor.BaseExecutor): The executor class
             mock (bool, optional): Don't run, just pretend. Defaults to False.
             map_variable (str, optional): The value of the map iteration variable, if part of a map node.
                 Defaults to ''.
@@ -301,7 +307,7 @@ class BaseNode(ABC, BaseModel):
         Function should only be implemented for composite nodes like dag, map, parallel.
 
         Args:
-            executor (magnus.executor.BaseExecutor): The executor.
+            executor (runnable.executor.BaseExecutor): The executor.
 
         Raises:
             NotImplementedError: Base class, hence not implemented.
@@ -317,7 +323,7 @@ class BaseNode(ABC, BaseModel):
         Function should only be implemented for composite nodes like dag, map, parallel.
 
         Args:
-            executor (magnus.executor.BaseExecutor): The executor.
+            executor (runnable.executor.BaseExecutor): The executor.
             map_variable (str, optional): The value of the map iteration variable, if part of a map node.
 
         Raises:
@@ -334,7 +340,7 @@ class BaseNode(ABC, BaseModel):
         Function should only be implemented for composite nodes like dag, map, parallel.
 
         Args:
-            executor (magnus.executor.BaseExecutor): The executor.
+            executor (runnable.executor.BaseExecutor): The executor.
             map_variable (str, optional): The value of the map iteration variable, if part of a map node.
 
         Raises:
@@ -449,7 +455,13 @@ class CompositeNode(TraversalNode):
     def _get_max_attempts(self) -> int:
         raise Exception("This is a composite node and does not have a max_attempts")
 
-    def execute(self, mock=False, map_variable: TypeMapVariable = None, **kwargs) -> StepAttempt:
+    def execute(
+        self,
+        mock=False,
+        params: Optional[Dict[str, Any]] = None,
+        map_variable: TypeMapVariable = None,
+        **kwargs,
+    ) -> StepAttempt:
         raise Exception("This is a composite node and does not have an execute function")
 
 
runnable/parameters.py CHANGED
@@ -16,7 +16,7 @@ logger = logging.getLogger(defaults.LOGGER_NAME)
 
 def get_user_set_parameters(remove: bool = False) -> Dict[str, Any]:
     """
-    Scans the environment variables for any user returned parameters that have a prefix MAGNUS_PRM_.
+    Scans the environment variables for any user returned parameters that have a prefix runnable_PRM_.
 
     This function does not deal with any type conversion of the parameters.
     It just deserializes the parameters and returns them as a dictionary.
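
The docstring above describes an environment-variable scan with the renamed prefix (spelled `runnable_PRM_` in the docstring). A minimal sketch of that behavior, assuming JSON-serialized values and an upper-case `RUNNABLE_PRM_` prefix; the real prefix constant lives in `defaults` and is not shown in this diff:

```python
# Sketch of the scan the docstring describes; the prefix is an assumption.
import json
import os
from typing import Any, Dict

PREFIX = "RUNNABLE_PRM_"


def get_user_set_parameters(remove: bool = False) -> Dict[str, Any]:
    parameters: Dict[str, Any] = {}
    for env_var, value in list(os.environ.items()):
        if env_var.startswith(PREFIX):
            key = env_var[len(PREFIX):].lower()
            parameters[key] = json.loads(value)  # deserialize only, no type coercion
            if remove:
                del os.environ[env_var]
    return parameters
```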
runnable/sdk.py CHANGED
@@ -7,7 +7,6 @@ from typing import Any, Dict, List, Optional, Union
 
 from pydantic import BaseModel, ConfigDict, Field, PrivateAttr, computed_field, field_validator, model_validator
 from rich import print
-from ruamel.yaml import YAML
 from typing_extensions import Self
 
 from runnable import defaults, entrypoints, graph, utils
@@ -33,7 +32,7 @@ class Catalog(BaseModel):
         put (List[str]): List of glob patterns to put into central catalog from the compute data folder.
 
     Examples:
-        >>> from magnus import Catalog, Task
+        >>> from runnable import Catalog, Task
         >>> catalog = Catalog(compute_data_folder="/path/to/data", get=["*.csv"], put=["*.csv"])
 
         >>> task = Task(name="task", catalog=catalog, command="echo 'hello'")
@@ -107,6 +106,9 @@ class BaseTraversal(ABC, BaseModel):
         ...
 
 
+## TODO: Add python task, shell task, and notebook task.
+
+
 class Task(BaseTraversal):
     """
     An execution node of the pipeline.
@@ -133,10 +135,10 @@ class Task(BaseTraversal):
         executor:
           type: local-container
           config:
-            docker_image: "magnus/magnus:latest"
+            docker_image: "runnable/runnable:latest"
           overrides:
             custom_docker_image:
-              docker_image: "magnus/magnus:custom"
+              docker_image: "runnable/runnable:custom"
         ```
         ### Task specific configuration
         ```python
@@ -148,7 +150,7 @@ class Task(BaseTraversal):
         optional_ploomber_args (Optional[Dict[str, Any]]): Any optional ploomber args.
             Only used when command_type is 'notebook', defaults to {}
         output_cell_tag (Optional[str]): The tag of the output cell.
-            Only used when command_type is 'notebook', defaults to "magnus_output"
+            Only used when command_type is 'notebook', defaults to "runnable_output"
         terminate_with_failure (bool): Whether to terminate the pipeline with a failure after this node.
         terminate_with_success (bool): Whether to terminate the pipeline with a success after this node.
         on_failure (str): The name of the node to execute if the step fails.
@@ -385,6 +387,9 @@ class Pipeline(BaseModel):
 
         self._dag.check_graph()
 
+    def return_dag(self) -> graph.Graph:
+        return self._dag
+
     def execute(
         self,
         configuration_file: str = "",
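
`return_dag` exposes the pipeline's private `_dag` after validation. A hypothetical usage sketch; the `Pipeline` constructor arguments shown here are assumptions, only the `Task` call mirrors the `Catalog` docstring above:

```python
# Hypothetical usage of the new Pipeline.return_dag() accessor.
from runnable import Pipeline, Task  # Task usage mirrors the Catalog docstring

hello = Task(name="hello", command="echo 'hello'", terminate_with_success=True)
pipeline = Pipeline(steps=[hello], start_at=hello)  # constructor args assumed

dag = pipeline.return_dag()  # the validated graph.Graph behind the pipeline
print(dag)
```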
@@ -393,7 +398,6 @@
         parameters_file: str = "",
         use_cached: str = "",
         log_level: str = defaults.LOG_LEVEL,
-        output_pipeline_definition: str = "magnus-pipeline.yaml",
     ):
         """
         *Execute* the Pipeline.
@@ -408,7 +412,7 @@
 
         Args:
             configuration_file (str, optional): The path to the configuration file. Defaults to "".
-                The configuration file can be overridden by the environment variable MAGNUS_CONFIGURATION_FILE.
+                The configuration file can be overridden by the environment variable runnable_CONFIGURATION_FILE.
 
             run_id (str, optional): The ID of the run. Defaults to "".
             tag (str, optional): The tag of the run. Defaults to "".
@@ -419,18 +423,18 @@
                 Provide the run_id of the older execution to recover.
 
             log_level (str, optional): The log level. Defaults to defaults.LOG_LEVEL.
-            output_pipeline_definition (str, optional): The path to the output pipeline definition file.
-                Defaults to "magnus-pipeline.yaml".
-
-                Only applicable for the execution via SDK for non ```local``` executors.
         """
-        from runnable.extensions.executor.local.implementation import LocalExecutor
-        from runnable.extensions.executor.mocked.implementation import MockedExecutor
+
+        # py_to_yaml is used by non local executors to generate the yaml representation of the pipeline.
+        py_to_yaml = os.environ.get("RUNNABLE_PY_TO_YAML", "false")
+
+        if py_to_yaml == "true":
+            return
 
         logger.setLevel(log_level)
 
         run_id = utils.generate_run_id(run_id=run_id)
-        configuration_file = os.environ.get("MAGNUS_CONFIGURATION_FILE", configuration_file)
+        configuration_file = os.environ.get("RUNNABLE_CONFIGURATION_FILE", configuration_file)
         run_context = entrypoints.prepare_configurations(
             configuration_file=configuration_file,
             run_id=run_id,
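
`execute()` now returns immediately when `RUNNABLE_PY_TO_YAML` is `"true"`, so a separate transpilation step can import and run the user's pipeline script without actually executing the graph. A sketch of how such a gate could be driven (the driver function here is an assumption, not part of the package):

```python
# Sketch: a hypothetical driver for the RUNNABLE_PY_TO_YAML gate.
import os


def transpile(pipeline_main):
    os.environ["RUNNABLE_PY_TO_YAML"] = "true"
    try:
        # Calling user code triggers Pipeline.execute(), which now returns
        # early instead of preparing and executing the graph.
        pipeline_main()
        # ...the driver would then walk the constructed Pipeline to emit YAML.
    finally:
        os.environ.pop("RUNNABLE_PY_TO_YAML", None)
```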
@@ -440,7 +444,7 @@
         )
 
         run_context.execution_plan = defaults.EXECUTION_PLAN.CHAINED.value
-        utils.set_magnus_environment_variables(run_id=run_id, configuration_file=configuration_file, tag=tag)
+        utils.set_runnable_environment_variables(run_id=run_id, configuration_file=configuration_file, tag=tag)
 
         dag_definition = self._dag.model_dump(by_alias=True, exclude_none=True)
 
@@ -449,17 +453,14 @@
         print("Working with context:")
         print(run_context)
 
-        if not (isinstance(run_context.executor, LocalExecutor) or isinstance(run_context.executor, MockedExecutor)):
-            logger.debug(run_context.dag.model_dump(by_alias=True))
-            yaml = YAML()
+        if not run_context.executor._local:
+            # We are working with non local executor
+            import inspect
 
-            with open(output_pipeline_definition, "w", encoding="utf-8") as f:
-                yaml.dump(
-                    {"dag": run_context.dag.model_dump(by_alias=True, exclude_none=True)},
-                    f,
-                )
+            caller_stack = inspect.stack()[1]
+            module_to_call = f"{caller_stack.filename.replace('/', '.').replace('.py', '')}.{caller_stack.function}"
 
-            return
+            run_context.pipeline_file = f"{module_to_call}.py"
 
         # Prepare for graph execution
         run_context.executor.prepare_for_graph_execution()
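
For non-local executors, the pipeline file is now derived from the caller's stack frame instead of being dumped to `magnus-pipeline.yaml`. A standalone sketch of that derivation, using the same `str.replace` chain as above (note that absolute filenames keep their leading dot after the replace):

```python
# Sketch: deriving a dotted module-and-function path from the caller's frame.
import inspect


def caller_module_and_function() -> str:
    caller_stack = inspect.stack()[1]  # the frame of whoever called us
    module = caller_stack.filename.replace("/", ".").replace(".py", "")
    return f"{module}.{caller_stack.function}"


def entry():
    # For a file "examples/demo.py" this yields "examples.demo.entry";
    # "/abs/path/demo.py" would yield ".abs.path.demo.entry".
    print(caller_module_and_function())


entry()
```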
@@ -467,4 +468,5 @@
 
         logger.info("Executing the graph")
         run_context.executor.execute_graph(dag=run_context.dag)
-        return run_context.run_log_store.get_run_log_by_id(run_id=run_context.run_id)
+        if run_context.executor._local:
+            return run_context.run_log_store.get_run_log_by_id(run_id=run_context.run_id)