runnable-0.12.3-py3-none-any.whl → runnable-0.14.0-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (64)
  1. runnable/__init__.py +0 -11
  2. runnable/catalog.py +27 -5
  3. runnable/cli.py +122 -26
  4. runnable/datastore.py +71 -35
  5. runnable/defaults.py +0 -1
  6. runnable/entrypoints.py +107 -32
  7. runnable/exceptions.py +6 -2
  8. runnable/executor.py +28 -9
  9. runnable/graph.py +37 -12
  10. runnable/integration.py +7 -2
  11. runnable/nodes.py +15 -17
  12. runnable/parameters.py +27 -8
  13. runnable/pickler.py +1 -1
  14. runnable/sdk.py +101 -33
  15. runnable/secrets.py +3 -1
  16. runnable/tasks.py +246 -34
  17. runnable/utils.py +41 -13
  18. {runnable-0.12.3.dist-info → runnable-0.14.0.dist-info}/METADATA +25 -31
  19. runnable-0.14.0.dist-info/RECORD +24 -0
  20. {runnable-0.12.3.dist-info → runnable-0.14.0.dist-info}/WHEEL +1 -1
  21. runnable-0.14.0.dist-info/entry_points.txt +40 -0
  22. runnable/extensions/__init__.py +0 -0
  23. runnable/extensions/catalog/__init__.py +0 -21
  24. runnable/extensions/catalog/file_system/__init__.py +0 -0
  25. runnable/extensions/catalog/file_system/implementation.py +0 -234
  26. runnable/extensions/catalog/k8s_pvc/__init__.py +0 -0
  27. runnable/extensions/catalog/k8s_pvc/implementation.py +0 -16
  28. runnable/extensions/catalog/k8s_pvc/integration.py +0 -59
  29. runnable/extensions/executor/__init__.py +0 -649
  30. runnable/extensions/executor/argo/__init__.py +0 -0
  31. runnable/extensions/executor/argo/implementation.py +0 -1194
  32. runnable/extensions/executor/argo/specification.yaml +0 -51
  33. runnable/extensions/executor/k8s_job/__init__.py +0 -0
  34. runnable/extensions/executor/k8s_job/implementation_FF.py +0 -259
  35. runnable/extensions/executor/k8s_job/integration_FF.py +0 -69
  36. runnable/extensions/executor/local/__init__.py +0 -0
  37. runnable/extensions/executor/local/implementation.py +0 -71
  38. runnable/extensions/executor/local_container/__init__.py +0 -0
  39. runnable/extensions/executor/local_container/implementation.py +0 -446
  40. runnable/extensions/executor/mocked/__init__.py +0 -0
  41. runnable/extensions/executor/mocked/implementation.py +0 -154
  42. runnable/extensions/executor/retry/__init__.py +0 -0
  43. runnable/extensions/executor/retry/implementation.py +0 -168
  44. runnable/extensions/nodes.py +0 -855
  45. runnable/extensions/run_log_store/__init__.py +0 -0
  46. runnable/extensions/run_log_store/chunked_file_system/__init__.py +0 -0
  47. runnable/extensions/run_log_store/chunked_file_system/implementation.py +0 -111
  48. runnable/extensions/run_log_store/chunked_k8s_pvc/__init__.py +0 -0
  49. runnable/extensions/run_log_store/chunked_k8s_pvc/implementation.py +0 -21
  50. runnable/extensions/run_log_store/chunked_k8s_pvc/integration.py +0 -61
  51. runnable/extensions/run_log_store/db/implementation_FF.py +0 -157
  52. runnable/extensions/run_log_store/db/integration_FF.py +0 -0
  53. runnable/extensions/run_log_store/file_system/__init__.py +0 -0
  54. runnable/extensions/run_log_store/file_system/implementation.py +0 -140
  55. runnable/extensions/run_log_store/generic_chunked.py +0 -557
  56. runnable/extensions/run_log_store/k8s_pvc/__init__.py +0 -0
  57. runnable/extensions/run_log_store/k8s_pvc/implementation.py +0 -21
  58. runnable/extensions/run_log_store/k8s_pvc/integration.py +0 -56
  59. runnable/extensions/secrets/__init__.py +0 -0
  60. runnable/extensions/secrets/dotenv/__init__.py +0 -0
  61. runnable/extensions/secrets/dotenv/implementation.py +0 -100
  62. runnable-0.12.3.dist-info/RECORD +0 -64
  63. runnable-0.12.3.dist-info/entry_points.txt +0 -41
  64. {runnable-0.12.3.dist-info → runnable-0.14.0.dist-info/licenses}/LICENSE +0 -0
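Note that the whole runnable/extensions subtree is removed from this wheel in 0.14.0 while entry_points.txt is rewritten (41 entries out, 40 in). Both versions ship an entry_points.txt, so runnable appears to resolve its pluggable services (executor, catalog, run_log_store, secrets) through Python entry points, which is what lets the implementations move out of the core package. As a hedged illustration of that mechanism — the group and plugin names below are hypothetical, not read from the wheel — entry-point discovery looks like this:

from importlib.metadata import entry_points

# Hypothetical group name; the real group names are listed in entry_points.txt.
GROUP = "runnable.executors"

def load_executor(name: str):
    """Return the plugin object registered under GROUP with the given name."""
    for ep in entry_points(group=GROUP):
        if ep.name == name:
            return ep.load()  # imports the module and returns the referenced object
    raise KeyError(f"no executor named {name!r} in group {GROUP!r}")

# Usage, assuming some installed package registered 'local-container' in this group:
# executor_cls = load_executor("local-container")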
runnable/extensions/executor/local_container/implementation.py
@@ -1,446 +0,0 @@
- import logging
- from pathlib import Path
- from typing import Dict, cast
-
- from pydantic import Field
- from rich import print
-
- from runnable import console, defaults, task_console, utils
- from runnable.datastore import StepLog
- from runnable.defaults import TypeMapVariable
- from runnable.extensions.executor import GenericExecutor
- from runnable.extensions.nodes import TaskNode
- from runnable.integration import BaseIntegration
- from runnable.nodes import BaseNode
-
- logger = logging.getLogger(defaults.LOGGER_NAME)
-
-
- class LocalContainerExecutor(GenericExecutor):
-     """
-     In local-container mode, we execute all the commands in a container.
-
-     Ensure that the local compute has enough resources to finish all your jobs.
-
-     The image of the run can either be provided as a default in the configuration of the execution engine,
-     i.e.:
-     execution:
-       type: 'local-container'
-       config:
-         docker_image: the image you want the code to run in.
-
-     or the default image can be overridden for a single node by providing a docker_image in the step config,
-     i.e.:
-     dag:
-       steps:
-         step:
-           executor_config:
-             local-container:
-               docker_image: The image that you want that single step to run in.
-     This image would be used for that step only.
-
-     This mode does not build the docker image with the latest code for you; it is left to the user to build
-     it and to ensure that the docker image provided is the correct one.
-
-     Example config:
-     execution:
-       type: local-container
-       config:
-         docker_image: The default docker image to use if the node does not provide one.
-     """
-
-     service_name: str = "local-container"
-     docker_image: str
-     auto_remove_container: bool = True
-     run_in_local: bool = False
-     environment: Dict[str, str] = Field(default_factory=dict)
-
-     _local: bool = False
-
-     _container_log_location = "/tmp/run_logs/"
-     _container_catalog_location = "/tmp/catalog/"
-     _container_secrets_location = "/tmp/dotenv"
-     _volumes: Dict[str, Dict[str, str]] = {}
-
-     def add_code_identities(self, node: BaseNode, step_log: StepLog, **kwargs):
-         """
-         Call the base class to add the git code identity, then add the docker identity.
-
-         Args:
-             node (BaseNode): The node we are adding the code identity for
-             step_log (Object): The step log corresponding to the node
-         """
-
-         super().add_code_identities(node, step_log)
-
-         if node.node_type in ["success", "fail"]:
-             # No need to add code identities if we are in a success or fail node
-             return
-
-         executor_config = self._resolve_executor_config(node)
-
-         docker_image = executor_config.get("docker_image", None)
-         if docker_image:
-             code_id = self._context.run_log_store.create_code_identity()
-
-             code_id.code_identifier = utils.get_local_docker_image_id(docker_image)
-             code_id.code_identifier_type = "docker"
-             code_id.code_identifier_dependable = True
-             code_id.code_identifier_url = "local docker host"
-             step_log.code_identities.append(code_id)
-
-     def execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs):
-         """
-         We are already in the container; we just execute the node.
-         The node is already prepared for execution.
-         """
-         return self._execute_node(node, map_variable, **kwargs)
-
-     def execute_from_graph(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs):
-         """
-         This is the entry point from the graph execution.
-
-         While self.execute_graph is responsible for traversing the graph, this function is responsible for
-         the actual execution of the node.
-
-         If the node type is:
-             * task: We can delegate to _execute_node after checking the eligibility for re-run in cases of a re-run
-             * success: We can delegate to _execute_node
-             * fail: We can delegate to _execute_node
-
-         For nodes that are internally graphs:
-             * parallel: Delegate the responsibility of execution to node.execute_as_graph()
-             * dag: Delegate the responsibility of execution to node.execute_as_graph()
-             * map: Delegate the responsibility of execution to node.execute_as_graph()
-
-         Transpilers will NEVER use this method and will NEVER call this method.
-         This method should only be used by interactive executors.
-
-         Args:
-             node (Node): The node to execute
-             map_variable (dict, optional): If the node is of a map state, this corresponds to the value of the iterable.
-                 Defaults to None.
-         """
-         step_log = self._context.run_log_store.create_step_log(node.name, node._get_step_log_name(map_variable))
-
-         self.add_code_identities(node=node, step_log=step_log)
-
-         step_log.step_type = node.node_type
-         step_log.status = defaults.PROCESSING
-
-         self._context.run_log_store.add_step_log(step_log, self._context.run_id)
-
-         logger.info(f"Executing node: {node.get_summary()}")
-
-         # Add the step log to the database as per the situation.
-         # If it's a terminal node, complete it now
-         if node.node_type in ["success", "fail"]:
-             self._execute_node(node, map_variable=map_variable, **kwargs)
-             return
-
-         # We call an internal function to iterate the sub graphs and execute them
-         if node.is_composite:
-             node.execute_as_graph(map_variable=map_variable, **kwargs)
-             return
-
-         task_console.export_text(clear=True)
-
-         task_name = node._resolve_map_placeholders(node.internal_name, map_variable)
-         console.print(f":runner: Executing the node {task_name} ... ", style="bold color(208)")
-         self.trigger_job(node=node, map_variable=map_variable, **kwargs)
-
-     def execute_job(self, node: TaskNode):
-         """
-         Set up the step log and call the execute node.
-
-         Args:
-             node (BaseNode): The node to execute as a job
-         """
-
-         step_log = self._context.run_log_store.create_step_log(node.name, node._get_step_log_name(map_variable=None))
-
-         self.add_code_identities(node=node, step_log=step_log)
-
-         step_log.step_type = node.node_type
-         step_log.status = defaults.PROCESSING
-         self._context.run_log_store.add_step_log(step_log, self._context.run_id)
-
-         command = utils.get_job_execution_command(node)
-         self._spin_container(node=node, command=command)
-
-         # Check the step log status and warn if necessary. Docker errors are generally suppressed.
-         step_log = self._context.run_log_store.get_step_log(
-             node._get_step_log_name(map_variable=None), self._context.run_id
-         )
-         if step_log.status != defaults.SUCCESS:
-             msg = (
-                 "Node execution inside the container failed. Please check the logs.\n"
-                 "Note: If you do not see any docker issue from your side and the code works properly on local execution, "
-                 "please raise a bug report."
-             )
-             logger.warning(msg)
-
-     def trigger_job(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs):
-         """
-         We come into this step via execute_from_graph; use trigger_job to spin up the container.
-
-         If the config has "run_in_local: True", we compute it on the local system instead of in a container.
-         In local container execution, we just spin up the container to execute runnable execute_single_node.
-
-         Args:
-             node (BaseNode): The node we are currently executing
-             map_variable (str, optional): If the node is part of a map branch. Defaults to ''.
-         """
-         executor_config = self._resolve_executor_config(node)
-         auto_remove_container = executor_config.get("auto_remove_container", True)
-
-         logger.debug("Here is the resolved executor config")
-         logger.debug(executor_config)
-
-         command = utils.get_node_execution_command(node, map_variable=map_variable)
-
-         self._spin_container(
-             node=node,
-             command=command,
-             map_variable=map_variable,
-             auto_remove_container=auto_remove_container,
-             **kwargs,
-         )
-
-         step_log = self._context.run_log_store.get_step_log(node._get_step_log_name(map_variable), self._context.run_id)
-         if step_log.status != defaults.SUCCESS:
-             msg = (
-                 "Node execution inside the container failed. Please check the logs.\n"
-                 "Note: If you do not see any docker issue from your side and the code works properly on local execution, "
-                 "please raise a bug report."
-             )
-             logger.error(msg)
-             step_log.status = defaults.FAIL
-             self._context.run_log_store.add_step_log(step_log, self._context.run_id)
-
-     def _spin_container(
-         self,
-         node: BaseNode,
-         command: str,
-         map_variable: TypeMapVariable = None,
-         auto_remove_container: bool = True,
-         **kwargs,
-     ):
-         """
-         During the flow run, we have to spin up a container with the docker image mentioned
-         and the right log locations.
-         """
-         # Conditional import
-         import docker  # pylint: disable=C0415
-
-         try:
-             client = docker.from_env()
-             api_client = docker.APIClient()
-         except Exception as ex:
-             logger.exception("Could not get access to docker")
-             raise Exception("Could not get the docker socket file, do you have docker installed?") from ex
-
-         try:
-             logger.info(f"Running the command {command}")
-             print(command)
-             # Overrides global config with local
-             executor_config = self._resolve_executor_config(node)
-
-             docker_image = executor_config.get("docker_image", None)
-             environment = executor_config.get("environment", {})
-             environment.update(self._context.variables)
-             if not docker_image:
-                 raise Exception(
-                     f"Please provide a docker_image using executor_config of the step {node.name} or at global config"
-                 )
-
-             # TODO: Should we consider using getpass.getuser() when running the docker container? Volume permissions
-             container = client.containers.create(
-                 image=docker_image,
-                 command=command,
-                 auto_remove=False,
-                 volumes=self._volumes,
-                 network_mode="host",
-                 environment=environment,
-             )
-
-             # print(container.__dict__)
-
-             container.start()
-             stream = api_client.logs(container=container.id, timestamps=True, stream=True, follow=True)
-             while True:
-                 try:
-                     output = next(stream).decode("utf-8")
-                     output = output.strip("\r\n")
-                     logger.info(output)
-                     print(output)
-                 except StopIteration:
-                     logger.info("Docker Run completed")
-                     break
-
-             exit_status = api_client.inspect_container(container.id)["State"]["ExitCode"]
-
-             if auto_remove_container:
-                 container.remove(force=True)
-
-             if exit_status != 0:
-                 msg = f"Docker command failed with exit code {exit_status}"
-                 raise Exception(msg)
-
-         except Exception as _e:
-             logger.exception("Problems with spinning up or running the container")
-             raise _e
-
-
- class LocalContainerComputeFileSystemRunLogstore(BaseIntegration):
-     """
-     Integration between local container and file system run log store
-     """
-
-     executor_type = "local-container"
-     service_type = "run_log_store"  # One of secret, catalog, datastore
-     service_provider = "file-system"  # The actual implementation of the service
-
-     def configure_for_traversal(self, **kwargs):
-         from runnable.extensions.run_log_store.file_system.implementation import (
-             FileSystemRunLogstore,
-         )
-
-         self.executor = cast(LocalContainerExecutor, self.executor)
-         self.service = cast(FileSystemRunLogstore, self.service)
-
-         write_to = self.service.log_folder_name
-         self.executor._volumes[str(Path(write_to).resolve())] = {
-             "bind": f"{self.executor._container_log_location}",
-             "mode": "rw",
-         }
-
-     def configure_for_execution(self, **kwargs):
-         from runnable.extensions.run_log_store.file_system.implementation import (
-             FileSystemRunLogstore,
-         )
-
-         self.executor = cast(LocalContainerExecutor, self.executor)
-         self.service = cast(FileSystemRunLogstore, self.service)
-
-         self.service.log_folder = self.executor._container_log_location
-
-
- class LocalContainerComputeChunkedFS(BaseIntegration):
-     """
-     Integration between local container and chunked file system run log store
-     """
-
-     executor_type = "local-container"
-     service_type = "run_log_store"  # One of secret, catalog, datastore
-     service_provider = "chunked-fs"  # The actual implementation of the service
-
-     def configure_for_traversal(self, **kwargs):
-         from runnable.extensions.run_log_store.chunked_file_system.implementation import (
-             ChunkedFileSystemRunLogStore,
-         )
-
-         self.executor = cast(LocalContainerExecutor, self.executor)
-         self.service = cast(ChunkedFileSystemRunLogStore, self.service)
-
-         write_to = self.service.log_folder
-         self.executor._volumes[str(Path(write_to).resolve())] = {
-             "bind": f"{self.executor._container_log_location}",
-             "mode": "rw",
-         }
-
-     def configure_for_execution(self, **kwargs):
-         from runnable.extensions.run_log_store.chunked_file_system.implementation import (
-             ChunkedFileSystemRunLogStore,
-         )
-
-         self.executor = cast(LocalContainerExecutor, self.executor)
-         self.service = cast(ChunkedFileSystemRunLogStore, self.service)
-
-         self.service.log_folder = self.executor._container_log_location
-
-
- class LocalContainerComputeFileSystemCatalog(BaseIntegration):
-     """
-     Integration pattern between local container and file system catalog
-     """
-
-     executor_type = "local-container"
-     service_type = "catalog"  # One of secret, catalog, datastore
-     service_provider = "file-system"  # The actual implementation of the service
-
-     def configure_for_traversal(self, **kwargs):
-         from runnable.extensions.catalog.file_system.implementation import (
-             FileSystemCatalog,
-         )
-
-         self.executor = cast(LocalContainerExecutor, self.executor)
-         self.service = cast(FileSystemCatalog, self.service)
-
-         catalog_location = self.service.catalog_location
-         self.executor._volumes[str(Path(catalog_location).resolve())] = {
-             "bind": f"{self.executor._container_catalog_location}",
-             "mode": "rw",
-         }
-
-     def configure_for_execution(self, **kwargs):
-         from runnable.extensions.catalog.file_system.implementation import (
-             FileSystemCatalog,
-         )
-
-         self.executor = cast(LocalContainerExecutor, self.executor)
-         self.service = cast(FileSystemCatalog, self.service)
-
-         self.service.catalog_location = self.executor._container_catalog_location
-
-
- class LocalContainerComputeDotEnvSecrets(BaseIntegration):
-     """
-     Integration between local container and dotenv secrets
-     """
-
-     executor_type = "local-container"
-     service_type = "secrets"  # One of secret, catalog, datastore
-     service_provider = "dotenv"  # The actual implementation of the service
-
-     def validate(self, **kwargs):
-         logger.warning("Using dotenv secrets for non-local deployments is not ideal; consider other options")
-
-     def configure_for_traversal(self, **kwargs):
-         from runnable.extensions.secrets.dotenv.implementation import DotEnvSecrets
-
-         self.executor = cast(LocalContainerExecutor, self.executor)
-         self.service = cast(DotEnvSecrets, self.service)
-
-         secrets_location = self.service.secrets_location
-         self.executor._volumes[str(Path(secrets_location).resolve())] = {
-             "bind": f"{self.executor._container_secrets_location}",
-             "mode": "ro",
-         }
-
-     def configure_for_execution(self, **kwargs):
-         from runnable.extensions.secrets.dotenv.implementation import DotEnvSecrets
-
-         self.executor = cast(LocalContainerExecutor, self.executor)
-         self.service = cast(DotEnvSecrets, self.service)
-
-         self.service.location = self.executor._container_secrets_location
-
-
- class LocalContainerComputeEnvSecretsManager(BaseIntegration):
-     """
-     Integration between local container and env secrets manager
-     """
-
-     executor_type = "local-container"
-     service_type = "secrets"  # One of secret, catalog, datastore
-     service_provider = "env-secrets-manager"  # The actual implementation of the service
-
-     def validate(self, **kwargs):
-         msg = (
-             "Local container executions cannot be used with the environment secrets manager. "
-             "Please use a supported secrets manager."
-         )
-         logger.exception(msg)
-         raise Exception(msg)
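For readers following _spin_container above: the create/start/stream-logs/inspect-exit-code sequence is the standard docker-py pattern. Below is a minimal, self-contained sketch of the same flow, not runnable's actual code; the image name, command, and volume paths are placeholders, and container.wait() stands in for the APIClient inspection used in the removed implementation.

import docker  # docker-py; assumes the package is installed and a docker daemon is reachable

def run_in_container(image: str, command: str, volumes: dict) -> int:
    """Create a container, stream its logs, and return its exit code."""
    client = docker.from_env()

    container = client.containers.create(
        image=image,
        command=command,
        volumes=volumes,    # e.g. {"/host/run_logs": {"bind": "/tmp/run_logs/", "mode": "rw"}}
        network_mode="host",
        auto_remove=False,  # keep the container around so the exit code can be read
    )
    container.start()

    # Stream logs line by line while the container runs
    for line in container.logs(stream=True, follow=True):
        print(line.decode("utf-8").rstrip("\r\n"))

    exit_status = container.wait()["StatusCode"]
    container.remove(force=True)
    return exit_status

# Hypothetical usage; image, command, and paths are placeholders.
status = run_in_container(
    image="my-pipeline:latest",
    command="python -c 'print(42)'",
    volumes={"/tmp/host_logs": {"bind": "/tmp/run_logs/", "mode": "rw"}},
)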
runnable/extensions/executor/mocked/implementation.py
@@ -1,154 +0,0 @@
- import copy
- import logging
- from typing import Any, Dict, Type, cast
-
- from pydantic import ConfigDict, Field
-
- from runnable import context, defaults
- from runnable.defaults import TypeMapVariable
- from runnable.extensions.executor import GenericExecutor
- from runnable.extensions.nodes import TaskNode
- from runnable.nodes import BaseNode
- from runnable.tasks import BaseTaskType
-
- logger = logging.getLogger(defaults.LOGGER_NAME)
-
-
- def create_executable(params: Dict[str, Any], model: Type[BaseTaskType], node_name: str) -> BaseTaskType:
-     class EasyModel(model):  # type: ignore
-         model_config = ConfigDict(extra="ignore")
-
-     swallow_all = EasyModel(node_name=node_name, **params)
-     return swallow_all
-
-
- class MockedExecutor(GenericExecutor):
-     service_name: str = "mocked"
-     _local_executor: bool = True
-
-     model_config = ConfigDict(extra="ignore")
-
-     patches: Dict[str, Any] = Field(default_factory=dict)
-
-     @property
-     def _context(self):
-         return context.run_context
-
-     def execute_from_graph(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs):
-         """
-         This is the entry point from the graph execution.
-
-         While self.execute_graph is responsible for traversing the graph, this function is responsible for
-         the actual execution of the node.
-
-         If the node type is:
-             * task: We can delegate to _execute_node after checking the eligibility for re-run in cases of a re-run
-             * success: We can delegate to _execute_node
-             * fail: We can delegate to _execute_node
-
-         For nodes that are internally graphs:
-             * parallel: Delegate the responsibility of execution to node.execute_as_graph()
-             * dag: Delegate the responsibility of execution to node.execute_as_graph()
-             * map: Delegate the responsibility of execution to node.execute_as_graph()
-
-         Transpilers will NEVER use this method and will NEVER call this method.
-         This method should only be used by interactive executors.
-
-         Args:
-             node (Node): The node to execute
-             map_variable (dict, optional): If the node is of a map state, this corresponds to the value of the iterable.
-                 Defaults to None.
-         """
-         step_log = self._context.run_log_store.create_step_log(node.name, node._get_step_log_name(map_variable))
-
-         self.add_code_identities(node=node, step_log=step_log)
-
-         step_log.step_type = node.node_type
-         step_log.status = defaults.PROCESSING
-
-         self._context.run_log_store.add_step_log(step_log, self._context.run_id)
-
-         logger.info(f"Executing node: {node.get_summary()}")
-
-         # Add the step log to the database as per the situation.
-         # If it's a terminal node, complete it now
-         if node.node_type in ["success", "fail"]:
-             self._context.run_log_store.add_step_log(step_log, self._context.run_id)
-             self._execute_node(node, map_variable=map_variable, **kwargs)
-             return
-
-         # We call an internal function to iterate the sub graphs and execute them
-         if node.is_composite:
-             self._context.run_log_store.add_step_log(step_log, self._context.run_id)
-             node.execute_as_graph(map_variable=map_variable, **kwargs)
-             return
-
-         if node.name not in self.patches:
-             # The node is not patched, so mock it
-             self._execute_node(node, map_variable=map_variable, mock=True, **kwargs)
-         else:
-             # The node is patched; run it with the command taken from the patch value
-             node_to_send: TaskNode = cast(TaskNode, node).model_copy(deep=True)
-             executable_type = node_to_send.executable.__class__
-             executable = create_executable(
-                 self.patches[node.name],
-                 executable_type,
-                 node_name=node.name,
-             )
-             node_to_send.executable = executable
-             self._execute_node(node_to_send, map_variable=map_variable, mock=False, **kwargs)
-
-     def _resolve_executor_config(self, node: BaseNode):
-         """
-         The overrides section can contain specific overrides to the global executor config.
-         To avoid too much clutter in the dag definition, we allow the configuration file to have an overrides block.
-         Nodes can override the global config by referring to a key in the overrides.
-
-         This function also applies variables to the effective node config.
-
-         For example:
-         # configuration.yaml
-         execution:
-           type: cloud-implementation
-           config:
-             k1: v1
-             k3: v3
-           overrides:
-             custom_config:
-               k1: v11
-               k2: v2  # Could be a mapping internally.
-
-         # in pipeline definition.yaml
-         dag:
-           steps:
-             step1:
-               overrides:
-                 cloud-implementation: custom_config
-
-         This method should resolve the node_config to {'k1': 'v11', 'k2': 'v2', 'k3': 'v3'}
-
-         Args:
-             node (BaseNode): The current node being processed.
-         """
-         effective_node_config = copy.deepcopy(self.model_dump())
-
-         return effective_node_config
-
-     def execute_job(self, node: TaskNode):
-         pass
-
-     def execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs):
-         """
-         The entry point for all executors apart from local.
-         We have already prepared for node execution.
-
-         Args:
-             node (BaseNode): The node to execute
-             map_variable (dict, optional): If the node is part of a map, send in the map dictionary. Defaults to None.
-         """
-         ...
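To make the patching mechanism of MockedExecutor concrete: create_executable subclasses the task's pydantic model with extra="ignore", so a patch dictionary only needs to carry the fields it wants to replace and any unknown keys are silently dropped instead of failing validation. Here is a minimal, self-contained sketch of that trick; ShellTask and its fields are illustrative stand-ins, not runnable's actual task types.

from typing import Any, Dict, Type
from pydantic import BaseModel, ConfigDict

class ShellTask(BaseModel):
    """Stand-in for a task model; the name and fields are hypothetical."""
    node_name: str
    command: str = "echo default"

def create_executable(params: Dict[str, Any], model: Type[BaseModel], node_name: str) -> BaseModel:
    # Subclass the task model with extra="ignore" so unknown patch keys
    # are dropped rather than raising a validation error.
    class EasyModel(model):  # type: ignore
        model_config = ConfigDict(extra="ignore")

    return EasyModel(node_name=node_name, **params)

# The patch replaces the command; the stray "unknown_key" is ignored.
patched = create_executable(
    {"command": "echo patched", "unknown_key": 1},
    ShellTask,
    node_name="step1",
)
print(patched.command)  # -> echo patched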