runnable-0.17.1-py3-none-any.whl → runnable-0.18.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (41)
  1. extensions/README.md +0 -0
  2. extensions/__init__.py +0 -0
  3. extensions/catalog/README.md +0 -0
  4. extensions/catalog/file_system.py +253 -0
  5. extensions/catalog/pyproject.toml +14 -0
  6. extensions/job_executor/README.md +0 -0
  7. extensions/job_executor/__init__.py +160 -0
  8. extensions/job_executor/k8s.py +362 -0
  9. extensions/job_executor/k8s_job_spec.yaml +37 -0
  10. extensions/job_executor/local.py +61 -0
  11. extensions/job_executor/local_container.py +192 -0
  12. extensions/job_executor/pyproject.toml +16 -0
  13. extensions/nodes/README.md +0 -0
  14. extensions/nodes/nodes.py +954 -0
  15. extensions/nodes/pyproject.toml +15 -0
  16. extensions/pipeline_executor/README.md +0 -0
  17. extensions/pipeline_executor/__init__.py +644 -0
  18. extensions/pipeline_executor/argo.py +1307 -0
  19. extensions/pipeline_executor/argo_specification.yaml +51 -0
  20. extensions/pipeline_executor/local.py +62 -0
  21. extensions/pipeline_executor/local_container.py +363 -0
  22. extensions/pipeline_executor/mocked.py +161 -0
  23. extensions/pipeline_executor/pyproject.toml +16 -0
  24. extensions/pipeline_executor/retry.py +180 -0
  25. extensions/run_log_store/README.md +0 -0
  26. extensions/run_log_store/__init__.py +0 -0
  27. extensions/run_log_store/chunked_fs.py +113 -0
  28. extensions/run_log_store/db/implementation_FF.py +163 -0
  29. extensions/run_log_store/db/integration_FF.py +0 -0
  30. extensions/run_log_store/file_system.py +145 -0
  31. extensions/run_log_store/generic_chunked.py +599 -0
  32. extensions/run_log_store/pyproject.toml +15 -0
  33. extensions/secrets/README.md +0 -0
  34. extensions/secrets/dotenv.py +62 -0
  35. extensions/secrets/pyproject.toml +15 -0
  36. {runnable-0.17.1.dist-info → runnable-0.18.0.dist-info}/METADATA +1 -7
  37. runnable-0.18.0.dist-info/RECORD +58 -0
  38. runnable-0.17.1.dist-info/RECORD +0 -23
  39. {runnable-0.17.1.dist-info → runnable-0.18.0.dist-info}/WHEEL +0 -0
  40. {runnable-0.17.1.dist-info → runnable-0.18.0.dist-info}/entry_points.txt +0 -0
  41. {runnable-0.17.1.dist-info → runnable-0.18.0.dist-info}/licenses/LICENSE +0 -0
extensions/pipeline_executor/argo_specification.yaml
@@ -0,0 +1,51 @@
+ apiVersion: argoproj.io/v1alpha1
+ kind: Workflow
+ metadata:
+   generateName: runnable-dag
+ spec:
+   activeDeadlineSeconds: int # max run time of the workflow
+   entrypoint: str
+   nodeSelector: Dict[str, str] # global node selector
+   parallelism: # global level
+   podGC: OnPodCompletion
+   resources: # Should be converted to podSpecPatch
+     limits:
+     requests:
+   podSpecPatch: json str representation of resources for defaults
+   retryStrategy: # global level for all templates
+     limit: int
+     retryPolicy: # global level for all templates
+     backoff:
+       duration: str
+       factor: int
+       maxDuration: str
+   serviceAccountName: str # Optionally required
+   templateDefaults:
+     activeDeadlineSeconds: int, for a template
+     timeout: str # max time including the wait time
+     failFast: true
+   volumes:
+   templates:
+     activeDeadlineSeconds: # override
+     nodeSelector: # override
+     retryStrategy: # override
+     tolerations: # override
+     container:
+       command:
+       env:
+       image:
+       imagePullPolicy:
+       volumeMounts:
+       resources:
+         limits:
+         requests:
+     dag:
+       tasks:
+         depends:
+         continueOn:
+   tolerations: # global level for all templates
+     effect: str
+     key: str
+     operator: str
+     value: str
+   volumes:
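For orientation, the specification above lists the Workflow fields the argo executor can populate. Below is a minimal concrete Workflow that fills in a few of those fields; the image name, command, step name, and duration values are illustrative placeholders, not values emitted by the executor.

    apiVersion: argoproj.io/v1alpha1
    kind: Workflow
    metadata:
      generateName: runnable-dag-
    spec:
      entrypoint: main
      activeDeadlineSeconds: 7200        # example: cap the whole run at two hours
      retryStrategy:
        limit: 1
        retryPolicy: Always
        backoff:
          duration: "2m"
          factor: 2
          maxDuration: "1h"
      templates:
        - name: main
          dag:
            tasks:
              - name: step-1             # placeholder task
                template: run-step
        - name: run-step
          container:
            image: example.org/my-pipeline:latest          # placeholder image
            command: ["python", "-m", "my_pipeline.step"]  # placeholder command
            imagePullPolicy: IfNotPresent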
extensions/pipeline_executor/local.py
@@ -0,0 +1,62 @@
+ import logging
+
+ from pydantic import Field, PrivateAttr
+
+ from extensions.pipeline_executor import GenericPipelineExecutor
+ from runnable import defaults
+ from runnable.defaults import TypeMapVariable
+ from runnable.nodes import BaseNode
+
+ logger = logging.getLogger(defaults.LOGGER_NAME)
+
+
+ class LocalExecutor(GenericPipelineExecutor):
+     """
+     In the mode of local execution, we run everything on the local computer.
+
+     This has some serious implications on the amount of time it would take to complete the run.
+     Also ensure that the local compute is good enough to run all of the steps.
+
+     Example config:
+     execution:
+       type: local
+
+     """
+
+     service_name: str = "local"
+
+     object_serialisation: bool = Field(default=True)
+
+     _is_local: bool = PrivateAttr(default=True)
+
+     def execute_from_graph(
+         self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs
+     ):
+         if not self.object_serialisation:
+             self._context.object_serialisation = False
+
+         super().execute_from_graph(node=node, map_variable=map_variable, **kwargs)
+
+     def trigger_node_execution(
+         self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs
+     ):
+         """
+         In this mode of execution, we prepare for the node execution and execute the node.
+
+         Args:
+             node (BaseNode): [description]
+             map_variable (str, optional): [description]. Defaults to ''.
+         """
+         self.execute_node(node=node, map_variable=map_variable, **kwargs)
+
+     def execute_node(
+         self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs
+     ):
+         """
+         For local execution, we just execute the node.
+
+         Args:
+             node (BaseNode): _description_
+             map_variable (dict[str, str], optional): _description_. Defaults to None.
+         """
+         self._execute_node(node=node, map_variable=map_variable, **kwargs)
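A minimal configuration for this executor, extending the docstring's example with the object_serialisation field defined above. The assumption (consistent with the other executors in this diff) is that model fields are supplied under a config block:

    execution:
      type: local
      config:
        object_serialisation: false   # assumed key; disables object serialisation as handled in execute_from_graph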
extensions/pipeline_executor/local_container.py
@@ -0,0 +1,363 @@
+ import logging
+ from pathlib import Path
+ from typing import Dict
+
+ from pydantic import Field
+ from rich import print
+
+ from extensions.pipeline_executor import GenericPipelineExecutor
+ from runnable import console, defaults, task_console, utils
+ from runnable.datastore import StepLog
+ from runnable.defaults import TypeMapVariable
+ from runnable.nodes import BaseNode
+
+ logger = logging.getLogger(defaults.LOGGER_NAME)
+
+
+ class LocalContainerExecutor(GenericPipelineExecutor):
+     """
+     In the mode of local-container, we execute all the commands in a container.
+
+     Ensure that the local compute has enough resources to finish all your jobs.
+
+     The image for the run can either be provided as a default in the configuration of the execution engine,
+     i.e.:
+     execution:
+       type: 'local-container'
+       config:
+         docker_image: the image you want the code to run in.
+
+     or the default image can be overridden for a single node by providing a docker_image in the step config,
+     i.e.:
+     dag:
+       steps:
+         step:
+           executor_config:
+             local-container:
+               docker_image: The image that you want that single step to run in.
+     This image would be used for that step only.
+
+     This mode does not build the docker image with the latest code for you; it is left to the user to build
+     the image and ensure that the docker image provided is the correct one.
+
+     Example config:
+     execution:
+       type: local-container
+       config:
+         docker_image: The default docker image to use if the node does not provide one.
+     """
+
+     service_name: str = "local-container"
+     docker_image: str
+     auto_remove_container: bool = True
+     environment: Dict[str, str] = Field(default_factory=dict)
+
+     _is_local: bool = False
+
+     _container_log_location = "/tmp/run_logs/"
+     _container_catalog_location = "/tmp/catalog/"
+     _container_secrets_location = "/tmp/dotenv"
+     _volumes: Dict[str, Dict[str, str]] = {}
+
+     def add_code_identities(self, node: BaseNode, step_log: StepLog, **kwargs):
+         """
+         Call the base class to add the git code identity and add the docker identity.
+
+         Args:
+             node (BaseNode): The node we are adding the code identity for
+             step_log (Object): The step log corresponding to the node
+         """
+
+         super().add_code_identities(node, step_log)
+
+         if node.node_type in ["success", "fail"]:
+             # Need not add code identities if we are in a success or fail node
+             return
+
+         executor_config = self._resolve_executor_config(node)
+
+         docker_image = executor_config.get("docker_image", None)
+         if docker_image:
+             code_id = self._context.run_log_store.create_code_identity()
+
+             code_id.code_identifier = utils.get_local_docker_image_id(docker_image)
+             code_id.code_identifier_type = "docker"
+             code_id.code_identifier_dependable = True
+             code_id.code_identifier_url = "local docker host"
+             step_log.code_identities.append(code_id)
+
+     def execute_node(
+         self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs
+     ):
+         """
+         We are already in the container, we just execute the node.
+         The node is already prepared for execution.
+         """
+         self._use_volumes()
+         return self._execute_node(node, map_variable, **kwargs)
+
+     def execute_from_graph(
+         self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs
+     ):
+         """
+         This is the entry point from the graph execution.
+
+         While self.execute_graph is responsible for traversing the graph, this function is responsible for
+         the actual execution of the node.
+
+         If the node type is:
+             * task: We can delegate to _execute_node after checking the eligibility for re-run in cases of a re-run
+             * success: We can delegate to _execute_node
+             * fail: We can delegate to _execute_node
+
+         For nodes that are internally graphs:
+             * parallel: Delegate the responsibility of execution to node.execute_as_graph()
+             * dag: Delegate the responsibility of execution to node.execute_as_graph()
+             * map: Delegate the responsibility of execution to node.execute_as_graph()
+
+         Transpilers will NEVER use this method and will NEVER call this method.
+         This method should only be used by interactive executors.
+
+         Args:
+             node (Node): The node to execute
+             map_variable (dict, optional): If the node is of a map state, this corresponds to the value of the iterable.
+                 Defaults to None.
+         """
+         step_log = self._context.run_log_store.create_step_log(
+             node.name, node._get_step_log_name(map_variable)
+         )
+
+         self.add_code_identities(node=node, step_log=step_log)
+
+         step_log.step_type = node.node_type
+         step_log.status = defaults.PROCESSING
+
+         self._context.run_log_store.add_step_log(step_log, self._context.run_id)
+
+         logger.info(f"Executing node: {node.get_summary()}")
+
+         # Add the step log to the database as per the situation.
+         # If it's a terminal node, complete it now
+         if node.node_type in ["success", "fail"]:
+             self._execute_node(node, map_variable=map_variable, **kwargs)
+             return
+
+         # We call an internal function to iterate the sub graphs and execute them
+         if node.is_composite:
+             node.execute_as_graph(map_variable=map_variable, **kwargs)
+             return
+
+         task_console.export_text(clear=True)
+
+         task_name = node._resolve_map_placeholders(node.internal_name, map_variable)
+         console.print(
+             f":runner: Executing the node {task_name} ... ", style="bold color(208)"
+         )
+         self.trigger_node_execution(node=node, map_variable=map_variable, **kwargs)
+
+     # def execute_job(self, node: TaskNode):
+     #     """
+     #     Set up the step log and call the execute node
+
+     #     Args:
+     #         node (BaseNode): _description_
+     #     """
+
+     #     step_log = self._context.run_log_store.create_step_log(
+     #         node.name, node._get_step_log_name(map_variable=None)
+     #     )
+
+     #     self.add_code_identities(node=node, step_log=step_log)
+
+     #     step_log.step_type = node.node_type
+     #     step_log.status = defaults.PROCESSING
+     #     self._context.run_log_store.add_step_log(step_log, self._context.run_id)
+
+     #     command = utils.get_job_execution_command(node)
+     #     self._spin_container(node=node, command=command)
+
+     #     # Check the step log status and warn if necessary. Docker errors are generally suppressed.
+     #     step_log = self._context.run_log_store.get_step_log(
+     #         node._get_step_log_name(map_variable=None), self._context.run_id
+     #     )
+     #     if step_log.status != defaults.SUCCESS:
+     #         msg = (
+     #             "Node execution inside the container failed. Please check the logs.\n"
+     #             "Note: If you do not see any docker issue from your side and the code works properly on local execution, "
+     #             "please raise a bug report."
+     #         )
+     #         logger.warning(msg)
+
+     def trigger_node_execution(
+         self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs
+     ):
+         """
+         We come into this step via execute_from_graph; use trigger job to spin up the container.
+
+         In local container execution, we just spin up the container to execute runnable execute_single_node.
+
+         Args:
+             node (BaseNode): The node we are currently executing
+             map_variable (str, optional): If the node is part of the map branch. Defaults to ''.
+         """
+         self._mount_volumes()
+         executor_config = self._resolve_executor_config(node)
+         auto_remove_container = executor_config.get("auto_remove_container", True)
+
+         logger.debug("Here is the resolved executor config")
+         logger.debug(executor_config)
+
+         command = utils.get_node_execution_command(node, map_variable=map_variable)
+
+         self._spin_container(
+             node=node,
+             command=command,
+             map_variable=map_variable,
+             auto_remove_container=auto_remove_container,
+             **kwargs,
+         )
+
+         step_log = self._context.run_log_store.get_step_log(
+             node._get_step_log_name(map_variable), self._context.run_id
+         )
+         if step_log.status != defaults.SUCCESS:
+             msg = (
+                 "Node execution inside the container failed. Please check the logs.\n"
+                 "Note: If you do not see any docker issue from your side and the code works properly on local execution, "
+                 "please raise a bug report."
+             )
+             logger.error(msg)
+             step_log.status = defaults.FAIL
+             self._context.run_log_store.add_step_log(step_log, self._context.run_id)
+
+     def _spin_container(
+         self,
+         node: BaseNode,
+         command: str,
+         map_variable: TypeMapVariable = None,
+         auto_remove_container: bool = True,
+         **kwargs,
+     ):
+         """
+         During the flow run, we have to spin up a container with the docker image mentioned
+         and the right log locations.
+         """
+         # Conditional import
+         import docker  # pylint: disable=C0415
+
+         try:
+             client = docker.from_env()
+             api_client = docker.APIClient()
+         except Exception as ex:
+             logger.exception("Could not get access to docker")
+             raise Exception(
+                 "Could not get the docker socket file, do you have docker installed?"
+             ) from ex
+
+         try:
+             logger.info(f"Running the command {command}")
+             print(command)
+             # Overrides global config with local
+             executor_config = self._resolve_executor_config(node)
+
+             docker_image = executor_config.get("docker_image", None)
+             environment = executor_config.get("environment", {})
+             environment.update(self._context.variables)
+             if not docker_image:
+                 raise Exception(
+                     f"Please provide a docker_image using executor_config of the step {node.name} or at global config"
+                 )
+
+             # TODO: Should consider using getpass.getuser() when running the docker container? Volume permissions
+             container = client.containers.create(
+                 image=docker_image,
+                 command=command,
+                 auto_remove=False,
+                 volumes=self._volumes,
+                 network_mode="host",
+                 environment=environment,
+             )
+
+             # print(container.__dict__)
+
+             container.start()
+             stream = api_client.logs(
+                 container=container.id, timestamps=True, stream=True, follow=True
+             )
+             while True:
+                 try:
+                     output = next(stream).decode("utf-8")
+                     output = output.strip("\r\n")
+                     logger.info(output)
+                     print(output)
+                 except StopIteration:
+                     logger.info("Docker Run completed")
+                     break
+
+             exit_status = api_client.inspect_container(container.id)["State"][
+                 "ExitCode"
+             ]
+
+             if auto_remove_container:
+                 container.remove(force=True)
+
+             if exit_status != 0:
+                 msg = f"Docker command failed with exit code {exit_status}"
+                 raise Exception(msg)
+
+         except Exception as _e:
+             logger.exception("Problems with spinning/running the container")
+             raise _e
+
+     def _mount_volumes(self):
+         """
+         Mount the volumes for the container
+         """
+         match self._context.run_log_store.service_name:
+             case "file-system":
+                 write_to = self._context.run_log_store.log_folder
+                 self._volumes[str(Path(write_to).resolve())] = {
+                     "bind": f"{self._container_log_location}",
+                     "mode": "rw",
+                 }
+             case "chunked-fs":
+                 write_to = self._context.run_log_store.log_folder
+                 self._volumes[str(Path(write_to).resolve())] = {
+                     "bind": f"{self._container_log_location}",
+                     "mode": "rw",
+                 }
+
+         match self._context.catalog_handler.service_name:
+             case "file-system":
+                 catalog_location = self._context.catalog_handler.catalog_location
+                 self._volumes[str(Path(catalog_location).resolve())] = {
+                     "bind": f"{self._container_catalog_location}",
+                     "mode": "rw",
+                 }
+
+         match self._context.secrets_handler.service_name:
+             case "dotenv":
+                 secrets_location = self._context.secrets_handler.location
+                 self._volumes[str(Path(secrets_location).resolve())] = {
+                     "bind": f"{self._container_secrets_location}",
+                     "mode": "ro",
+                 }
+
+     def _use_volumes(self):
+         match self._context.run_log_store.service_name:
+             case "file-system":
+                 self._context.run_log_store.log_folder = self._container_log_location
+             case "chunked-fs":
+                 self._context.run_log_store.log_folder = self._container_log_location
+
+         match self._context.catalog_handler.service_name:
+             case "file-system":
+                 self._context.catalog_handler.catalog_location = (
+                     self._container_catalog_location
+                 )
+
+         match self._context.secrets_handler.service_name:
+             case "dotenv":
+                 self._context.secrets_handler.location = (
+                     self._container_secrets_location
+                 )
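Putting the docstring's two snippets together, a configuration for this executor might look like the sketch below. The image names and the step name "train" are placeholders; auto_remove_container and environment map to the model fields declared above.

    execution:
      type: local-container
      config:
        docker_image: example.org/pipeline-base:latest   # placeholder default image
        auto_remove_container: true
        environment:
          LOG_LEVEL: INFO

    dag:
      steps:
        train:                                           # hypothetical step name
          executor_config:
            local-container:
              docker_image: example.org/pipeline-gpu:latest   # placeholder per-step override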
extensions/pipeline_executor/mocked.py
@@ -0,0 +1,161 @@
+ import copy
+ import logging
+ from typing import Any, Dict, Type, cast
+
+ from pydantic import ConfigDict, Field
+
+ from extensions.nodes.nodes import TaskNode
+ from extensions.pipeline_executor import GenericPipelineExecutor
+ from runnable import context, defaults
+ from runnable.defaults import TypeMapVariable
+ from runnable.nodes import BaseNode
+ from runnable.tasks import BaseTaskType
+
+ logger = logging.getLogger(defaults.LOGGER_NAME)
+
+
+ def create_executable(
+     params: Dict[str, Any], model: Type[BaseTaskType], node_name: str
+ ) -> BaseTaskType:
+     class EasyModel(model):  # type: ignore
+         model_config = ConfigDict(extra="ignore")
+
+     swallow_all = EasyModel(node_name=node_name, **params)
+     return swallow_all
+
+
+ class MockedExecutor(GenericPipelineExecutor):
+     service_name: str = "mocked"
+     _is_local: bool = True
+
+     model_config = ConfigDict(extra="ignore")
+
+     patches: Dict[str, Any] = Field(default_factory=dict)
+
+     @property
+     def _context(self):
+         return context.run_context
+
+     def execute_from_graph(
+         self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs
+     ):
+         """
+         This is the entry point from the graph execution.
+
+         While self.execute_graph is responsible for traversing the graph, this function is responsible for
+         the actual execution of the node.
+
+         If the node type is:
+             * task: We can delegate to _execute_node after checking the eligibility for re-run in cases of a re-run
+             * success: We can delegate to _execute_node
+             * fail: We can delegate to _execute_node
+
+         For nodes that are internally graphs:
+             * parallel: Delegate the responsibility of execution to node.execute_as_graph()
+             * dag: Delegate the responsibility of execution to node.execute_as_graph()
+             * map: Delegate the responsibility of execution to node.execute_as_graph()
+
+         Transpilers will NEVER use this method and will NEVER call this method.
+         This method should only be used by interactive executors.
+
+         Args:
+             node (Node): The node to execute
+             map_variable (dict, optional): If the node is of a map state, this corresponds to the value of the iterable.
+                 Defaults to None.
+         """
+         step_log = self._context.run_log_store.create_step_log(
+             node.name, node._get_step_log_name(map_variable)
+         )
+
+         self.add_code_identities(node=node, step_log=step_log)
+
+         step_log.step_type = node.node_type
+         step_log.status = defaults.PROCESSING
+
+         self._context.run_log_store.add_step_log(step_log, self._context.run_id)
+
+         logger.info(f"Executing node: {node.get_summary()}")
+
+         # Add the step log to the database as per the situation.
+         # If it's a terminal node, complete it now
+         if node.node_type in ["success", "fail"]:
+             self._context.run_log_store.add_step_log(step_log, self._context.run_id)
+             self._execute_node(node, map_variable=map_variable, **kwargs)
+             return
+
+         # We call an internal function to iterate the sub graphs and execute them
+         if node.is_composite:
+             self._context.run_log_store.add_step_log(step_log, self._context.run_id)
+             node.execute_as_graph(map_variable=map_variable, **kwargs)
+             return
+
+         if node.name not in self.patches:
+             # node is not patched, so mock it
+             self._execute_node(node, map_variable=map_variable, mock=True, **kwargs)
+         else:
+             # node is patched
+             # command as the patch value
+             node_to_send: TaskNode = cast(TaskNode, node).model_copy(deep=True)
+             executable_type = node_to_send.executable.__class__
+             executable = create_executable(
+                 self.patches[node.name],
+                 executable_type,
+                 node_name=node.name,
+             )
+             node_to_send.executable = executable
+             self._execute_node(
+                 node_to_send, map_variable=map_variable, mock=False, **kwargs
+             )
+
+     def _resolve_executor_config(self, node: BaseNode):
+         """
+         The overrides section can contain specific overrides to a global executor config.
+         To avoid too much clutter in the dag definition, we allow the configuration file to have an overrides block.
+         The nodes can override the global config by referring to a key in the overrides.
+
+         This function also applies variables to the effective node config.
+
+         For example:
+         # configuration.yaml
+         execution:
+           type: cloud-implementation
+           config:
+             k1: v1
+             k3: v3
+           overrides:
+             custom_config:
+               k1: v11
+               k2: v2 # Could be a mapping internally.
+
+         # in pipeline definition.yaml
+         dag:
+           steps:
+             step1:
+               overrides:
+                 cloud-implementation: custom_config
+
+         This method should resolve the node_config to {'k1': 'v11', 'k2': 'v2', 'k3': 'v3'}
+
+         Args:
+             node (BaseNode): The current node being processed.
+
+         """
+         effective_node_config = copy.deepcopy(self.model_dump())
+
+         return effective_node_config
+
+     def execute_node(
+         self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs
+     ):
+         """
+         The entry point for all executors apart from local.
+         We have already prepared for node execution.
+
+         Args:
+             node (BaseNode): The node to execute
+             map_variable (dict, optional): If the node is part of a map, send in the map dictionary. Defaults to None.
+
+         Raises:
+             NotImplementedError: _description_
+         """
+         ...
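A sketch of how the patches mapping above might be supplied. Steps whose names appear under patches get their executable rebuilt from the given parameters (via create_executable); all other steps are mocked. The step name, the config layout, and the assumption that the step's task type accepts a command field are illustrative, not confirmed by this diff.

    execution:
      type: mocked
      config:
        patches:
          train_model:                             # hypothetical step name
            command: my_package.tests.fake_train   # assumed task parameter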
extensions/pipeline_executor/pyproject.toml
@@ -0,0 +1,16 @@
+ [project]
+ name = "pipeline_executor"
+ version = "0.0.0"
+ description = "Add your description here"
+ readme = "README.md"
+ requires-python = ">=3.10"
+ dependencies = []
+
+
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+
+ [tool.hatch.build.targets.wheel]
+ packages = ["."]