runnable-0.28.8-py3-none-any.whl → runnable-0.30.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- extensions/job_executor/k8s.py +8 -9
- extensions/job_executor/local.py +13 -5
- extensions/job_executor/local_container.py +7 -5
- extensions/nodes/nodes.py +15 -195
- extensions/nodes/torch.py +169 -0
- extensions/nodes/torch_config.py +33 -0
- extensions/pipeline_executor/__init__.py +10 -14
- extensions/pipeline_executor/argo.py +106 -4
- extensions/pipeline_executor/local.py +10 -11
- extensions/pipeline_executor/local_container.py +51 -36
- extensions/pipeline_executor/mocked.py +6 -12
- extensions/pipeline_executor/retry.py +6 -10
- extensions/run_log_store/generic_chunked.py +1 -2
- extensions/secrets/dotenv.py +1 -1
- runnable/__init__.py +1 -0
- runnable/executor.py +7 -12
- runnable/nodes.py +44 -25
- runnable/sdk.py +46 -6
- runnable/secrets.py +3 -3
- runnable/tasks.py +0 -4
- {runnable-0.28.8.dist-info → runnable-0.30.0.dist-info}/METADATA +3 -1
- {runnable-0.28.8.dist-info → runnable-0.30.0.dist-info}/RECORD +25 -23
- {runnable-0.28.8.dist-info → runnable-0.30.0.dist-info}/entry_points.txt +1 -0
- {runnable-0.28.8.dist-info → runnable-0.30.0.dist-info}/WHEEL +0 -0
- {runnable-0.28.8.dist-info → runnable-0.30.0.dist-info}/licenses/LICENSE +0 -0
extensions/pipeline_executor/__init__.py
CHANGED
@@ -198,7 +198,6 @@ class GenericPipelineExecutor(BasePipelineExecutor):
         node: BaseNode,
         map_variable: TypeMapVariable = None,
         mock: bool = False,
-        **kwargs,
     ):
         """
         This is the entry point when we do the actual execution of the function.
@@ -232,7 +231,6 @@ class GenericPipelineExecutor(BasePipelineExecutor):
             map_variable=map_variable,
             attempt_number=self.step_attempt_number,
             mock=mock,
-            **kwargs,
         )
 
         data_catalogs_put: Optional[List[DataCatalog]] = self._sync_catalog(stage="put")
@@ -248,7 +246,7 @@ class GenericPipelineExecutor(BasePipelineExecutor):
 
         self._context.run_log_store.add_step_log(step_log, self._context.run_id)
 
-    def add_code_identities(self, node: BaseNode, step_log: StepLog, **kwargs):
+    def add_code_identities(self, node: BaseNode, step_log: StepLog):
         """
         Add code identities specific to the implementation.
 
@@ -260,9 +258,7 @@ class GenericPipelineExecutor(BasePipelineExecutor):
         """
         step_log.code_identities.append(utils.get_git_code_identity())
 
-    def execute_from_graph(
-        self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs
-    ):
+    def execute_from_graph(self, node: BaseNode, map_variable: TypeMapVariable = None):
         """
         This is the entry point to from the graph execution.
 
@@ -303,12 +299,12 @@ class GenericPipelineExecutor(BasePipelineExecutor):
         # Add the step log to the database as per the situation.
         # If its a terminal node, complete it now
         if node.node_type in ["success", "fail"]:
-            self._execute_node(node, map_variable=map_variable, **kwargs)
+            self._execute_node(node, map_variable=map_variable)
             return
 
         # We call an internal function to iterate the sub graphs and execute them
         if node.is_composite:
-            node.execute_as_graph(map_variable=map_variable, **kwargs)
+            node.execute_as_graph(map_variable=map_variable)
             return
 
         task_console.export_text(clear=True)
@@ -317,10 +313,10 @@ class GenericPipelineExecutor(BasePipelineExecutor):
         console.print(
             f":runner: Executing the node {task_name} ... ", style="bold color(208)"
         )
-        self.trigger_node_execution(node=node, map_variable=map_variable, **kwargs)
+        self.trigger_node_execution(node=node, map_variable=map_variable)
 
     def trigger_node_execution(
-        self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs
+        self, node: BaseNode, map_variable: TypeMapVariable = None
     ):
         """
         Call this method only if we are responsible for traversing the graph via
@@ -376,7 +372,7 @@ class GenericPipelineExecutor(BasePipelineExecutor):
 
         return step_log.status, next_node_name
 
-    def execute_graph(self, dag: Graph, map_variable: TypeMapVariable = None, **kwargs):
+    def execute_graph(self, dag: Graph, map_variable: TypeMapVariable = None):
         """
         The parallelization is controlled by the nodes and not by this function.
 
@@ -430,7 +426,7 @@ class GenericPipelineExecutor(BasePipelineExecutor):
         )
 
         try:
-            self.execute_from_graph(working_on, map_variable=map_variable, **kwargs)
+            self.execute_from_graph(working_on, map_variable=map_variable)
             status, next_node_name = self._get_status_and_next_node_name(
                 current_node=working_on, dag=dag, map_variable=map_variable
             )
@@ -593,7 +589,7 @@ class GenericPipelineExecutor(BasePipelineExecutor):
         step_log.status = defaults.PROCESSING
         self._context.run_log_store.add_step_log(step_log, self._context.run_id)
 
-        node.fan_out(map_variable=map_variable, **kwargs)
+        node.fan_out(map_variable=map_variable)
 
     def fan_in(self, node: BaseNode, map_variable: TypeMapVariable = None):
         """
@@ -614,7 +610,7 @@ class GenericPipelineExecutor(BasePipelineExecutor):
             map_variable (dict, optional): If the node if of a map state,.Defaults to None.
 
         """
-        node.fan_in(map_variable=map_variable, **kwargs)
+        node.fan_in(map_variable=map_variable)
 
         step_log = self._context.run_log_store.get_step_log(
            node._get_step_log_name(map_variable=map_variable), self._context.run_id
extensions/pipeline_executor/argo.py
CHANGED
@@ -27,6 +27,7 @@ from extensions.nodes.nodes import (
     SuccessNode,
     TaskNode,
 )
+from extensions.nodes.torch import TorchNode
 from extensions.pipeline_executor import GenericPipelineExecutor
 from runnable import defaults, utils
 from runnable.defaults import TypeMapVariable
@@ -370,6 +371,89 @@ class CustomVolume(BaseModelWIthConfig):
 
 
 class ArgoExecutor(GenericPipelineExecutor):
+    """
+    Executes the pipeline using Argo Workflows.
+
+    The defaults configuration is kept similar to the
+    [Argo Workflow spec](https://argo-workflows.readthedocs.io/en/latest/fields/#workflow).
+
+    Configuration:
+
+    ```yaml
+    pipeline-executor:
+      type: argo
+      config:
+        pvc_for_runnable: "my-pvc"
+        custom_volumes:
+          - mount_path: "/tmp"
+            persistent_volume_claim:
+              claim_name: "my-pvc"
+              read_only: false/true
+        expose_parameters_as_inputs: true/false
+        secrets_from_k8s:
+          - key1
+          - key2
+          - ...
+        output_file: "argo-pipeline.yaml"
+        log_level: "DEBUG"/"INFO"/"WARNING"/"ERROR"/"CRITICAL"
+        defaults:
+          image: "my-image"
+          activeDeadlineSeconds: 86400
+          failFast: true
+          nodeSelector:
+            label: value
+          parallelism: 1
+          retryStrategy:
+            backoff:
+              duration: "2m"
+              factor: 2
+              maxDuration: "1h"
+            limit: 0
+            retryPolicy: "Always"
+          timeout: "1h"
+          tolerations:
+          imagePullPolicy: "Always"/"IfNotPresent"/"Never"
+          resources:
+            limits:
+              memory: "1Gi"
+              cpu: "250m"
+              gpu: 0
+            requests:
+              memory: "1Gi"
+              cpu: "250m"
+          env:
+            - name: "MY_ENV"
+              value: "my-value"
+            - name: secret_env
+              secretName: "my-secret"
+              secretKey: "my-key"
+        overrides:
+          key1:
+            ... similar structure to defaults
+
+        argoWorkflow:
+          metadata:
+            annotations:
+              key1: value1
+              key2: value2
+            generateName: "my-workflow"
+            labels:
+              key1: value1
+
+    ```
+
+    As of now, ```runnable``` needs a pvc to store the logs and the catalog; provided by ```pvc_for_runnable```.
+    - ```custom_volumes``` can be used to mount additional volumes to the container.
+
+    - ```expose_parameters_as_inputs``` can be used to expose the initial parameters as inputs to the workflow.
+    - ```secrets_from_k8s``` can be used to expose the secrets from the k8s secret store.
+    - ```output_file``` is the file where the argo pipeline will be dumped.
+    - ```log_level``` is the log level for the containers.
+    - ```defaults``` is the default configuration for all the containers.
+
+
+    """
+
     service_name: str = "argo"
     _is_local: bool = False
     mock: bool = False
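
Distilled from the docstring above, a minimal configuration that satisfies the stated requirement (a pvc for logs and the catalog) might look like this; the claim name and image are placeholders:

```yaml
pipeline-executor:
  type: argo
  config:
    pvc_for_runnable: "runnable-pvc"   # required: stores the run logs and the catalog
    output_file: "argo-pipeline.yaml"
    defaults:
      image: "my-org/my-image:latest"
```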
@@ -510,6 +594,7 @@ class ArgoExecutor(GenericPipelineExecutor):
             isinstance(node, TaskNode)
             or isinstance(node, StubNode)
             or isinstance(node, SuccessNode)
+            or isinstance(node, TorchNode)
         )
 
         node_override = None
@@ -522,7 +607,7 @@ class ArgoExecutor(GenericPipelineExecutor):
 
         effective_settings = self.defaults.model_dump()
         if node_override:
-            effective_settings.update(node_override.model_dump())
+            effective_settings.update(node_override.model_dump(exclude_none=True))
 
         inputs = inputs or Inputs(parameters=[])
 
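
The switch to `model_dump(exclude_none=True)` matters when merging defaults with per-node overrides: without it, optional fields left unset on the override serialize as `None` and clobber the defaults. A small pydantic sketch with hypothetical models, same mechanics:

```python
from typing import Optional

from pydantic import BaseModel


class Defaults(BaseModel):
    image: str = "base:latest"
    parallelism: Optional[int] = 4


class Override(BaseModel):
    image: Optional[str] = None
    parallelism: Optional[int] = None


override = Override(image="gpu:latest")  # parallelism deliberately left unset

settings = Defaults().model_dump()
settings.update(override.model_dump())
print(settings)  # {'image': 'gpu:latest', 'parallelism': None}  <- default lost

settings = Defaults().model_dump()
settings.update(override.model_dump(exclude_none=True))
print(settings)  # {'image': 'gpu:latest', 'parallelism': 4}     <- default preserved
```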
@@ -792,6 +877,25 @@ class ArgoExecutor(GenericPipelineExecutor):
 
                 self._templates.append(composite_template)
 
+            case "torch":
+                assert isinstance(working_on, TorchNode)
+                # TODO: Need to add multi-node functionality
+
+                template_of_container = self._create_container_template(
+                    working_on,
+                    task_name=task_name,
+                    inputs=Inputs(parameters=parameters),
+                )
+                assert template_of_container.container is not None
+
+                if working_on.node_type == "task":
+                    self._expose_secrets_to_task(
+                        working_on,
+                        container_template=template_of_container.container,
+                    )
+
+                self._templates.append(template_of_container)
+
         self._handle_failures(
             working_on,
             dag,
@@ -810,7 +914,6 @@ class ArgoExecutor(GenericPipelineExecutor):
         self,
         dag: Graph,
         map_variable: dict[str, str | int | float] | None = None,
-        **kwargs,
     ):
         # All the arguments set at the spec level can be referred as "{{workflow.parameters.*}}"
         # We want to use that functionality to override the parameters at the task level
@@ -886,7 +989,6 @@ class ArgoExecutor(GenericPipelineExecutor):
         self,
         node: BaseNode,
         map_variable: dict[str, str | int | float] | None = None,
-        **kwargs,
     ):
         error_on_existing_run_id = os.environ.get("error_on_existing_run_id", "false")
         exists_ok = error_on_existing_run_id == "false"
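
Note the double negative in the flag handling above: an existing run id is tolerated unless the environment variable is set to anything other than "false". A quick check of those semantics:

```python
import os

# unset and "false" allow reuse; any other value rejects an existing run id
for value in (None, "false", "true", "1"):
    if value is None:
        os.environ.pop("error_on_existing_run_id", None)
    else:
        os.environ["error_on_existing_run_id"] = value
    exists_ok = os.environ.get("error_on_existing_run_id", "false") == "false"
    print(f"{value!r:8} -> exists_ok={exists_ok}")
```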
@@ -904,7 +1006,7 @@ class ArgoExecutor(GenericPipelineExecutor):
         step_log.status = defaults.PROCESSING
         self._context.run_log_store.add_step_log(step_log, self._context.run_id)
 
-        self._execute_node(node=node, map_variable=map_variable, **kwargs)
+        self._execute_node(node=node, map_variable=map_variable)
 
         # Raise exception if the step failed
         step_log = self._context.run_log_store.get_step_log(
extensions/pipeline_executor/local.py
CHANGED
@@ -18,8 +18,11 @@ class LocalExecutor(GenericPipelineExecutor):
     Also ensure that the local compute is good enough for the compute to happen of all the steps.
 
     Example config:
-
+
+    ```yaml
+    pipeline-executor:
       type: local
+    ```
 
     """
 
@@ -29,16 +32,14 @@ class LocalExecutor(GenericPipelineExecutor):
 
     _is_local: bool = PrivateAttr(default=True)
 
-    def execute_from_graph(
-        self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs
-    ):
+    def execute_from_graph(self, node: BaseNode, map_variable: TypeMapVariable = None):
         if not self.object_serialisation:
             self._context.object_serialisation = False
 
-        super().execute_from_graph(node=node, map_variable=map_variable, **kwargs)
+        super().execute_from_graph(node=node, map_variable=map_variable)
 
     def trigger_node_execution(
-        self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs
+        self, node: BaseNode, map_variable: TypeMapVariable = None
     ):
         """
         In this mode of execution, we prepare for the node execution and execute the node
@@ -47,11 +48,9 @@ class LocalExecutor(GenericPipelineExecutor):
             node (BaseNode): [description]
             map_variable (str, optional): [description]. Defaults to ''.
         """
-        self.execute_node(node=node, map_variable=map_variable, **kwargs)
+        self.execute_node(node=node, map_variable=map_variable)
 
-    def execute_node(
-        self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs
-    ):
+    def execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None):
         """
         For local execution, we just execute the node.
 
@@ -59,4 +58,4 @@ class LocalExecutor(GenericPipelineExecutor):
             node (BaseNode): _description_
             map_variable (dict[str, str], optional): _description_. Defaults to None.
         """
-        self._execute_node(node=node, map_variable=map_variable, **kwargs)
+        self._execute_node(node=node, map_variable=map_variable)
extensions/pipeline_executor/local_container.py
CHANGED
@@ -3,7 +3,6 @@ from pathlib import Path
 from typing import Dict
 
 from pydantic import Field
-from rich import print
 
 from extensions.pipeline_executor import GenericPipelineExecutor
 from runnable import console, defaults, task_console, utils
@@ -20,31 +19,50 @@ class LocalContainerExecutor(GenericPipelineExecutor):
 
     Ensure that the local compute has enough resources to finish all your jobs.
 
-
-    i.e.:
-    execution:
-      type: 'local-container'
-      config:
-        docker_image: the image you want the code to run in.
-
-    or default image could be over-ridden for a single node by providing a docker_image in the step config.
-    i.e:
-    dag:
-      steps:
-        step:
-          executor_config:
-            local-container:
-              docker_image: The image that you want that single step to run in.
-    This image would only be used for that step only.
-
-    This mode does not build the docker image with the latest code for you, it is still left for the user to build
-    and ensure that the docker image provided is the correct one.
+    Configuration options:
 
-
-
+    ```yaml
+    pipeline-executor:
       type: local-container
      config:
-        docker_image:
+        docker_image: <required>
+        auto_remove_container: true/false
+        environment:
+          key: value
+        overrides:
+          alternate_config:
+            docker_image: <required>
+            auto_remove_container: true/false
+            environment:
+              key: value
+    ```
+
+    - ```docker_image```: The default docker image to use for all the steps.
+    - ```auto_remove_container```: Remove container after execution
+    - ```environment```: Environment variables to pass to the container
+
+    Overrides give you the ability to override the default docker image for a single step.
+    A step can then then refer to the alternate_config in the task definition.
+
+    Example:
+
+    ```python
+    from runnable import PythonTask
+
+    task = PythonTask(
+        name="alt_task",
+        overrides={
+            "local-container": "alternate_config"
+        }
+    )
+    ```
+
+    In the above example, ```alt_task``` will run in the docker image/configuration
+    as defined in the alternate_config.
+
+    ```runnable``` does not build the docker image for you, it is still left for the user to build
+    and ensure that the docker image provided is the correct one.
+
     """
 
     service_name: str = "local-container"
@@ -59,7 +77,7 @@ class LocalContainerExecutor(GenericPipelineExecutor):
     _container_secrets_location = "/tmp/dotenv"
     _volumes: Dict[str, Dict[str, str]] = {}
 
-    def add_code_identities(self, node: BaseNode, step_log: StepLog, **kwargs):
+    def add_code_identities(self, node: BaseNode, step_log: StepLog):
         """
         Call the Base class to add the git code identity and add docker identity
 
@@ -86,18 +104,18 @@ class LocalContainerExecutor(GenericPipelineExecutor):
         code_id.code_identifier_url = "local docker host"
         step_log.code_identities.append(code_id)
 
-    def execute_node(
-        self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs
-    ):
+    def execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None):
         """
         We are already in the container, we just execute the node.
         The node is already prepared for execution.
         """
         self._use_volumes()
-        return self._execute_node(node, map_variable, **kwargs)
+        return self._execute_node(node, map_variable)
 
     def execute_from_graph(
-        self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs
+        self,
+        node: BaseNode,
+        map_variable: TypeMapVariable = None,
     ):
         """
         This is the entry point to from the graph execution.
@@ -139,12 +157,12 @@ class LocalContainerExecutor(GenericPipelineExecutor):
         # Add the step log to the database as per the situation.
         # If its a terminal node, complete it now
         if node.node_type in ["success", "fail"]:
-            self._execute_node(node, map_variable=map_variable, **kwargs)
+            self._execute_node(node, map_variable=map_variable)
             return
 
         # We call an internal function to iterate the sub graphs and execute them
         if node.is_composite:
-            node.execute_as_graph(map_variable=map_variable, **kwargs)
+            node.execute_as_graph(map_variable=map_variable)
             return
 
         task_console.export_text(clear=True)
@@ -153,10 +171,10 @@ class LocalContainerExecutor(GenericPipelineExecutor):
         console.print(
             f":runner: Executing the node {task_name} ... ", style="bold color(208)"
         )
-        self.trigger_node_execution(node=node, map_variable=map_variable, **kwargs)
+        self.trigger_node_execution(node=node, map_variable=map_variable)
 
     def trigger_node_execution(
-        self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs
+        self, node: BaseNode, map_variable: TypeMapVariable = None
     ):
         """
         We come into this step via execute from graph, use trigger job to spin up the container.
@@ -181,7 +199,6 @@ class LocalContainerExecutor(GenericPipelineExecutor):
             command=command,
             map_variable=map_variable,
             auto_remove_container=auto_remove_container,
-            **kwargs,
         )
 
         step_log = self._context.run_log_store.get_step_log(
@@ -203,7 +220,6 @@ class LocalContainerExecutor(GenericPipelineExecutor):
         command: str,
         map_variable: TypeMapVariable = None,
         auto_remove_container: bool = True,
-        **kwargs,
     ):
         """
         During the flow run, we have to spin up a container with the docker image mentioned
@@ -223,7 +239,6 @@ class LocalContainerExecutor(GenericPipelineExecutor):
 
         try:
             logger.info(f"Running the command {command}")
-            print(command)
             # Overrides global config with local
             executor_config = self._resolve_executor_config(node)
 
extensions/pipeline_executor/mocked.py
CHANGED
@@ -36,9 +36,7 @@ class MockedExecutor(GenericPipelineExecutor):
     def _context(self):
         return context.run_context
 
-    def execute_from_graph(
-        self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs
-    ):
+    def execute_from_graph(self, node: BaseNode, map_variable: TypeMapVariable = None):
         """
         This is the entry point to from the graph execution.
 
@@ -80,18 +78,18 @@ class MockedExecutor(GenericPipelineExecutor):
         # If its a terminal node, complete it now
         if node.node_type in ["success", "fail"]:
             self._context.run_log_store.add_step_log(step_log, self._context.run_id)
-            self._execute_node(node, map_variable=map_variable, **kwargs)
+            self._execute_node(node, map_variable=map_variable)
             return
 
         # We call an internal function to iterate the sub graphs and execute them
         if node.is_composite:
             self._context.run_log_store.add_step_log(step_log, self._context.run_id)
-            node.execute_as_graph(map_variable=map_variable, **kwargs)
+            node.execute_as_graph(map_variable=map_variable)
             return
 
         if node.name not in self.patches:
             # node is not patched, so mock it
-            self._execute_node(node, map_variable=map_variable, mock=True, **kwargs)
+            self._execute_node(node, map_variable=map_variable, mock=True)
         else:
             # node is patched
             # command as the patch value
@@ -103,9 +101,7 @@ class MockedExecutor(GenericPipelineExecutor):
             node_name=node.name,
         )
         node_to_send.executable = executable
-        self._execute_node(
-            node_to_send, map_variable=map_variable, mock=False, **kwargs
-        )
+        self._execute_node(node_to_send, map_variable=map_variable, mock=False)
 
     def _resolve_executor_config(self, node: BaseNode):
         """
@@ -144,9 +140,7 @@ class MockedExecutor(GenericPipelineExecutor):
 
         return effective_node_config
 
-    def execute_node(
-        self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs
-    ):
+    def execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None):
         """
         The entry point for all executors apart from local.
         We have already prepared for node execution.
extensions/pipeline_executor/retry.py
CHANGED
@@ -63,9 +63,7 @@ class RetryExecutor(GenericPipelineExecutor):
         # Should the parameters be copied from previous execution
         # self._set_up_for_re_run(params=params)
 
-    def execute_from_graph(
-        self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs
-    ):
+    def execute_from_graph(self, node: BaseNode, map_variable: TypeMapVariable = None):
         """
         This is the entry point to from the graph execution.
 
@@ -103,7 +101,7 @@ class RetryExecutor(GenericPipelineExecutor):
         # If its a terminal node, complete it now
         if node.node_type in ["success", "fail"]:
             self._context.run_log_store.add_step_log(step_log, self._context.run_id)
-            self._execute_node(node, map_variable=map_variable, **kwargs)
+            self._execute_node(node, map_variable=map_variable)
             return
 
         # In retry step
@@ -118,12 +116,12 @@ class RetryExecutor(GenericPipelineExecutor):
         # We call an internal function to iterate the sub graphs and execute them
         if node.is_composite:
             self._context.run_log_store.add_step_log(step_log, self._context.run_id)
-            node.execute_as_graph(map_variable=map_variable, **kwargs)
+            node.execute_as_graph(map_variable=map_variable)
             return
 
         # Executor specific way to trigger a job
         self._context.run_log_store.add_step_log(step_log, self._context.run_id)
-        self.execute_node(node=node, map_variable=map_variable, **kwargs)
+        self.execute_node(node=node, map_variable=map_variable)
 
     def _is_step_eligible_for_rerun(
         self, node: BaseNode, map_variable: TypeMapVariable = None
@@ -174,7 +172,5 @@ class RetryExecutor(GenericPipelineExecutor):
         self._restart_initiated = True
         return True
 
-    def execute_node(
-        self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs
-    ):
-        self._execute_node(node, map_variable=map_variable, **kwargs)
+    def execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None):
+        self._execute_node(node, map_variable=map_variable)
extensions/run_log_store/generic_chunked.py
CHANGED
@@ -318,7 +318,6 @@ class ChunkedRunLogStore(BaseRunLogStore):
         tag: str = "",
         original_run_id: str = "",
         status: str = defaults.CREATED,
-        **kwargs,
     ):
         """
         Creates a Run Log object by using the config
@@ -549,7 +548,7 @@ class ChunkedRunLogStore(BaseRunLogStore):
         )
 
     def get_branch_log(
-        self, internal_branch_name: str, run_id: str, **kwargs
+        self, internal_branch_name: str, run_id: str
     ) -> Union[BranchLog, RunLog]:
         """
         Returns the branch log by the internal branch name for the run id
extensions/secrets/dotenv.py
CHANGED
runnable/__init__.py
CHANGED
runnable/executor.py
CHANGED
@@ -107,7 +107,7 @@ class BaseJobExecutor(BaseExecutor):
         ...
 
     @abstractmethod
-    def add_code_identities(self, job_log: JobLog, **kwargs):
+    def add_code_identities(self, job_log: JobLog):
         """
         Add code identities specific to the implementation.
 
@@ -156,12 +156,12 @@ class BaseJobExecutor(BaseExecutor):
 # TODO: Consolidate execute_node, trigger_node_execution, _execute_node
 class BasePipelineExecutor(BaseExecutor):
     service_type: str = "pipeline_executor"
-    overrides: dict = {}
+    overrides: dict[str, Any] = {}
 
     _context_node: Optional[BaseNode] = PrivateAttr(default=None)
 
     @abstractmethod
-    def add_code_identities(self, node: BaseNode, step_log: StepLog, **kwargs):
+    def add_code_identities(self, node: BaseNode, step_log: StepLog):
         """
         Add code identities specific to the implementation.
 
@@ -204,7 +204,6 @@ class BasePipelineExecutor(BaseExecutor):
         node: BaseNode,
         map_variable: TypeMapVariable = None,
         mock: bool = False,
-        **kwargs,
     ):
         """
         This is the entry point when we do the actual execution of the function.
@@ -227,9 +226,7 @@ class BasePipelineExecutor(BaseExecutor):
         ...
 
     @abstractmethod
-    def execute_node(
-        self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs
-    ):
+    def execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None):
         """
         The entry point for all executors apart from local.
         We have already prepared for node execution.
@@ -244,9 +241,7 @@ class BasePipelineExecutor(BaseExecutor):
         ...
 
     @abstractmethod
-    def execute_from_graph(
-        self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs
-    ):
+    def execute_from_graph(self, node: BaseNode, map_variable: TypeMapVariable = None):
         """
         This is the entry point to from the graph execution.
 
@@ -294,7 +289,7 @@ class BasePipelineExecutor(BaseExecutor):
         ...
 
     @abstractmethod
-    def execute_graph(self, dag: Graph, map_variable: TypeMapVariable = None, **kwargs):
+    def execute_graph(self, dag: Graph, map_variable: TypeMapVariable = None):
         """
         The parallelization is controlled by the nodes and not by this function.
 
@@ -395,7 +390,7 @@ class BasePipelineExecutor(BaseExecutor):
 
     @abstractmethod
     def trigger_node_execution(
-        self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs
+        self, node: BaseNode, map_variable: TypeMapVariable = None
     ):
         """
         Executor specific way of triggering jobs when runnable does both traversal and execution