runnable 0.34.0a3__py3-none-any.whl → 0.36.0__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- extensions/job_executor/__init__.py +3 -4
- extensions/job_executor/emulate.py +106 -0
- extensions/job_executor/k8s.py +8 -8
- extensions/job_executor/local_container.py +13 -14
- extensions/nodes/__init__.py +0 -0
- extensions/nodes/conditional.py +243 -0
- extensions/nodes/fail.py +72 -0
- extensions/nodes/map.py +350 -0
- extensions/nodes/parallel.py +159 -0
- extensions/nodes/stub.py +89 -0
- extensions/nodes/success.py +72 -0
- extensions/nodes/task.py +92 -0
- extensions/pipeline_executor/__init__.py +24 -26
- extensions/pipeline_executor/argo.py +50 -41
- extensions/pipeline_executor/emulate.py +112 -0
- extensions/pipeline_executor/local.py +4 -4
- extensions/pipeline_executor/local_container.py +19 -79
- extensions/pipeline_executor/mocked.py +4 -4
- extensions/pipeline_executor/retry.py +6 -10
- extensions/tasks/torch.py +1 -1
- runnable/__init__.py +2 -9
- runnable/catalog.py +1 -21
- runnable/cli.py +0 -59
- runnable/context.py +519 -28
- runnable/datastore.py +51 -54
- runnable/defaults.py +12 -34
- runnable/entrypoints.py +82 -440
- runnable/exceptions.py +35 -34
- runnable/executor.py +13 -20
- runnable/names.py +1 -1
- runnable/nodes.py +18 -16
- runnable/parameters.py +2 -2
- runnable/sdk.py +117 -164
- runnable/tasks.py +62 -21
- runnable/utils.py +6 -268
- {runnable-0.34.0a3.dist-info → runnable-0.36.0.dist-info}/METADATA +1 -2
- runnable-0.36.0.dist-info/RECORD +74 -0
- {runnable-0.34.0a3.dist-info → runnable-0.36.0.dist-info}/entry_points.txt +9 -8
- extensions/nodes/nodes.py +0 -778
- extensions/nodes/torch.py +0 -273
- extensions/nodes/torch_config.py +0 -76
- runnable-0.34.0a3.dist-info/RECORD +0 -67
- {runnable-0.34.0a3.dist-info → runnable-0.36.0.dist-info}/WHEEL +0 -0
- {runnable-0.34.0a3.dist-info → runnable-0.36.0.dist-info}/licenses/LICENSE +0 -0
extensions/job_executor/__init__.py
CHANGED
```diff
@@ -84,7 +84,6 @@ class GenericJobExecutor(BaseJobExecutor):
             run_id=self._context.run_id,
             tag=self._context.tag,
             status=defaults.PROCESSING,
-            dag_hash=self._context.dag_hash,
         )
         # Any interaction with run log store attributes should happen via API if available.
         self._context.run_log_store.set_parameters(
@@ -92,7 +91,7 @@ class GenericJobExecutor(BaseJobExecutor):
         )

         # Update run_config
-        run_config =
+        run_config = self._context.model_dump()
         logger.debug(f"run_config as seen by executor: {run_config}")
         self._context.run_log_store.set_run_config(
             run_id=self._context.run_id, run_config=run_config
@@ -147,7 +146,7 @@ class GenericJobExecutor(BaseJobExecutor):

         data_catalogs = []
         for name_pattern in catalog_settings:
-            data_catalog = self._context.
+            data_catalog = self._context.catalog.put(
                 name=name_pattern, allow_file_not_found_exc=allow_file_not_found_exc
             )

@@ -165,5 +164,5 @@ class GenericJobExecutor(BaseJobExecutor):
         )
         task_console.save_text(log_file_name)
         # Put the log file in the catalog
-        self._context.
+        self._context.catalog.put(name=log_file_name)
         os.remove(log_file_name)
```
extensions/job_executor/emulate.py
ADDED
```diff
@@ -0,0 +1,106 @@
+import logging
+import shlex
+import subprocess
+import sys
+from typing import List, Optional
+
+
+from extensions.job_executor import GenericJobExecutor
+from runnable import console, context, defaults
+from runnable.datastore import DataCatalog
+from runnable.tasks import BaseTaskType
+
+logger = logging.getLogger(defaults.LOGGER_NAME)
+
+
+class EmulatorJobExecutor(GenericJobExecutor):
+    """
+    The EmulatorJobExecutor is a job executor that emulates the job execution.
+    """
+
+    service_name: str = "emulator"
+
+    def submit_job(self, job: BaseTaskType, catalog_settings=Optional[List[str]]):
+        """
+        This method gets invoked by the CLI.
+        """
+        self._set_up_run_log()
+
+        # Call the container job
+        job_log = self._context.run_log_store.create_job_log()
+        self._context.run_log_store.add_job_log(
+            run_id=self._context.run_id, job_log=job_log
+        )
+        self.run_click_command()
+
+    def execute_job(self, job: BaseTaskType, catalog_settings=Optional[List[str]]):
+        """
+        Focusses on execution of the job.
+        """
+        logger.info("Trying to execute job")
+
+        job_log = self._context.run_log_store.get_job_log(run_id=self._context.run_id)
+        self.add_code_identities(job_log)
+
+        attempt_log = job.execute_command()
+
+        job_log.status = attempt_log.status
+        job_log.attempts.append(attempt_log)
+
+        allow_file_not_found_exc = True
+        if job_log.status == defaults.SUCCESS:
+            allow_file_not_found_exc = False
+
+        data_catalogs_put: Optional[List[DataCatalog]] = self._sync_catalog(
+            catalog_settings=catalog_settings,
+            allow_file_not_found_exc=allow_file_not_found_exc,
+        )
+
+        logger.debug(f"data_catalogs_put: {data_catalogs_put}")
+
+        job_log.add_data_catalogs(data_catalogs_put or [])
+
+        console.print("Summary of job")
+        console.print(job_log.get_summary())
+
+        self._context.run_log_store.add_job_log(
+            run_id=self._context.run_id, job_log=job_log
+        )
+
+    def run_click_command(self) -> str:
+        """
+        Execute a Click-based CLI command in the current virtual environment.
+
+        Args:
+            command: Click command to execute
+        """
+        assert isinstance(self._context, context.JobContext)
+        command = self._context.get_job_callable_command()
+
+        sub_command = [sys.executable, "-m", "runnable.cli"] + shlex.split(command)[1:]
+
+        process = subprocess.Popen(
+            sub_command,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            universal_newlines=True,
+            bufsize=1,
+        )
+
+        output = []
+        try:
+            while True:
+                line = process.stdout.readline()  # type: ignore
+                if not line and process.poll() is not None:
+                    break
+                print(line, end="")
+                output.append(line)
+        finally:
+            process.stdout.close()  # type: ignore
+
+        if process.returncode != 0:
+            raise subprocess.CalledProcessError(
+                process.returncode, command, "".join(output)
+            )
+
+        return "".join(output)
```
extensions/job_executor/k8s.py
CHANGED
```diff
@@ -9,7 +9,7 @@ from pydantic import BaseModel, ConfigDict, Field, PlainSerializer, PrivateAttr
 from pydantic.alias_generators import to_camel

 from extensions.job_executor import GenericJobExecutor
-from runnable import console,
+from runnable import console, context, defaults
 from runnable.datastore import DataCatalog, StepAttempt
 from runnable.tasks import BaseTaskType

@@ -173,7 +173,7 @@ class GenericK8sJobExecutor(GenericJobExecutor):
     mock: bool = False
     namespace: str = Field(default="default")

-
+    _should_setup_run_log_at_traversal: bool = PrivateAttr(default=False)
     _volume_mounts: list[VolumeMount] = PrivateAttr(default_factory=lambda: [])
     _volumes: list[HostPathVolume | PVCVolume] = PrivateAttr(default_factory=lambda: [])

@@ -258,8 +258,8 @@ class GenericK8sJobExecutor(GenericJobExecutor):
             self._client.V1VolumeMount(**vol.model_dump())
             for vol in self._volume_mounts
         ]
-
-        command =
+        assert isinstance(self._context, context.JobContext)
+        command = self._context.get_job_callable_command()

         container_env = [
             self._client.V1EnvVar(**env.model_dump())
@@ -355,9 +355,9 @@ class GenericK8sJobExecutor(GenericJobExecutor):
             case "chunked-fs":
                 self._context.run_log_store.log_folder = self._container_log_location

-        match self._context.
+        match self._context.catalog.service_name:
             case "file-system":
-                self._context.
+                self._context.catalog.catalog_location = (
                     self._container_catalog_location
                 )

@@ -415,7 +415,7 @@ class MiniK8sJobExecutor(GenericK8sJobExecutor):
             )
         )

-        match self._context.
+        match self._context.catalog.service_name:
             case "file-system":
                 self._volumes.append(
                     HostPathVolume(
@@ -503,7 +503,7 @@ class K8sJobExecutor(GenericK8sJobExecutor):
             )
         )

-        match self._context.
+        match self._context.catalog.service_name:
             case "file-system":
                 self._volume_mounts.append(
                     VolumeMount(
```
extensions/job_executor/local_container.py
CHANGED
```diff
@@ -2,10 +2,10 @@ import logging
 from pathlib import Path
 from typing import Dict, List, Optional

-from pydantic import Field
+from pydantic import Field, PrivateAttr

 from extensions.job_executor import GenericJobExecutor
-from runnable import console,
+from runnable import console, context, defaults
 from runnable.datastore import DataCatalog, StepAttempt
 from runnable.tasks import BaseTaskType

@@ -23,7 +23,7 @@ class LocalContainerJobExecutor(GenericJobExecutor):
    auto_remove_container: bool = True
    environment: Dict[str, str] = Field(default_factory=dict)

-
+    _should_setup_run_log_at_traversal: bool = PrivateAttr(default=True)

    _container_log_location = "/tmp/run_logs/"
    _container_catalog_location = "/tmp/catalog/"
@@ -100,7 +100,8 @@ class LocalContainerJobExecutor(GenericJobExecutor):
         ) from ex

        try:
-
+            assert isinstance(self._context, context.JobContext)
+            command = self._context.get_job_callable_command()
            logger.info(f"Running the command {command}")
            print(command)

@@ -165,17 +166,17 @@ class LocalContainerJobExecutor(GenericJobExecutor):
            "mode": "rw",
        }

-        match self._context.
+        match self._context.catalog.service_name:
            case "file-system":
-                catalog_location = self._context.
+                catalog_location = self._context.catalog.catalog_location
                self._volumes[str(Path(catalog_location).resolve())] = {
                    "bind": f"{self._container_catalog_location}",
                    "mode": "rw",
                }

-        match self._context.
+        match self._context.secrets.service_name:
            case "dotenv":
-                secrets_location = self._context.
+                secrets_location = self._context.secrets.location
                self._volumes[str(Path(secrets_location).resolve())] = {
                    "bind": f"{self._container_secrets_location}",
                    "mode": "ro",
@@ -188,14 +189,12 @@ class LocalContainerJobExecutor(GenericJobExecutor):
            case "chunked-fs":
                self._context.run_log_store.log_folder = self._container_log_location

-        match self._context.
+        match self._context.catalog.service_name:
            case "file-system":
-                self._context.
+                self._context.catalog.catalog_location = (
                    self._container_catalog_location
                )

-        match self._context.
+        match self._context.secrets.service_name:
            case "dotenv":
-                self._context.
-                    self._container_secrets_location
-                )
+                self._context.secrets.location = self._container_secrets_location
```
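A pattern repeats across the k8s and local-container hunks above: executors now reach services through typed handles on the context (`context.catalog.service_name`, `context.catalog.catalog_location`, `context.secrets.location`) and `match` on the service name to decide what to mount. A minimal sketch of that shape with stand-in classes; the real types live in `runnable.context`:

```python
from dataclasses import dataclass, field


# Stand-ins for illustration only; the real handles hang off
# runnable.context.JobContext.
@dataclass
class FakeCatalog:
    service_name: str = "file-system"
    catalog_location: str = ".catalog"


@dataclass
class FakeSecrets:
    service_name: str = "dotenv"
    location: str = ".env"


@dataclass
class FakeContext:
    catalog: FakeCatalog = field(default_factory=FakeCatalog)
    secrets: FakeSecrets = field(default_factory=FakeSecrets)


ctx = FakeContext()

# Same match-on-service-name shape the executors use to decide what to mount.
match ctx.catalog.service_name:
    case "file-system":
        print(f"mount {ctx.catalog.catalog_location} -> /tmp/catalog/ (rw)")

match ctx.secrets.service_name:
    case "dotenv":
        print(f"mount {ctx.secrets.location} -> /tmp/dotenv (ro)")
```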
extensions/nodes/__init__.py
ADDED
File without changes
extensions/nodes/conditional.py
ADDED
```diff
@@ -0,0 +1,243 @@
+import logging
+from copy import deepcopy
+from typing import Any, cast
+
+from pydantic import Field, field_serializer, field_validator
+
+from runnable import console, defaults
+from runnable.datastore import Parameter
+from runnable.graph import Graph, create_graph
+from runnable.nodes import CompositeNode, MapVariableType
+
+logger = logging.getLogger(defaults.LOGGER_NAME)
+
+
+class ConditionalNode(CompositeNode):
+    """
+    parameter: name -> the parameter which is used for evaluation
+    default: Optional[branch] = branch to execute if nothing is matched.
+    branches: {
+        "case1" : branch1,
+        "case2: branch2,
+    }
+
+    Conceptually this is equal to:
+    match parameter:
+        case "case1":
+            branch1
+        case "case2":
+            branch2
+        case _:
+            default
+
+    """
+
+    node_type: str = Field(default="conditional", serialization_alias="type")
+
+    parameter: str  # the name of the parameter should be isalnum
+    default: Graph | None = Field(default=None)  # TODO: Think about the design of this
+    branches: dict[str, Graph]
+    # The keys of the branches should be isalnum()
+
+    @field_validator("parameter", mode="after")
+    @classmethod
+    def check_parameter(cls, parameter: str) -> str:
+        """
+        Validate that the parameter name is alphanumeric.
+
+        Args:
+            parameter (str): The parameter name to validate.
+
+        Raises:
+            ValueError: If the parameter name is not alphanumeric.
+
+        Returns:
+            str: The validated parameter name.
+        """
+        if not parameter.isalnum():
+            raise ValueError(f"Parameter '{parameter}' must be alphanumeric.")
+        return parameter
+
+    def get_parameter_value(self) -> str | int | bool | float:
+        """
+        Get the parameter value from the context.
+
+        Returns:
+            Any: The value of the parameter.
+        """
+        parameters: dict[str, Parameter] = self._context.run_log_store.get_parameters(
+            run_id=self._context.run_id
+        )
+
+        if self.parameter not in parameters:
+            raise Exception(f"Parameter {self.parameter} not found in parameters")
+
+        chosen_parameter_value = parameters[self.parameter].get_value()
+
+        assert isinstance(chosen_parameter_value, (int, float, bool, str)), (
+            f"Parameter '{self.parameter}' must be of type int, float, bool, or str, "
+            f"but got {type(chosen_parameter_value).__name__}."
+        )
+
+        return chosen_parameter_value
+
+    def get_summary(self) -> dict[str, Any]:
+        summary = {
+            "name": self.name,
+            "type": self.node_type,
+            "branches": [branch.get_summary() for branch in self.branches.values()],
+            "parameter": self.parameter,
+            "default": self.default.get_summary() if self.default else None,
+        }
+
+        return summary
+
+    @field_serializer("branches")
+    def ser_branches(self, branches: dict[str, Graph]) -> dict[str, Graph]:
+        ret: dict[str, Graph] = {}
+
+        for branch_name, branch in branches.items():
+            ret[branch_name.split(".")[-1]] = branch
+
+        return ret
+
+    @classmethod
+    def parse_from_config(cls, config: dict[str, Any]) -> "ConditionalNode":
+        internal_name = cast(str, config.get("internal_name"))
+
+        config_branches = config.pop("branches", {})
+        branches = {}
+        for branch_name, branch_config in config_branches.items():
+            sub_graph = create_graph(
+                deepcopy(branch_config),
+                internal_branch_name=internal_name + "." + branch_name,
+            )
+            branches[internal_name + "." + branch_name] = sub_graph
+
+        if not branches:
+            raise Exception("A parallel node should have branches")
+        return cls(branches=branches, **config)
+
+    def _get_branch_by_name(self, branch_name: str) -> Graph:
+        if branch_name in self.branches:
+            return self.branches[branch_name]
+
+        raise Exception(f"Branch {branch_name} does not exist")
+
+    def fan_out(self, map_variable: MapVariableType = None):
+        """
+        This method is restricted to creating branch logs.
+        """
+        parameter_value = self.get_parameter_value()
+
+        hit_once = False
+
+        for internal_branch_name, _ in self.branches.items():
+            # the match is done on the last part of the branch name
+            result = str(parameter_value) == internal_branch_name.split(".")[-1]
+
+            if not result:
+                # Need not create a branch log for this branch
+                continue
+
+            effective_branch_name = self._resolve_map_placeholders(
+                internal_branch_name, map_variable=map_variable
+            )
+
+            hit_once = True
+            branch_log = self._context.run_log_store.create_branch_log(
+                effective_branch_name
+            )
+
+            console.print(
+                f"Branch log created for {effective_branch_name}: {branch_log}"
+            )
+            branch_log.status = defaults.PROCESSING
+            self._context.run_log_store.add_branch_log(branch_log, self._context.run_id)
+
+        if not hit_once:
+            raise Exception(
+                "None of the branches were true. Please check your evaluate statements"
+            )
+
+    def execute_as_graph(self, map_variable: MapVariableType = None):
+        """
+        This function does the actual execution of the sub-branches of the parallel node.
+
+        From a design perspective, this function should not be called if the execution is 3rd party orchestrated.
+
+        The modes that render the job specifications, do not need to interact with this node at all as they have their
+        own internal mechanisms of handing parallel states.
+        If they do not, you can find a way using as-is nodes as hack nodes.
+
+        The execution of a dag, could result in
+        * The dag being completely executed with a definite (fail, success) state in case of
+            local or local-container execution
+        * The dag being in a processing state with PROCESSING status in case of local-aws-batch
+
+        Only fail state is considered failure during this phase of execution.
+
+        Args:
+            executor (Executor): The Executor as per the use config
+            **kwargs: Optional kwargs passed around
+        """
+        self.fan_out(map_variable=map_variable)
+        parameter_value = self.get_parameter_value()
+
+        for internal_branch_name, branch in self.branches.items():
+            result = str(parameter_value) == internal_branch_name.split(".")[-1]
+
+            if result:
+                # if the condition is met, execute the graph
+                logger.debug(f"Executing graph for {branch}")
+                self._context.pipeline_executor.execute_graph(
+                    branch, map_variable=map_variable
+                )
+
+        self.fan_in(map_variable=map_variable)
+
+    def fan_in(self, map_variable: MapVariableType = None):
+        """
+        The general fan in method for a node of type Parallel.
+
+        3rd party orchestrators should use this method to find the status of the composite step.
+
+        Args:
+            executor (BaseExecutor): The executor class as defined by the config
+            map_variable (dict, optional): If the node is part of a map. Defaults to None.
+        """
+        effective_internal_name = self._resolve_map_placeholders(
+            self.internal_name, map_variable=map_variable
+        )
+
+        step_success_bool: bool = True
+        parameter_value = self.get_parameter_value()
+
+        for internal_branch_name, _ in self.branches.items():
+            result = str(parameter_value) == internal_branch_name.split(".")[-1]
+
+            if not result:
+                # The branch would not have been executed
+                continue
+
+            effective_branch_name = self._resolve_map_placeholders(
+                internal_branch_name, map_variable=map_variable
+            )
+
+            branch_log = self._context.run_log_store.get_branch_log(
+                effective_branch_name, self._context.run_id
+            )
+
+            if branch_log.status != defaults.SUCCESS:
+                step_success_bool = False
+
+        step_log = self._context.run_log_store.get_step_log(
+            effective_internal_name, self._context.run_id
+        )
+
+        if step_success_bool:  # If none failed
+            step_log.status = defaults.SUCCESS
+        else:
+            step_log.status = defaults.FAIL
+
+        self._context.run_log_store.add_step_log(step_log, self._context.run_id)
```
extensions/nodes/fail.py
ADDED
```diff
@@ -0,0 +1,72 @@
+from datetime import datetime
+from typing import Any, Dict, cast
+
+from pydantic import Field
+
+from runnable import datastore, defaults
+from runnable.datastore import StepLog
+from runnable.defaults import MapVariableType
+from runnable.nodes import TerminalNode
+
+
+class FailNode(TerminalNode):
+    """
+    A leaf node of the graph that represents a failure node
+    """
+
+    node_type: str = Field(default="fail", serialization_alias="type")
+
+    @classmethod
+    def parse_from_config(cls, config: Dict[str, Any]) -> "FailNode":
+        return cast("FailNode", super().parse_from_config(config))
+
+    def get_summary(self) -> Dict[str, Any]:
+        summary = {
+            "name": self.name,
+            "type": self.node_type,
+        }
+
+        return summary
+
+    def execute(
+        self,
+        mock=False,
+        map_variable: MapVariableType = None,
+        attempt_number: int = 1,
+    ) -> StepLog:
+        """
+        Execute the failure node.
+        Set the run or branch log status to failure.
+
+        Args:
+            executor (_type_): the executor class
+            mock (bool, optional): If we should just mock and not do the actual execution. Defaults to False.
+            map_variable (dict, optional): If the node belongs to internal branches. Defaults to None.
+
+        Returns:
+            StepAttempt: The step attempt object
+        """
+        step_log = self._context.run_log_store.get_step_log(
+            self._get_step_log_name(map_variable), self._context.run_id
+        )
+
+        attempt_log = datastore.StepAttempt(
+            status=defaults.SUCCESS,
+            start_time=str(datetime.now()),
+            end_time=str(datetime.now()),
+            attempt_number=attempt_number,
+        )
+
+        run_or_branch_log = self._context.run_log_store.get_branch_log(
+            self._get_branch_log_name(map_variable), self._context.run_id
+        )
+        run_or_branch_log.status = defaults.FAIL
+        self._context.run_log_store.add_branch_log(
+            run_or_branch_log, self._context.run_id
+        )
+
+        step_log.status = attempt_log.status
+
+        step_log.attempts.append(attempt_log)
+
+        return step_log
```