runnable-0.11.2-py3-none-any.whl → runnable-0.11.4-py3-none-any.whl
- runnable/__init__.py +4 -1
- runnable/datastore.py +4 -1
- runnable/entrypoints.py +15 -1
- runnable/extensions/executor/__init__.py +0 -5
- runnable/extensions/executor/argo/implementation.py +3 -1
- runnable/extensions/executor/local_container/implementation.py +49 -16
- runnable/extensions/executor/mocked/implementation.py +21 -1
- runnable/extensions/executor/retry/implementation.py +11 -1
- runnable/extensions/nodes.py +45 -23
- runnable/graph.py +1 -0
- runnable/parameters.py +1 -1
- runnable/sdk.py +12 -5
- runnable/tasks.py +123 -106
- {runnable-0.11.2.dist-info → runnable-0.11.4.dist-info}/METADATA +1 -1
- {runnable-0.11.2.dist-info → runnable-0.11.4.dist-info}/RECORD +18 -20
- {runnable-0.11.2.dist-info → runnable-0.11.4.dist-info}/entry_points.txt +0 -1
- runnable/extensions/secrets/env_secrets/__init__.py +0 -0
- runnable/extensions/secrets/env_secrets/implementation.py +0 -42
- {runnable-0.11.2.dist-info → runnable-0.11.4.dist-info}/LICENSE +0 -0
- {runnable-0.11.2.dist-info → runnable-0.11.4.dist-info}/WHEEL +0 -0
runnable/__init__.py
CHANGED
@@ -2,6 +2,7 @@
 
 # TODO: Might need to add Rich to pyinstaller part
 import logging
+import os
 from logging.config import dictConfig
 
 from rich.console import Console
@@ -29,6 +30,8 @@ from runnable.sdk import (  # noqa
     pickled,
 )
 
+os.environ["_PLOOMBER_TELEMETRY_DEBUG"] = "false"
+
 ## TODO: Summary should be a bit better for catalog.
 ## If the execution fails, hint them about the retry executor.
 # Make the retry executor loose!
@@ -38,4 +41,4 @@ from runnable.sdk import (  # noqa
 
 
 # TODO: Think of way of generating dag hash without executor configuration
-#
+# Try to get a release
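Note: the only behavioral change in this file is that Ploomber telemetry debugging is switched off as soon as the package is imported. A minimal sketch of the ordering concern (the variable name comes straight from the hunk; how ploomber-engine reads it is an assumption):

```python
import os

# runnable sets this at package-import time, before any ploomber_engine import,
# so the setting is already in place when notebook tasks start executing.
os.environ["_PLOOMBER_TELEMETRY_DEBUG"] = "false"
```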
runnable/datastore.py
CHANGED
@@ -402,7 +402,10 @@ class RunLog(BaseModel):
         """
         dot_path = i_name.split(".")
         if len(dot_path) == 1:
-            return self.steps[i_name], None
+            try:
+                return self.steps[i_name], None
+            except KeyError as e:
+                raise exceptions.StepLogNotFoundError(self.run_id, i_name) from e
 
         current_steps = self.steps
         current_step = None
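Note: the effect of this hunk is that a missing top-level step now surfaces as StepLogNotFoundError instead of a bare KeyError, which callers such as the retry executor already catch. A stripped-down mirror of the new control flow, with stand-in types:

```python
class StepLogNotFoundError(Exception):
    """Stand-in for runnable.exceptions.StepLogNotFoundError."""
    def __init__(self, run_id: str, name: str):
        super().__init__(f"Step log '{name}' not found in run '{run_id}'")

def search_step_by_internal_name(steps: dict, run_id: str, i_name: str):
    # Single-component names are now looked up defensively.
    if len(i_name.split(".")) == 1:
        try:
            return steps[i_name], None
        except KeyError as e:
            raise StepLogNotFoundError(run_id, i_name) from e
    ...  # nested names keep walking the branch structure as before
```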
runnable/entrypoints.py
CHANGED
@@ -60,6 +60,8 @@ def prepare_configurations(
    variables = utils.gather_variables()
 
    templated_configuration = {}
+   configuration_file = os.environ.get("RUNNABLE_CONFIGURATION_FILE", configuration_file)
+
    if configuration_file:
        templated_configuration = utils.load_yaml(configuration_file) or {}
 
@@ -144,8 +146,8 @@ def prepare_configurations(
 
 
 def execute(
-    configuration_file: str,
     pipeline_file: str,
+    configuration_file: str = "",
     tag: str = "",
     run_id: str = "",
     parameters_file: str = "",
@@ -196,6 +198,10 @@ def execute(
        run_context.progress = progress
        executor.execute_graph(dag=run_context.dag)  # type: ignore
 
+       if not executor._local:
+           executor.send_return_code(stage="traversal")
+           return
+
        run_log = run_context.run_log_store.get_run_log_by_id(run_id=run_context.run_id, full=False)
 
        if run_log.status == defaults.SUCCESS:
@@ -205,6 +211,10 @@ def execute(
    except Exception as e:  # noqa: E722
        console.print(e, style=defaults.error_style)
        progress.update(pipeline_execution_task, description="[red] Errored execution", completed=True)
+       run_log = run_context.run_log_store.get_run_log_by_id(run_id=run_context.run_id, full=False)
+       run_log.status = defaults.FAIL
+       run_context.run_log_store.add_branch_log(run_log, run_context.run_id)
+       raise e
 
    executor.send_return_code()
 
@@ -235,6 +245,8 @@ def execute_single_node(
    """
    from runnable import nodes
 
+   configuration_file = os.environ.get("RUNNABLE_CONFIGURATION_FILE", configuration_file)
+
    run_context = prepare_configurations(
        configuration_file=configuration_file,
        pipeline_file=pipeline_file,
@@ -422,6 +434,8 @@ def fan(
    """
    from runnable import nodes
 
+   configuration_file = os.environ.get("RUNNABLE_CONFIGURATION_FILE", configuration_file)
+
    run_context = prepare_configurations(
        configuration_file=configuration_file,
        pipeline_file=pipeline_file,
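Note: all three entry points (execute, execute_single_node, fan) now let the RUNNABLE_CONFIGURATION_FILE environment variable take precedence over the configuration_file argument, so a parent process can pin the configuration for any child process it spawns (for example, a step running inside a container). A minimal sketch of the precedence rule (resolve_configuration_file is an illustrative name, not package API):

```python
import os

def resolve_configuration_file(passed: str = "") -> str:
    # The environment variable wins when set; otherwise the passed value is used.
    return os.environ.get("RUNNABLE_CONFIGURATION_FILE", passed)

# Example: a container entrypoint can pin the config regardless of arguments.
os.environ["RUNNABLE_CONFIGURATION_FILE"] = "config/argo.yaml"
assert resolve_configuration_file("local.yaml") == "config/argo.yaml"
```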
runnable/extensions/executor/__init__.py
CHANGED
@@ -185,14 +185,11 @@ class GenericExecutor(BaseExecutor):
         data_catalogs = []
         for name_pattern in node_catalog_settings.get(stage) or []:
             if stage == "get":
-                get_catalog_progress = self._context.progress.add_task(f"Getting from catalog {name_pattern}", total=1)
                 data_catalog = self._context.catalog_handler.get(
                     name=name_pattern, run_id=self._context.run_id, compute_data_folder=compute_data_folder
                 )
-                self._context.progress.update(get_catalog_progress, completed=True, visible=False, refresh=True)
 
             elif stage == "put":
-                put_catalog_progress = self._context.progress.add_task(f"Putting in catalog {name_pattern}", total=1)
                 data_catalog = self._context.catalog_handler.put(
                     name=name_pattern,
                     run_id=self._context.run_id,
@@ -200,8 +197,6 @@ class GenericExecutor(BaseExecutor):
                     synced_catalogs=synced_catalogs,
                 )
 
-                self._context.progress.update(put_catalog_progress, completed=True, visible=False)
-
             logger.debug(f"Added data catalog: {data_catalog} to step log")
             data_catalogs.extend(data_catalog)
 
runnable/extensions/executor/argo/implementation.py
CHANGED
@@ -1033,6 +1033,9 @@ class ArgoExecutor(GenericExecutor):
         if working_on.node_type not in ["success", "fail"] and working_on._get_on_failure_node():
             failure_node = dag.get_node_by_name(working_on._get_on_failure_node())
 
+            render_obj = get_renderer(working_on)(executor=self, node=failure_node)
+            render_obj.render(list_of_iter_values=list_of_iter_values.copy())
+
             failure_template_name = self.get_clean_name(failure_node)
             # If a task template for clean name exists, retrieve it
             failure_template = templates.get(
@@ -1040,7 +1043,6 @@ class ArgoExecutor(GenericExecutor):
                 DagTaskTemplate(name=failure_template_name, template=failure_template_name),
             )
             failure_template.depends.append(f"{clean_name}.Failed")
-
             templates[failure_template_name] = failure_template
 
         # If we are in a map node, we need to add the values as arguments
runnable/extensions/executor/local_container/implementation.py
CHANGED
@@ -5,7 +5,7 @@ from typing import Dict, cast
 from pydantic import Field
 from rich import print
 
-from runnable import defaults,
+from runnable import defaults, utils
 from runnable.datastore import StepLog
 from runnable.defaults import TypeMapVariable
 from runnable.extensions.executor import GenericExecutor
@@ -145,16 +145,6 @@ class LocalContainerExecutor(GenericExecutor):
         logger.debug("Here is the resolved executor config")
         logger.debug(executor_config)
 
-        if executor_config.get("run_in_local", False):
-            # Do not change config but only validate the configuration.
-            # Trigger the job on local system instead of a container
-            integration.validate(self, self._context.run_log_store)
-            integration.validate(self, self._context.catalog_handler)
-            integration.validate(self, self._context.secrets_handler)
-
-            self.execute_node(node=node, map_variable=map_variable, **kwargs)
-            return
-
         command = utils.get_node_execution_command(node, map_variable=map_variable)
 
         self._spin_container(
@@ -172,7 +162,7 @@ class LocalContainerExecutor(GenericExecutor):
             "Note: If you do not see any docker issue from your side and the code works properly on local execution"
             "please raise a bug report."
         )
-        logger.
+        logger.error(msg)
         step_log.status = defaults.FAIL
         self._context.run_log_store.add_step_log(step_log, self._context.run_id)
 
@@ -212,6 +202,7 @@ class LocalContainerExecutor(GenericExecutor):
             f"Please provide a docker_image using executor_config of the step {node.name} or at global config"
         )
 
+        print("container", self._volumes)
         # TODO: Should consider using getpass.getuser() when running the docker container? Volume permissions
         container = client.containers.create(
             image=docker_image,
@@ -260,7 +251,9 @@ class LocalContainerComputeFileSystemRunLogstore(BaseIntegration):
     service_provider = "file-system"  # The actual implementation of the service
 
     def configure_for_traversal(self, **kwargs):
-        from runnable.extensions.run_log_store.file_system.implementation import
+        from runnable.extensions.run_log_store.file_system.implementation import (
+            FileSystemRunLogstore,
+        )
 
         self.executor = cast(LocalContainerExecutor, self.executor)
         self.service = cast(FileSystemRunLogstore, self.service)
@@ -272,7 +265,9 @@ class LocalContainerComputeFileSystemRunLogstore(BaseIntegration):
         }
 
     def configure_for_execution(self, **kwargs):
-        from runnable.extensions.run_log_store.file_system.implementation import
+        from runnable.extensions.run_log_store.file_system.implementation import (
+            FileSystemRunLogstore,
+        )
 
         self.executor = cast(LocalContainerExecutor, self.executor)
         self.service = cast(FileSystemRunLogstore, self.service)
@@ -280,6 +275,40 @@ class LocalContainerComputeFileSystemRunLogstore(BaseIntegration):
         self.service.log_folder = self.executor._container_log_location
 
 
+class LocalContainerComputeChunkedFS(BaseIntegration):
+    """
+    Integration pattern between Local container and File System catalog
+    """
+
+    executor_type = "local-container"
+    service_type = "run_log_store"  # One of secret, catalog, datastore
+    service_provider = "chunked-fs"  # The actual implementation of the service
+
+    def configure_for_traversal(self, **kwargs):
+        from runnable.extensions.run_log_store.chunked_file_system.implementation import (
+            ChunkedFileSystemRunLogStore,
+        )
+
+        self.executor = cast(LocalContainerExecutor, self.executor)
+        self.service = cast(ChunkedFileSystemRunLogStore, self.service)
+
+        write_to = self.service.log_folder
+        self.executor._volumes[str(Path(write_to).resolve())] = {
+            "bind": f"{self.executor._container_log_location}",
+            "mode": "rw",
+        }
+
+    def configure_for_execution(self, **kwargs):
+        from runnable.extensions.run_log_store.chunked_file_system.implementation import (
+            ChunkedFileSystemRunLogStore,
+        )
+
+        self.executor = cast(LocalContainerExecutor, self.executor)
+        self.service = cast(ChunkedFileSystemRunLogStore, self.service)
+
+        self.service.log_folder = self.executor._container_log_location
+
+
 class LocalContainerComputeFileSystemCatalog(BaseIntegration):
     """
     Integration pattern between Local container and File System catalog
@@ -290,7 +319,9 @@ class LocalContainerComputeFileSystemCatalog(BaseIntegration):
     service_provider = "file-system"  # The actual implementation of the service
 
     def configure_for_traversal(self, **kwargs):
-        from runnable.extensions.catalog.file_system.implementation import
+        from runnable.extensions.catalog.file_system.implementation import (
+            FileSystemCatalog,
+        )
 
         self.executor = cast(LocalContainerExecutor, self.executor)
         self.service = cast(FileSystemCatalog, self.service)
@@ -302,7 +333,9 @@ class LocalContainerComputeFileSystemCatalog(BaseIntegration):
         }
 
     def configure_for_execution(self, **kwargs):
-        from runnable.extensions.catalog.file_system.implementation import
+        from runnable.extensions.catalog.file_system.implementation import (
+            FileSystemCatalog,
+        )
 
         self.executor = cast(LocalContainerExecutor, self.executor)
         self.service = cast(FileSystemCatalog, self.service)
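Note: the new LocalContainerComputeChunkedFS integration mirrors the existing file-system one: it bind-mounts the chunked run-log folder into the container for traversal, and points the service at the container-side path during execution. A sketch of the volume dictionary it builds (the two paths are assumptions for illustration; the shape is what docker-py's containers.create(volumes=...) expects):

```python
from pathlib import Path

volumes: dict = {}
host_log_folder = ".run_log_store"        # assumption: chunked-fs log folder
container_log_location = "/tmp/run_logs"  # assumption: executor's mount point

# Host path (resolved to absolute) maps to the container path, read-write,
# so run logs written inside the container land on the host filesystem.
volumes[str(Path(host_log_folder).resolve())] = {
    "bind": container_log_location,
    "mode": "rw",
}
print(volumes)
```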
runnable/extensions/executor/mocked/implementation.py
CHANGED
@@ -18,7 +18,7 @@ def create_executable(params: Dict[str, Any], model: Type[BaseTaskType], node_name
    class EasyModel(model):  # type: ignore
        model_config = ConfigDict(extra="ignore")
 
-   swallow_all = EasyModel(
+   swallow_all = EasyModel(node_name=node_name, **params)
    return swallow_all
 
 
@@ -26,6 +26,8 @@ class MockedExecutor(GenericExecutor):
    service_name: str = "mocked"
    _local_executor: bool = True
 
+   model_config = ConfigDict(extra="ignore")
+
    patches: Dict[str, Any] = Field(default_factory=dict)
 
    @property
@@ -64,6 +66,10 @@ class MockedExecutor(GenericExecutor):
        step_log.step_type = node.node_type
        step_log.status = defaults.PROCESSING
 
+       self._context.run_log_store.add_step_log(step_log, self._context.run_id)
+
+       logger.info(f"Executing node: {node.get_summary()}")
+
        # Add the step log to the database as per the situation.
        # If its a terminal node, complete it now
        if node.node_type in ["success", "fail"]:
@@ -132,3 +138,17 @@ class MockedExecutor(GenericExecutor):
 
    def execute_job(self, node: TaskNode):
        pass
+
+   def execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs):
+       """
+       The entry point for all executors apart from local.
+       We have already prepared for node execution.
+
+       Args:
+           node (BaseNode): The node to execute
+           map_variable (dict, optional): If the node is part of a map, send in the map dictionary. Defaults to None.
+
+       Raises:
+           NotImplementedError: _description_
+       """
+       ...
runnable/extensions/executor/retry/implementation.py
CHANGED
@@ -6,6 +6,7 @@ from runnable import context, defaults, exceptions
 from runnable.datastore import RunLog
 from runnable.defaults import TypeMapVariable
 from runnable.extensions.executor import GenericExecutor
+from runnable.extensions.nodes import TaskNode
 from runnable.nodes import BaseNode
 
 logger = logging.getLogger(defaults.LOGGER_NAME)
@@ -31,6 +32,7 @@ class RetryExecutor(GenericExecutor):
 
     _local: bool = True
     _original_run_log: Optional[RunLog] = None
+    _restart_initiated: bool = False
 
     @property
     def _context(self):
@@ -38,7 +40,7 @@ class RetryExecutor(GenericExecutor):
 
     @cached_property
     def original_run_log(self):
-
+        return self._context.run_log_store.get_run_log_by_id(
            run_id=self.run_id,
            full=True,
        )
@@ -140,10 +142,14 @@ class RetryExecutor(GenericExecutor):
        node_step_log_name = node._get_step_log_name(map_variable=map_variable)
        logger.info(f"Scanning previous run logs for node logs of: {node_step_log_name}")
 
+       if self._restart_initiated:
+           return True
+
        try:
            previous_attempt_log, _ = self.original_run_log.search_step_by_internal_name(node_step_log_name)
        except exceptions.StepLogNotFoundError:
            logger.warning(f"Did not find the node {node.name} in previous run log")
+           self._restart_initiated = True
            return True  # We should re-run the node.
 
        logger.info(f"The original step status: {previous_attempt_log.status}")
@@ -152,7 +158,11 @@ class RetryExecutor(GenericExecutor):
            return False  # We need not run the node
 
        logger.info(f"The new execution should start executing graph from this node {node.name}")
+       self._restart_initiated = True
        return True
 
    def execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs):
        self._execute_node(node, map_variable=map_variable, **kwargs)
+
+   def execute_job(self, node: TaskNode):
+       pass
runnable/extensions/nodes.py
CHANGED
@@ -5,7 +5,7 @@ import sys
 from collections import OrderedDict
 from copy import deepcopy
 from datetime import datetime
-from typing import Any, Dict, List, Optional, Tuple, Union, cast
+from typing import Annotated, Any, Callable, Dict, List, Optional, Tuple, Union, cast
 
 from pydantic import (
     ConfigDict,
@@ -14,10 +14,15 @@ from pydantic import (
     field_serializer,
     field_validator,
 )
-from typing_extensions import Annotated
 
 from runnable import datastore, defaults, utils
-from runnable.datastore import
+from runnable.datastore import (
+    JsonParameter,
+    MetricParameter,
+    ObjectParameter,
+    Parameter,
+    StepLog,
+)
 from runnable.defaults import TypeMapVariable
 from runnable.graph import Graph, create_graph
 from runnable.nodes import CompositeNode, ExecutableNode, TerminalNode
@@ -46,8 +51,6 @@ class TaskNode(ExecutableNode):
         task_config = {k: v for k, v in config.items() if k not in TaskNode.model_fields.keys()}
         node_config = {k: v for k, v in config.items() if k in TaskNode.model_fields.keys()}
 
-        task_config["node_name"] = config.get("name")
-
         executable = create_task(task_config)
         return cls(executable=executable, **node_config, **task_config)
 
@@ -543,10 +546,14 @@ class MapNode(CompositeNode):
         iterate_on = None
         try:
             iterate_on = self._context.run_log_store.get_parameters(self._context.run_id)[self.iterate_on].get_value()
-        except KeyError:
+        except KeyError as e:
             raise Exception(
-
-
+                (
+                    f"Expected parameter {self.iterate_on}",
+                    "not present in Run Log parameters",
+                    "was it ever set before?",
+                )
+            ) from e
 
         if not isinstance(iterate_on, list):
             raise Exception("Only list is allowed as a valid iterator type")
@@ -599,29 +606,44 @@ class MapNode(CompositeNode):
         # The final value of the parameter is the result of the reduce function.
         reducer_f = self.get_reducer_function()
 
-
-
-
-
-                    param_name, _ = branch_return
-                    to_reduce = []
-                    for iter_variable in iterate_on:
-                        to_reduce.append(params[f"{iter_variable}_{param_name}"].get_value())
+        def update_param(params: Dict[str, Parameter], reducer_f: Callable, map_prefix: str = ""):
+            from runnable.extensions.executor.mocked.implementation import (
+                MockedExecutor,
+            )
 
-                    param_name = f"{v}_{param_name}"
-                    params[param_name].value = reducer_f(to_reduce)
-                    params[param_name].reduced = True
-        else:
             for branch_return in self.branch_returns:
                 param_name, _ = branch_return
 
                 to_reduce = []
                 for iter_variable in iterate_on:
-
-
-
+                    try:
+                        to_reduce.append(params[f"{iter_variable}_{param_name}"].get_value())
+                    except KeyError as e:
+                        if isinstance(self._context.executor, MockedExecutor):
+                            pass
+                        else:
+                            raise Exception(
+                                (
+                                    f"Expected parameter {iter_variable}_{param_name}",
+                                    "not present in Run Log parameters",
+                                    "was it ever set before?",
+                                )
+                            ) from e
+
+                param_name = f"{map_prefix}{param_name}"
+                if to_reduce:
+                    params[param_name].value = reducer_f(*to_reduce)
+                else:
+                    params[param_name].value = ""
                 params[param_name].reduced = True
 
+        if map_variable:
+            # If we are in a map state already, the param should have an index of the map variable.
+            for _, v in map_variable.items():
+                update_param(params, reducer_f, map_prefix=f"{v}_")
+        else:
+            update_param(params, reducer_f)
+
         self._context.run_log_store.set_parameters(parameters=params, run_id=self._context.run_id)
 
 
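Note: the reduce step now lives in a nested update_param helper, tolerates missing branch parameters under the mocked executor, and calls the reducer with unpacked positional arguments (reducer_f(*to_reduce) instead of reducer_f(to_reduce)). A sketch of the naming convention it walks, with an illustrative sum reducer (the real one comes from get_reducer_function()):

```python
# Each map branch writes "<iter_value>_<param>"; the reduce step collapses
# those back into "<param>" (or "<map_prefix><param>" when nested in a map).
params = {"a_score": 1, "b_score": 2, "c_score": 3}
iterate_on = ["a", "b", "c"]

def reducer_f(*values):
    return sum(values)  # illustrative reducer

to_reduce = [params[f"{i}_score"] for i in iterate_on]
params["score"] = reducer_f(*to_reduce)
assert params["score"] == 6
```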
runnable/graph.py
CHANGED
runnable/parameters.py
CHANGED
@@ -36,7 +36,7 @@ def get_user_set_parameters(remove: bool = False) -> Dict[str, JsonParameter]:
        try:
            parameters[key.lower()] = JsonParameter(kind="json", value=json.loads(value))
        except json.decoder.JSONDecodeError:
-           logger.
+           logger.warning(f"Parameter {key} could not be JSON decoded, adding the literal value")
            parameters[key.lower()] = JsonParameter(kind="json", value=value)
 
    if remove:
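Note: values that fail JSON decoding are kept as literal strings, with a warning, rather than being dropped. A self-contained mirror of the branch:

```python
import json
import logging

logger = logging.getLogger(__name__)

def decode_param(key: str, value: str):
    """Illustrative helper: JSON when possible, literal string otherwise."""
    try:
        return json.loads(value)
    except json.decoder.JSONDecodeError:
        logger.warning(f"Parameter {key} could not be JSON decoded, adding the literal value")
        return value

assert decode_param("retries", "3") == 3
assert decode_param("greeting", "hello") == "hello"  # not JSON: literal survives
```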
runnable/sdk.py
CHANGED
@@ -2,6 +2,7 @@ from __future__ import annotations
 
 import logging
 import os
+import re
 from abc import ABC, abstractmethod
 from pathlib import Path
 from typing import Any, Callable, Dict, List, Optional, Union
@@ -25,7 +26,7 @@ from rich.progress import (
 from rich.table import Column
 from typing_extensions import Self
 
-from runnable import console, defaults, entrypoints, graph, utils
+from runnable import console, defaults, entrypoints, exceptions, graph, utils
 from runnable.extensions.nodes import (
     FailNode,
     MapNode,
@@ -310,8 +311,6 @@ class NotebookTask(BaseTask):
     """
 
     notebook: str = Field(serialization_alias="command")
-
-    notebook_output_path: Optional[str] = Field(default=None, alias="notebook_output_path", validate_default=True)
     optional_ploomber_args: Optional[Dict[str, Any]] = Field(default=None, alias="optional_ploomber_args")
 
     @computed_field
@@ -591,6 +590,7 @@ class Pipeline(BaseModel):
 
     Any definition of pipeline should have one node that terminates with success.
     """
+    # TODO: Bug with repeat names
 
     success_path: List[StepType] = []
     on_failure_paths: List[List[StepType]] = []
@@ -637,7 +637,8 @@ class Pipeline(BaseModel):
        self._dag.check_graph()
 
    def return_dag(self) -> graph.Graph:
-
+       dag_definition = self._dag.model_dump(by_alias=True, exclude_none=True)
+       return graph.create_graph(dag_definition)
 
    def execute(
        self,
@@ -708,7 +709,8 @@ class Pipeline(BaseModel):
        caller_stack = inspect.stack()[1]
        relative_to_root = str(Path(caller_stack.filename).relative_to(Path.cwd()))
 
-
+       module_name = re.sub(r"\b.py\b", "", relative_to_root.replace("/", "."))
+       module_to_call = f"{module_name}.{caller_stack.function}"
 
        run_context.pipeline_file = f"{module_to_call}.py"
 
@@ -728,15 +730,20 @@ class Pipeline(BaseModel):
            pipeline_execution_task = progress.add_task("[dark_orange] Starting execution .. ", total=1)
            run_context.executor.execute_graph(dag=run_context.dag)
 
+           if not run_context.executor._local:
+               return {}
+
            run_log = run_context.run_log_store.get_run_log_by_id(run_id=run_context.run_id, full=False)
 
            if run_log.status == defaults.SUCCESS:
                progress.update(pipeline_execution_task, description="[green] Success", completed=True)
            else:
                progress.update(pipeline_execution_task, description="[red] Failed", completed=True)
+               raise exceptions.ExecutionFailedError(run_context.run_id)
        except Exception as e:  # noqa: E722
            console.print(e, style=defaults.error_style)
            progress.update(pipeline_execution_task, description="[red] Errored execution", completed=True)
+           raise
 
        if run_context.executor._local:
            return run_context.run_log_store.get_run_log_by_id(run_id=run_context.run_id)
runnable/tasks.py
CHANGED
@@ -8,11 +8,14 @@ import os
 import subprocess
 import sys
 from datetime import datetime
+from pathlib import Path
 from pickle import PicklingError
 from string import Template
-from typing import Any, Dict, List, Literal,
+from typing import Any, Dict, List, Literal, Tuple
 
-from pydantic import BaseModel, ConfigDict, Field,
+from pydantic import BaseModel, ConfigDict, Field, field_validator
+
+# from rich import print
 from rich.console import Console
 from stevedore import driver
 
@@ -34,9 +37,6 @@ logging.getLogger("stevedore").setLevel(logging.CRITICAL)
 # TODO: Can we add memory peak, cpu usage, etc. to the metrics?
 
 
-console = Console(file=io.StringIO())
-
-
 class TaskReturns(BaseModel):
     name: str
     kind: Literal["json", "object", "metric"] = Field(default="json")
@@ -46,7 +46,6 @@ class BaseTaskType(BaseModel):
     """A base task class which does the execution of command defined by the user."""
 
     task_type: str = Field(serialization_alias="command_type")
-    node_name: str = Field(exclude=True)
     secrets: List[str] = Field(default_factory=list)
     returns: List[TaskReturns] = Field(default_factory=list, alias="returns")
 
@@ -153,7 +152,7 @@ class BaseTaskType(BaseModel):
        if not allow_complex:
            params = {key: value for key, value in params.items() if isinstance(value, JsonParameter)}
 
-       log_file_name = self.
+       log_file_name = self._context.executor._context_node.internal_name
        if map_variable:
            for _, value in map_variable.items():
                log_file_name += "_" + str(value)
@@ -163,16 +162,17 @@ class BaseTaskType(BaseModel):
        log_file = open(log_file_name, "w")
 
        parameters_in = copy.deepcopy(params)
-
        f = io.StringIO()
+       task_console = Console(file=io.StringIO())
        try:
            with contextlib.redirect_stdout(f):
                # with contextlib.nullcontext():
-               yield params
-               print(
+               yield params, task_console
+               print(task_console.file.getvalue())  # type: ignore
        except Exception as e:  # pylint: disable=broad-except
            logger.exception(e)
        finally:
+           task_console = None  # type: ignore
            print(f.getvalue())  # print to console
            log_file.write(f.getvalue())  # Print to file
 
@@ -233,7 +233,7 @@ class PythonTaskType(BaseTaskType):  # pylint: disable=too-few-public-methods
        """Execute the notebook as defined by the command."""
        attempt_log = StepAttempt(status=defaults.FAIL, start_time=str(datetime.now()))
 
-       with self.execution_context(map_variable=map_variable) as params, self.expose_secrets() as _:
+       with self.execution_context(map_variable=map_variable) as (params, task_console), self.expose_secrets() as _:
            module, func = utils.get_module_and_attr_names(self.command)
            sys.path.insert(0, os.getcwd())  # Need to add the current directory to path
            imported_module = importlib.import_module(module)
@@ -245,7 +245,7 @@ class PythonTaskType(BaseTaskType):  # pylint: disable=too-few-public-methods
                logger.info(f"Calling {func} from {module} with {filtered_parameters}")
                user_set_parameters = f(**filtered_parameters)  # This is a tuple or single value
            except Exception as e:
-
+               task_console.log(e, style=defaults.error_style, markup=False)
                raise exceptions.CommandCallError(f"Function call: {self.command} did not succeed.\n") from e
 
            attempt_log.input_parameters = params.copy()
@@ -289,8 +289,8 @@ class PythonTaskType(BaseTaskType):  # pylint: disable=too-few-public-methods
            except Exception as _e:
                msg = f"Call to the function {self.command} did not succeed.\n"
                attempt_log.message = msg
-
-
+               task_console.print_exception(show_locals=False)
+               task_console.log(_e, style=defaults.error_style)
 
        attempt_log.end_time = str(datetime.now())
 
@@ -302,25 +302,25 @@ class NotebookTaskType(BaseTaskType):
 
    task_type: str = Field(default="notebook", serialization_alias="command_type")
    command: str
-   notebook_output_path: Optional[str] = Field(default=None, validate_default=True)
    optional_ploomber_args: dict = {}
 
    @field_validator("command")
    @classmethod
-   def notebook_should_end_with_ipynb(cls, command: str):
+   def notebook_should_end_with_ipynb(cls, command: str) -> str:
        if not command.endswith(".ipynb"):
            raise Exception("Notebook task should point to a ipynb file")
 
        return command
 
-   @
-
-
-   if
-
+   @property
+   def notebook_output_path(self) -> str:
+       node_name = self._context.executor._context_node.internal_name
+       sane_name = "".join(x for x in node_name if x.isalnum())
+
+       output_path = Path(".", self.command)
+       file_name = output_path.parent / (output_path.stem + f"{sane_name}_out.ipynb")
 
-
-       return "".join(command.split(".")[:-1]) + "_out.ipynb"
+       return str(file_name)
 
    def get_cli_options(self) -> Tuple[str, dict]:
        return "notebook", {"command": self.command, "notebook-output-path": self.notebook_output_path}
@@ -344,17 +344,26 @@ class NotebookTaskType(BaseTaskType):
        import ploomber_engine as pm
        from ploomber_engine.ipython import PloomberClient
 
-       notebook_output_path = self.notebook_output_path
+       notebook_output_path = self.notebook_output_path
 
-       with self.execution_context(
-
-
+       with self.execution_context(map_variable=map_variable, allow_complex=False) as (
+           params,
+           _,
+       ), self.expose_secrets() as _:
            if map_variable:
                for key, value in map_variable.items():
                    notebook_output_path += "_" + str(value)
-                   params[key] = value
+                   params[key] = JsonParameter(kind="json", value=value)
+
+           # Remove any {v}_unreduced parameters from the parameters
+           copy_params = copy.deepcopy(params)
+           unprocessed_params = [k for k, v in copy_params.items() if not v.reduced]
 
-
+           for key in list(copy_params.keys()):
+               if any(key.endswith(f"_{k}") for k in unprocessed_params):
+                   del copy_params[key]
+
+           notebook_params = {k: v.get_value() for k, v in copy_params.items()}
 
        ploomber_optional_args = self.optional_ploomber_args
 
@@ -377,6 +386,11 @@ class NotebookTaskType(BaseTaskType):
            try:
                for task_return in self.returns:
                    param_name = Template(task_return.name).safe_substitute(map_variable)  # type: ignore
+
+                   if map_variable:
+                       for _, v in map_variable.items():
+                           param_name = f"{v}_{param_name}"
+
                    output_parameters[param_name] = task_return_to_parameter(
                        task_return=task_return,
                        value=namespace[task_return.name],
@@ -453,95 +467,98 @@ class ShellTaskType(BaseTaskType):
            secret_value = context.run_context.secrets_handler.get(key)
            subprocess_env[key] = secret_value
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                   continue
-               console.print(line, style=defaults.warning_style)
+       try:
+           with self.execution_context(map_variable=map_variable, allow_complex=False) as (params, task_console):
+               subprocess_env.update({k: v.get_value() for k, v in params.items()})
+
+               # Json dumps all runnable environment variables
+               for key, value in subprocess_env.items():
+                   if isinstance(value, str):
+                       continue
+                   subprocess_env[key] = json.dumps(value)
+
+               collect_delimiter = "=== COLLECT ==="
+
+               command = self.command.strip() + f" && echo '{collect_delimiter}' && env"
+               logger.info(f"Executing shell command: {command}")
+
+               capture = False
+               return_keys = {x.name: x for x in self.returns}
+
+               proc = subprocess.Popen(
+                   command,
+                   shell=True,
+                   env=subprocess_env,
+                   stdout=subprocess.PIPE,
+                   stderr=subprocess.PIPE,
+                   text=True,
+               )
+               result = proc.communicate()
+               logger.debug(result)
+               logger.info(proc.returncode)
+
+               if proc.returncode != 0:
+                   msg = ",".join(result[1].split("\n"))
+                   task_console.print(msg, style=defaults.error_style)
+                   raise exceptions.CommandCallError(msg)
+
+               # for stderr
+               for line in result[1].split("\n"):
+                   if line.strip() == "":
+                       continue
+                   task_console.print(line, style=defaults.warning_style)
 
-
-
+               output_parameters: Dict[str, Parameter] = {}
+               metrics: Dict[str, Parameter] = {}
 
-
-
-
-
+               # only from stdout
+               for line in result[0].split("\n"):
+                   if line.strip() == "":
+                       continue
 
-
-
+                   logger.info(line)
+                   task_console.print(line)
 
-
-
-
-
+                   if line.strip() == collect_delimiter:
+                       # The lines from now on should be captured
+                       capture = True
+                       continue
 
-
-
-
-
+                   if capture:
+                       key, value = line.strip().split("=", 1)
+                       if key in return_keys:
+                           task_return = return_keys[key]
 
-
-
-
-
+                           try:
+                               value = json.loads(value)
+                           except json.JSONDecodeError:
+                               value = value
 
-
-
-
-
+                           output_parameter = task_return_to_parameter(
+                               task_return=task_return,
+                               value=value,
+                           )
 
-
-
+                           if task_return.kind == "metric":
+                               metrics[task_return.name] = output_parameter
 
-
-
-
-
+                           param_name = task_return.name
+                           if map_variable:
+                               for _, v in map_variable.items():
+                                   param_name = f"{v}_{param_name}"
 
-
+                           output_parameters[param_name] = output_parameter
 
-
-
-
+               attempt_log.output_parameters = output_parameters
+               attempt_log.user_defined_metrics = metrics
+               params.update(output_parameters)
 
-
+               attempt_log.status = defaults.SUCCESS
+       except exceptions.CommandCallError as e:
+           msg = f"Call to the command {self.command} did not succeed"
+           logger.exception(msg)
+           logger.exception(e)
+           attempt_log.status = defaults.FAIL
 
        attempt_log.end_time = str(datetime.now())
        return attempt_log
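Note: shell task return values are now captured by appending a sentinel and `env` to the user command, then parsing everything after the sentinel as KEY=VALUE pairs, keeping only declared return names and JSON-decoding values when possible. A self-contained sketch of the mechanism (POSIX shell assumed; `answer` plays the role of a declared return):

```python
import json
import subprocess

collect_delimiter = "=== COLLECT ==="
command = "export answer=42" + f" && echo '{collect_delimiter}' && env"

proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE, text=True)
out, _ = proc.communicate()

capture, returns = False, {}
for line in out.splitlines():
    if line.strip() == collect_delimiter:
        capture = True  # everything after the sentinel is the environment dump
        continue
    if capture and "=" in line:
        key, value = line.split("=", 1)
        if key == "answer":  # only names declared in `returns` are kept
            try:
                returns[key] = json.loads(value)
            except json.JSONDecodeError:
                returns[key] = value
print(returns)  # {'answer': 42}
```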
{runnable-0.11.2.dist-info → runnable-0.11.4.dist-info}/RECORD
CHANGED
@@ -1,10 +1,10 @@
-runnable/__init__.py,sha256=
+runnable/__init__.py,sha256=Ov2G5QdLFuM4eqRDJDYKGjt8YHfizVhYEBUTl_hkOb4,954
 runnable/catalog.py,sha256=22OECi5TrpHErxYIhfx-lJ2vgBUi4-5V9CaYEVm98hE,4138
 runnable/cli.py,sha256=RILUrEfzernuKD3dNdXPBkqN_1OgE5GosYRuInj0FVs,9618
 runnable/context.py,sha256=QhiXJHRcEBfSKB1ijvL5yB9w44x0HCe7VEiwK1cUJ9U,1124
-runnable/datastore.py,sha256=
+runnable/datastore.py,sha256=8aQZ15KAMdre7a7G61bNRmcTeJFzOdnx_9O9UP4JQc8,27910
 runnable/defaults.py,sha256=MOX7I2S6yO4FphZaZREFQca94a20oO8uvzXLd6GLKQs,4703
-runnable/entrypoints.py,sha256=
+runnable/entrypoints.py,sha256=A76Fpa08RmEdXevZSRvhlWN4aQiIoLODa5NYSp8Kd00,17049
 runnable/exceptions.py,sha256=6NIYoTAzdKyGQ9PvW1Hu7b80OS746395KiGDhM7ThH8,2526
 runnable/executor.py,sha256=xfBighQ5t_vejohip000XfxLwsgechUE1ZMIJWrZbUA,14484
 runnable/extensions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -14,9 +14,9 @@ runnable/extensions/catalog/file_system/implementation.py,sha256=mFPsAwPMNGWbHcz
 runnable/extensions/catalog/k8s_pvc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 runnable/extensions/catalog/k8s_pvc/implementation.py,sha256=oJDDI0APT7lrtjWmzYJRDHLGn3Vhbn2MdFSRYvFBUpY,436
 runnable/extensions/catalog/k8s_pvc/integration.py,sha256=OfrHbNFN8sR-wsVa4os3ajmWJFSd5H4KOHGVAmjRZTQ,1850
-runnable/extensions/executor/__init__.py,sha256=
+runnable/extensions/executor/__init__.py,sha256=0385OpNSpjyA0GjXlLw7gZtqJFFOHGLmYHzWAGBzU98,26247
 runnable/extensions/executor/argo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-runnable/extensions/executor/argo/implementation.py,sha256=
+runnable/extensions/executor/argo/implementation.py,sha256=Wd__bOwLxhIrHKwk0iMgavD44F0nEzR7CcSJzAe7hKk,43840
 runnable/extensions/executor/argo/specification.yaml,sha256=wXQcm2gOQYqy-IOQIhucohS32ZrHKCfGA5zZ0RraPYc,1276
 runnable/extensions/executor/k8s_job/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 runnable/extensions/executor/k8s_job/implementation_FF.py,sha256=1IfVG1GRcJcVFzQ-WhkJsmzdJuj51QMxXylY9UrWM0U,10259
@@ -24,12 +24,12 @@ runnable/extensions/executor/k8s_job/integration_FF.py,sha256=pG6HKhPMgCRIgu1PAn
 runnable/extensions/executor/local/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 runnable/extensions/executor/local/implementation.py,sha256=e8Tzv-FgQmJeUXVut96jeNERTR83JVG_zkQZMEjCVAs,2469
 runnable/extensions/executor/local_container/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-runnable/extensions/executor/local_container/implementation.py,sha256=
+runnable/extensions/executor/local_container/implementation.py,sha256=CmvQK77V98Gyzkyuh4WSDrgnoonN89I7xvNSPfeqMxA,14894
 runnable/extensions/executor/mocked/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-runnable/extensions/executor/mocked/implementation.py,sha256=
+runnable/extensions/executor/mocked/implementation.py,sha256=ChvlcLGpBxO6QwJcoqhBgKBR6NfWVnMdOWKQhMgcEjY,5762
 runnable/extensions/executor/retry/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-runnable/extensions/executor/retry/implementation.py,sha256
-runnable/extensions/nodes.py,sha256=
+runnable/extensions/executor/retry/implementation.py,sha256=-g6PBOhSG7IL4D_IlQOcf9H_En9IXiUzCt-6vKeCB6Q,6892
+runnable/extensions/nodes.py,sha256=JUmovDBissri3oyTYS5K68gOx5sm3e-rThoTE5xKy0Y,32645
 runnable/extensions/run_log_store/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 runnable/extensions/run_log_store/chunked_file_system/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 runnable/extensions/run_log_store/chunked_file_system/implementation.py,sha256=EW2P8lr3eH-pIOsMTJPr5eb-iWc48GQ97W15JzkpC_4,3326
@@ -47,20 +47,18 @@ runnable/extensions/run_log_store/k8s_pvc/integration.py,sha256=lxQg327mwC0ykhNp
 runnable/extensions/secrets/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 runnable/extensions/secrets/dotenv/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 runnable/extensions/secrets/dotenv/implementation.py,sha256=3J5pofWahdZbnwnETwpspE5-PKyvmZF_vkfwA1X_bkA,3365
-runnable/
-runnable/extensions/secrets/env_secrets/implementation.py,sha256=5XiHdJvIr0-jkl4fGfEf26UsgE5Q2Z4oCc0RwjlJdJA,1236
-runnable/graph.py,sha256=18IpGYw5kgHP32m12WwXscx-kG5Kx-AuWS1LFbMfBLg,16202
+runnable/graph.py,sha256=lVSfODa61zCWZcEyzQbg9bwOYvqoW89-3i92YOkmWII,16240
 runnable/integration.py,sha256=eb9qJVZR7Ehg0N1UnGPuyjJvoA-xQ1-xP7AlZHUXHqM,6705
 runnable/names.py,sha256=vn92Kv9ANROYSZX6Z4z1v_WA3WiEdIYmG6KEStBFZug,8134
 runnable/nodes.py,sha256=UqR-bJx0Hi7uLSUw_saB7VsNdFh3POKtdgsEPsasHfE,16576
-runnable/parameters.py,sha256=
+runnable/parameters.py,sha256=yZkMDnwnkdYXIwQ8LflBzn50Y0xRGxEvLlxwno6ovvs,5163
 runnable/pickler.py,sha256=5SDNf0miMUJ3ZauhQdzwk8_t-9jeOqaTjP5bvRnu9sU,2685
-runnable/sdk.py,sha256=
+runnable/sdk.py,sha256=t6d1Q3BoovixqC29QuSjFEwsleVgM0E-pAQlfCfMz_o,27923
 runnable/secrets.py,sha256=dakb7WRloWVo-KpQp6Vy4rwFdGi58BTlT4OifQY106I,2324
-runnable/tasks.py,sha256=
+runnable/tasks.py,sha256=XiFQGTrUvoXub99915lYLvHair8sVWfcUzhY0OceUXo,22351
 runnable/utils.py,sha256=okZFGbJWqStl5Rq5vLhNUQZDv_vhcT58bq9MDrTVxhc,19449
-runnable-0.11.
-runnable-0.11.
-runnable-0.11.
-runnable-0.11.
-runnable-0.11.
+runnable-0.11.4.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+runnable-0.11.4.dist-info/METADATA,sha256=YaiI3bdujfDjcv-QDsiKhBf1COOKxQROME_m23yy9qc,17020
+runnable-0.11.4.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+runnable-0.11.4.dist-info/entry_points.txt,sha256=amb6ISqKBSIz47um8_6LKnYgpoZ4d_p6-O1-7uUb1cU,1447
+runnable-0.11.4.dist-info/RECORD,,
{runnable-0.11.2.dist-info → runnable-0.11.4.dist-info}/entry_points.txt
CHANGED
@@ -32,7 +32,6 @@ file-system=runnable.extensions.run_log_store.file_system.implementation:FileSys
 [secrets]
 do-nothing=runnable.secrets:DoNothingSecretManager
 dotenv=runnable.extensions.secrets.dotenv.implementation:DotEnvSecrets
-env-secrets-manager=runnable.extensions.secrets.env_secrets.implementation:EnvSecretsManager
 
 [tasks]
 notebook=runnable.tasks:NotebookTaskType
runnable/extensions/secrets/env_secrets/__init__.py
DELETED
File without changes
runnable/extensions/secrets/env_secrets/implementation.py
DELETED
@@ -1,42 +0,0 @@
-import logging
-import os
-
-from runnable import defaults, exceptions
-from runnable.secrets import BaseSecrets
-
-logger = logging.getLogger(defaults.LOGGER_NAME)
-
-
-class EnvSecretsManager(BaseSecrets):
-    """
-    A secret manager via environment variables.
-
-    This secret manager returns nothing if the key does not match
-    """
-
-    service_name: str = "env-secrets-manager"
-    prefix: str = ""
-    suffix: str = ""
-
-    def get(self, name: str = "", **kwargs) -> str:
-        """
-        If a name is provided, we look for that in the environment.
-        If a environment variable by that name is not found, we raise an Exception.
-
-        If a name is not provided, we return an empty dictionary.
-
-        Args:
-            name (str): The name of the secret to retrieve
-
-        Raises:
-            Exception: If the secret by the name is not found.
-
-        Returns:
-            [type]: [description]
-        """
-
-        try:
-            return os.environ[f"{self.prefix}{name}{self.suffix}"]
-        except KeyError as _e:
-            logger.exception(f"Secret {self.prefix}{name}{self.suffix} not found in environment")
-            raise exceptions.SecretNotFoundError(secret_name=name, secret_setting="environment") from _e
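Note: with the env-secrets-manager plugin removed (see the entry_points.txt hunk above), plain environment lookup covers the same ground. A hypothetical drop-in for code that used EnvSecretsManager.get() (get_secret is an illustrative name; the old prefix/suffix knobs are kept):

```python
import os

def get_secret(name: str, prefix: str = "", suffix: str = "") -> str:
    """Look up a secret in the environment, as the removed plugin did."""
    try:
        return os.environ[f"{prefix}{name}{suffix}"]
    except KeyError as e:
        raise KeyError(f"Secret {prefix}{name}{suffix} not found in environment") from e
```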
{runnable-0.11.2.dist-info → runnable-0.11.4.dist-info}/LICENSE
File without changes
{runnable-0.11.2.dist-info → runnable-0.11.4.dist-info}/WHEEL
File without changes