runnable-0.11.1-py3-none-any.whl → runnable-0.11.3-py3-none-any.whl

runnable/__init__.py CHANGED
@@ -2,6 +2,7 @@
 
 # TODO: Might need to add Rich to pyinstaller part
 import logging
+import os
 from logging.config import dictConfig
 
 from rich.console import Console
@@ -29,6 +30,8 @@ from runnable.sdk import ( # noqa
     pickled,
 )
 
+os.environ["_PLOOMBER_TELEMETRY_DEBUG"] = "false"
+
 ## TODO: Summary should be a bit better for catalog.
 ## If the execution fails, hint them about the retry executor.
 # Make the retry executor loose!
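Note: ploomber-engine is imported lazily, only when a notebook task runs, so setting _PLOOMBER_TELEMETRY_DEBUG at runnable import time guarantees the flag is visible before any telemetry code loads. A minimal sketch of the same pattern, assuming the flag is read once at import time:

    import os

    # Export the flag before importing the package that reads it; flags of
    # this kind are typically consumed once, when the module is first loaded.
    os.environ["_PLOOMBER_TELEMETRY_DEBUG"] = "false"

    import ploomber_engine  # noqa: E402  # import happens after the flag is set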
runnable/datastore.py CHANGED
@@ -312,8 +312,10 @@ class RunLog(BaseModel):
         summary["Catalog Location"] = _context.catalog_handler.get_summary()
         summary["Full Run log present at: "] = _context.run_log_store.get_summary()
 
-        summary["Final Parameters"] = {p: v.description for p, v in self.parameters.items()}
-        summary["Collected metrics"] = {p: v.description for p, v in self.parameters.items() if v.kind == "metric"}
+        run_log = _context.run_log_store.get_run_log_by_id(run_id=_context.run_id, full=True)
+
+        summary["Final Parameters"] = {p: v.description for p, v in run_log.parameters.items()}
+        summary["Collected metrics"] = {p: v.description for p, v in run_log.parameters.items() if v.kind == "metric"}
 
         return summary
 
@@ -400,7 +402,10 @@ class RunLog(BaseModel):
         """
         dot_path = i_name.split(".")
         if len(dot_path) == 1:
-            return self.steps[i_name], None
+            try:
+                return self.steps[i_name], None
+            except KeyError as e:
+                raise exceptions.StepLogNotFoundError(self.run_id, i_name) from e
 
         current_steps = self.steps
         current_step = None
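Note: raising exceptions.StepLogNotFoundError instead of letting the bare KeyError escape gives callers a typed signal that a step is missing from a run log; the retry executor later in this diff depends on exactly this exception. A small usage sketch, assuming a RunLog can be built with just a run_id:

    from runnable import exceptions
    from runnable.datastore import RunLog

    run_log = RunLog(run_id="demo")  # assumption: other fields have defaults
    try:
        step_log, _ = run_log.search_step_by_internal_name("missing-step")
    except exceptions.StepLogNotFoundError:
        # e.g. the retry executor treats this as "the node never ran, re-run it"
        step_log = None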
runnable/entrypoints.py CHANGED
@@ -60,6 +60,8 @@ def prepare_configurations(
     variables = utils.gather_variables()
 
     templated_configuration = {}
+    configuration_file = os.environ.get("RUNNABLE_CONFIGURATION_FILE", configuration_file)
+
     if configuration_file:
         templated_configuration = utils.load_yaml(configuration_file) or {}
 
@@ -144,8 +146,8 @@ def prepare_configurations(
 
 
 def execute(
-    configuration_file: str,
     pipeline_file: str,
+    configuration_file: str = "",
     tag: str = "",
     run_id: str = "",
     parameters_file: str = "",
@@ -196,6 +198,10 @@ def execute(
         run_context.progress = progress
         executor.execute_graph(dag=run_context.dag)  # type: ignore
 
+        if not executor._local:
+            executor.send_return_code(stage="traversal")
+            return
+
         run_log = run_context.run_log_store.get_run_log_by_id(run_id=run_context.run_id, full=False)
 
         if run_log.status == defaults.SUCCESS:
@@ -205,6 +211,10 @@ def execute(
     except Exception as e:  # noqa: E722
         console.print(e, style=defaults.error_style)
         progress.update(pipeline_execution_task, description="[red] Errored execution", completed=True)
+        run_log = run_context.run_log_store.get_run_log_by_id(run_id=run_context.run_id, full=False)
+        run_log.status = defaults.FAIL
+        run_context.run_log_store.add_branch_log(run_log, run_context.run_id)
+        raise e
 
     executor.send_return_code()
 
@@ -235,6 +245,8 @@ def execute_single_node(
     """
     from runnable import nodes
 
+    configuration_file = os.environ.get("RUNNABLE_CONFIGURATION_FILE", configuration_file)
+
     run_context = prepare_configurations(
         configuration_file=configuration_file,
         pipeline_file=pipeline_file,
@@ -422,6 +434,8 @@ def fan(
     """
     from runnable import nodes
 
+    configuration_file = os.environ.get("RUNNABLE_CONFIGURATION_FILE", configuration_file)
+
     run_context = prepare_configurations(
         configuration_file=configuration_file,
         pipeline_file=pipeline_file,
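Note: prepare_configurations, execute_single_node, and fan now all consult the RUNNABLE_CONFIGURATION_FILE environment variable first, so a parent process can pin one configuration file for every child invocation it spawns. The lookup, in isolation (the helper name is hypothetical):

    import os

    def resolve_configuration_file(configuration_file: str = "") -> str:
        # The environment variable, when set, wins over the passed-in value;
        # otherwise the argument (possibly empty) is returned unchanged.
        return os.environ.get("RUNNABLE_CONFIGURATION_FILE", configuration_file)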
runnable/extensions/catalog/file_system/implementation.py CHANGED
@@ -226,7 +226,7 @@ class FileSystemCatalog(BaseCatalog):
         for cataloged_file in cataloged_files:
             if str(cataloged_file).endswith("execution.log"):
                 continue
-            print(cataloged_file.name)
+
             if cataloged_file.is_file():
                 shutil.copy(cataloged_file, run_catalog / cataloged_file.name)
             else:
runnable/extensions/executor/__init__.py CHANGED
@@ -185,14 +185,11 @@ class GenericExecutor(BaseExecutor):
         data_catalogs = []
         for name_pattern in node_catalog_settings.get(stage) or []:
             if stage == "get":
-                get_catalog_progress = self._context.progress.add_task(f"Getting from catalog {name_pattern}", total=1)
                 data_catalog = self._context.catalog_handler.get(
                     name=name_pattern, run_id=self._context.run_id, compute_data_folder=compute_data_folder
                 )
-                self._context.progress.update(get_catalog_progress, completed=True, visible=False, refresh=True)
 
             elif stage == "put":
-                put_catalog_progress = self._context.progress.add_task(f"Putting in catalog {name_pattern}", total=1)
                 data_catalog = self._context.catalog_handler.put(
                     name=name_pattern,
                     run_id=self._context.run_id,
@@ -200,8 +197,6 @@ class GenericExecutor(BaseExecutor):
                     synced_catalogs=synced_catalogs,
                 )
 
-                self._context.progress.update(put_catalog_progress, completed=True, visible=False)
-
             logger.debug(f"Added data catalog: {data_catalog} to step log")
             data_catalogs.extend(data_catalog)
 
runnable/extensions/executor/argo/implementation.py CHANGED
@@ -1033,6 +1033,9 @@ class ArgoExecutor(GenericExecutor):
             if working_on.node_type not in ["success", "fail"] and working_on._get_on_failure_node():
                 failure_node = dag.get_node_by_name(working_on._get_on_failure_node())
 
+                render_obj = get_renderer(working_on)(executor=self, node=failure_node)
+                render_obj.render(list_of_iter_values=list_of_iter_values.copy())
+
                 failure_template_name = self.get_clean_name(failure_node)
                 # If a task template for clean name exists, retrieve it
                 failure_template = templates.get(
@@ -1040,7 +1043,6 @@ class ArgoExecutor(GenericExecutor):
                     DagTaskTemplate(name=failure_template_name, template=failure_template_name),
                 )
                 failure_template.depends.append(f"{clean_name}.Failed")
-
                 templates[failure_template_name] = failure_template
 
             # If we are in a map node, we need to add the values as arguments
runnable/extensions/executor/local_container/implementation.py CHANGED
@@ -5,7 +5,7 @@ from typing import Dict, cast
 from pydantic import Field
 from rich import print
 
-from runnable import defaults, integration, utils
+from runnable import defaults, utils
 from runnable.datastore import StepLog
 from runnable.defaults import TypeMapVariable
 from runnable.extensions.executor import GenericExecutor
@@ -145,16 +145,6 @@ class LocalContainerExecutor(GenericExecutor):
         logger.debug("Here is the resolved executor config")
         logger.debug(executor_config)
 
-        if executor_config.get("run_in_local", False):
-            # Do not change config but only validate the configuration.
-            # Trigger the job on local system instead of a container
-            integration.validate(self, self._context.run_log_store)
-            integration.validate(self, self._context.catalog_handler)
-            integration.validate(self, self._context.secrets_handler)
-
-            self.execute_node(node=node, map_variable=map_variable, **kwargs)
-            return
-
         command = utils.get_node_execution_command(node, map_variable=map_variable)
 
         self._spin_container(
@@ -172,7 +162,7 @@ class LocalContainerExecutor(GenericExecutor):
                 "Note: If you do not see any docker issue from your side and the code works properly on local execution"
                 "please raise a bug report."
             )
-            logger.warning(msg)
+            logger.error(msg)
             step_log.status = defaults.FAIL
             self._context.run_log_store.add_step_log(step_log, self._context.run_id)
 
@@ -212,6 +202,7 @@ class LocalContainerExecutor(GenericExecutor):
                 f"Please provide a docker_image using executor_config of the step {node.name} or at global config"
             )
 
+        print("container", self._volumes)
         # TODO: Should consider using getpass.getuser() when running the docker container? Volume permissions
         container = client.containers.create(
             image=docker_image,
@@ -260,7 +251,9 @@ class LocalContainerComputeFileSystemRunLogstore(BaseIntegration):
     service_provider = "file-system"  # The actual implementation of the service
 
     def configure_for_traversal(self, **kwargs):
-        from runnable.extensions.run_log_store.file_system.implementation import FileSystemRunLogstore
+        from runnable.extensions.run_log_store.file_system.implementation import (
+            FileSystemRunLogstore,
+        )
 
         self.executor = cast(LocalContainerExecutor, self.executor)
         self.service = cast(FileSystemRunLogstore, self.service)
@@ -272,7 +265,9 @@ class LocalContainerComputeFileSystemRunLogstore(BaseIntegration):
         }
 
     def configure_for_execution(self, **kwargs):
-        from runnable.extensions.run_log_store.file_system.implementation import FileSystemRunLogstore
+        from runnable.extensions.run_log_store.file_system.implementation import (
+            FileSystemRunLogstore,
+        )
 
         self.executor = cast(LocalContainerExecutor, self.executor)
         self.service = cast(FileSystemRunLogstore, self.service)
@@ -280,6 +275,40 @@ class LocalContainerComputeFileSystemRunLogstore(BaseIntegration):
         self.service.log_folder = self.executor._container_log_location
 
 
+class LocalContainerComputeChunkedFS(BaseIntegration):
+    """
+    Integration pattern between Local container and File System catalog
+    """
+
+    executor_type = "local-container"
+    service_type = "run_log_store"  # One of secret, catalog, datastore
+    service_provider = "chunked-fs"  # The actual implementation of the service
+
+    def configure_for_traversal(self, **kwargs):
+        from runnable.extensions.run_log_store.chunked_file_system.implementation import (
+            ChunkedFileSystemRunLogStore,
+        )
+
+        self.executor = cast(LocalContainerExecutor, self.executor)
+        self.service = cast(ChunkedFileSystemRunLogStore, self.service)
+
+        write_to = self.service.log_folder
+        self.executor._volumes[str(Path(write_to).resolve())] = {
+            "bind": f"{self.executor._container_log_location}",
+            "mode": "rw",
+        }
+
+    def configure_for_execution(self, **kwargs):
+        from runnable.extensions.run_log_store.chunked_file_system.implementation import (
+            ChunkedFileSystemRunLogStore,
+        )
+
+        self.executor = cast(LocalContainerExecutor, self.executor)
+        self.service = cast(ChunkedFileSystemRunLogStore, self.service)
+
+        self.service.log_folder = self.executor._container_log_location
+
+
 class LocalContainerComputeFileSystemCatalog(BaseIntegration):
     """
     Integration pattern between Local container and File System catalog
@@ -290,7 +319,9 @@ class LocalContainerComputeFileSystemCatalog(BaseIntegration):
     service_provider = "file-system"  # The actual implementation of the service
 
     def configure_for_traversal(self, **kwargs):
-        from runnable.extensions.catalog.file_system.implementation import FileSystemCatalog
+        from runnable.extensions.catalog.file_system.implementation import (
+            FileSystemCatalog,
+        )
 
         self.executor = cast(LocalContainerExecutor, self.executor)
         self.service = cast(FileSystemCatalog, self.service)
@@ -302,7 +333,9 @@ class LocalContainerComputeFileSystemCatalog(BaseIntegration):
         }
 
     def configure_for_execution(self, **kwargs):
-        from runnable.extensions.catalog.file_system.implementation import FileSystemCatalog
+        from runnable.extensions.catalog.file_system.implementation import (
+            FileSystemCatalog,
+        )
 
         self.executor = cast(LocalContainerExecutor, self.executor)
         self.service = cast(FileSystemCatalog, self.service)
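Note: the new LocalContainerComputeChunkedFS integration follows the same two-phase shape as the file-system ones: during traversal it bind-mounts the chunked run log folder into the container, and during execution it repoints the store at the in-container path. The docker-py volume mapping it builds looks like this (paths are illustrative):

    from pathlib import Path

    write_to = ".run_log_store"                # hypothetical host-side log folder
    container_log_location = "/tmp/run_logs/"  # hypothetical in-container path

    # docker-py volume spec: absolute host path -> {"bind": container path, "mode": ...}
    volumes = {
        str(Path(write_to).resolve()): {
            "bind": container_log_location,
            "mode": "rw",
        }
    }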
runnable/extensions/executor/mocked/implementation.py CHANGED
@@ -18,7 +18,7 @@ def create_executable(params: Dict[str, Any], model: Type[BaseTaskType], node_na
     class EasyModel(model):  # type: ignore
         model_config = ConfigDict(extra="ignore")
 
-    swallow_all = EasyModel(**params, node_name=node_name)
+    swallow_all = EasyModel(node_name=node_name, **params)
     return swallow_all
 
 
@@ -26,6 +26,8 @@ class MockedExecutor(GenericExecutor):
     service_name: str = "mocked"
     _local_executor: bool = True
 
+    model_config = ConfigDict(extra="ignore")
+
     patches: Dict[str, Any] = Field(default_factory=dict)
 
     @property
@@ -64,6 +66,10 @@ class MockedExecutor(GenericExecutor):
         step_log.step_type = node.node_type
         step_log.status = defaults.PROCESSING
 
+        self._context.run_log_store.add_step_log(step_log, self._context.run_id)
+
+        logger.info(f"Executing node: {node.get_summary()}")
+
         # Add the step log to the database as per the situation.
         # If its a terminal node, complete it now
         if node.node_type in ["success", "fail"]:
@@ -132,3 +138,17 @@ class MockedExecutor(GenericExecutor):
 
     def execute_job(self, node: TaskNode):
         pass
+
+    def execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs):
+        """
+        The entry point for all executors apart from local.
+        We have already prepared for node execution.
+
+        Args:
+            node (BaseNode): The node to execute
+            map_variable (dict, optional): If the node is part of a map, send in the map dictionary. Defaults to None.
+
+        Raises:
+            NotImplementedError: _description_
+        """
+        ...
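Note: model_config = ConfigDict(extra="ignore") lets MockedExecutor be constructed from a configuration written for a different executor, silently dropping fields it does not declare. A self-contained sketch of that pydantic v2 behavior:

    from pydantic import BaseModel, ConfigDict

    class Sketch(BaseModel):
        # extra="ignore" drops unknown keys instead of raising a validation error
        model_config = ConfigDict(extra="ignore")
        service_name: str = "mocked"

    # A config aimed at another executor can carry extra keys harmlessly:
    Sketch(service_name="mocked", docker_image="python:3.10")  # docker_image is ignored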
runnable/extensions/executor/retry/implementation.py CHANGED
@@ -6,6 +6,7 @@ from runnable import context, defaults, exceptions
 from runnable.datastore import RunLog
 from runnable.defaults import TypeMapVariable
 from runnable.extensions.executor import GenericExecutor
+from runnable.extensions.nodes import TaskNode
 from runnable.nodes import BaseNode
 
 logger = logging.getLogger(defaults.LOGGER_NAME)
@@ -31,6 +32,7 @@ class RetryExecutor(GenericExecutor):
 
     _local: bool = True
     _original_run_log: Optional[RunLog] = None
+    _restart_initiated: bool = False
 
     @property
     def _context(self):
@@ -38,7 +40,7 @@ class RetryExecutor(GenericExecutor):
 
     @cached_property
     def original_run_log(self):
-        self.original_run_log = self._context.run_log_store.get_run_log_by_id(
+        return self._context.run_log_store.get_run_log_by_id(
             run_id=self.run_id,
             full=True,
         )
@@ -140,10 +142,14 @@ class RetryExecutor(GenericExecutor):
         node_step_log_name = node._get_step_log_name(map_variable=map_variable)
         logger.info(f"Scanning previous run logs for node logs of: {node_step_log_name}")
 
+        if self._restart_initiated:
+            return True
+
         try:
             previous_attempt_log, _ = self.original_run_log.search_step_by_internal_name(node_step_log_name)
         except exceptions.StepLogNotFoundError:
             logger.warning(f"Did not find the node {node.name} in previous run log")
+            self._restart_initiated = True
             return True  # We should re-run the node.
 
         logger.info(f"The original step status: {previous_attempt_log.status}")
@@ -152,7 +158,11 @@ class RetryExecutor(GenericExecutor):
             return False  # We need not run the node
 
         logger.info(f"The new execution should start executing graph from this node {node.name}")
+        self._restart_initiated = True
         return True
 
     def execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs):
         self._execute_node(node, map_variable=map_variable, **kwargs)
+
+    def execute_job(self, node: TaskNode):
+        pass
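Note: _restart_initiated makes the retry decision sticky: the first node that is missing or failed in the original run log flips the flag, and every node after it re-runs even if its old attempt succeeded. A condensed, hypothetical rendering of that logic:

    def should_rerun(original_status: str, restart_initiated: bool) -> tuple[bool, bool]:
        # Returns (re_run, restart_initiated) for one node.
        if restart_initiated:             # everything downstream of a restart re-runs
            return True, True
        if original_status == "SUCCESS":  # untouched prefix of the graph is skipped
            return False, False
        return True, True                 # first failed/missing node flips the flag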
runnable/extensions/nodes.py CHANGED
@@ -5,7 +5,7 @@ import sys
 from collections import OrderedDict
 from copy import deepcopy
 from datetime import datetime
-from typing import Any, Dict, List, Optional, Tuple, Union, cast
+from typing import Annotated, Any, Callable, Dict, List, Optional, Tuple, Union, cast
 
 from pydantic import (
     ConfigDict,
@@ -14,10 +14,15 @@ from pydantic import (
     field_serializer,
     field_validator,
 )
-from typing_extensions import Annotated
 
 from runnable import datastore, defaults, utils
-from runnable.datastore import JsonParameter, MetricParameter, ObjectParameter, StepLog
+from runnable.datastore import (
+    JsonParameter,
+    MetricParameter,
+    ObjectParameter,
+    Parameter,
+    StepLog,
+)
 from runnable.defaults import TypeMapVariable
 from runnable.graph import Graph, create_graph
 from runnable.nodes import CompositeNode, ExecutableNode, TerminalNode
@@ -46,8 +51,6 @@ class TaskNode(ExecutableNode):
         task_config = {k: v for k, v in config.items() if k not in TaskNode.model_fields.keys()}
         node_config = {k: v for k, v in config.items() if k in TaskNode.model_fields.keys()}
 
-        task_config["node_name"] = config.get("name")
-
         executable = create_task(task_config)
         return cls(executable=executable, **node_config, **task_config)
 
@@ -505,7 +508,7 @@ class MapNode(CompositeNode):
             for _, v in map_variable.items():
                 for branch_return in self.branch_returns:
                     param_name, param_type = branch_return
-                    raw_parameters[f"{param_name}_{v}"] = param_type.copy()
+                    raw_parameters[f"{v}_{param_name}"] = param_type.copy()
         else:
             for branch_return in self.branch_returns:
                 param_name, param_type = branch_return
@@ -543,10 +546,14 @@ class MapNode(CompositeNode):
         iterate_on = None
         try:
             iterate_on = self._context.run_log_store.get_parameters(self._context.run_id)[self.iterate_on].get_value()
-        except KeyError:
+        except KeyError as e:
             raise Exception(
-                f"Expected parameter {self.iterate_on} not present in Run Log parameters, was it ever set before?"
-            )
+                (
+                    f"Expected parameter {self.iterate_on}",
+                    "not present in Run Log parameters",
+                    "was it ever set before?",
+                )
+            ) from e
 
         if not isinstance(iterate_on, list):
             raise Exception("Only list is allowed as a valid iterator type")
@@ -599,29 +606,44 @@ class MapNode(CompositeNode):
         # The final value of the parameter is the result of the reduce function.
         reducer_f = self.get_reducer_function()
 
-        if map_variable:
-            # If we are in a map state already, the param should have an index of the map variable.
-            for _, v in map_variable.items():
-                for branch_return in self.branch_returns:
-                    param_name, _ = branch_return
-                    to_reduce = []
-                    for iter_variable in iterate_on:
-                        to_reduce.append(params[f"{param_name}_{iter_variable}"].get_value())
-
-                    param_name = f"{param_name}_{v}"
-                    params[param_name].value = reducer_f(to_reduce)
-                    params[param_name].reduced = True
-        else:
+        def update_param(params: Dict[str, Parameter], reducer_f: Callable, map_prefix: str = ""):
+            from runnable.extensions.executor.mocked.implementation import (
+                MockedExecutor,
+            )
+
             for branch_return in self.branch_returns:
                 param_name, _ = branch_return
 
                 to_reduce = []
                 for iter_variable in iterate_on:
-                    to_reduce.append(params[f"{param_name}_{iter_variable}"].get_value())
-
-                params[param_name].value = reducer_f(*to_reduce)
+                    try:
+                        to_reduce.append(params[f"{iter_variable}_{param_name}"].get_value())
+                    except KeyError as e:
+                        if isinstance(self._context.executor, MockedExecutor):
+                            pass
+                        else:
+                            raise Exception(
+                                (
+                                    f"Expected parameter {iter_variable}_{param_name}",
+                                    "not present in Run Log parameters",
+                                    "was it ever set before?",
+                                )
+                            ) from e
+
+                param_name = f"{map_prefix}{param_name}"
+                if to_reduce:
+                    params[param_name].value = reducer_f(*to_reduce)
+                else:
+                    params[param_name].value = ""
                 params[param_name].reduced = True
 
+        if map_variable:
+            # If we are in a map state already, the param should have an index of the map variable.
+            for _, v in map_variable.items():
+                update_param(params, reducer_f, map_prefix=f"{v}_")
+        else:
+            update_param(params, reducer_f)
+
         self._context.run_log_store.set_parameters(parameters=params, run_id=self._context.run_id)
 
 
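Note: branch parameters are now namespaced as {iter_value}_{param} (previously {param}_{iter_value}), and the reduce step gathers those prefixed values back under the bare name, tolerating missing branches only under the mocked executor. A worked example of the naming and reduction for a map over ["a", "b"]:

    params = {"a_score": 1, "b_score": 2}  # written by the two branches

    iterate_on = ["a", "b"]
    reducer_f = max  # stand-in for the node's configured reducer

    to_reduce = [params[f"{it}_score"] for it in iterate_on]
    params["score"] = reducer_f(*to_reduce)  # -> 2, stored under the bare name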
runnable/extensions/run_log_store/chunked_file_system/implementation.py CHANGED
@@ -35,10 +35,10 @@ class ChunkedFileSystemRunLogStore(ChunkedRunLogStore):
             name (str): The suffix of the file name to check in the run log store.
         """
         log_folder = self.log_folder_with_run_id(run_id=run_id)
-
         sub_name = Template(name).safe_substitute({"creation_time": ""})
 
         matches = list(log_folder.glob(f"{sub_name}*"))
+
         if matches:
             if not multiple_allowed:
                 if len(matches) > 1:
runnable/extensions/run_log_store/generic_chunked.py CHANGED
@@ -7,7 +7,16 @@ from string import Template
 from typing import Any, Dict, Optional, Sequence, Union
 
 from runnable import defaults, exceptions
-from runnable.datastore import BaseRunLogStore, BranchLog, RunLog, StepLog
+from runnable.datastore import (
+    BaseRunLogStore,
+    BranchLog,
+    JsonParameter,
+    MetricParameter,
+    ObjectParameter,
+    Parameter,
+    RunLog,
+    StepLog,
+)
 
 logger = logging.getLogger(defaults.LOGGER_NAME)
 
@@ -164,7 +173,9 @@ class ChunkedRunLogStore(BaseRunLogStore):
             raise Exception(f"Name is required during retrieval for {log_type}")
 
         naming_pattern = self.naming_pattern(log_type=log_type, name=name)
+
         matches = self.get_matches(run_id=run_id, name=naming_pattern, multiple_allowed=multiple_allowed)
+
         if matches:
             if not multiple_allowed:
                 contents = self._retrieve(name=matches)  # type: ignore
@@ -370,10 +381,17 @@ class ChunkedRunLogStore(BaseRunLogStore):
         Raises:
             RunLogNotFoundError: If the run log for run_id is not found in the datastore
         """
-        parameters = {}
+        parameters: Dict[str, Parameter] = {}
         try:
             parameters_list = self.retrieve(run_id=run_id, log_type=self.LogTypes.PARAMETER, multiple_allowed=True)
-            parameters = {key: value for param in parameters_list for key, value in param.items()}
+            for param in parameters_list:
+                for key, value in param.items():
+                    if value["kind"] == "json":
+                        parameters[key] = JsonParameter(**value)
+                    if value["kind"] == "metric":
+                        parameters[key] = MetricParameter(**value)
+                    if value["kind"] == "object":
+                        parameters[key] = ObjectParameter(**value)
         except EntityNotFoundError:
             # No parameters are set
             pass
@@ -401,7 +419,7 @@ class ChunkedRunLogStore(BaseRunLogStore):
             self.store(
                 run_id=run_id,
                 log_type=self.LogTypes.PARAMETER,
-                contents={key: value},
+                contents={key: value.model_dump(by_alias=True)},
                 name=key,
             )
 
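Note: parameters in the chunked store are now persisted as their model_dump() dictionaries, which keep the "kind" discriminator, and get_parameters rebuilds the matching Parameter subclass from it on read. A round-trip sketch, assuming the field names shown in the diff:

    from runnable.datastore import JsonParameter, MetricParameter, ObjectParameter

    stored = {"x": {"kind": "json", "value": 42}}  # as written by model_dump()

    parameters = {}
    for key, value in stored.items():
        if value["kind"] == "json":
            parameters[key] = JsonParameter(**value)
        elif value["kind"] == "metric":
            parameters[key] = MetricParameter(**value)
        elif value["kind"] == "object":
            parameters[key] = ObjectParameter(**value)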
runnable/graph.py CHANGED
@@ -74,6 +74,7 @@ class Graph(BaseModel):
         for _, value in self.nodes.items():
             if value.internal_name == internal_name:
                 return value
+        print("graph", internal_name)
         raise exceptions.NodeNotFoundError(internal_name)
 
     def __str__(self):  # pragma: no cover
runnable/parameters.py CHANGED
@@ -36,7 +36,7 @@ def get_user_set_parameters(remove: bool = False) -> Dict[str, JsonParameter]:
         try:
             parameters[key.lower()] = JsonParameter(kind="json", value=json.loads(value))
         except json.decoder.JSONDecodeError:
-            logger.error(f"Parameter {key} could not be JSON decoded, adding the literal value")
+            logger.warning(f"Parameter {key} could not be JSON decoded, adding the literal value")
             parameters[key.lower()] = JsonParameter(kind="json", value=value)
 
     if remove:
runnable/sdk.py CHANGED
@@ -2,6 +2,7 @@ from __future__ import annotations
 
 import logging
 import os
+import re
 from abc import ABC, abstractmethod
 from pathlib import Path
 from typing import Any, Callable, Dict, List, Optional, Union
@@ -25,7 +26,7 @@ from rich.progress import (
 from rich.table import Column
 from typing_extensions import Self
 
-from runnable import console, defaults, entrypoints, graph, utils
+from runnable import console, defaults, entrypoints, exceptions, graph, utils
 from runnable.extensions.nodes import (
     FailNode,
     MapNode,
@@ -310,8 +311,6 @@ class NotebookTask(BaseTask):
     """
 
     notebook: str = Field(serialization_alias="command")
-
-    notebook_output_path: Optional[str] = Field(default=None, alias="notebook_output_path", validate_default=True)
     optional_ploomber_args: Optional[Dict[str, Any]] = Field(default=None, alias="optional_ploomber_args")
 
     @computed_field
@@ -591,6 +590,7 @@ class Pipeline(BaseModel):
 
         Any definition of pipeline should have one node that terminates with success.
         """
+        # TODO: Bug with repeat names
 
         success_path: List[StepType] = []
         on_failure_paths: List[List[StepType]] = []
@@ -637,7 +637,8 @@ class Pipeline(BaseModel):
         self._dag.check_graph()
 
     def return_dag(self) -> graph.Graph:
-        return self._dag
+        dag_definition = self._dag.model_dump(by_alias=True, exclude_none=True)
+        return graph.create_graph(dag_definition)
 
     def execute(
         self,
@@ -708,7 +709,8 @@ class Pipeline(BaseModel):
         caller_stack = inspect.stack()[1]
         relative_to_root = str(Path(caller_stack.filename).relative_to(Path.cwd()))
 
-        module_to_call = f"{relative_to_root.replace('/', '.').replace('.py', '')}.{caller_stack.function}"
+        module_name = re.sub(r"\b.py\b", "", relative_to_root.replace("/", "."))
+        module_to_call = f"{module_name}.{caller_stack.function}"
 
         run_context.pipeline_file = f"{module_to_call}.py"
 
@@ -728,15 +730,20 @@ class Pipeline(BaseModel):
             pipeline_execution_task = progress.add_task("[dark_orange] Starting execution .. ", total=1)
             run_context.executor.execute_graph(dag=run_context.dag)
 
+            if not run_context.executor._local:
+                return {}
+
             run_log = run_context.run_log_store.get_run_log_by_id(run_id=run_context.run_id, full=False)
 
             if run_log.status == defaults.SUCCESS:
                 progress.update(pipeline_execution_task, description="[green] Success", completed=True)
             else:
                 progress.update(pipeline_execution_task, description="[red] Failed", completed=True)
+                raise exceptions.ExecutionFailedError(run_context.run_id)
         except Exception as e:  # noqa: E722
             console.print(e, style=defaults.error_style)
             progress.update(pipeline_execution_task, description="[red] Errored execution", completed=True)
+            raise
 
         if run_context.executor._local:
             return run_context.run_log_store.get_run_log_by_id(run_id=run_context.run_id)
runnable/tasks.py CHANGED
@@ -1,4 +1,5 @@
 import contextlib
+import copy
 import importlib
 import io
 import json
@@ -7,11 +8,14 @@ import os
 import subprocess
 import sys
 from datetime import datetime
+from pathlib import Path
 from pickle import PicklingError
 from string import Template
-from typing import Any, Dict, List, Literal, Optional, Tuple
+from typing import Any, Dict, List, Literal, Tuple
 
-from pydantic import BaseModel, ConfigDict, Field, ValidationInfo, field_validator
+from pydantic import BaseModel, ConfigDict, Field, field_validator
+
+# from rich import print
 from rich.console import Console
 from stevedore import driver
 
@@ -33,9 +37,6 @@ logging.getLogger("stevedore").setLevel(logging.CRITICAL)
 # TODO: Can we add memory peak, cpu usage, etc. to the metrics?
 
 
-console = Console(file=io.StringIO())
-
-
 class TaskReturns(BaseModel):
     name: str
     kind: Literal["json", "object", "metric"] = Field(default="json")
@@ -45,7 +46,6 @@ class BaseTaskType(BaseModel):
     """A base task class which does the execution of command defined by the user."""
 
     task_type: str = Field(serialization_alias="command_type")
-    node_name: str = Field(exclude=True)
     secrets: List[str] = Field(default_factory=list)
     returns: List[TaskReturns] = Field(default_factory=list, alias="returns")
 
@@ -99,6 +99,20 @@ class BaseTaskType(BaseModel):
         """
         raise NotImplementedError()
 
+    def _diff_parameters(
+        self, parameters_in: Dict[str, Parameter], context_params: Dict[str, Parameter]
+    ) -> Dict[str, Parameter]:
+        diff: Dict[str, Parameter] = {}
+        for param_name, param in context_params.items():
+            if param_name in parameters_in:
+                if parameters_in[param_name] != param:
+                    diff[param_name] = param
+                continue
+
+            diff[param_name] = param
+
+        return diff
+
     @contextlib.contextmanager
     def expose_secrets(self):
         """Context manager to expose secrets to the execution.
@@ -128,7 +142,7 @@ class BaseTaskType(BaseModel):
             if param.reduced is False:
                 context_param = param_name
                 for _, v in map_variable.items():  # type: ignore
-                    context_param = f"{context_param}_{v}"
+                    context_param = f"{v}_{context_param}"
 
                 if context_param in params:
                     params[param_name].value = params[context_param].value
@@ -138,7 +152,7 @@ class BaseTaskType(BaseModel):
         if not allow_complex:
             params = {key: value for key, value in params.items() if isinstance(value, JsonParameter)}
 
-        log_file_name = self.node_name  # + ".execution.log"
+        log_file_name = self._context.executor._context_node.internal_name
         if map_variable:
             for _, value in map_variable.items():
                 log_file_name += "_" + str(value)
@@ -147,15 +161,18 @@ class BaseTaskType(BaseModel):
 
         log_file = open(log_file_name, "w")
 
+        parameters_in = copy.deepcopy(params)
         f = io.StringIO()
+        task_console = Console(file=io.StringIO())
         try:
             with contextlib.redirect_stdout(f):
                 # with contextlib.nullcontext():
-                yield params
-                print(console.file.getvalue())  # type: ignore
+                yield params, task_console
+                print(task_console.file.getvalue())  # type: ignore
         except Exception as e:  # pylint: disable=broad-except
             logger.exception(e)
         finally:
+            task_console = None  # type: ignore
             print(f.getvalue())  # print to console
             log_file.write(f.getvalue())  # Print to file
 
@@ -168,7 +185,8 @@ class BaseTaskType(BaseModel):
 
         # Update parameters
         # This should only update the parameters that are changed at the root level.
-        self._context.run_log_store.set_parameters(parameters=params, run_id=self._context.run_id)
+        diff_parameters = self._diff_parameters(parameters_in=parameters_in, context_params=params)
+        self._context.run_log_store.set_parameters(parameters=diff_parameters, run_id=self._context.run_id)
 
 
 def task_return_to_parameter(task_return: TaskReturns, value: Any) -> Parameter:
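Note: writing back only _diff_parameters(...) rather than the whole parameter dict means a task persists just the keys it created or changed, so parallel branches do not overwrite each other's untouched parameters. The comparison it performs, reduced to plain dicts:

    before = {"a": 1, "b": 2}          # snapshot taken on entering the context
    after = {"a": 1, "b": 3, "c": 4}   # parameters after the task ran

    diff = {k: v for k, v in after.items() if k not in before or before[k] != v}
    assert diff == {"b": 3, "c": 4}    # only changed/new keys are persisted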
@@ -215,7 +233,7 @@ class PythonTaskType(BaseTaskType): # pylint: disable=too-few-public-methods
         """Execute the notebook as defined by the command."""
         attempt_log = StepAttempt(status=defaults.FAIL, start_time=str(datetime.now()))
 
-        with self.execution_context(map_variable=map_variable) as params, self.expose_secrets() as _:
+        with self.execution_context(map_variable=map_variable) as (params, task_console), self.expose_secrets() as _:
             module, func = utils.get_module_and_attr_names(self.command)
             sys.path.insert(0, os.getcwd())  # Need to add the current directory to path
             imported_module = importlib.import_module(module)
@@ -227,7 +245,7 @@ class PythonTaskType(BaseTaskType): # pylint: disable=too-few-public-methods
                 logger.info(f"Calling {func} from {module} with {filtered_parameters}")
                 user_set_parameters = f(**filtered_parameters)  # This is a tuple or single value
             except Exception as e:
-                console.log(e, style=defaults.error_style, markup=False)
+                task_console.log(e, style=defaults.error_style, markup=False)
                 raise exceptions.CommandCallError(f"Function call: {self.command} did not succeed.\n") from e
 
             attempt_log.input_parameters = params.copy()
@@ -259,7 +277,7 @@ class PythonTaskType(BaseTaskType): # pylint: disable=too-few-public-methods
                     param_name = task_return.name
                     if map_variable:
                         for _, v in map_variable.items():
-                            param_name = f"{param_name}_{v}"
+                            param_name = f"{v}_{param_name}"
 
                     output_parameters[param_name] = output_parameter
 
@@ -271,8 +289,8 @@ class PythonTaskType(BaseTaskType): # pylint: disable=too-few-public-methods
             except Exception as _e:
                 msg = f"Call to the function {self.command} did not succeed.\n"
                 attempt_log.message = msg
-                console.print_exception(show_locals=False)
-                console.log(_e, style=defaults.error_style)
+                task_console.print_exception(show_locals=False)
+                task_console.log(_e, style=defaults.error_style)
 
         attempt_log.end_time = str(datetime.now())
 
@@ -284,25 +302,25 @@ class NotebookTaskType(BaseTaskType):
 
     task_type: str = Field(default="notebook", serialization_alias="command_type")
     command: str
-    notebook_output_path: Optional[str] = Field(default=None, validate_default=True)
     optional_ploomber_args: dict = {}
 
     @field_validator("command")
     @classmethod
-    def notebook_should_end_with_ipynb(cls, command: str):
+    def notebook_should_end_with_ipynb(cls, command: str) -> str:
         if not command.endswith(".ipynb"):
             raise Exception("Notebook task should point to a ipynb file")
 
         return command
 
-    @field_validator("notebook_output_path")
-    @classmethod
-    def correct_notebook_output_path(cls, notebook_output_path: str, info: ValidationInfo):
-        if notebook_output_path:
-            return notebook_output_path
+    @property
+    def notebook_output_path(self) -> str:
+        node_name = self._context.executor._context_node.internal_name
+        sane_name = "".join(x for x in node_name if x.isalnum())
+
+        output_path = Path(".", self.command)
+        file_name = output_path.parent / (output_path.stem + f"{sane_name}_out.ipynb")
 
-        command = info.data["command"]
-        return "".join(command.split(".")[:-1]) + "_out.ipynb"
+        return str(file_name)
 
     def get_cli_options(self) -> Tuple[str, dict]:
         return "notebook", {"command": self.command, "notebook-output-path": self.notebook_output_path}
@@ -326,17 +344,26 @@ class NotebookTaskType(BaseTaskType):
         import ploomber_engine as pm
         from ploomber_engine.ipython import PloomberClient
 
-        notebook_output_path = self.notebook_output_path or ""
+        notebook_output_path = self.notebook_output_path
 
-        with self.execution_context(
-            map_variable=map_variable, allow_complex=False
-        ) as params, self.expose_secrets() as _:
+        with self.execution_context(map_variable=map_variable, allow_complex=False) as (
+            params,
+            _,
+        ), self.expose_secrets() as _:
             if map_variable:
                 for key, value in map_variable.items():
                     notebook_output_path += "_" + str(value)
-                    params[key] = value
+                    params[key] = JsonParameter(kind="json", value=value)
+
+            # Remove any {v}_unreduced parameters from the parameters
+            copy_params = copy.deepcopy(params)
+            unprocessed_params = [k for k, v in copy_params.items() if not v.reduced]
+
+            for key in list(copy_params.keys()):
+                if any(key.endswith(f"_{k}") for k in unprocessed_params):
+                    del copy_params[key]
 
-            notebook_params = {k: v.get_value() for k, v in params.items()}
+            notebook_params = {k: v.get_value() for k, v in copy_params.items()}
 
             ploomber_optional_args = self.optional_ploomber_args
 
@@ -359,6 +386,11 @@ class NotebookTaskType(BaseTaskType):
             try:
                 for task_return in self.returns:
                     param_name = Template(task_return.name).safe_substitute(map_variable)  # type: ignore
+
+                    if map_variable:
+                        for _, v in map_variable.items():
+                            param_name = f"{v}_{param_name}"
+
                     output_parameters[param_name] = task_return_to_parameter(
                         task_return=task_return,
                         value=namespace[task_return.name],
@@ -435,95 +467,98 @@ class ShellTaskType(BaseTaskType):
             secret_value = context.run_context.secrets_handler.get(key)
             subprocess_env[key] = secret_value
 
-        with self.execution_context(map_variable=map_variable, allow_complex=False) as params:
-            subprocess_env.update({k: v.get_value() for k, v in params.items()})
-
-            # Json dumps all runnable environment variables
-            for key, value in subprocess_env.items():
-                if isinstance(value, str):
-                    continue
-                subprocess_env[key] = json.dumps(value)
-
-            collect_delimiter = "=== COLLECT ==="
-
-            command = self.command.strip() + f" && echo '{collect_delimiter}' && env"
-            logger.info(f"Executing shell command: {command}")
-
-            capture = False
-            return_keys = {x.name: x for x in self.returns}
-
-            proc = subprocess.Popen(
-                command,
-                shell=True,
-                env=subprocess_env,
-                stdout=subprocess.PIPE,
-                stderr=subprocess.PIPE,
-                text=True,
-            )
-            result = proc.communicate()
-            logger.debug(result)
-            logger.info(proc.returncode)
-
-            if proc.returncode != 0:
-                msg = ",".join(result[1].split("\n"))
-                attempt_log.status = defaults.FAIL
-                attempt_log.end_time = str(datetime.now())
-                attempt_log.message = msg
-                console.print(msg, style=defaults.error_style)
-                return attempt_log
-
-            # for stderr
-            for line in result[1].split("\n"):
-                if line.strip() == "":
-                    continue
-                console.print(line, style=defaults.warning_style)
+        try:
+            with self.execution_context(map_variable=map_variable, allow_complex=False) as (params, task_console):
+                subprocess_env.update({k: v.get_value() for k, v in params.items()})
+
+                # Json dumps all runnable environment variables
+                for key, value in subprocess_env.items():
+                    if isinstance(value, str):
+                        continue
+                    subprocess_env[key] = json.dumps(value)
+
+                collect_delimiter = "=== COLLECT ==="
+
+                command = self.command.strip() + f" && echo '{collect_delimiter}' && env"
+                logger.info(f"Executing shell command: {command}")
+
+                capture = False
+                return_keys = {x.name: x for x in self.returns}
+
+                proc = subprocess.Popen(
+                    command,
+                    shell=True,
+                    env=subprocess_env,
+                    stdout=subprocess.PIPE,
+                    stderr=subprocess.PIPE,
+                    text=True,
+                )
+                result = proc.communicate()
+                logger.debug(result)
+                logger.info(proc.returncode)
+
+                if proc.returncode != 0:
+                    msg = ",".join(result[1].split("\n"))
+                    task_console.print(msg, style=defaults.error_style)
+                    raise exceptions.CommandCallError(msg)
+
+                # for stderr
+                for line in result[1].split("\n"):
+                    if line.strip() == "":
+                        continue
+                    task_console.print(line, style=defaults.warning_style)
 
-            output_parameters: Dict[str, Parameter] = {}
-            metrics: Dict[str, Parameter] = {}
+                output_parameters: Dict[str, Parameter] = {}
+                metrics: Dict[str, Parameter] = {}
 
-            # only from stdout
-            for line in result[0].split("\n"):
-                if line.strip() == "":
-                    continue
+                # only from stdout
+                for line in result[0].split("\n"):
+                    if line.strip() == "":
+                        continue
 
-                logger.info(line)
-                console.print(line)
+                    logger.info(line)
+                    task_console.print(line)
 
-                if line.strip() == collect_delimiter:
-                    # The lines from now on should be captured
-                    capture = True
-                    continue
+                    if line.strip() == collect_delimiter:
+                        # The lines from now on should be captured
+                        capture = True
+                        continue
 
-                if capture:
-                    key, value = line.strip().split("=", 1)
-                    if key in return_keys:
-                        task_return = return_keys[key]
+                    if capture:
+                        key, value = line.strip().split("=", 1)
+                        if key in return_keys:
+                            task_return = return_keys[key]
 
-                        try:
-                            value = json.loads(value)
-                        except json.JSONDecodeError:
-                            value = value
+                            try:
+                                value = json.loads(value)
+                            except json.JSONDecodeError:
+                                value = value
 
-                        output_parameter = task_return_to_parameter(
-                            task_return=task_return,
-                            value=value,
-                        )
+                            output_parameter = task_return_to_parameter(
+                                task_return=task_return,
+                                value=value,
+                            )
 
-                        if task_return.kind == "metric":
-                            metrics[task_return.name] = output_parameter
+                            if task_return.kind == "metric":
+                                metrics[task_return.name] = output_parameter
 
-                        param_name = task_return.name
-                        if map_variable:
-                            for _, v in map_variable.items():
-                                param_name = f"{param_name}_{v}"
+                            param_name = task_return.name
+                            if map_variable:
+                                for _, v in map_variable.items():
+                                    param_name = f"{v}_{param_name}"
 
-                        output_parameters[param_name] = output_parameter
+                            output_parameters[param_name] = output_parameter
 
-            attempt_log.output_parameters = output_parameters
-            attempt_log.user_defined_metrics = metrics
-            params.update(output_parameters)
+                attempt_log.output_parameters = output_parameters
+                attempt_log.user_defined_metrics = metrics
+                params.update(output_parameters)
 
-            attempt_log.status = defaults.SUCCESS
+                attempt_log.status = defaults.SUCCESS
+        except exceptions.CommandCallError as e:
+            msg = f"Call to the command {self.command} did not succeed"
+            logger.exception(msg)
+            logger.exception(e)
+            attempt_log.status = defaults.FAIL
 
         attempt_log.end_time = str(datetime.now())
         return attempt_log
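Note: the shell task's return collection works by suffixing the user command with a delimiter echo and env, then parsing everything after the delimiter on stdout as KEY=VALUE pairs; failures now surface as CommandCallError inside the context so the log-flushing finally block still runs. A self-contained sketch of the collection protocol (POSIX shell assumed):

    import json
    import subprocess

    collect_delimiter = "=== COLLECT ==="
    command = "export answer=42" + f" && echo '{collect_delimiter}' && env"

    out = subprocess.run(command, shell=True, capture_output=True, text=True).stdout
    capture, collected = False, {}
    for line in out.split("\n"):
        if line.strip() == collect_delimiter:
            capture = True  # everything after the delimiter is the env dump
            continue
        if capture and "=" in line:
            key, value = line.strip().split("=", 1)
            try:
                collected[key] = json.loads(value)
            except json.JSONDecodeError:
                collected[key] = value
    # collected["answer"] == 42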
runnable-0.11.1.dist-info/METADATA → runnable-0.11.3.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: runnable
-Version: 0.11.1
+Version: 0.11.3
 Summary: A Compute agnostic pipelining software
 Home-page: https://github.com/vijayvammi/runnable
 License: Apache-2.0
runnable-0.11.1.dist-info/RECORD → runnable-0.11.3.dist-info/RECORD CHANGED
@@ -1,22 +1,22 @@
-runnable/__init__.py,sha256=C5ySSfN_vHWFTscnxLx3tfMjKT3Bs9jfUjXnDvun33Y,870
+runnable/__init__.py,sha256=wFnLO08pQdhkDxYzcyo5C4BRIWPg1ncmCe4PRhey50U,931
 runnable/catalog.py,sha256=22OECi5TrpHErxYIhfx-lJ2vgBUi4-5V9CaYEVm98hE,4138
 runnable/cli.py,sha256=RILUrEfzernuKD3dNdXPBkqN_1OgE5GosYRuInj0FVs,9618
 runnable/context.py,sha256=QhiXJHRcEBfSKB1ijvL5yB9w44x0HCe7VEiwK1cUJ9U,1124
-runnable/datastore.py,sha256=EgKi4_b5g6KbInpjMyw8Xwr-EgcSGi1Lx2u5vp4amSQ,27672
+runnable/datastore.py,sha256=8aQZ15KAMdre7a7G61bNRmcTeJFzOdnx_9O9UP4JQc8,27910
 runnable/defaults.py,sha256=MOX7I2S6yO4FphZaZREFQca94a20oO8uvzXLd6GLKQs,4703
-runnable/entrypoints.py,sha256=a8M7vb954as_ni7lM0t65czXQj2AHjB-KrQJ3zt3sWo,16397
+runnable/entrypoints.py,sha256=A76Fpa08RmEdXevZSRvhlWN4aQiIoLODa5NYSp8Kd00,17049
 runnable/exceptions.py,sha256=6NIYoTAzdKyGQ9PvW1Hu7b80OS746395KiGDhM7ThH8,2526
 runnable/executor.py,sha256=xfBighQ5t_vejohip000XfxLwsgechUE1ZMIJWrZbUA,14484
 runnable/extensions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 runnable/extensions/catalog/__init__.py,sha256=uXZ6D-Myr_J4HnBA4F5Hd7LZ0IAjQiFQYxRhMzejhQc,761
 runnable/extensions/catalog/file_system/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-runnable/extensions/catalog/file_system/implementation.py,sha256=9j920o9SULdcVp1Mr8FgeuV-Sv5bR3w5tcohChxHnak,9130
+runnable/extensions/catalog/file_system/implementation.py,sha256=mFPsAwPMNGWbHczpQ84o3mfkPkOEz5zjsT7a3rqNzoE,9092
 runnable/extensions/catalog/k8s_pvc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 runnable/extensions/catalog/k8s_pvc/implementation.py,sha256=oJDDI0APT7lrtjWmzYJRDHLGn3Vhbn2MdFSRYvFBUpY,436
 runnable/extensions/catalog/k8s_pvc/integration.py,sha256=OfrHbNFN8sR-wsVa4os3ajmWJFSd5H4KOHGVAmjRZTQ,1850
-runnable/extensions/executor/__init__.py,sha256=eV3q_dL2cRqYaJ8RWV6Xk1__KMWMM2hUnQFT7Z5pRso,26698
+runnable/extensions/executor/__init__.py,sha256=0385OpNSpjyA0GjXlLw7gZtqJFFOHGLmYHzWAGBzU98,26247
 runnable/extensions/executor/argo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-runnable/extensions/executor/argo/implementation.py,sha256=_BfxCe742S6uV-7PuQ53KjzwY-8Rq-5y9txOXMYf20U,43670
+runnable/extensions/executor/argo/implementation.py,sha256=Wd__bOwLxhIrHKwk0iMgavD44F0nEzR7CcSJzAe7hKk,43840
 runnable/extensions/executor/argo/specification.yaml,sha256=wXQcm2gOQYqy-IOQIhucohS32ZrHKCfGA5zZ0RraPYc,1276
 runnable/extensions/executor/k8s_job/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 runnable/extensions/executor/k8s_job/implementation_FF.py,sha256=1IfVG1GRcJcVFzQ-WhkJsmzdJuj51QMxXylY9UrWM0U,10259
@@ -24,15 +24,15 @@ runnable/extensions/executor/k8s_job/integration_FF.py,sha256=pG6HKhPMgCRIgu1PAn
 runnable/extensions/executor/local/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 runnable/extensions/executor/local/implementation.py,sha256=e8Tzv-FgQmJeUXVut96jeNERTR83JVG_zkQZMEjCVAs,2469
 runnable/extensions/executor/local_container/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-runnable/extensions/executor/local_container/implementation.py,sha256=6kYMgdgE5JxZkVAidxsBSpqkHvyKMfEctgZWSZQEpXA,13979
+runnable/extensions/executor/local_container/implementation.py,sha256=CmvQK77V98Gyzkyuh4WSDrgnoonN89I7xvNSPfeqMxA,14894
 runnable/extensions/executor/mocked/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-runnable/extensions/executor/mocked/implementation.py,sha256=ChdUyUsiXXjG_v80d0uLp76Nz4jqqGEry36gs9gNn9k,5082
+runnable/extensions/executor/mocked/implementation.py,sha256=ChvlcLGpBxO6QwJcoqhBgKBR6NfWVnMdOWKQhMgcEjY,5762
 runnable/extensions/executor/retry/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-runnable/extensions/executor/retry/implementation.py,sha256=ZBSYpxSiAIt-SXPD-qIPP-MMo8b7sQ6UKOTJemAjXlI,6625
-runnable/extensions/nodes.py,sha256=5soHRhfT8FY2vnQa4kvRqeVphTq_t-GSw-ExNZfgB30,31965
+runnable/extensions/executor/retry/implementation.py,sha256=-g6PBOhSG7IL4D_IlQOcf9H_En9IXiUzCt-6vKeCB6Q,6892
+runnable/extensions/nodes.py,sha256=JUmovDBissri3oyTYS5K68gOx5sm3e-rThoTE5xKy0Y,32645
 runnable/extensions/run_log_store/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 runnable/extensions/run_log_store/chunked_file_system/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-runnable/extensions/run_log_store/chunked_file_system/implementation.py,sha256=wtOeREr9QyIuMHLCT7o_eDCJVCDsBvwmk89kos3dhfQ,3326
+runnable/extensions/run_log_store/chunked_file_system/implementation.py,sha256=EW2P8lr3eH-pIOsMTJPr5eb-iWc48GQ97W15JzkpC_4,3326
 runnable/extensions/run_log_store/chunked_k8s_pvc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 runnable/extensions/run_log_store/chunked_k8s_pvc/implementation.py,sha256=iGzy-s1eT_kAJP7XgzDLmEMOGrBLvACIiGE_wM62jGE,579
 runnable/extensions/run_log_store/chunked_k8s_pvc/integration.py,sha256=atzdTy5HJ-bZsd6AzDP8kYRI1TshKxviBKeqY359TUs,1979
@@ -40,27 +40,25 @@ runnable/extensions/run_log_store/db/implementation_FF.py,sha256=oEiG5ASWYYbwlBb
 runnable/extensions/run_log_store/db/integration_FF.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 runnable/extensions/run_log_store/file_system/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 runnable/extensions/run_log_store/file_system/implementation.py,sha256=WxxfGCaDAB5zHMM3zv9aeDwXZ4DhtyzjXOjfjvyDoZ4,4288
-runnable/extensions/run_log_store/generic_chunked.py,sha256=rcY5f-MIYUUiM5iQnDHICOh7cKiOUSCeaxcBG9_fz-U,19390
+runnable/extensions/run_log_store/generic_chunked.py,sha256=PtYK1dheKYdxODwu_ygpGRIHIepgLVaIORSqvsrg0No,19876
 runnable/extensions/run_log_store/k8s_pvc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 runnable/extensions/run_log_store/k8s_pvc/implementation.py,sha256=tLgXy9HUB_vlFVQ0Itk6PpNU3GlCOILN4vA3fm80jXI,542
 runnable/extensions/run_log_store/k8s_pvc/integration.py,sha256=lxQg327mwC0ykhNp5Kg34a9g8o1DzJAhfkiqMGmsABs,1873
 runnable/extensions/secrets/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 runnable/extensions/secrets/dotenv/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 runnable/extensions/secrets/dotenv/implementation.py,sha256=3J5pofWahdZbnwnETwpspE5-PKyvmZF_vkfwA1X_bkA,3365
-runnable/extensions/secrets/env_secrets/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-runnable/extensions/secrets/env_secrets/implementation.py,sha256=5XiHdJvIr0-jkl4fGfEf26UsgE5Q2Z4oCc0RwjlJdJA,1236
-runnable/graph.py,sha256=18IpGYw5kgHP32m12WwXscx-kG5Kx-AuWS1LFbMfBLg,16202
+runnable/graph.py,sha256=lVSfODa61zCWZcEyzQbg9bwOYvqoW89-3i92YOkmWII,16240
 runnable/integration.py,sha256=eb9qJVZR7Ehg0N1UnGPuyjJvoA-xQ1-xP7AlZHUXHqM,6705
 runnable/names.py,sha256=vn92Kv9ANROYSZX6Z4z1v_WA3WiEdIYmG6KEStBFZug,8134
 runnable/nodes.py,sha256=UqR-bJx0Hi7uLSUw_saB7VsNdFh3POKtdgsEPsasHfE,16576
-runnable/parameters.py,sha256=KGGW8_uoIK2hd3EwzzBmoHBOrai3fh-SESNPpJRTfj4,5161
+runnable/parameters.py,sha256=yZkMDnwnkdYXIwQ8LflBzn50Y0xRGxEvLlxwno6ovvs,5163
 runnable/pickler.py,sha256=5SDNf0miMUJ3ZauhQdzwk8_t-9jeOqaTjP5bvRnu9sU,2685
-runnable/sdk.py,sha256=JsM27GUc3c57ZepK996FHtfzXP6FGs8MP-s96RC-_fo,27648
+runnable/sdk.py,sha256=t6d1Q3BoovixqC29QuSjFEwsleVgM0E-pAQlfCfMz_o,27923
 runnable/secrets.py,sha256=dakb7WRloWVo-KpQp6Vy4rwFdGi58BTlT4OifQY106I,2324
-runnable/tasks.py,sha256=CH0W7evAZK5hco0medJS6DwHRr06bng7oB7xk0Xh6zQ,20618
+runnable/tasks.py,sha256=XiFQGTrUvoXub99915lYLvHair8sVWfcUzhY0OceUXo,22351
 runnable/utils.py,sha256=okZFGbJWqStl5Rq5vLhNUQZDv_vhcT58bq9MDrTVxhc,19449
-runnable-0.11.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-runnable-0.11.1.dist-info/METADATA,sha256=IKuld8eA9gcb6y7YZaO-3Dpmw2x4U2wnPg0eag1fewU,17020
-runnable-0.11.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-runnable-0.11.1.dist-info/entry_points.txt,sha256=Wy-dimdD2REO2a36Ri84fqGqA5iwGy2RIbdgRNtCNdM,1540
-runnable-0.11.1.dist-info/RECORD,,
+runnable-0.11.3.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+runnable-0.11.3.dist-info/METADATA,sha256=ExuQfQL61u-YGaUwUqHY-NllyO4H-m8Uwu84a88-ff8,17020
+runnable-0.11.3.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+runnable-0.11.3.dist-info/entry_points.txt,sha256=amb6ISqKBSIz47um8_6LKnYgpoZ4d_p6-O1-7uUb1cU,1447
+runnable-0.11.3.dist-info/RECORD,,
runnable-0.11.1.dist-info/entry_points.txt → runnable-0.11.3.dist-info/entry_points.txt CHANGED
@@ -32,7 +32,6 @@ file-system=runnable.extensions.run_log_store.file_system.implementation:FileSys
 [secrets]
 do-nothing=runnable.secrets:DoNothingSecretManager
 dotenv=runnable.extensions.secrets.dotenv.implementation:DotEnvSecrets
-env-secrets-manager=runnable.extensions.secrets.env_secrets.implementation:EnvSecretsManager
 
 [tasks]
 notebook=runnable.tasks:NotebookTaskType
runnable/extensions/secrets/env_secrets/implementation.py DELETED
@@ -1,42 +0,0 @@
-import logging
-import os
-
-from runnable import defaults, exceptions
-from runnable.secrets import BaseSecrets
-
-logger = logging.getLogger(defaults.LOGGER_NAME)
-
-
-class EnvSecretsManager(BaseSecrets):
-    """
-    A secret manager via environment variables.
-
-    This secret manager returns nothing if the key does not match
-    """
-
-    service_name: str = "env-secrets-manager"
-    prefix: str = ""
-    suffix: str = ""
-
-    def get(self, name: str = "", **kwargs) -> str:
-        """
-        If a name is provided, we look for that in the environment.
-        If a environment variable by that name is not found, we raise an Exception.
-
-        If a name is not provided, we return an empty dictionary.
-
-        Args:
-            name (str): The name of the secret to retrieve
-
-        Raises:
-            Exception: If the secret by the name is not found.
-
-        Returns:
-            [type]: [description]
-        """
-
-        try:
-            return os.environ[f"{self.prefix}{name}{self.suffix}"]
-        except KeyError as _e:
-            logger.exception(f"Secret {self.prefix}{name}{self.suffix} not found in environment")
-            raise exceptions.SecretNotFoundError(secret_name=name, secret_setting="environment") from _e