runnable 0.11.1__tar.gz → 0.11.3__tar.gz

Files changed (65)
  1. {runnable-0.11.1 → runnable-0.11.3}/PKG-INFO +1 -1
  2. {runnable-0.11.1 → runnable-0.11.3}/pyproject.toml +6 -3
  3. {runnable-0.11.1 → runnable-0.11.3}/runnable/__init__.py +3 -0
  4. {runnable-0.11.1 → runnable-0.11.3}/runnable/datastore.py +8 -3
  5. {runnable-0.11.1 → runnable-0.11.3}/runnable/entrypoints.py +15 -1
  6. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/catalog/file_system/implementation.py +1 -1
  7. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/executor/__init__.py +0 -5
  8. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/executor/argo/implementation.py +3 -1
  9. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/executor/local_container/implementation.py +49 -16
  10. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/executor/mocked/implementation.py +21 -1
  11. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/executor/retry/implementation.py +11 -1
  12. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/nodes.py +47 -25
  13. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/run_log_store/generic_chunked.py +22 -4
  14. {runnable-0.11.1 → runnable-0.11.3}/runnable/graph.py +1 -0
  15. {runnable-0.11.1 → runnable-0.11.3}/runnable/parameters.py +1 -1
  16. {runnable-0.11.1 → runnable-0.11.3}/runnable/sdk.py +12 -5
  17. {runnable-0.11.1 → runnable-0.11.3}/runnable/tasks.py +143 -108
  18. runnable-0.11.1/runnable/extensions/secrets/env_secrets/__init__.py +0 -0
  19. runnable-0.11.1/runnable/extensions/secrets/env_secrets/implementation.py +0 -42
  20. {runnable-0.11.1 → runnable-0.11.3}/LICENSE +0 -0
  21. {runnable-0.11.1 → runnable-0.11.3}/README.md +0 -0
  22. {runnable-0.11.1 → runnable-0.11.3}/runnable/catalog.py +0 -0
  23. {runnable-0.11.1 → runnable-0.11.3}/runnable/cli.py +0 -0
  24. {runnable-0.11.1 → runnable-0.11.3}/runnable/context.py +0 -0
  25. {runnable-0.11.1 → runnable-0.11.3}/runnable/defaults.py +0 -0
  26. {runnable-0.11.1 → runnable-0.11.3}/runnable/exceptions.py +0 -0
  27. {runnable-0.11.1 → runnable-0.11.3}/runnable/executor.py +0 -0
  28. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/__init__.py +0 -0
  29. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/catalog/__init__.py +0 -0
  30. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/catalog/file_system/__init__.py +0 -0
  31. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/catalog/k8s_pvc/__init__.py +0 -0
  32. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/catalog/k8s_pvc/implementation.py +0 -0
  33. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/catalog/k8s_pvc/integration.py +0 -0
  34. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/executor/argo/__init__.py +0 -0
  35. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/executor/argo/specification.yaml +0 -0
  36. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/executor/k8s_job/__init__.py +0 -0
  37. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/executor/k8s_job/implementation_FF.py +0 -0
  38. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/executor/k8s_job/integration_FF.py +0 -0
  39. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/executor/local/__init__.py +0 -0
  40. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/executor/local/implementation.py +0 -0
  41. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/executor/local_container/__init__.py +0 -0
  42. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/executor/mocked/__init__.py +0 -0
  43. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/executor/retry/__init__.py +0 -0
  44. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/run_log_store/__init__.py +0 -0
  45. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/run_log_store/chunked_file_system/__init__.py +0 -0
  46. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/run_log_store/chunked_file_system/implementation.py +1 -1
  47. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/run_log_store/chunked_k8s_pvc/__init__.py +0 -0
  48. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/run_log_store/chunked_k8s_pvc/implementation.py +0 -0
  49. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/run_log_store/chunked_k8s_pvc/integration.py +0 -0
  50. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/run_log_store/db/implementation_FF.py +0 -0
  51. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/run_log_store/db/integration_FF.py +0 -0
  52. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/run_log_store/file_system/__init__.py +0 -0
  53. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/run_log_store/file_system/implementation.py +0 -0
  54. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/run_log_store/k8s_pvc/__init__.py +0 -0
  55. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/run_log_store/k8s_pvc/implementation.py +0 -0
  56. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/run_log_store/k8s_pvc/integration.py +0 -0
  57. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/secrets/__init__.py +0 -0
  58. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/secrets/dotenv/__init__.py +0 -0
  59. {runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/secrets/dotenv/implementation.py +0 -0
  60. {runnable-0.11.1 → runnable-0.11.3}/runnable/integration.py +0 -0
  61. {runnable-0.11.1 → runnable-0.11.3}/runnable/names.py +0 -0
  62. {runnable-0.11.1 → runnable-0.11.3}/runnable/nodes.py +0 -0
  63. {runnable-0.11.1 → runnable-0.11.3}/runnable/pickler.py +0 -0
  64. {runnable-0.11.1 → runnable-0.11.3}/runnable/secrets.py +0 -0
  65. {runnable-0.11.1 → runnable-0.11.3}/runnable/utils.py +0 -0

{runnable-0.11.1 → runnable-0.11.3}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: runnable
-Version: 0.11.1
+Version: 0.11.3
 Summary: A Compute agnostic pipelining software
 Home-page: https://github.com/vijayvammi/runnable
 License: Apache-2.0

{runnable-0.11.1 → runnable-0.11.3}/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "runnable"
-version = "0.11.1"
+version = "0.11.3"
 description = "A Compute agnostic pipelining software"
 authors = ["Vijay Vammi <mesanthu@gmail.com>"]
 license = "Apache-2.0"
@@ -53,6 +53,10 @@ matplotlib = "^3.8.3"
 [tool.poetry.group.release.dependencies]
 python-semantic-release = "^9.4.2"
 
+
+[tool.poetry.group.examples.dependencies]
+pandas = "^2.2.2"
+
 [tool.poetry.extras]
 docker = ['docker']
 notebook = ['ploomber-engine']
@@ -93,7 +97,6 @@ runnable = 'runnable.cli:cli'
 [tool.poetry.plugins."secrets"]
 "do-nothing" = "runnable.secrets:DoNothingSecretManager"
 "dotenv" = "runnable.extensions.secrets.dotenv.implementation:DotEnvSecrets"
-"env-secrets-manager" = "runnable.extensions.secrets.env_secrets.implementation:EnvSecretsManager"
 
 # Plugins for Run Log store
 [tool.poetry.plugins."run_log_store"]
@@ -203,7 +206,7 @@ tag_format = "{version}"
 env = "GH_TOKEN"
 
 [tool.semantic_release.branches.main]
-match = "main"
+match = ""
 
 [tool.semantic_release.remote]
 ignore_token_for_push = true

{runnable-0.11.1 → runnable-0.11.3}/runnable/__init__.py
@@ -2,6 +2,7 @@
 
 # TODO: Might need to add Rich to pyinstaller part
 import logging
+import os
 from logging.config import dictConfig
 
 from rich.console import Console
@@ -29,6 +30,8 @@ from runnable.sdk import ( # noqa
     pickled,
 )
 
+os.environ["_PLOOMBER_TELEMETRY_DEBUG"] = "false"
+
 ## TODO: Summary should be a bit better for catalog.
 ## If the execution fails, hint them about the retry executor.
 # Make the retry executor loose!

{runnable-0.11.1 → runnable-0.11.3}/runnable/datastore.py
@@ -312,8 +312,10 @@ class RunLog(BaseModel):
         summary["Catalog Location"] = _context.catalog_handler.get_summary()
         summary["Full Run log present at: "] = _context.run_log_store.get_summary()
 
-        summary["Final Parameters"] = {p: v.description for p, v in self.parameters.items()}
-        summary["Collected metrics"] = {p: v.description for p, v in self.parameters.items() if v.kind == "metric"}
+        run_log = _context.run_log_store.get_run_log_by_id(run_id=_context.run_id, full=True)
+
+        summary["Final Parameters"] = {p: v.description for p, v in run_log.parameters.items()}
+        summary["Collected metrics"] = {p: v.description for p, v in run_log.parameters.items() if v.kind == "metric"}
 
         return summary
 
@@ -400,7 +402,10 @@ class RunLog(BaseModel):
         """
         dot_path = i_name.split(".")
         if len(dot_path) == 1:
-            return self.steps[i_name], None
+            try:
+                return self.steps[i_name], None
+            except KeyError as e:
+                raise exceptions.StepLogNotFoundError(self.run_id, i_name) from e
 
         current_steps = self.steps
         current_step = None
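
Note: translating the bare KeyError into StepLogNotFoundError lets callers treat a missing step log as a domain condition; the retry executor hunk further down relies on exactly this. A minimal sketch of the pattern, with stand-in classes rather than the real pydantic models:

class StepLogNotFoundError(Exception):  # stand-in for runnable.exceptions.StepLogNotFoundError
    def __init__(self, run_id: str, name: str) -> None:
        super().__init__(f"Step log {name} not found in run {run_id}")

class RunLogSketch:
    def __init__(self, run_id: str, steps: dict) -> None:
        self.run_id, self.steps = run_id, steps

    def search_step_by_internal_name(self, i_name: str):
        try:
            return self.steps[i_name], None
        except KeyError as e:
            # surface a domain error instead of leaking the dict lookup failure
            raise StepLogNotFoundError(self.run_id, i_name) from e

try:
    RunLogSketch("run-1", {}).search_step_by_internal_name("missing")
except StepLogNotFoundError:
    pass  # the caller decides to re-run the node (see the retry executor diff below)
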

{runnable-0.11.1 → runnable-0.11.3}/runnable/entrypoints.py
@@ -60,6 +60,8 @@ def prepare_configurations(
     variables = utils.gather_variables()
 
     templated_configuration = {}
+    configuration_file = os.environ.get("RUNNABLE_CONFIGURATION_FILE", configuration_file)
+
     if configuration_file:
         templated_configuration = utils.load_yaml(configuration_file) or {}
 
@@ -144,8 +146,8 @@
 
 
 def execute(
-    configuration_file: str,
     pipeline_file: str,
+    configuration_file: str = "",
     tag: str = "",
     run_id: str = "",
     parameters_file: str = "",
@@ -196,6 +198,10 @@ def execute(
         run_context.progress = progress
         executor.execute_graph(dag=run_context.dag)  # type: ignore
 
+        if not executor._local:
+            executor.send_return_code(stage="traversal")
+            return
+
        run_log = run_context.run_log_store.get_run_log_by_id(run_id=run_context.run_id, full=False)
 
        if run_log.status == defaults.SUCCESS:
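
Note: a toy illustration of the traversal split introduced above (class and return strings are ours): executors that are not local only submit or compile the graph, so the entry point reports the traversal return code and stops instead of inspecting a local run log.

class ExecutorSketch:
    def __init__(self, local: bool) -> None:
        self._local = local

def finish(executor: ExecutorSketch) -> str:
    if not executor._local:
        # e.g. argo: nodes run remotely, only the traversal outcome is known here
        return "send_return_code(stage='traversal') and stop"
    return "inspect run log, report success or failure"

assert finish(ExecutorSketch(local=False)).endswith("stop")
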
@@ -205,6 +211,10 @@
    except Exception as e:  # noqa: E722
        console.print(e, style=defaults.error_style)
        progress.update(pipeline_execution_task, description="[red] Errored execution", completed=True)
+        run_log = run_context.run_log_store.get_run_log_by_id(run_id=run_context.run_id, full=False)
+        run_log.status = defaults.FAIL
+        run_context.run_log_store.add_branch_log(run_log, run_context.run_id)
+        raise e
 
    executor.send_return_code()
 
@@ -235,6 +245,8 @@ def execute_single_node(
     """
     from runnable import nodes
 
+    configuration_file = os.environ.get("RUNNABLE_CONFIGURATION_FILE", configuration_file)
+
     run_context = prepare_configurations(
         configuration_file=configuration_file,
         pipeline_file=pipeline_file,
@@ -422,6 +434,8 @@ def fan(
     """
     from runnable import nodes
 
+    configuration_file = os.environ.get("RUNNABLE_CONFIGURATION_FILE", configuration_file)
+
     run_context = prepare_configurations(
         configuration_file=configuration_file,
         pipeline_file=pipeline_file,

{runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/catalog/file_system/implementation.py
@@ -226,7 +226,7 @@ class FileSystemCatalog(BaseCatalog):
        for cataloged_file in cataloged_files:
            if str(cataloged_file).endswith("execution.log"):
                continue
-            print(cataloged_file.name)
+
            if cataloged_file.is_file():
                shutil.copy(cataloged_file, run_catalog / cataloged_file.name)
            else:

{runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/executor/__init__.py
@@ -185,14 +185,11 @@ class GenericExecutor(BaseExecutor):
         data_catalogs = []
         for name_pattern in node_catalog_settings.get(stage) or []:
             if stage == "get":
-                get_catalog_progress = self._context.progress.add_task(f"Getting from catalog {name_pattern}", total=1)
                 data_catalog = self._context.catalog_handler.get(
                     name=name_pattern, run_id=self._context.run_id, compute_data_folder=compute_data_folder
                 )
-                self._context.progress.update(get_catalog_progress, completed=True, visible=False, refresh=True)
 
             elif stage == "put":
-                put_catalog_progress = self._context.progress.add_task(f"Putting in catalog {name_pattern}", total=1)
                 data_catalog = self._context.catalog_handler.put(
                     name=name_pattern,
                     run_id=self._context.run_id,
@@ -200,8 +197,6 @@ class GenericExecutor(BaseExecutor):
                     synced_catalogs=synced_catalogs,
                 )
 
-                self._context.progress.update(put_catalog_progress, completed=True, visible=False)
-
             logger.debug(f"Added data catalog: {data_catalog} to step log")
             data_catalogs.extend(data_catalog)
 

{runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/executor/argo/implementation.py
@@ -1033,6 +1033,9 @@ class ArgoExecutor(GenericExecutor):
         if working_on.node_type not in ["success", "fail"] and working_on._get_on_failure_node():
             failure_node = dag.get_node_by_name(working_on._get_on_failure_node())
 
+            render_obj = get_renderer(working_on)(executor=self, node=failure_node)
+            render_obj.render(list_of_iter_values=list_of_iter_values.copy())
+
             failure_template_name = self.get_clean_name(failure_node)
             # If a task template for clean name exists, retrieve it
             failure_template = templates.get(
@@ -1040,7 +1043,6 @@ class ArgoExecutor(GenericExecutor):
                 DagTaskTemplate(name=failure_template_name, template=failure_template_name),
             )
             failure_template.depends.append(f"{clean_name}.Failed")
-
             templates[failure_template_name] = failure_template
 
         # If we are in a map node, we need to add the values as arguments

{runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/executor/local_container/implementation.py
@@ -5,7 +5,7 @@ from typing import Dict, cast
 from pydantic import Field
 from rich import print
 
-from runnable import defaults, integration, utils
+from runnable import defaults, utils
 from runnable.datastore import StepLog
 from runnable.defaults import TypeMapVariable
 from runnable.extensions.executor import GenericExecutor
@@ -145,16 +145,6 @@ class LocalContainerExecutor(GenericExecutor):
         logger.debug("Here is the resolved executor config")
         logger.debug(executor_config)
 
-        if executor_config.get("run_in_local", False):
-            # Do not change config but only validate the configuration.
-            # Trigger the job on local system instead of a container
-            integration.validate(self, self._context.run_log_store)
-            integration.validate(self, self._context.catalog_handler)
-            integration.validate(self, self._context.secrets_handler)
-
-            self.execute_node(node=node, map_variable=map_variable, **kwargs)
-            return
-
         command = utils.get_node_execution_command(node, map_variable=map_variable)
 
         self._spin_container(
@@ -172,7 +162,7 @@ class LocalContainerExecutor(GenericExecutor):
                 "Note: If you do not see any docker issue from your side and the code works properly on local execution"
                 "please raise a bug report."
             )
-            logger.warning(msg)
+            logger.error(msg)
             step_log.status = defaults.FAIL
             self._context.run_log_store.add_step_log(step_log, self._context.run_id)
 
@@ -212,6 +202,7 @@ class LocalContainerExecutor(GenericExecutor):
                 f"Please provide a docker_image using executor_config of the step {node.name} or at global config"
             )
 
+        print("container", self._volumes)
         # TODO: Should consider using getpass.getuser() when running the docker container? Volume permissions
         container = client.containers.create(
             image=docker_image,
@@ -260,7 +251,9 @@ class LocalContainerComputeFileSystemRunLogstore(BaseIntegration):
     service_provider = "file-system"  # The actual implementation of the service
 
     def configure_for_traversal(self, **kwargs):
-        from runnable.extensions.run_log_store.file_system.implementation import FileSystemRunLogstore
+        from runnable.extensions.run_log_store.file_system.implementation import (
+            FileSystemRunLogstore,
+        )
 
         self.executor = cast(LocalContainerExecutor, self.executor)
         self.service = cast(FileSystemRunLogstore, self.service)
@@ -272,7 +265,9 @@ class LocalContainerComputeFileSystemRunLogstore(BaseIntegration):
         }
 
     def configure_for_execution(self, **kwargs):
-        from runnable.extensions.run_log_store.file_system.implementation import FileSystemRunLogstore
+        from runnable.extensions.run_log_store.file_system.implementation import (
+            FileSystemRunLogstore,
+        )
 
         self.executor = cast(LocalContainerExecutor, self.executor)
         self.service = cast(FileSystemRunLogstore, self.service)
@@ -280,6 +275,40 @@ class LocalContainerComputeFileSystemRunLogstore(BaseIntegration):
         self.service.log_folder = self.executor._container_log_location
 
 
+class LocalContainerComputeChunkedFS(BaseIntegration):
+    """
+    Integration pattern between Local container and File System catalog
+    """
+
+    executor_type = "local-container"
+    service_type = "run_log_store"  # One of secret, catalog, datastore
+    service_provider = "chunked-fs"  # The actual implementation of the service
+
+    def configure_for_traversal(self, **kwargs):
+        from runnable.extensions.run_log_store.chunked_file_system.implementation import (
+            ChunkedFileSystemRunLogStore,
+        )
+
+        self.executor = cast(LocalContainerExecutor, self.executor)
+        self.service = cast(ChunkedFileSystemRunLogStore, self.service)
+
+        write_to = self.service.log_folder
+        self.executor._volumes[str(Path(write_to).resolve())] = {
+            "bind": f"{self.executor._container_log_location}",
+            "mode": "rw",
+        }
+
+    def configure_for_execution(self, **kwargs):
+        from runnable.extensions.run_log_store.chunked_file_system.implementation import (
+            ChunkedFileSystemRunLogStore,
+        )
+
+        self.executor = cast(LocalContainerExecutor, self.executor)
+        self.service = cast(ChunkedFileSystemRunLogStore, self.service)
+
+        self.service.log_folder = self.executor._container_log_location
+
+
 class LocalContainerComputeFileSystemCatalog(BaseIntegration):
     """
     Integration pattern between Local container and File System catalog
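
Note: the volume mapping the new integration builds up follows the dictionary shape docker-py expects for client.containers.create(volumes=...). A sketch with illustrative paths:

from pathlib import Path

volumes = {}
write_to = ".run_log_store"                # chunked-fs log_folder on the host (illustrative)
container_log_location = "/tmp/run_logs/"  # assumed container-side location

# host path -> {"bind": container path, "mode": "rw"} is the docker-py volumes format
volumes[str(Path(write_to).resolve())] = {
    "bind": container_log_location,
    "mode": "rw",
}
print(volumes)
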
@@ -290,7 +319,9 @@ class LocalContainerComputeFileSystemCatalog(BaseIntegration):
     service_provider = "file-system"  # The actual implementation of the service
 
     def configure_for_traversal(self, **kwargs):
-        from runnable.extensions.catalog.file_system.implementation import FileSystemCatalog
+        from runnable.extensions.catalog.file_system.implementation import (
+            FileSystemCatalog,
+        )
 
         self.executor = cast(LocalContainerExecutor, self.executor)
         self.service = cast(FileSystemCatalog, self.service)
@@ -302,7 +333,9 @@ class LocalContainerComputeFileSystemCatalog(BaseIntegration):
         }
 
     def configure_for_execution(self, **kwargs):
-        from runnable.extensions.catalog.file_system.implementation import FileSystemCatalog
+        from runnable.extensions.catalog.file_system.implementation import (
+            FileSystemCatalog,
+        )
 
         self.executor = cast(LocalContainerExecutor, self.executor)
         self.service = cast(FileSystemCatalog, self.service)

{runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/executor/mocked/implementation.py
@@ -18,7 +18,7 @@ def create_executable(params: Dict[str, Any], model: Type[BaseTaskType], node_name
     class EasyModel(model):  # type: ignore
         model_config = ConfigDict(extra="ignore")
 
-    swallow_all = EasyModel(**params, node_name=node_name)
+    swallow_all = EasyModel(node_name=node_name, **params)
     return swallow_all
 
 
@@ -26,6 +26,8 @@ class MockedExecutor(GenericExecutor):
     service_name: str = "mocked"
     _local_executor: bool = True
 
+    model_config = ConfigDict(extra="ignore")
+
     patches: Dict[str, Any] = Field(default_factory=dict)
 
     @property
@@ -64,6 +66,10 @@ class MockedExecutor(GenericExecutor):
         step_log.step_type = node.node_type
         step_log.status = defaults.PROCESSING
 
+        self._context.run_log_store.add_step_log(step_log, self._context.run_id)
+
+        logger.info(f"Executing node: {node.get_summary()}")
+
         # Add the step log to the database as per the situation.
         # If its a terminal node, complete it now
         if node.node_type in ["success", "fail"]:
@@ -132,3 +138,17 @@ class MockedExecutor(GenericExecutor):
 
     def execute_job(self, node: TaskNode):
         pass
+
+    def execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs):
+        """
+        The entry point for all executors apart from local.
+        We have already prepared for node execution.
+
+        Args:
+            node (BaseNode): The node to execute
+            map_variable (dict, optional): If the node is part of a map, send in the map dictionary. Defaults to None.
+
+        Raises:
+            NotImplementedError: _description_
+        """
+        ...

{runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/executor/retry/implementation.py
@@ -6,6 +6,7 @@ from runnable import context, defaults, exceptions
 from runnable.datastore import RunLog
 from runnable.defaults import TypeMapVariable
 from runnable.extensions.executor import GenericExecutor
+from runnable.extensions.nodes import TaskNode
 from runnable.nodes import BaseNode
 
 logger = logging.getLogger(defaults.LOGGER_NAME)
@@ -31,6 +32,7 @@ class RetryExecutor(GenericExecutor):
 
     _local: bool = True
     _original_run_log: Optional[RunLog] = None
+    _restart_initiated: bool = False
 
     @property
     def _context(self):
@@ -38,7 +40,7 @@ class RetryExecutor(GenericExecutor):
 
     @cached_property
     def original_run_log(self):
-        self.original_run_log = self._context.run_log_store.get_run_log_by_id(
+        return self._context.run_log_store.get_run_log_by_id(
             run_id=self.run_id,
             full=True,
         )
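
Note: this is a real fix, not a style change. functools.cached_property caches whatever the decorated function returns; the old body assigned to self.original_run_log and implicitly returned None, so the cache ended up holding None. A self-contained demonstration of the semantics:

from functools import cached_property

class Demo:
    calls = 0

    @cached_property
    def value(self) -> int:
        Demo.calls += 1
        return 42  # the return value is what gets cached

d = Demo()
assert d.value == 42 and d.value == 42 and Demo.calls == 1  # computed once, then cached
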
@@ -140,10 +142,14 @@ class RetryExecutor(GenericExecutor):
         node_step_log_name = node._get_step_log_name(map_variable=map_variable)
         logger.info(f"Scanning previous run logs for node logs of: {node_step_log_name}")
 
+        if self._restart_initiated:
+            return True
+
         try:
             previous_attempt_log, _ = self.original_run_log.search_step_by_internal_name(node_step_log_name)
         except exceptions.StepLogNotFoundError:
             logger.warning(f"Did not find the node {node.name} in previous run log")
+            self._restart_initiated = True
             return True  # We should re-run the node.
 
         logger.info(f"The original step status: {previous_attempt_log.status}")
@@ -152,7 +158,11 @@ class RetryExecutor(GenericExecutor):
             return False  # We need not run the node
 
         logger.info(f"The new execution should start executing graph from this node {node.name}")
+        self._restart_initiated = True
         return True
 
     def execute_node(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs):
         self._execute_node(node, map_variable=map_variable, **kwargs)
+
+    def execute_job(self, node: TaskNode):
+        pass
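
Note: the new _restart_initiated flag makes the rerun decision sticky: once one step is missing or unsuccessful in the original run log, every later step re-executes regardless of its previous status. A minimal sketch of that logic (names and structure are ours):

class RetrySketch:
    def __init__(self, previous_success):
        self.previous_success = previous_success  # step name -> bool from the original run
        self._restart_initiated = False

    def should_rerun(self, step: str) -> bool:
        if self._restart_initiated:
            return True                            # everything after the restart point re-runs
        if self.previous_success.get(step, False):
            return False                           # succeeded last time, skip
        self._restart_initiated = True             # first failed/missing step flips the flag
        return True

retry = RetrySketch({"fetch": True, "train": False, "report": True})
assert [retry.should_rerun(s) for s in ("fetch", "train", "report")] == [False, True, True]
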

{runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/nodes.py
@@ -5,7 +5,7 @@ import sys
 from collections import OrderedDict
 from copy import deepcopy
 from datetime import datetime
-from typing import Any, Dict, List, Optional, Tuple, Union, cast
+from typing import Annotated, Any, Callable, Dict, List, Optional, Tuple, Union, cast
 
 from pydantic import (
     ConfigDict,
@@ -14,10 +14,15 @@ from pydantic import (
     field_serializer,
     field_validator,
 )
-from typing_extensions import Annotated
 
 from runnable import datastore, defaults, utils
-from runnable.datastore import JsonParameter, MetricParameter, ObjectParameter, StepLog
+from runnable.datastore import (
+    JsonParameter,
+    MetricParameter,
+    ObjectParameter,
+    Parameter,
+    StepLog,
+)
 from runnable.defaults import TypeMapVariable
 from runnable.graph import Graph, create_graph
 from runnable.nodes import CompositeNode, ExecutableNode, TerminalNode
@@ -46,8 +51,6 @@ class TaskNode(ExecutableNode):
         task_config = {k: v for k, v in config.items() if k not in TaskNode.model_fields.keys()}
         node_config = {k: v for k, v in config.items() if k in TaskNode.model_fields.keys()}
 
-        task_config["node_name"] = config.get("name")
-
         executable = create_task(task_config)
         return cls(executable=executable, **node_config, **task_config)
 
@@ -505,7 +508,7 @@ class MapNode(CompositeNode):
             for _, v in map_variable.items():
                 for branch_return in self.branch_returns:
                     param_name, param_type = branch_return
-                    raw_parameters[f"{param_name}_{v}"] = param_type.copy()
+                    raw_parameters[f"{v}_{param_name}"] = param_type.copy()
         else:
             for branch_return in self.branch_returns:
                 param_name, param_type = branch_return
@@ -543,10 +546,14 @@ class MapNode(CompositeNode):
         iterate_on = None
         try:
             iterate_on = self._context.run_log_store.get_parameters(self._context.run_id)[self.iterate_on].get_value()
-        except KeyError:
+        except KeyError as e:
             raise Exception(
-                f"Expected parameter {self.iterate_on} not present in Run Log parameters, was it ever set before?"
-            )
+                (
+                    f"Expected parameter {self.iterate_on}",
+                    "not present in Run Log parameters",
+                    "was it ever set before?",
+                )
+            ) from e
 
         if not isinstance(iterate_on, list):
             raise Exception("Only list is allowed as a valid iterator type")
@@ -599,29 +606,44 @@ class MapNode(CompositeNode):
         # The final value of the parameter is the result of the reduce function.
         reducer_f = self.get_reducer_function()
 
-        if map_variable:
-            # If we are in a map state already, the param should have an index of the map variable.
-            for _, v in map_variable.items():
-                for branch_return in self.branch_returns:
-                    param_name, _ = branch_return
-                    to_reduce = []
-                    for iter_variable in iterate_on:
-                        to_reduce.append(params[f"{param_name}_{iter_variable}"].get_value())
-
-                    param_name = f"{param_name}_{v}"
-                    params[param_name].value = reducer_f(to_reduce)
-                    params[param_name].reduced = True
-        else:
+        def update_param(params: Dict[str, Parameter], reducer_f: Callable, map_prefix: str = ""):
+            from runnable.extensions.executor.mocked.implementation import (
+                MockedExecutor,
+            )
+
             for branch_return in self.branch_returns:
                 param_name, _ = branch_return
 
                 to_reduce = []
                 for iter_variable in iterate_on:
-                    to_reduce.append(params[f"{param_name}_{iter_variable}"].get_value())
-
-                params[param_name].value = reducer_f(*to_reduce)
+                    try:
+                        to_reduce.append(params[f"{iter_variable}_{param_name}"].get_value())
+                    except KeyError as e:
+                        if isinstance(self._context.executor, MockedExecutor):
+                            pass
+                        else:
+                            raise Exception(
+                                (
+                                    f"Expected parameter {iter_variable}_{param_name}",
+                                    "not present in Run Log parameters",
+                                    "was it ever set before?",
+                                )
+                            ) from e
+
+                param_name = f"{map_prefix}{param_name}"
+                if to_reduce:
+                    params[param_name].value = reducer_f(*to_reduce)
+                else:
+                    params[param_name].value = ""
                 params[param_name].reduced = True
 
+        if map_variable:
+            # If we are in a map state already, the param should have an index of the map variable.
+            for _, v in map_variable.items():
+                update_param(params, reducer_f, map_prefix=f"{v}_")
+        else:
+            update_param(params, reducer_f)
+
         self._context.run_log_store.set_parameters(parameters=params, run_id=self._context.run_id)
 
 
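Note: together with the earlier {v}_{param_name} hunk, branch parameters are now prefixed with the iteration value and reduced into the bare name. A worked illustration with plain values and a reducer chosen for the example (runnable's actual parameters are model objects, not raw values):

iterate_on = ["a", "b"]
params = {"a_score": 10, "b_score": 20}   # one copy of "score" per iteration value

to_reduce = [params[f"{v}_score"] for v in iterate_on]
params["score"] = max(*to_reduce)         # stands in for reducer_f(*to_reduce)
assert params["score"] == 20
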

{runnable-0.11.1 → runnable-0.11.3}/runnable/extensions/run_log_store/generic_chunked.py
@@ -7,7 +7,16 @@ from string import Template
 from typing import Any, Dict, Optional, Sequence, Union
 
 from runnable import defaults, exceptions
-from runnable.datastore import BaseRunLogStore, BranchLog, RunLog, StepLog
+from runnable.datastore import (
+    BaseRunLogStore,
+    BranchLog,
+    JsonParameter,
+    MetricParameter,
+    ObjectParameter,
+    Parameter,
+    RunLog,
+    StepLog,
+)
 
 logger = logging.getLogger(defaults.LOGGER_NAME)
 
 
@@ -164,7 +173,9 @@ class ChunkedRunLogStore(BaseRunLogStore):
164
173
  raise Exception(f"Name is required during retrieval for {log_type}")
165
174
 
166
175
  naming_pattern = self.naming_pattern(log_type=log_type, name=name)
176
+
167
177
  matches = self.get_matches(run_id=run_id, name=naming_pattern, multiple_allowed=multiple_allowed)
178
+
168
179
  if matches:
169
180
  if not multiple_allowed:
170
181
  contents = self._retrieve(name=matches) # type: ignore
@@ -370,10 +381,17 @@ class ChunkedRunLogStore(BaseRunLogStore):
         Raises:
             RunLogNotFoundError: If the run log for run_id is not found in the datastore
         """
-        parameters = {}
+        parameters: Dict[str, Parameter] = {}
         try:
             parameters_list = self.retrieve(run_id=run_id, log_type=self.LogTypes.PARAMETER, multiple_allowed=True)
-            parameters = {key: value for param in parameters_list for key, value in param.items()}
+            for param in parameters_list:
+                for key, value in param.items():
+                    if value["kind"] == "json":
+                        parameters[key] = JsonParameter(**value)
+                    if value["kind"] == "metric":
+                        parameters[key] = MetricParameter(**value)
+                    if value["kind"] == "object":
+                        parameters[key] = ObjectParameter(**value)
         except EntityNotFoundError:
             # No parameters are set
             pass
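
Note: this mirrors the set_parameters hunk below, which now persists value.model_dump(by_alias=True); retrieval dispatches on the stored "kind" to rebuild the right parameter type. A sketch of the round trip with simplified stand-ins for the pydantic models:

from dataclasses import dataclass

@dataclass
class JsonParameter:    # stand-in for runnable.datastore.JsonParameter
    kind: str
    value: object

@dataclass
class MetricParameter:  # stand-in for runnable.datastore.MetricParameter
    kind: str
    value: object

chunks = [{"lr": {"kind": "json", "value": 0.01}}, {"auc": {"kind": "metric", "value": 0.93}}]
kind_map = {"json": JsonParameter, "metric": MetricParameter}

parameters = {}
for chunk in chunks:
    for key, value in chunk.items():
        parameters[key] = kind_map[value["kind"]](**value)  # dispatch on the stored kind

assert isinstance(parameters["auc"], MetricParameter)
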
@@ -401,7 +419,7 @@ class ChunkedRunLogStore(BaseRunLogStore):
             self.store(
                 run_id=run_id,
                 log_type=self.LogTypes.PARAMETER,
-                contents={key: value},
+                contents={key: value.model_dump(by_alias=True)},
                 name=key,
             )
 

{runnable-0.11.1 → runnable-0.11.3}/runnable/graph.py
@@ -74,6 +74,7 @@ class Graph(BaseModel):
         for _, value in self.nodes.items():
             if value.internal_name == internal_name:
                 return value
+        print("graph", internal_name)
         raise exceptions.NodeNotFoundError(internal_name)
 
     def __str__(self):  # pragma: no cover

{runnable-0.11.1 → runnable-0.11.3}/runnable/parameters.py
@@ -36,7 +36,7 @@ def get_user_set_parameters(remove: bool = False) -> Dict[str, JsonParameter]:
            try:
                parameters[key.lower()] = JsonParameter(kind="json", value=json.loads(value))
            except json.decoder.JSONDecodeError:
-                logger.error(f"Parameter {key} could not be JSON decoded, adding the literal value")
+                logger.warning(f"Parameter {key} could not be JSON decoded, adding the literal value")
                parameters[key.lower()] = JsonParameter(kind="json", value=value)
 
    if remove: