runnable 0.11.4__tar.gz → 0.11.5__tar.gz

Files changed (63)
  1. {runnable-0.11.4 → runnable-0.11.5}/PKG-INFO +1 -1
  2. {runnable-0.11.4 → runnable-0.11.5}/pyproject.toml +1 -1
  3. {runnable-0.11.4 → runnable-0.11.5}/runnable/__init__.py +3 -2
  4. {runnable-0.11.4 → runnable-0.11.5}/runnable/entrypoints.py +26 -17
  5. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/executor/argo/implementation.py +31 -26
  6. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/executor/local_container/implementation.py +0 -1
  7. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/nodes.py +7 -3
  8. {runnable-0.11.4 → runnable-0.11.5}/runnable/tasks.py +31 -38
  9. {runnable-0.11.4 → runnable-0.11.5}/runnable/utils.py +20 -2
  10. {runnable-0.11.4 → runnable-0.11.5}/LICENSE +0 -0
  11. {runnable-0.11.4 → runnable-0.11.5}/README.md +0 -0
  12. {runnable-0.11.4 → runnable-0.11.5}/runnable/catalog.py +0 -0
  13. {runnable-0.11.4 → runnable-0.11.5}/runnable/cli.py +0 -0
  14. {runnable-0.11.4 → runnable-0.11.5}/runnable/context.py +0 -0
  15. {runnable-0.11.4 → runnable-0.11.5}/runnable/datastore.py +0 -0
  16. {runnable-0.11.4 → runnable-0.11.5}/runnable/defaults.py +0 -0
  17. {runnable-0.11.4 → runnable-0.11.5}/runnable/exceptions.py +0 -0
  18. {runnable-0.11.4 → runnable-0.11.5}/runnable/executor.py +0 -0
  19. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/__init__.py +0 -0
  20. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/catalog/__init__.py +0 -0
  21. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/catalog/file_system/__init__.py +0 -0
  22. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/catalog/file_system/implementation.py +0 -0
  23. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/catalog/k8s_pvc/__init__.py +0 -0
  24. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/catalog/k8s_pvc/implementation.py +0 -0
  25. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/catalog/k8s_pvc/integration.py +0 -0
  26. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/executor/__init__.py +0 -0
  27. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/executor/argo/__init__.py +0 -0
  28. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/executor/argo/specification.yaml +0 -0
  29. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/executor/k8s_job/__init__.py +0 -0
  30. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/executor/k8s_job/implementation_FF.py +0 -0
  31. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/executor/k8s_job/integration_FF.py +0 -0
  32. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/executor/local/__init__.py +0 -0
  33. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/executor/local/implementation.py +0 -0
  34. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/executor/local_container/__init__.py +0 -0
  35. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/executor/mocked/__init__.py +0 -0
  36. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/executor/mocked/implementation.py +0 -0
  37. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/executor/retry/__init__.py +0 -0
  38. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/executor/retry/implementation.py +0 -0
  39. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/run_log_store/__init__.py +0 -0
  40. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/run_log_store/chunked_file_system/__init__.py +0 -0
  41. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/run_log_store/chunked_file_system/implementation.py +0 -0
  42. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/run_log_store/chunked_k8s_pvc/__init__.py +0 -0
  43. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/run_log_store/chunked_k8s_pvc/implementation.py +0 -0
  44. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/run_log_store/chunked_k8s_pvc/integration.py +0 -0
  45. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/run_log_store/db/implementation_FF.py +0 -0
  46. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/run_log_store/db/integration_FF.py +0 -0
  47. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/run_log_store/file_system/__init__.py +0 -0
  48. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/run_log_store/file_system/implementation.py +0 -0
  49. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/run_log_store/generic_chunked.py +0 -0
  50. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/run_log_store/k8s_pvc/__init__.py +0 -0
  51. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/run_log_store/k8s_pvc/implementation.py +0 -0
  52. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/run_log_store/k8s_pvc/integration.py +0 -0
  53. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/secrets/__init__.py +0 -0
  54. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/secrets/dotenv/__init__.py +0 -0
  55. {runnable-0.11.4 → runnable-0.11.5}/runnable/extensions/secrets/dotenv/implementation.py +0 -0
  56. {runnable-0.11.4 → runnable-0.11.5}/runnable/graph.py +0 -0
  57. {runnable-0.11.4 → runnable-0.11.5}/runnable/integration.py +0 -0
  58. {runnable-0.11.4 → runnable-0.11.5}/runnable/names.py +0 -0
  59. {runnable-0.11.4 → runnable-0.11.5}/runnable/nodes.py +0 -0
  60. {runnable-0.11.4 → runnable-0.11.5}/runnable/parameters.py +0 -0
  61. {runnable-0.11.4 → runnable-0.11.5}/runnable/pickler.py +0 -0
  62. {runnable-0.11.4 → runnable-0.11.5}/runnable/sdk.py +0 -0
  63. {runnable-0.11.4 → runnable-0.11.5}/runnable/secrets.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: runnable
-Version: 0.11.4
+Version: 0.11.5
 Summary: A Compute agnostic pipelining software
 Home-page: https://github.com/vijayvammi/runnable
 License: Apache-2.0
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "runnable"
-version = "0.11.4"
+version = "0.11.5"
 description = "A Compute agnostic pipelining software"
 authors = ["Vijay Vammi <mesanthu@gmail.com>"]
 license = "Apache-2.0"
@@ -12,7 +12,7 @@ from runnable import defaults
 dictConfig(defaults.LOGGING_CONFIG)
 logger = logging.getLogger(defaults.LOGGER_NAME)
 
-console = Console()
+console = Console(record=True)
 console.print(":runner: Lets go!!")
 
 from runnable.sdk import (  # noqa
@@ -30,7 +30,8 @@ from runnable.sdk import (  # noqa
     pickled,
 )
 
-os.environ["_PLOOMBER_TELEMETRY_DEBUG"] = "false"
+# Needed to disable ploomber telemetry
+os.environ["PLOOMBER_STATS_ENABLED"] = "false"
 
 ## TODO: Summary should be a bit better for catalog.
 ## If the execution fails, hint them about the retry executor.
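The switch to `Console(record=True)` is what enables the log-capture changes elsewhere in this release: a recording console keeps an in-memory transcript of everything printed through it, which can be exported after the fact. A minimal sketch of the rich API involved (the file name is illustrative):

```python
from rich.console import Console

# record=True keeps a transcript of everything printed via this console
console = Console(record=True)
console.print(":runner: Lets go!!")

# The transcript can later be exported as text or dumped straight to a file
transcript = console.export_text()
console.save_text("execution.log")  # illustrative file name
```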
@@ -19,18 +19,11 @@ def get_default_configs() -> RunnableConfig:
     """
     User can provide extensions as part of their code base, runnable-config.yaml provides the place to put them.
     """
-    user_configs = {}
+    user_configs: RunnableConfig = {}
     if utils.does_file_exist(defaults.USER_CONFIG_FILE):
-        user_configs = utils.load_yaml(defaults.USER_CONFIG_FILE)
+        user_configs = cast(RunnableConfig, utils.load_yaml(defaults.USER_CONFIG_FILE))
 
-    if not user_configs:
-        return {}
-
-    user_defaults = user_configs.get("defaults", {})
-    if user_defaults:
-        return user_defaults
-
-    return {}
+    return user_configs
 
 
 def prepare_configurations(
@@ -198,6 +191,7 @@ def execute(
     run_context.progress = progress
     executor.execute_graph(dag=run_context.dag)  # type: ignore
 
+    # Non local executors have no run logs
     if not executor._local:
         executor.send_return_code(stage="traversal")
         return
@@ -245,6 +239,8 @@ def execute_single_node(
     """
     from runnable import nodes
 
+    console.print(f"Executing the single node: {step_name} with map variable: {map_variable}")
+
     configuration_file = os.environ.get("RUNNABLE_CONFIGURATION_FILE", configuration_file)
 
     run_context = prepare_configurations(
@@ -264,19 +260,32 @@ def execute_single_node(
 
     executor.prepare_for_node_execution()
 
-    if not run_context.dag:
-        # There are a few entry points that make graph dynamically and do not have a dag defined statically.
-        run_log = run_context.run_log_store.get_run_log_by_id(run_id=run_id, full=False)
-        run_context.dag = graph.create_graph(run_log.run_config["pipeline"])
-
-    step_internal_name = nodes.BaseNode._get_internal_name_from_command_name(step_name)
+    # TODO: may be make its own entry point
+    # if not run_context.dag:
+    #     # There are a few entry points that make graph dynamically and do not have a dag defined statically.
+    #     run_log = run_context.run_log_store.get_run_log_by_id(run_id=run_id, full=False)
+    #     run_context.dag = graph.create_graph(run_log.run_config["pipeline"])
+    assert run_context.dag
 
     map_variable_dict = utils.json_to_ordered_dict(map_variable)
 
+    step_internal_name = nodes.BaseNode._get_internal_name_from_command_name(step_name)
     node_to_execute, _ = graph.search_node_by_internal_name(run_context.dag, step_internal_name)
 
     logger.info("Executing the single node of : %s", node_to_execute)
-    executor.execute_node(node=node_to_execute, map_variable=map_variable_dict)
+    ## This step is where we save the log file
+    try:
+        executor.execute_node(node=node_to_execute, map_variable=map_variable_dict)
+    except Exception:  # noqa: E722
+        log_file_name = utils.make_log_file_name(
+            node=node_to_execute,
+            map_variable=map_variable_dict,
+        )
+        console.save_text(log_file_name)
+
+        # Put the log file in the catalog
+        run_context.catalog_handler.put(name=log_file_name, run_id=run_context.run_id)
+        os.remove(log_file_name)
 
     executor.send_return_code(stage="execution")
 
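With the recording console in place, the new failure path in `execute_single_node` reduces to the pattern below: run the node, and only if it raises, dump the recorded transcript to a file, push that file to the catalog, and delete the local copy. A condensed sketch, with `executor`, `run_context`, and `log_file_name` standing in for the real objects:

```python
import os

from runnable import console  # the shared recording console

def execute_with_log_capture(executor, run_context, node, map_variable, log_file_name):
    # Condensed sketch of the new failure handling; the real code derives
    # log_file_name via utils.make_log_file_name (added in this release).
    try:
        executor.execute_node(node=node, map_variable=map_variable)
    except Exception:
        console.save_text(log_file_name)  # persist the recorded transcript
        # Put the log file in the catalog, then drop the local copy
        run_context.catalog_handler.put(name=log_file_name, run_id=run_context.run_id)
        os.remove(log_file_name)
```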
@@ -5,7 +5,7 @@ import shlex
 import string
 from abc import ABC, abstractmethod
 from collections import OrderedDict
-from typing import Any, Dict, List, Optional, Union, cast
+from typing import Dict, List, Optional, Union, cast
 
 from pydantic import (
     BaseModel,
@@ -19,7 +19,7 @@ from pydantic.functional_serializers import PlainSerializer
 from ruamel.yaml import YAML
 from typing_extensions import Annotated
 
-from runnable import defaults, exceptions, integration, parameters, utils
+from runnable import defaults, exceptions, integration, utils
 from runnable.defaults import TypeMapVariable
 from runnable.extensions.executor import GenericExecutor
 from runnable.extensions.nodes import DagNode, MapNode, ParallelNode
@@ -378,6 +378,7 @@ class ExecutionNode(NodeRenderer):
             self.node,
             over_write_run_id=self.executor._run_id_placeholder,
             map_variable=map_variable,
+            log_level=self.executor._log_level,
         )
 
         inputs = []
@@ -502,12 +503,16 @@ class MapNodeRender(NodeRenderer):
         self.node = cast(MapNode, self.node)
         task_template_arguments = []
         dag_inputs = []
-        if list_of_iter_values:
-            for value in list_of_iter_values:
-                task_template_arguments.append(Argument(name=value, value="{{inputs.parameters." + value + "}}"))
-                dag_inputs.append(Parameter(name=value))
+
+        if not list_of_iter_values:
+            list_of_iter_values = []
+
+        for value in list_of_iter_values:
+            task_template_arguments.append(Argument(name=value, value="{{inputs.parameters." + value + "}}"))
+            dag_inputs.append(Parameter(name=value))
 
         clean_name = self.executor.get_clean_name(self.node)
+
         fan_out_template = self.executor._create_fan_out_template(
             composite_node=self.node, list_of_iter_values=list_of_iter_values
         )
@@ -518,9 +523,6 @@ class MapNodeRender(NodeRenderer):
         )
         fan_in_template.arguments = task_template_arguments if task_template_arguments else None
 
-        if not list_of_iter_values:
-            list_of_iter_values = []
-
         list_of_iter_values.append(self.node.iterate_as)
 
         self.executor._gather_task_templates_of_dag(
@@ -580,8 +582,12 @@ class Spec(BaseModel):
     node_selector: Optional[Dict[str, str]] = Field(default_factory=dict, serialization_alias="nodeSelector")
     tolerations: Optional[List[Toleration]] = Field(default=None, serialization_alias="tolerations")
    parallelism: Optional[int] = Field(default=None, serialization_alias="parallelism")
+
     # TODO: This has to be user driven
-    pod_gc: Dict[str, str] = Field(default={"strategy": "OnPodCompletion"}, serialization_alias="podGC")
+    pod_gc: Dict[str, str] = Field(
+        default={"strategy": "OnPodSuccess", "deleteDelayDuration": "600s"},
+        serialization_alias="podGC",
+    )
 
     retry_strategy: Retry = Field(default=Retry(), serialization_alias="retryStrategy")
     service_account_name: Optional[str] = Field(default=None, serialization_alias="serviceAccountName")
@@ -674,6 +680,8 @@ class ArgoExecutor(GenericExecutor):
     service_name: str = "argo"
     _local: bool = False
 
+    # TODO: Add logging level as option.
+
     model_config = ConfigDict(extra="forbid")
 
     image: str
@@ -719,6 +727,7 @@ class ArgoExecutor(GenericExecutor):
     persistent_volumes: List[UserVolumeMounts] = Field(default_factory=list)
 
     _run_id_placeholder: str = "{{workflow.parameters.run_id}}"
+    _log_level: str = "{{workflow.parameters.log_level}}"
     _container_templates: List[ContainerTemplate] = []
     _dag_templates: List[DagTemplate] = []
     _clean_names: Dict[str, str] = {}
@@ -828,17 +837,7 @@ class ArgoExecutor(GenericExecutor):
         iterate_on = self._context.run_log_store.get_parameters(self._context.run_id)[node.iterate_on]
 
         with open("/tmp/output.txt", mode="w", encoding="utf-8") as myfile:
-            json.dump(iterate_on, myfile, indent=4)
-
-    def _get_parameters(self) -> Dict[str, Any]:
-        params = {}
-        if self._context.parameters_file:
-            # Parameters from the parameters file if defined
-            params.update(utils.load_yaml(self._context.parameters_file))
-        # parameters from environment variables supersede file based
-        params.update(parameters.get_user_set_parameters())
-
-        return params
+            json.dump(iterate_on.get_value(), myfile, indent=4)
 
     def sanitize_name(self, name):
         return name.replace(" ", "-").replace(".", "-").replace("_", "-")
@@ -886,6 +885,7 @@ class ArgoExecutor(GenericExecutor):
 
         if working_on.name == self._context.dag.start_at and self.expose_parameters_as_inputs:
             for key, value in self._get_parameters().items():
+                value = value.get_value()  # type: ignore
                 # Get the value from work flow parameters for dynamic behavior
                 if isinstance(value, int) or isinstance(value, float) or isinstance(value, str):
                     env_var = EnvVar(
@@ -943,6 +943,7 @@ class ArgoExecutor(GenericExecutor):
             node=composite_node,
             run_id=self._run_id_placeholder,
             map_variable=map_variable,
+            log_level=self._log_level,
         )
 
         outputs = []
@@ -984,6 +985,7 @@ class ArgoExecutor(GenericExecutor):
             node=composite_node,
             run_id=self._run_id_placeholder,
             map_variable=map_variable,
+            log_level=self._log_level,
         )
 
         step_config = {"command": command, "type": "task", "next": "dummy"}
@@ -1033,6 +1035,8 @@ class ArgoExecutor(GenericExecutor):
         if working_on.node_type not in ["success", "fail"] and working_on._get_on_failure_node():
             failure_node = dag.get_node_by_name(working_on._get_on_failure_node())
 
+            # same logic, if a template exists, retrieve it
+            # if not, create a new one
             render_obj = get_renderer(working_on)(executor=self, node=failure_node)
             render_obj.render(list_of_iter_values=list_of_iter_values.copy())
 
@@ -1083,18 +1087,19 @@ class ArgoExecutor(GenericExecutor):
         # Expose "simple" parameters as workflow arguments for dynamic behavior
         if self.expose_parameters_as_inputs:
             for key, value in self._get_parameters().items():
+                value = value.get_value()  # type: ignore
                 if isinstance(value, dict) or isinstance(value, list):
                     continue
-                env_var = EnvVar(name=key, value=value)
+
+                env_var = EnvVar(name=key, value=value)  # type: ignore
                 arguments.append(env_var)
 
         run_id_var = EnvVar(name="run_id", value="{{workflow.uid}}")
+        log_level_var = EnvVar(name="log_level", value=defaults.LOG_LEVEL)
         arguments.append(run_id_var)
+        arguments.append(log_level_var)
 
-        # # TODO: Experimental feature
-
-        # original_run_id_var = EnvVar(name="original_run_id")
-        # arguments.append(original_run_id_var)
+        # TODO: Can we do reruns?
 
         for volume in self.spec.volumes:
             self._container_volumes.append(ContainerVolume(name=volume.name, mount_path=volume.mount_path))
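The `podGC` default now keeps failed pods around for debugging and delays deletion of successful pods by 600 seconds, rather than deleting every pod on completion. The mapping from the pydantic field to the Argo-native key goes through `serialization_alias`; a trimmed-down sketch (pydantic v2 assumed, with `Spec` cut down to the one field):

```python
from typing import Dict

from pydantic import BaseModel, Field

class Spec(BaseModel):
    # Trimmed-down stand-in for the executor's Spec model
    pod_gc: Dict[str, str] = Field(
        default={"strategy": "OnPodSuccess", "deleteDelayDuration": "600s"},
        serialization_alias="podGC",
    )

# by_alias=True emits the Argo-native key names
print(Spec().model_dump(by_alias=True))
# {'podGC': {'strategy': 'OnPodSuccess', 'deleteDelayDuration': '600s'}}
```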
@@ -202,7 +202,6 @@ class LocalContainerExecutor(GenericExecutor):
                 f"Please provide a docker_image using executor_config of the step {node.name} or at global config"
             )
 
-        print("container", self._volumes)
         # TODO: Should consider using getpass.getuser() when running the docker container? Volume permissions
         container = client.containers.create(
             image=docker_image,
@@ -15,7 +15,7 @@ from pydantic import (
     field_validator,
 )
 
-from runnable import datastore, defaults, utils
+from runnable import console, datastore, defaults, utils
 from runnable.datastore import (
     JsonParameter,
     MetricParameter,
@@ -96,11 +96,10 @@ class TaskNode(ExecutableNode):
             attempt_number=attempt_number,
         )
 
-        logger.debug(f"attempt_log: {attempt_log}")
+        logger.info(f"attempt_log: {attempt_log}")
         logger.info(f"Step {self.name} completed with status: {attempt_log.status}")
 
         step_log.status = attempt_log.status
-
         step_log.attempts.append(attempt_log)
 
         return step_log
@@ -347,6 +346,7 @@ class ParallelNode(CompositeNode):
         for internal_branch_name, _ in self.branches.items():
             effective_branch_name = self._resolve_map_placeholders(internal_branch_name, map_variable=map_variable)
             branch_log = self._context.run_log_store.get_branch_log(effective_branch_name, self._context.run_id)
+
             if branch_log.status != defaults.SUCCESS:
                 step_success_bool = False
 
@@ -498,6 +498,8 @@ class MapNode(CompositeNode):
                 self.internal_name + "." + str(iter_variable), map_variable=map_variable
             )
             branch_log = self._context.run_log_store.create_branch_log(effective_branch_name)
+
+            console.print(f"Branch log created for {effective_branch_name}: {branch_log}")
             branch_log.status = defaults.PROCESSING
             self._context.run_log_store.add_branch_log(branch_log, self._context.run_id)
 
@@ -589,6 +591,8 @@ class MapNode(CompositeNode):
                 self.internal_name + "." + str(iter_variable), map_variable=map_variable
             )
             branch_log = self._context.run_log_store.get_branch_log(effective_branch_name, self._context.run_id)
+            # console.print(f"Branch log for {effective_branch_name}: {branch_log}")
+
             if branch_log.status != defaults.SUCCESS:
                 step_success_bool = False
 
@@ -14,13 +14,10 @@ from string import Template
 from typing import Any, Dict, List, Literal, Tuple
 
 from pydantic import BaseModel, ConfigDict, Field, field_validator
-
-# from rich import print
-from rich.console import Console
 from stevedore import driver
 
 import runnable.context as context
-from runnable import defaults, exceptions, parameters, utils
+from runnable import console, defaults, exceptions, parameters, utils
 from runnable.datastore import (
     JsonParameter,
     MetricParameter,
@@ -147,41 +144,26 @@ class BaseTaskType(BaseModel):
             if context_param in params:
                 params[param_name].value = params[context_param].value
 
+        console.log("Parameters available for the execution:")
+        console.log(params)
+
         logger.debug(f"Resolved parameters: {params}")
 
         if not allow_complex:
             params = {key: value for key, value in params.items() if isinstance(value, JsonParameter)}
 
-        log_file_name = self._context.executor._context_node.internal_name
-        if map_variable:
-            for _, value in map_variable.items():
-                log_file_name += "_" + str(value)
-
-        log_file_name = "".join(x for x in log_file_name if x.isalnum()) + ".execution.log"
-
-        log_file = open(log_file_name, "w")
-
         parameters_in = copy.deepcopy(params)
         f = io.StringIO()
-        task_console = Console(file=io.StringIO())
         try:
             with contextlib.redirect_stdout(f):
                 # with contextlib.nullcontext():
-                yield params, task_console
-                print(task_console.file.getvalue())  # type: ignore
+                yield params
         except Exception as e:  # pylint: disable=broad-except
+            console.log(e, style=defaults.error_style)
             logger.exception(e)
         finally:
-            task_console = None  # type: ignore
             print(f.getvalue())  # print to console
-            log_file.write(f.getvalue())  # Print to file
-
             f.close()
-            log_file.close()
-
-            # Put the log file in the catalog
-            self._context.catalog_handler.put(name=log_file.name, run_id=context.run_context.run_id)
-            os.remove(log_file.name)
 
         # Update parameters
         # This should only update the parameters that are changed at the root level.
@@ -233,7 +215,7 @@ class PythonTaskType(BaseTaskType):  # pylint: disable=too-few-public-methods
         """Execute the notebook as defined by the command."""
         attempt_log = StepAttempt(status=defaults.FAIL, start_time=str(datetime.now()))
 
-        with self.execution_context(map_variable=map_variable) as (params, task_console), self.expose_secrets() as _:
+        with self.execution_context(map_variable=map_variable) as params, self.expose_secrets() as _:
             module, func = utils.get_module_and_attr_names(self.command)
             sys.path.insert(0, os.getcwd())  # Need to add the current directory to path
             imported_module = importlib.import_module(module)
@@ -243,9 +225,10 @@ class PythonTaskType(BaseTaskType):  # pylint: disable=too-few-public-methods
             try:
                 filtered_parameters = parameters.filter_arguments_for_func(f, params.copy(), map_variable)
                 logger.info(f"Calling {func} from {module} with {filtered_parameters}")
+
                 user_set_parameters = f(**filtered_parameters)  # This is a tuple or single value
             except Exception as e:
-                task_console.log(e, style=defaults.error_style, markup=False)
+                console.log(e, style=defaults.error_style, markup=False)
                 raise exceptions.CommandCallError(f"Function call: {self.command} did not succeed.\n") from e
 
             attempt_log.input_parameters = params.copy()
@@ -289,8 +272,8 @@ class PythonTaskType(BaseTaskType):  # pylint: disable=too-few-public-methods
             except Exception as _e:
                 msg = f"Call to the function {self.command} did not succeed.\n"
                 attempt_log.message = msg
-                task_console.print_exception(show_locals=False)
-                task_console.log(_e, style=defaults.error_style)
+                console.print_exception(show_locals=False)
+                console.log(_e, style=defaults.error_style)
 
         attempt_log.end_time = str(datetime.now())
 
@@ -346,17 +329,17 @@ class NotebookTaskType(BaseTaskType):
 
         notebook_output_path = self.notebook_output_path
 
-        with self.execution_context(map_variable=map_variable, allow_complex=False) as (
-            params,
-            _,
-        ), self.expose_secrets() as _:
+        with self.execution_context(
+            map_variable=map_variable, allow_complex=False
+        ) as params, self.expose_secrets() as _:
+            copy_params = copy.deepcopy(params)
+
             if map_variable:
                 for key, value in map_variable.items():
                     notebook_output_path += "_" + str(value)
-                    params[key] = JsonParameter(kind="json", value=value)
+                    copy_params[key] = JsonParameter(kind="json", value=value)
 
             # Remove any {v}_unreduced parameters from the parameters
-            copy_params = copy.deepcopy(params)
             unprocessed_params = [k for k, v in copy_params.items() if not v.reduced]
 
             for key in list(copy_params.keys()):
@@ -397,6 +380,9 @@ class NotebookTaskType(BaseTaskType):
                 )
             except PicklingError as e:
                 logger.exception("Notebooks cannot return objects")
+                console.log("Notebooks cannot return objects", style=defaults.error_style)
+                console.log(e, style=defaults.error_style)
+
                 logger.exception(e)
                 raise
 
@@ -413,6 +399,9 @@ class NotebookTaskType(BaseTaskType):
             )
             logger.exception(msg)
             logger.exception(e)
+
+            console.log(msg, style=defaults.error_style)
+
             attempt_log.status = defaults.FAIL
 
         attempt_log.end_time = str(datetime.now())
@@ -468,7 +457,7 @@ class ShellTaskType(BaseTaskType):
                 subprocess_env[key] = secret_value
 
         try:
-            with self.execution_context(map_variable=map_variable, allow_complex=False) as (params, task_console):
+            with self.execution_context(map_variable=map_variable, allow_complex=False) as params:
                 subprocess_env.update({k: v.get_value() for k, v in params.items()})
 
                 # Json dumps all runnable environment variables
@@ -499,14 +488,14 @@ class ShellTaskType(BaseTaskType):
 
                 if proc.returncode != 0:
                     msg = ",".join(result[1].split("\n"))
-                    task_console.print(msg, style=defaults.error_style)
+                    console.print(msg, style=defaults.error_style)
                     raise exceptions.CommandCallError(msg)
 
                 # for stderr
                 for line in result[1].split("\n"):
                     if line.strip() == "":
                         continue
-                    task_console.print(line, style=defaults.warning_style)
+                    console.print(line, style=defaults.warning_style)
 
                 output_parameters: Dict[str, Parameter] = {}
                 metrics: Dict[str, Parameter] = {}
@@ -517,7 +506,7 @@ class ShellTaskType(BaseTaskType):
                         continue
 
                     logger.info(line)
-                    task_console.print(line)
+                    console.print(line)
 
                     if line.strip() == collect_delimiter:
                         # The lines from now on should be captured
@@ -558,6 +547,10 @@ class ShellTaskType(BaseTaskType):
             msg = f"Call to the command {self.command} did not succeed"
             logger.exception(msg)
             logger.exception(e)
+
+            console.log(msg, style=defaults.error_style)
+            console.log(e, style=defaults.error_style)
+
             attempt_log.status = defaults.FAIL
 
         attempt_log.end_time = str(datetime.now())
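The net effect of the tasks.py changes is that `execution_context` no longer creates a per-task `Console` or writes its own log file; it yields only the parameters, and all diagnostics flow through the shared recording console while stdout is still captured and echoed. A condensed sketch of the reworked shape (error styling and parameter bookkeeping omitted):

```python
import contextlib
import io

from runnable import console  # the shared recording console

@contextlib.contextmanager
def execution_context(params):
    f = io.StringIO()
    try:
        # Capture anything the user's code prints to stdout
        with contextlib.redirect_stdout(f):
            yield params
    except Exception as e:
        console.log(e)  # recorded, so it lands in the saved log on failure
    finally:
        print(f.getvalue())  # echo captured stdout to the real console
        f.close()
```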
@@ -4,6 +4,8 @@ import hashlib
 import json
 import logging
 import os
+import random
+import string
 import subprocess
 from collections import OrderedDict
 from datetime import datetime
@@ -394,6 +396,7 @@ def get_node_execution_command(
     node: BaseNode,
     map_variable: TypeMapVariable = None,
     over_write_run_id: str = "",
+    log_level: str = "",
 ) -> str:
     """A utility function to standardize execution call to a node via command line.
 
@@ -410,7 +413,7 @@ def get_node_execution_command(
     if over_write_run_id:
         run_id = over_write_run_id
 
-    log_level = logging.getLevelName(logger.getEffectiveLevel())
+    log_level = log_level or logging.getLevelName(logger.getEffectiveLevel())
 
     action = f"runnable execute_single_node {run_id} " f"{node._command_friendly_name()}" f" --log-level {log_level}"
 
@@ -437,6 +440,7 @@ def get_fan_command(
     node: BaseNode,
     run_id: str,
     map_variable: TypeMapVariable = None,
+    log_level: str = "",
 ) -> str:
     """
     An utility function to return the fan "in or out" command
@@ -451,7 +455,7 @@ def get_fan_command(
     Returns:
         str: The fan in or out command
     """
-    log_level = logging.getLevelName(logger.getEffectiveLevel())
+    log_level = log_level or logging.getLevelName(logger.getEffectiveLevel())
     action = (
         f"runnable fan {run_id} "
         f"{node._command_friendly_name()} "
@@ -614,3 +618,17 @@ def gather_variables() -> dict:
         variables[key] = value
 
     return variables
+
+
+def make_log_file_name(node: BaseNode, map_variable: TypeMapVariable) -> str:
+    random_tag = "".join(random.choices(string.ascii_uppercase + string.digits, k=3))
+    log_file_name = node.name
+
+    if map_variable:
+        for _, value in map_variable.items():
+            log_file_name += "_" + str(value)
+
+    log_file_name += "_" + random_tag
+    log_file_name = "".join(x for x in log_file_name if x.isalnum()) + ".execution.log"
+
+    return log_file_name
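For illustration, calling the new helper with a hypothetical node and map variable yields a flat, collision-resistant name (the stand-in class below is purely illustrative; only `.name` is used):

```python
from collections import OrderedDict

from runnable.utils import make_log_file_name

class FakeNode:
    # Hypothetical stand-in for a BaseNode
    name = "train model"

print(make_log_file_name(node=FakeNode(), map_variable=OrderedDict(chunk=3)))
# e.g. 'trainmodel3K7Q.execution.log' -- alphanumerics only, plus a random 3-char tag
```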