runnable 0.11.0.tar.gz → 0.11.1.tar.gz

Files changed (65)
  1. {runnable-0.11.0 → runnable-0.11.1}/PKG-INFO +2 -3
  2. {runnable-0.11.0 → runnable-0.11.1}/pyproject.toml +11 -13
  3. {runnable-0.11.0 → runnable-0.11.1}/runnable/__init__.py +4 -0
  4. {runnable-0.11.0 → runnable-0.11.1}/runnable/cli.py +1 -0
  5. {runnable-0.11.0 → runnable-0.11.1}/runnable/entrypoints.py +5 -0
  6. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/executor/__init__.py +2 -0
  7. {runnable-0.11.0 → runnable-0.11.1}/runnable/sdk.py +37 -13
  8. {runnable-0.11.0 → runnable-0.11.1}/runnable/tasks.py +95 -43
  9. {runnable-0.11.0 → runnable-0.11.1}/LICENSE +0 -0
  10. {runnable-0.11.0 → runnable-0.11.1}/README.md +0 -0
  11. {runnable-0.11.0 → runnable-0.11.1}/runnable/catalog.py +0 -0
  12. {runnable-0.11.0 → runnable-0.11.1}/runnable/context.py +0 -0
  13. {runnable-0.11.0 → runnable-0.11.1}/runnable/datastore.py +0 -0
  14. {runnable-0.11.0 → runnable-0.11.1}/runnable/defaults.py +0 -0
  15. {runnable-0.11.0 → runnable-0.11.1}/runnable/exceptions.py +0 -0
  16. {runnable-0.11.0 → runnable-0.11.1}/runnable/executor.py +0 -0
  17. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/__init__.py +0 -0
  18. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/catalog/__init__.py +0 -0
  19. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/catalog/file_system/__init__.py +0 -0
  20. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/catalog/file_system/implementation.py +0 -0
  21. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/catalog/k8s_pvc/__init__.py +0 -0
  22. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/catalog/k8s_pvc/implementation.py +0 -0
  23. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/catalog/k8s_pvc/integration.py +0 -0
  24. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/executor/argo/__init__.py +0 -0
  25. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/executor/argo/implementation.py +0 -0
  26. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/executor/argo/specification.yaml +0 -0
  27. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/executor/k8s_job/__init__.py +0 -0
  28. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/executor/k8s_job/implementation_FF.py +0 -0
  29. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/executor/k8s_job/integration_FF.py +0 -0
  30. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/executor/local/__init__.py +0 -0
  31. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/executor/local/implementation.py +0 -0
  32. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/executor/local_container/__init__.py +0 -0
  33. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/executor/local_container/implementation.py +0 -0
  34. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/executor/mocked/__init__.py +0 -0
  35. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/executor/mocked/implementation.py +0 -0
  36. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/executor/retry/__init__.py +0 -0
  37. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/executor/retry/implementation.py +0 -0
  38. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/nodes.py +0 -0
  39. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/run_log_store/__init__.py +0 -0
  40. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/run_log_store/chunked_file_system/__init__.py +0 -0
  41. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/run_log_store/chunked_file_system/implementation.py +0 -0
  42. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/run_log_store/chunked_k8s_pvc/__init__.py +0 -0
  43. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/run_log_store/chunked_k8s_pvc/implementation.py +0 -0
  44. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/run_log_store/chunked_k8s_pvc/integration.py +0 -0
  45. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/run_log_store/db/implementation_FF.py +0 -0
  46. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/run_log_store/db/integration_FF.py +0 -0
  47. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/run_log_store/file_system/__init__.py +0 -0
  48. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/run_log_store/file_system/implementation.py +0 -0
  49. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/run_log_store/generic_chunked.py +0 -0
  50. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/run_log_store/k8s_pvc/__init__.py +0 -0
  51. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/run_log_store/k8s_pvc/implementation.py +0 -0
  52. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/run_log_store/k8s_pvc/integration.py +0 -0
  53. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/secrets/__init__.py +0 -0
  54. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/secrets/dotenv/__init__.py +0 -0
  55. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/secrets/dotenv/implementation.py +0 -0
  56. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/secrets/env_secrets/__init__.py +0 -0
  57. {runnable-0.11.0 → runnable-0.11.1}/runnable/extensions/secrets/env_secrets/implementation.py +0 -0
  58. {runnable-0.11.0 → runnable-0.11.1}/runnable/graph.py +0 -0
  59. {runnable-0.11.0 → runnable-0.11.1}/runnable/integration.py +0 -0
  60. {runnable-0.11.0 → runnable-0.11.1}/runnable/names.py +0 -0
  61. {runnable-0.11.0 → runnable-0.11.1}/runnable/nodes.py +0 -0
  62. {runnable-0.11.0 → runnable-0.11.1}/runnable/parameters.py +0 -0
  63. {runnable-0.11.0 → runnable-0.11.1}/runnable/pickler.py +0 -0
  64. {runnable-0.11.0 → runnable-0.11.1}/runnable/secrets.py +0 -0
  65. {runnable-0.11.0 → runnable-0.11.1}/runnable/utils.py +0 -0
--- runnable-0.11.0/PKG-INFO
+++ runnable-0.11.1/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: runnable
-Version: 0.11.0
+Version: 0.11.1
 Summary: A Compute agnostic pipelining software
 Home-page: https://github.com/vijayvammi/runnable
 License: Apache-2.0
@@ -15,13 +15,12 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Provides-Extra: database
 Provides-Extra: docker
-Provides-Extra: mlflow
 Provides-Extra: notebook
 Requires-Dist: click
 Requires-Dist: click-plugins (>=1.1.1,<2.0.0)
 Requires-Dist: dill (>=0.3.8,<0.4.0)
 Requires-Dist: docker ; extra == "docker"
-Requires-Dist: mlflow-skinny ; extra == "mlflow"
+Requires-Dist: mlflow-skinny
 Requires-Dist: ploomber-engine (>=0.0.31,<0.0.32) ; extra == "notebook"
 Requires-Dist: pydantic (>=2.5,<3.0)
 Requires-Dist: rich (>=13.5.2,<14.0.0)
--- runnable-0.11.0/pyproject.toml
+++ runnable-0.11.1/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "runnable"
-version = "0.11.0"
+version = "0.11.1"
 description = "A Compute agnostic pipelining software"
 authors = ["Vijay Vammi <mesanthu@gmail.com>"]
 license = "Apache-2.0"
@@ -49,11 +49,14 @@ scikit-learn = "^1.4.1.post1"
 en-core-web-sm = { url = "https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.7.1/en_core_web_sm-3.7.1.tar.gz" }
 matplotlib = "^3.8.3"
 
+
+[tool.poetry.group.release.dependencies]
+python-semantic-release = "^9.4.2"
+
 [tool.poetry.extras]
 docker = ['docker']
 notebook = ['ploomber-engine']
 database = ["sqlalchemy"]
-mlflow = ["mlflow-skinny"]
 
 [tool.poetry.group.dev.dependencies]
 pytest = "*"
@@ -66,6 +69,7 @@ ruff = "^0.0.259"
 commit-linter = "^1.0.2"
 black = "^23.3.0"
 gitlint = "^0.19.1"
+pandas = "^2.2.2"
 
 
 [tool.poetry.scripts]
@@ -191,22 +195,16 @@ requires = ["poetry-core>=1.0.0"]
 build-backend = "poetry.core.masonry.api"
 
 [tool.semantic_release]
-tag_commit = false
-major_on_zero = true
-commit_version_number = false
-upload_to_pypi = false
-upload_to_repository = false
-upload_to_release = false
+allow_zero_version = true
+major_on_zero = false
 tag_format = "{version}"
 
+[tool.semantic_release.remote.token]
+env = "GH_TOKEN"
+
 [tool.semantic_release.branches.main]
 match = "main"
 
-[tool.semantic_release.branches."rc"]
-match = "rc"
-prerelease = true
-prerelease_token = "rc"
-
 [tool.semantic_release.remote]
 ignore_token_for_push = true
 
--- runnable-0.11.0/runnable/__init__.py
+++ runnable-0.11.1/runnable/__init__.py
@@ -29,6 +29,10 @@ from runnable.sdk import ( # noqa
     pickled,
 )
 
+## TODO: Summary should be a bit better for catalog.
+## If the execution fails, hint them about the retry executor.
+# Make the retry executor loose!
+
 # TODO: Think of model registry as a central place to store models.
 # TODO: Implement Sagemaker pipelines as a executor.
 
--- runnable-0.11.0/runnable/cli.py
+++ runnable-0.11.1/runnable/cli.py
@@ -1,3 +1,4 @@
+# A dummy to trigger the PR
 import logging
 
 import click
--- runnable-0.11.0/runnable/entrypoints.py
+++ runnable-0.11.1/runnable/entrypoints.py
@@ -172,6 +172,7 @@ def execute(
     )
     console.print("Working with context:")
     console.print(run_context)
+    console.rule(style="[dark orange]")
 
     executor = run_context.executor
 
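Note: the same `console.rule(...)` separator is added after the context dump in each of the five entrypoints (`execute`, `execute_single_node`, `execute_notebook`, `execute_function`, `fan`). A minimal sketch of what the new line draws, using a plain `rich` Console rather than runnable's shared one:

```python
# Standalone sketch: a rule is a full-width horizontal line, drawn here
# after the context dump just as the entrypoints now do.
from rich.console import Console

console = Console()
console.print("Working with context:")
console.print({"run_id": "demo-run"})  # stand-in for the real run_context
console.rule(style="dark_orange")  # the diff passes "[dark orange]" as the style string
```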
@@ -243,6 +244,7 @@ def execute_single_node(
     )
     console.print("Working with context:")
     console.print(run_context)
+    console.rule(style="[dark orange]")
 
     executor = run_context.executor
     run_context.execution_plan = defaults.EXECUTION_PLAN.CHAINED.value
@@ -296,6 +298,7 @@ def execute_notebook(
 
     console.print("Working with context:")
     console.print(run_context)
+    console.rule(style="[dark orange]")
 
     step_config = {
         "command": notebook_file,
@@ -358,6 +361,7 @@ def execute_function(
 
     console.print("Working with context:")
     console.print(run_context)
+    console.rule(style="[dark orange]")
 
     # Prepare the graph with a single node
     step_config = {
@@ -427,6 +431,7 @@ def fan(
     )
     console.print("Working with context:")
     console.print(run_context)
+    console.rule(style="[dark orange]")
 
     executor = run_context.executor
     run_context.execution_plan = defaults.EXECUTION_PLAN.CHAINED.value
--- runnable-0.11.0/runnable/extensions/executor/__init__.py
+++ runnable-0.11.1/runnable/extensions/executor/__init__.py
@@ -476,6 +476,8 @@ class GenericExecutor(BaseExecutor):
             logger.exception(e)
             raise
 
+        console.rule(style="[dark orange]")
+
         if working_on.node_type in ["success", "fail"]:
             break
 
--- runnable-0.11.0/runnable/sdk.py
+++ runnable-0.11.1/runnable/sdk.py
@@ -15,8 +15,13 @@ from pydantic import (
     field_validator,
     model_validator,
 )
-from rich import print
-from rich.progress import BarColumn, Progress, TextColumn, TimeElapsedColumn
+from rich.progress import (
+    BarColumn,
+    Progress,
+    SpinnerColumn,
+    TextColumn,
+    TimeElapsedColumn,
+)
 from rich.table import Column
 from typing_extensions import Self
 
@@ -71,7 +76,7 @@ class Catalog(BaseModel):
 
 class BaseTraversal(ABC, BaseModel):
     name: str
-    next_node: str = Field(default="", alias="next")
+    next_node: str = Field(default="", serialization_alias="next_node")
     terminate_with_success: bool = Field(default=False, exclude=True)
     terminate_with_failure: bool = Field(default=False, exclude=True)
     on_failure: str = Field(default="", alias="on_failure")
@@ -83,6 +88,12 @@ class BaseTraversal(ABC, BaseModel):
     def internal_name(self) -> str:
         return self.name
 
+    def __hash__(self):
+        """
+        Needed to Uniqueize DataCatalog objects.
+        """
+        return hash(self.name)
+
     def __rshift__(self, other: StepType) -> StepType:
         if self.next_node:
             raise Exception(f"The node {self} already has a next node: {self.next_node}")
@@ -180,6 +191,7 @@ class BaseTask(BaseTraversal):
     catalog: Optional[Catalog] = Field(default=None, alias="catalog")
     overrides: Dict[str, Any] = Field(default_factory=dict, alias="overrides")
     returns: List[Union[str, TaskReturns]] = Field(default_factory=list, alias="returns")
+    secrets: List[str] = Field(default_factory=list)
 
     @field_validator("returns", mode="before")
     @classmethod
@@ -201,7 +213,7 @@
         if not (self.terminate_with_failure or self.terminate_with_success):
             raise AssertionError("A node not being terminated must have a user defined next node")
 
-        return TaskNode.parse_from_config(self.model_dump(exclude_none=True))
+        return TaskNode.parse_from_config(self.model_dump(exclude_none=True, by_alias=True))
 
 
 class PythonTask(BaseTask):
@@ -297,9 +309,9 @@ class NotebookTask(BaseTask):
 
     """
 
-    notebook: str = Field(alias="command")
+    notebook: str = Field(serialization_alias="command")
 
-    notebook_output_path: Optional[str] = Field(default=None, alias="notebook_output_path")
+    notebook_output_path: Optional[str] = Field(default=None, alias="notebook_output_path", validate_default=True)
     optional_ploomber_args: Optional[Dict[str, Any]] = Field(default=None, alias="optional_ploomber_args")
     @computed_field
 
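The `alias` → `serialization_alias` moves (here and in `BaseTraversal` above) pair with the new `by_alias=True` in `to_node`: in pydantic v2, `alias` renames the field on *input*, while `serialization_alias` only kicks in when dumping with `by_alias=True`. A hedged illustration with a stand-in model:

```python
# Illustration of the pydantic v2 behavior the change relies on; Step is a
# stand-in, not runnable's NotebookTask.
from pydantic import BaseModel, Field

class Step(BaseModel):
    notebook: str = Field(serialization_alias="command")

s = Step(notebook="eda.ipynb")      # constructed by its field name
print(s.model_dump())               # {'notebook': 'eda.ipynb'}
print(s.model_dump(by_alias=True))  # {'command': 'eda.ipynb'} -- what parse_from_config sees
```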
@@ -526,7 +538,7 @@ class Pipeline(BaseModel):
     _dag: graph.Graph = PrivateAttr()
     model_config = ConfigDict(extra="forbid")
 
-    def _validate_path(self, path: List[StepType]) -> None:
+    def _validate_path(self, path: List[StepType], failure_path: bool = False) -> None:
         # Check if one and only one step terminates with success
         # Check no more than one step terminates with failure
 
@@ -544,7 +556,7 @@
                 raise Exception("A pipeline cannot have more than one step that terminates with failure")
             reached_failure = True
 
-        if not reached_success:
+        if not reached_success and not reached_failure:
            raise Exception("A pipeline must have at least one step that terminates with success")
 
     def _construct_path(self, path: List[StepType]) -> None:
@@ -594,11 +606,21 @@
 
         # Check all paths are valid and construct the path
         paths = [success_path] + on_failure_paths
+        failure_path = False
         for path in paths:
-            self._validate_path(path)
+            self._validate_path(path, failure_path)
             self._construct_path(path)
 
-        all_steps: List[StepType] = [step for step in success_path + on_failure_paths]  # type: ignore
+            failure_path = True
+
+        all_steps: List[StepType] = []
+
+        for path in paths:
+            for step in path:
+                all_steps.append(step)
+
+        seen = set()
+        unique = [x for x in all_steps if not (x in seen or seen.add(x))]  # type: ignore
 
         self._dag = graph.Graph(
             start_at=all_steps[0].name,
@@ -606,7 +628,7 @@
             internal_branch_name=self.internal_branch_name,
         )
 
-        for step in all_steps:
+        for step in unique:
             self._dag.add_node(step.create_node())
 
         if self.add_terminal_nodes:
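The `seen`/`seen.add` comprehension above is the usual order-preserving dedup: `set.add` returns `None` (falsy), so the `or` arm fires only on first sight and the original step order is kept. In isolation:

```python
# Order-preserving de-duplication, as used for the pipeline's step list.
steps = ["fetch", "train", "evaluate", "train", "fetch"]
seen: set = set()
unique = [x for x in steps if not (x in seen or seen.add(x))]
print(unique)  # ['fetch', 'train', 'evaluate']
```

This is also why the `__hash__` added to `BaseTraversal` matters: the steps themselves must be hashable for the `seen` set to work.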
@@ -675,8 +697,9 @@
 
         run_context.dag = graph.create_graph(dag_definition)
 
-        print("Working with context:")
-        print(run_context)
+        console.print("Working with context:")
+        console.print(run_context)
+        console.rule(style="[dark orange]")
 
         if not run_context.executor._local:
             # We are not working with non local executor
@@ -693,6 +716,7 @@
         run_context.executor.prepare_for_graph_execution()
 
         with Progress(
+            SpinnerColumn(spinner_name="runner"),
             TextColumn("[progress.description]{task.description}", table_column=Column(ratio=2)),
             BarColumn(table_column=Column(ratio=1), style="dark_orange"),
             TimeElapsedColumn(table_column=Column(ratio=1)),
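For reference, the new progress layout as a self-contained snippet; `"runner"` is assumed here to be one of rich's built-in spinner names (`python -m rich.spinner` lists them):

```python
# Standalone sketch of the Progress layout used above, outside runnable.
import time

from rich.progress import (
    BarColumn,
    Progress,
    SpinnerColumn,
    TextColumn,
    TimeElapsedColumn,
)
from rich.table import Column

with Progress(
    SpinnerColumn(spinner_name="runner"),
    TextColumn("[progress.description]{task.description}", table_column=Column(ratio=2)),
    BarColumn(table_column=Column(ratio=1), style="dark_orange"),
    TimeElapsedColumn(table_column=Column(ratio=1)),
) as progress:
    task = progress.add_task("demo pipeline", total=3)
    for _ in range(3):
        time.sleep(0.2)
        progress.advance(task)
```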
--- runnable-0.11.0/runnable/tasks.py
+++ runnable-0.11.1/runnable/tasks.py
@@ -9,13 +9,14 @@ import sys
 from datetime import datetime
 from pickle import PicklingError
 from string import Template
-from typing import Any, Dict, List, Literal, Tuple
+from typing import Any, Dict, List, Literal, Optional, Tuple
 
 from pydantic import BaseModel, ConfigDict, Field, ValidationInfo, field_validator
+from rich.console import Console
 from stevedore import driver
 
 import runnable.context as context
-from runnable import console, defaults, exceptions, parameters, utils
+from runnable import defaults, exceptions, parameters, utils
 from runnable.datastore import (
     JsonParameter,
     MetricParameter,
@@ -32,6 +33,9 @@ logging.getLogger("stevedore").setLevel(logging.CRITICAL)
 # TODO: Can we add memory peak, cpu usage, etc. to the metrics?
 
 
+console = Console(file=io.StringIO())
+
+
 class TaskReturns(BaseModel):
     name: str
     kind: Literal["json", "object", "metric"] = Field(default="json")
@@ -42,7 +46,7 @@ class BaseTaskType(BaseModel):
 
     task_type: str = Field(serialization_alias="command_type")
     node_name: str = Field(exclude=True)
-    secrets: Dict[str, str] = Field(default_factory=dict)
+    secrets: List[str] = Field(default_factory=list)
     returns: List[TaskReturns] = Field(default_factory=list, alias="returns")
 
     model_config = ConfigDict(extra="forbid")
@@ -69,15 +73,14 @@ class BaseTaskType(BaseModel):
         raise NotImplementedError()
 
     def set_secrets_as_env_variables(self):
-        for key, value in self.secrets.items():
+        for key in self.secrets:
             secret_value = context.run_context.secrets_handler.get(key)
-            self.secrets[value] = secret_value
-            os.environ[value] = secret_value
+            os.environ[key] = secret_value
 
     def delete_secrets_from_env_variables(self):
-        for _, value in self.secrets.items():
-            if value in os.environ:
-                del os.environ[value]
+        for key in self.secrets:
+            if key in os.environ:
+                del os.environ[key]
 
     def execute_command(
         self,
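With `secrets` now a plain list of names, the secret name itself becomes the environment variable, replacing the old `{secret_name: env_var_name}` indirection. A sketch with a stand-in handler:

```python
# Sketch of the list-based behavior; get_secret stands in for
# context.run_context.secrets_handler.get.
import os

def get_secret(key: str) -> str:  # hypothetical handler
    return {"API_TOKEN": "s3cr3t"}[key]

secrets = ["API_TOKEN"]  # names only; each doubles as the env var name
for key in secrets:
    os.environ[key] = get_secret(key)

assert os.environ["API_TOKEN"] == "s3cr3t"
```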
@@ -135,17 +138,21 @@ class BaseTaskType(BaseModel):
         if not allow_complex:
             params = {key: value for key, value in params.items() if isinstance(value, JsonParameter)}
 
-        log_file_name = self.node_name.replace(" ", "_") + ".execution.log"
+        log_file_name = self.node_name  # + ".execution.log"
         if map_variable:
             for _, value in map_variable.items():
                 log_file_name += "_" + str(value)
 
+        log_file_name = "".join(x for x in log_file_name if x.isalnum()) + ".execution.log"
+
         log_file = open(log_file_name, "w")
 
         f = io.StringIO()
         try:
             with contextlib.redirect_stdout(f):
+                # with contextlib.nullcontext():
                 yield params
+                print(console.file.getvalue())  # type: ignore
         except Exception as e:  # pylint: disable=broad-except
             logger.exception(e)
         finally:
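The log-file name is now reduced to alphanumeric characters before the suffix is appended, so node names containing spaces, dots, or slashes cannot yield broken paths:

```python
# The sanitization step in isolation.
node_name = "train model / fold.1"
log_file_name = "".join(x for x in node_name if x.isalnum()) + ".execution.log"
print(log_file_name)  # trainmodelfold1.execution.log
```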
@@ -156,10 +163,11 @@
             log_file.close()
 
             # Put the log file in the catalog
-            # self._context.catalog_handler.put(name=log_file.name, run_id=context.run_context.run_id)
+            self._context.catalog_handler.put(name=log_file.name, run_id=context.run_context.run_id)
             os.remove(log_file.name)
 
             # Update parameters
+            # This should only update the parameters that are changed at the root level.
             self._context.run_log_store.set_parameters(parameters=params, run_id=self._context.run_id)
 
 
@@ -219,8 +227,7 @@ class PythonTaskType(BaseTaskType):  # pylint: disable=too-few-public-methods
                 logger.info(f"Calling {func} from {module} with {filtered_parameters}")
                 user_set_parameters = f(**filtered_parameters)  # This is a tuple or single value
             except Exception as e:
-                logger.exception(e)
-                console.print(e, style=defaults.error_style)
+                console.log(e, style=defaults.error_style, markup=False)
                 raise exceptions.CommandCallError(f"Function call: {self.command} did not succeed.\n") from e
 
             attempt_log.input_parameters = params.copy()
@@ -263,9 +270,9 @@
                 attempt_log.status = defaults.SUCCESS
             except Exception as _e:
                 msg = f"Call to the function {self.command} did not succeed.\n"
-                logger.exception(_e)
                 attempt_log.message = msg
-                console.print(_e, style=defaults.error_style)
+                console.print_exception(show_locals=False)
+                console.log(_e, style=defaults.error_style)
 
             attempt_log.end_time = str(datetime.now())
 
@@ -277,7 +284,7 @@ class NotebookTaskType(BaseTaskType):
 
     task_type: str = Field(default="notebook", serialization_alias="command_type")
     command: str
-    notebook_output_path: str = Field(default="", validate_default=True)
+    notebook_output_path: Optional[str] = Field(default=None, validate_default=True)
     optional_ploomber_args: dict = {}
 
     @field_validator("command")
@@ -319,7 +326,7 @@ class NotebookTaskType(BaseTaskType):
         import ploomber_engine as pm
         from ploomber_engine.ipython import PloomberClient
 
-        notebook_output_path = self.notebook_output_path
+        notebook_output_path = self.notebook_output_path or ""
 
         with self.execution_context(
             map_variable=map_variable, allow_complex=False
@@ -424,15 +431,17 @@ class ShellTaskType(BaseTaskType):
 
         # Expose secrets as environment variables
         if self.secrets:
-            for key, value in self.secrets.items():
+            for key in self.secrets:
                 secret_value = context.run_context.secrets_handler.get(key)
-                subprocess_env[value] = secret_value
+                subprocess_env[key] = secret_value
 
         with self.execution_context(map_variable=map_variable, allow_complex=False) as params:
             subprocess_env.update({k: v.get_value() for k, v in params.items()})
 
             # Json dumps all runnable environment variables
             for key, value in subprocess_env.items():
+                if isinstance(value, str):
+                    continue
                 subprocess_env[key] = json.dumps(value)
 
             collect_delimiter = "=== COLLECT ==="
@@ -441,37 +450,80 @@
             logger.info(f"Executing shell command: {command}")
 
             capture = False
-            return_keys = [x.name for x in self.returns]
+            return_keys = {x.name: x for x in self.returns}
 
-            with subprocess.Popen(
+            proc = subprocess.Popen(
                 command,
                 shell=True,
                 env=subprocess_env,
                 stdout=subprocess.PIPE,
                 stderr=subprocess.PIPE,
                 text=True,
-            ) as proc:
-                for line in proc.stdout:  # type: ignore
-                    logger.info(line)
-                    print(line)
-
-                    if line.strip() == collect_delimiter:
-                        # The lines from now on should be captured
-                        capture = True
-                        continue
-
-                    if capture:
-                        key, value = line.strip().split("=", 1)
-                        if key in (return_keys or []):
-                            param_name = Template(key).safe_substitute(map_variable)  # type: ignore
-                            try:
-                                params[param_name] = JsonParameter(kind="json", value=json.loads(value))
-                            except json.JSONDecodeError:
-                                params[param_name] = JsonParameter(kind="json", value=value)
-
-                proc.wait()
-                if proc.returncode == 0:
-                    attempt_log.status = defaults.SUCCESS
+            )
+            result = proc.communicate()
+            logger.debug(result)
+            logger.info(proc.returncode)
+
+            if proc.returncode != 0:
+                msg = ",".join(result[1].split("\n"))
+                attempt_log.status = defaults.FAIL
+                attempt_log.end_time = str(datetime.now())
+                attempt_log.message = msg
+                console.print(msg, style=defaults.error_style)
+                return attempt_log
+
+            # for stderr
+            for line in result[1].split("\n"):
+                if line.strip() == "":
+                    continue
+                console.print(line, style=defaults.warning_style)
+
+            output_parameters: Dict[str, Parameter] = {}
+            metrics: Dict[str, Parameter] = {}
+
+            # only from stdout
+            for line in result[0].split("\n"):
+                if line.strip() == "":
+                    continue
+
+                logger.info(line)
+                console.print(line)
+
+                if line.strip() == collect_delimiter:
+                    # The lines from now on should be captured
+                    capture = True
+                    continue
+
+                if capture:
+                    key, value = line.strip().split("=", 1)
+                    if key in return_keys:
+                        task_return = return_keys[key]
+
+                        try:
+                            value = json.loads(value)
+                        except json.JSONDecodeError:
+                            value = value
+
+                        output_parameter = task_return_to_parameter(
+                            task_return=task_return,
+                            value=value,
+                        )
+
+                        if task_return.kind == "metric":
+                            metrics[task_return.name] = output_parameter
+
+                        param_name = task_return.name
+                        if map_variable:
+                            for _, v in map_variable.items():
+                                param_name = f"{param_name}_{v}"
+
+                        output_parameters[param_name] = output_parameter
+
+            attempt_log.output_parameters = output_parameters
+            attempt_log.user_defined_metrics = metrics
+            params.update(output_parameters)
+
+            attempt_log.status = defaults.SUCCESS
 
         attempt_log.end_time = str(datetime.now())
         return attempt_log
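Taken together, the rewritten shell task switches from streaming `proc.stdout` line by line to a single `communicate()`: stderr is surfaced as warnings (or becomes the failure message on a non-zero exit), and stdout after the `=== COLLECT ===` marker is parsed as `key=value` pairs, JSON-decoded when possible, and matched against the declared `returns`. A reduced sketch of that contract, without runnable's `Parameter`/`TaskReturns` machinery:

```python
# Reduced sketch of the collect-delimiter contract (POSIX shell assumed).
import json
import subprocess

script = 'echo "training..."; echo "=== COLLECT ==="; echo "accuracy=0.92"; echo "tag=nightly"'
proc = subprocess.Popen(
    script, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
)
stdout, stderr = proc.communicate()

capture, collected = False, {}
for line in stdout.split("\n"):
    if line.strip() == "=== COLLECT ===":
        capture = True  # everything after the marker is a return value
        continue
    if capture and "=" in line:
        key, value = line.strip().split("=", 1)
        try:
            collected[key] = json.loads(value)  # numbers, booleans, JSON blobs
        except json.JSONDecodeError:
            collected[key] = value  # fall back to the raw string

print(collected)  # {'accuracy': 0.92, 'tag': 'nightly'}
```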