runnable 0.12.0-py3-none-any.whl → 0.12.2-py3-none-any.whl

runnable/__init__.py CHANGED
@@ -15,6 +15,8 @@ logger = logging.getLogger(defaults.LOGGER_NAME)
  console = Console(record=True)
  console.print(":runner: Lets go!!")

+ task_console = Console(record=True)
+
  from runnable.sdk import ( # noqa
  Catalog,
  Fail,
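For context, the new `task_console` mirrors the existing `console` but is meant to record only per-task output so it can be written to a per-node log file later. A minimal sketch of how rich's recording console behaves (standard rich API; the log file name below is purely illustrative):

```python
from rich.console import Console

# record=True keeps everything printed to this console in an internal buffer.
task_console = Console(record=True)
task_console.print("hello from a task")

# export_text() returns the buffered output; save_text() writes it to a file.
# clear=True empties the buffer so the next task starts with a clean recording.
text = task_console.export_text(clear=False)
task_console.save_text("task_output.log", clear=True)
```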
runnable/defaults.py CHANGED
@@ -77,7 +77,7 @@ DEFAULT_CONTAINER_OUTPUT_PARAMETERS = "parameters.json"
  DEFAULT_EXECUTOR = ServiceConfig(type="local", config={})
  DEFAULT_RUN_LOG_STORE = ServiceConfig(type="file-system", config={})
  DEFAULT_CATALOG = ServiceConfig(type="file-system", config={})
- DEFAULT_SECRETS = ServiceConfig(type="do-nothing", config={})
+ DEFAULT_SECRETS = ServiceConfig(type="env-secrets", config={})
  DEFAULT_EXPERIMENT_TRACKER = ServiceConfig(type="do-nothing", config={})
  DEFAULT_PICKLER = ServiceConfig(type="pickle", config={})

runnable/entrypoints.py CHANGED
@@ -9,7 +9,7 @@ from rich.progress import BarColumn, Progress, TextColumn, TimeElapsedColumn
  from rich.table import Column

  import runnable.context as context
- from runnable import console, defaults, graph, utils
+ from runnable import console, defaults, graph, task_console, utils
  from runnable.defaults import RunnableConfig, ServiceConfig

  logger = logging.getLogger(defaults.LOGGER_NAME)
@@ -165,6 +165,7 @@ def execute(
  tag=tag,
  parameters_file=parameters_file,
  )
+
  console.print("Working with context:")
  console.print(run_context)
  console.rule(style="[dark orange]")
@@ -239,7 +240,7 @@ def execute_single_node(
  """
  from runnable import nodes

- console.print(f"Executing the single node: {step_name} with map variable: {map_variable}")
+ task_console.print(f"Executing the single node: {step_name} with map variable: {map_variable}")

  configuration_file = os.environ.get("RUNNABLE_CONFIGURATION_FILE", configuration_file)

@@ -250,9 +251,9 @@ def execute_single_node(
  tag=tag,
  parameters_file=parameters_file,
  )
- console.print("Working with context:")
- console.print(run_context)
- console.rule(style="[dark orange]")
+ task_console.print("Working with context:")
+ task_console.print(run_context)
+ task_console.rule(style="[dark orange]")

  executor = run_context.executor
  run_context.execution_plan = defaults.EXECUTION_PLAN.CHAINED.value
@@ -276,18 +277,18 @@ def execute_single_node(
  ## This step is where we save the log file
  try:
  executor.execute_node(node=node_to_execute, map_variable=map_variable_dict)
- except Exception: # noqa: E722
+ finally:
  log_file_name = utils.make_log_file_name(
  node=node_to_execute,
  map_variable=map_variable_dict,
  )
- console.save_text(log_file_name)
+ task_console.save_text(log_file_name)

  # Put the log file in the catalog
  run_context.catalog_handler.put(name=log_file_name, run_id=run_context.run_id)
  os.remove(log_file_name)

- executor.send_return_code(stage="execution")
+ # executor.send_return_code(stage="execution")


  def execute_notebook(
runnable/extensions/executor/__init__.py CHANGED
@@ -11,6 +11,7 @@ from runnable import (
  exceptions,
  integration,
  parameters,
+ task_console,
  utils,
  )
  from runnable.datastore import DataCatalog, JsonParameter, RunLog, StepLog
@@ -340,10 +341,18 @@ class GenericExecutor(BaseExecutor):
  node.execute_as_graph(map_variable=map_variable, **kwargs)
  return

+ task_console.export_text(clear=True)
+
  task_name = node._resolve_map_placeholders(node.internal_name, map_variable)
  console.print(f":runner: Executing the node {task_name} ... ", style="bold color(208)")
  self.trigger_job(node=node, map_variable=map_variable, **kwargs)

+ log_file_name = utils.make_log_file_name(node=node, map_variable=map_variable)
+ task_console.save_text(log_file_name, clear=True)
+
+ self._context.catalog_handler.put(name=log_file_name, run_id=self._context.run_id)
+ os.remove(log_file_name)
+
  def trigger_job(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs):
  """
  Call this method only if we are responsible for traversing the graph via
@@ -493,6 +502,7 @@ class GenericExecutor(BaseExecutor):

  logger.info(f"Finished execution of the {branch} with status {run_log.status}")

+ # We are in the root dag
  if dag == self._context.dag:
  run_log = cast(RunLog, run_log)
  console.print("Completed Execution, Summary:", style="bold color(208)")
runnable/extensions/executor/argo/implementation.py CHANGED
@@ -303,7 +303,7 @@ class DagTemplate(BaseModel):
  tasks: List[DagTaskTemplate] = Field(default=[], exclude=True)
  inputs: Optional[List[Parameter]] = Field(default=None, serialization_alias="inputs")
  parallelism: Optional[int] = None
- fail_fast: bool = Field(default=True, serialization_alias="failFast")
+ fail_fast: bool = Field(default=False, serialization_alias="failFast")

  @field_validator("parallelism")
  @classmethod
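The Argo `DagTemplate` now defaults `fail_fast` to False, which serializes to `failFast: false` so a failing branch no longer stops the other branches of the DAG immediately. A small sketch of how pydantic v2's `serialization_alias` produces that camelCase key (standard pydantic API; the model below is a trimmed stand-in, not the real class):

```python
from pydantic import BaseModel, Field

class DagTemplateSketch(BaseModel):
    fail_fast: bool = Field(default=False, serialization_alias="failFast")

# Dumping with by_alias=True emits the Argo-style key.
print(DagTemplateSketch().model_dump(by_alias=True))  # {'failFast': False}
```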
runnable/extensions/executor/local_container/implementation.py CHANGED
@@ -5,7 +5,7 @@ from typing import Dict, cast
  from pydantic import Field
  from rich import print

- from runnable import defaults, utils
+ from runnable import console, defaults, task_console, utils
  from runnable.datastore import StepLog
  from runnable.defaults import TypeMapVariable
  from runnable.extensions.executor import GenericExecutor
@@ -96,6 +96,59 @@ class LocalContainerExecutor(GenericExecutor):
  """
  return self._execute_node(node, map_variable, **kwargs)

+ def execute_from_graph(self, node: BaseNode, map_variable: TypeMapVariable = None, **kwargs):
+ """
+ This is the entry point to from the graph execution.
+
+ While the self.execute_graph is responsible for traversing the graph, this function is responsible for
+ actual execution of the node.
+
+ If the node type is:
+ * task : We can delegate to _execute_node after checking the eligibility for re-run in cases of a re-run
+ * success: We can delegate to _execute_node
+ * fail: We can delegate to _execute_node
+
+ For nodes that are internally graphs:
+ * parallel: Delegate the responsibility of execution to the node.execute_as_graph()
+ * dag: Delegate the responsibility of execution to the node.execute_as_graph()
+ * map: Delegate the responsibility of execution to the node.execute_as_graph()
+
+ Transpilers will NEVER use this method and will NEVER call ths method.
+ This method should only be used by interactive executors.
+
+ Args:
+ node (Node): The node to execute
+ map_variable (dict, optional): If the node if of a map state, this corresponds to the value of iterable.
+ Defaults to None.
+ """
+ step_log = self._context.run_log_store.create_step_log(node.name, node._get_step_log_name(map_variable))
+
+ self.add_code_identities(node=node, step_log=step_log)
+
+ step_log.step_type = node.node_type
+ step_log.status = defaults.PROCESSING
+
+ self._context.run_log_store.add_step_log(step_log, self._context.run_id)
+
+ logger.info(f"Executing node: {node.get_summary()}")
+
+ # Add the step log to the database as per the situation.
+ # If its a terminal node, complete it now
+ if node.node_type in ["success", "fail"]:
+ self._execute_node(node, map_variable=map_variable, **kwargs)
+ return
+
+ # We call an internal function to iterate the sub graphs and execute them
+ if node.is_composite:
+ node.execute_as_graph(map_variable=map_variable, **kwargs)
+ return
+
+ task_console.export_text(clear=True)
+
+ task_name = node._resolve_map_placeholders(node.internal_name, map_variable)
+ console.print(f":runner: Executing the node {task_name} ... ", style="bold color(208)")
+ self.trigger_job(node=node, map_variable=map_variable, **kwargs)
+
  def execute_job(self, node: TaskNode):
  """
  Set up the step log and call the execute node
runnable/tasks.py CHANGED
@@ -17,7 +17,7 @@ from pydantic import BaseModel, ConfigDict, Field, field_validator
  from stevedore import driver

  import runnable.context as context
- from runnable import console, defaults, exceptions, parameters, utils
+ from runnable import console, defaults, exceptions, parameters, task_console, utils
  from runnable.datastore import (
  JsonParameter,
  MetricParameter,
@@ -144,8 +144,8 @@ class BaseTaskType(BaseModel):
  if context_param in params:
  params[param_name].value = params[context_param].value

- console.log("Parameters available for the execution:")
- console.log(params)
+ task_console.log("Parameters available for the execution:")
+ task_console.log(params)

  logger.debug(f"Resolved parameters: {params}")

@@ -153,18 +153,12 @@
  params = {key: value for key, value in params.items() if isinstance(value, JsonParameter)}

  parameters_in = copy.deepcopy(params)
- f = io.StringIO()
  try:
- with contextlib.redirect_stdout(f):
- # with contextlib.nullcontext():
- yield params
+ yield params
  except Exception as e: # pylint: disable=broad-except
  console.log(e, style=defaults.error_style)
  logger.exception(e)
  finally:
- print(f.getvalue()) # print to console
- f.close()
-
  # Update parameters
  # This should only update the parameters that are changed at the root level.
  diff_parameters = self._diff_parameters(parameters_in=parameters_in, context_params=params)
@@ -226,9 +220,11 @@ class PythonTaskType(BaseTaskType): # pylint: disable=too-few-public-methods
  filtered_parameters = parameters.filter_arguments_for_func(f, params.copy(), map_variable)
  logger.info(f"Calling {func} from {module} with {filtered_parameters}")

- user_set_parameters = f(**filtered_parameters) # This is a tuple or single value
+ out_file = io.StringIO()
+ with contextlib.redirect_stdout(out_file):
+ user_set_parameters = f(**filtered_parameters) # This is a tuple or single value
+ task_console.print(out_file.getvalue())
  except Exception as e:
- console.log(e, style=defaults.error_style, markup=False)
  raise exceptions.CommandCallError(f"Function call: {self.command} did not succeed.\n") from e

  attempt_log.input_parameters = params.copy()
@@ -272,8 +268,8 @@ class PythonTaskType(BaseTaskType): # pylint: disable=too-few-public-methods
  except Exception as _e:
  msg = f"Call to the function {self.command} did not succeed.\n"
  attempt_log.message = msg
- console.print_exception(show_locals=False)
- console.log(_e, style=defaults.error_style)
+ task_console.print_exception(show_locals=False)
+ task_console.log(_e, style=defaults.error_style)

  attempt_log.end_time = str(datetime.now())

@@ -359,7 +355,11 @@ class NotebookTaskType(BaseTaskType):
  }
  kwds.update(ploomber_optional_args)

- pm.execute_notebook(**kwds)
+ out_file = io.StringIO()
+ with contextlib.redirect_stdout(out_file):
+ pm.execute_notebook(**kwds)
+ task_console.print(out_file.getvalue())
+
  context.run_context.catalog_handler.put(name=notebook_output_path, run_id=context.run_context.run_id)

  client = PloomberClient.from_path(path=notebook_output_path)
@@ -380,8 +380,8 @@ class NotebookTaskType(BaseTaskType):
  )
  except PicklingError as e:
  logger.exception("Notebooks cannot return objects")
- console.log("Notebooks cannot return objects", style=defaults.error_style)
- console.log(e, style=defaults.error_style)
+ # task_console.log("Notebooks cannot return objects", style=defaults.error_style)
+ # task_console.log(e, style=defaults.error_style)

  logger.exception(e)
  raise
@@ -400,8 +400,7 @@ class NotebookTaskType(BaseTaskType):
  logger.exception(msg)
  logger.exception(e)

- console.log(msg, style=defaults.error_style)
-
+ # task_console.log(msg, style=defaults.error_style)
  attempt_log.status = defaults.FAIL

  attempt_log.end_time = str(datetime.now())
@@ -488,14 +487,14 @@ class ShellTaskType(BaseTaskType):

  if proc.returncode != 0:
  msg = ",".join(result[1].split("\n"))
- console.print(msg, style=defaults.error_style)
+ task_console.print(msg, style=defaults.error_style)
  raise exceptions.CommandCallError(msg)

  # for stderr
  for line in result[1].split("\n"):
  if line.strip() == "":
  continue
- console.print(line, style=defaults.warning_style)
+ task_console.print(line, style=defaults.warning_style)

  output_parameters: Dict[str, Parameter] = {}
  metrics: Dict[str, Parameter] = {}
@@ -506,7 +505,7 @@
  continue

  logger.info(line)
- console.print(line)
+ task_console.print(line)

  if line.strip() == collect_delimiter:
  # The lines from now on should be captured
@@ -548,8 +547,8 @@ class ShellTaskType(BaseTaskType):
  logger.exception(msg)
  logger.exception(e)

- console.log(msg, style=defaults.error_style)
- console.log(e, style=defaults.error_style)
+ task_console.log(msg, style=defaults.error_style)
+ task_console.log(e, style=defaults.error_style)

  attempt_log.status = defaults.FAIL

{runnable-0.12.0.dist-info → runnable-0.12.2.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: runnable
- Version: 0.12.0
+ Version: 0.12.2
  Summary: A Compute agnostic pipelining software
  Home-page: https://github.com/vijayvammi/runnable
  License: Apache-2.0
{runnable-0.12.0.dist-info → runnable-0.12.2.dist-info}/RECORD RENAMED
@@ -1,10 +1,10 @@
- runnable/__init__.py,sha256=zaZVsNzp9zrpngSMNLzT_aIFkkWSpp9gb_K-GAnr9Cw,1001
+ runnable/__init__.py,sha256=G5EUe1eaTOEu1UihDBP9F43PKs3yxCYFfp8DOOpPvms,1038
  runnable/catalog.py,sha256=22OECi5TrpHErxYIhfx-lJ2vgBUi4-5V9CaYEVm98hE,4138
  runnable/cli.py,sha256=RILUrEfzernuKD3dNdXPBkqN_1OgE5GosYRuInj0FVs,9618
  runnable/context.py,sha256=QhiXJHRcEBfSKB1ijvL5yB9w44x0HCe7VEiwK1cUJ9U,1124
  runnable/datastore.py,sha256=8aQZ15KAMdre7a7G61bNRmcTeJFzOdnx_9O9UP4JQc8,27910
- runnable/defaults.py,sha256=MOX7I2S6yO4FphZaZREFQca94a20oO8uvzXLd6GLKQs,4703
- runnable/entrypoints.py,sha256=fLipciON3x2iAkBE9w00TrRPqwLxyLJeKu4V7dlgB-A,17611
+ runnable/defaults.py,sha256=r9l3jCPlmMFNdYXOj6X1uIhruO2FyLZ8kuzus9Fd6OQ,4704
+ runnable/entrypoints.py,sha256=ZdXnjEjtkOqKYyjc0AwqIdfWgxJsKFB8ilHYcUj7oZo,17630
  runnable/exceptions.py,sha256=6NIYoTAzdKyGQ9PvW1Hu7b80OS746395KiGDhM7ThH8,2526
  runnable/executor.py,sha256=xfBighQ5t_vejohip000XfxLwsgechUE1ZMIJWrZbUA,14484
  runnable/extensions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -14,9 +14,9 @@ runnable/extensions/catalog/file_system/implementation.py,sha256=mFPsAwPMNGWbHcz
  runnable/extensions/catalog/k8s_pvc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  runnable/extensions/catalog/k8s_pvc/implementation.py,sha256=oJDDI0APT7lrtjWmzYJRDHLGn3Vhbn2MdFSRYvFBUpY,436
  runnable/extensions/catalog/k8s_pvc/integration.py,sha256=OfrHbNFN8sR-wsVa4os3ajmWJFSd5H4KOHGVAmjRZTQ,1850
- runnable/extensions/executor/__init__.py,sha256=0385OpNSpjyA0GjXlLw7gZtqJFFOHGLmYHzWAGBzU98,26247
+ runnable/extensions/executor/__init__.py,sha256=-Xw8ZYVOfM-bYAieluEOhd98ncyrAyde8WSPRg1yFN0,26615
  runnable/extensions/executor/argo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- runnable/extensions/executor/argo/implementation.py,sha256=RMvHLt1VEHb4_C8dEJomistRztycThHfcX9l4CXVp_s,43910
+ runnable/extensions/executor/argo/implementation.py,sha256=FE5jDtzv5nlsnOjN9k0VjtEFQQWAAFaSXVJlroi7q6I,43911
  runnable/extensions/executor/argo/specification.yaml,sha256=wXQcm2gOQYqy-IOQIhucohS32ZrHKCfGA5zZ0RraPYc,1276
  runnable/extensions/executor/k8s_job/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  runnable/extensions/executor/k8s_job/implementation_FF.py,sha256=1IfVG1GRcJcVFzQ-WhkJsmzdJuj51QMxXylY9UrWM0U,10259
@@ -24,7 +24,7 @@ runnable/extensions/executor/k8s_job/integration_FF.py,sha256=pG6HKhPMgCRIgu1PAn
  runnable/extensions/executor/local/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  runnable/extensions/executor/local/implementation.py,sha256=e8Tzv-FgQmJeUXVut96jeNERTR83JVG_zkQZMEjCVAs,2469
  runnable/extensions/executor/local_container/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- runnable/extensions/executor/local_container/implementation.py,sha256=Kley2sjdZMe7E5CHjjy4YaJ5YErNGvZmpVW33h6D0W0,14848
+ runnable/extensions/executor/local_container/implementation.py,sha256=6iwt9tNCQawVEfIErzoqys2hrErWK0DHcAOkO49Ov9w,17322
  runnable/extensions/executor/mocked/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  runnable/extensions/executor/mocked/implementation.py,sha256=ChvlcLGpBxO6QwJcoqhBgKBR6NfWVnMdOWKQhMgcEjY,5762
  runnable/extensions/executor/retry/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -55,10 +55,10 @@ runnable/parameters.py,sha256=yZkMDnwnkdYXIwQ8LflBzn50Y0xRGxEvLlxwno6ovvs,5163
  runnable/pickler.py,sha256=5SDNf0miMUJ3ZauhQdzwk8_t-9jeOqaTjP5bvRnu9sU,2685
  runnable/sdk.py,sha256=pmxGO5HSrcEGYR_VmVk6ObfddFHo5slxiaDI6LuIdKM,27852
  runnable/secrets.py,sha256=dakb7WRloWVo-KpQp6Vy4rwFdGi58BTlT4OifQY106I,2324
- runnable/tasks.py,sha256=a5Fkvl58ku9JPo-qDDJahemD-6a7jJlXlBEowvmeKuc,21910
+ runnable/tasks.py,sha256=7sAtFMu6ELD3PJoisNbm47rY2wPKVzz5_h4s_QMep0k,22043
  runnable/utils.py,sha256=fXOLoFZKYqh3wQgzA2V-VZOu-dSgLPGqCZIbMmsNzOw,20016
- runnable-0.12.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- runnable-0.12.0.dist-info/METADATA,sha256=1iBCYGj4Pv_Zw3wSYX8dSh3PVEnvIVFsG4lhthrO_V4,17020
- runnable-0.12.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- runnable-0.12.0.dist-info/entry_points.txt,sha256=amb6ISqKBSIz47um8_6LKnYgpoZ4d_p6-O1-7uUb1cU,1447
- runnable-0.12.0.dist-info/RECORD,,
+ runnable-0.12.2.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ runnable-0.12.2.dist-info/METADATA,sha256=8u2DRuU-gQvmW66oAqKABw5BYsCB0sJde6E_u-coEiE,17020
+ runnable-0.12.2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ runnable-0.12.2.dist-info/entry_points.txt,sha256=-csEf-FCAqtOboXaBSzpgaTffz1HwlylYPxnppndpFE,1494
+ runnable-0.12.2.dist-info/RECORD,,
{runnable-0.12.0.dist-info → runnable-0.12.2.dist-info}/entry_points.txt RENAMED
@@ -32,6 +32,7 @@ file-system=runnable.extensions.run_log_store.file_system.implementation:FileSys
  [secrets]
  do-nothing=runnable.secrets:DoNothingSecretManager
  dotenv=runnable.extensions.secrets.dotenv.implementation:DotEnvSecrets
+ env-secrets=runnable.secrets:EnvSecretsManager

  [tasks]
  notebook=runnable.tasks:NotebookTaskType
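The new `env-secrets` entry in the `[secrets]` group is what lets the `DEFAULT_SECRETS` change above resolve to `runnable.secrets:EnvSecretsManager` at runtime. A hedged sketch of how a stevedore driver is typically loaded from such an entry point (the namespace string below is an assumption; the plugin namespace runnable actually uses may differ):

```python
from stevedore import driver

# Load the plugin registered under the given entry-point group by name.
# "secrets" as the namespace is illustrative only.
manager = driver.DriverManager(
    namespace="secrets",
    name="env-secrets",
    invoke_on_load=True,
)
secrets_handler = manager.driver  # an instance of the registered EnvSecretsManager
```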