runnable 0.9.1-py3-none-any.whl → 0.11.0-py3-none-any.whl

runnable/sdk.py CHANGED
@@ -6,25 +6,45 @@ from abc import ABC, abstractmethod
  from pathlib import Path
  from typing import Any, Callable, Dict, List, Optional, Union
 
- from pydantic import BaseModel, ConfigDict, Field, PrivateAttr, computed_field, field_validator, model_validator
+ from pydantic import (
+ BaseModel,
+ ConfigDict,
+ Field,
+ PrivateAttr,
+ computed_field,
+ field_validator,
+ model_validator,
+ )
  from rich import print
+ from rich.progress import BarColumn, Progress, TextColumn, TimeElapsedColumn
+ from rich.table import Column
  from typing_extensions import Self
 
- from runnable import defaults, entrypoints, graph, utils
- from runnable.extensions.nodes import FailNode, MapNode, ParallelNode, StubNode, SuccessNode, TaskNode
+ from runnable import console, defaults, entrypoints, graph, utils
+ from runnable.extensions.nodes import (
+ FailNode,
+ MapNode,
+ ParallelNode,
+ StubNode,
+ SuccessNode,
+ TaskNode,
+ )
  from runnable.nodes import TraversalNode
  from runnable.tasks import TaskReturns
 
  logger = logging.getLogger(defaults.LOGGER_NAME)
 
- StepType = Union["Stub", "PythonTask", "NotebookTask", "ShellTask", "Success", "Fail", "Parallel", "Map"]
- TraversalTypes = Union["Stub", "PythonTask", "NotebookTask", "ShellTask", "Parallel", "Map"]
+ StepType = Union["Stub", "PythonTask", "NotebookTask", "ShellTask", "Parallel", "Map"]
 
 
  def pickled(name: str) -> TaskReturns:
  return TaskReturns(name=name, kind="object")
 
 
+ def metric(name: str) -> TaskReturns:
+ return TaskReturns(name=name, kind="metric")
+
+
  class Catalog(BaseModel):
  """
  Use to instruct a task to sync data from/to the central catalog.
@@ -346,7 +366,7 @@ class Stub(BaseTraversal):
 
  """
 
- model_config = ConfigDict(extra="allow")
+ model_config = ConfigDict(extra="ignore")
  catalog: Optional[Catalog] = Field(default=None, alias="catalog")
 
  def create_node(self) -> StubNode:
@@ -496,8 +516,7 @@ class Pipeline(BaseModel):
 
  """
 
- steps: List[StepType]
- start_at: TraversalTypes
+ steps: List[Union[StepType, List[StepType]]]
  name: str = ""
  description: str = ""
  add_terminal_nodes: bool = True # Adds "success" and "fail" nodes
@@ -507,20 +526,87 @@ class Pipeline(BaseModel):
  _dag: graph.Graph = PrivateAttr()
  model_config = ConfigDict(extra="forbid")
 
+ def _validate_path(self, path: List[StepType]) -> None:
+ # Check if one and only one step terminates with success
+ # Check no more than one step terminates with failure
+
+ reached_success = False
+ reached_failure = False
+
+ for step in path:
+ if step.terminate_with_success:
+ if reached_success:
+ raise Exception("A pipeline cannot have more than one step that terminates with success")
+ reached_success = True
+ continue
+ if step.terminate_with_failure:
+ if reached_failure:
+ raise Exception("A pipeline cannot have more than one step that terminates with failure")
+ reached_failure = True
+
+ if not reached_success:
+ raise Exception("A pipeline must have at least one step that terminates with success")
+
+ def _construct_path(self, path: List[StepType]) -> None:
+ prev_step = path[0]
+
+ for step in path:
+ if step == prev_step:
+ continue
+
+ if prev_step.terminate_with_success or prev_step.terminate_with_failure:
+ raise Exception(f"A step that terminates with success/failure cannot have a next step: {prev_step}")
+
+ if prev_step.next_node and prev_step.next_node not in ["success", "fail"]:
+ raise Exception(f"Step already has a next node: {prev_step} ")
+
+ prev_step.next_node = step.name
+ prev_step = step
+
  def model_post_init(self, __context: Any) -> None:
- self.steps = [model.model_copy(deep=True) for model in self.steps]
+ """
+ The sequence of steps can either be:
+ [step1, step2,..., stepN, [step11, step12,..., step1N], [step21, step22,...,]]
+ indicates:
+ - step1 > step2 > ... > stepN
+ - We expect terminate with success or fail to be explicitly stated on a step.
+ - If it is stated, the step cannot have a next step defined apart from "success" and "fail".
+
+ The inner list of steps is only to accommodate on-failure behaviors.
+ - For sake of simplicity, lets assume that it has the same behavior as the happy pipeline.
+ - A task which was already seen should not be part of this.
+ - There should be at least one step which terminates with success
+
+ Any definition of pipeline should have one node that terminates with success.
+ """
+
+ success_path: List[StepType] = []
+ on_failure_paths: List[List[StepType]] = []
+
+ for step in self.steps:
+ if isinstance(step, (Stub, PythonTask, NotebookTask, ShellTask, Parallel, Map)):
+ success_path.append(step)
+ continue
+ on_failure_paths.append(step)
+
+ if not success_path:
+ raise Exception("There should be some success path")
+
+ # Check all paths are valid and construct the path
+ paths = [success_path] + on_failure_paths
+ for path in paths:
+ self._validate_path(path)
+ self._construct_path(path)
+
+ all_steps: List[StepType] = [step for step in success_path + on_failure_paths] # type: ignore
 
  self._dag = graph.Graph(
- start_at=self.start_at.name,
+ start_at=all_steps[0].name,
  description=self.description,
  internal_branch_name=self.internal_branch_name,
  )
 
- for step in self.steps:
- if step.name == self.start_at.name:
- if isinstance(step, Success) or isinstance(step, Fail):
- raise Exception("A success or fail node cannot be the start_at of the graph")
- assert step.next_node
+ for step in all_steps:
  self._dag.add_node(step.create_node())
 
  if self.add_terminal_nodes:
@@ -569,7 +655,7 @@ class Pipeline(BaseModel):
  py_to_yaml = os.environ.get("RUNNABLE_PY_TO_YAML", "false")
 
  if py_to_yaml == "true":
- return
+ return {}
 
  logger.setLevel(log_level)
 
@@ -606,8 +692,27 @@ class Pipeline(BaseModel):
  # Prepare for graph execution
  run_context.executor.prepare_for_graph_execution()
 
- logger.info("Executing the graph")
- run_context.executor.execute_graph(dag=run_context.dag)
+ with Progress(
+ TextColumn("[progress.description]{task.description}", table_column=Column(ratio=2)),
+ BarColumn(table_column=Column(ratio=1), style="dark_orange"),
+ TimeElapsedColumn(table_column=Column(ratio=1)),
+ console=console,
+ expand=True,
+ ) as progress:
+ try:
+ run_context.progress = progress
+ pipeline_execution_task = progress.add_task("[dark_orange] Starting execution .. ", total=1)
+ run_context.executor.execute_graph(dag=run_context.dag)
+
+ run_log = run_context.run_log_store.get_run_log_by_id(run_id=run_context.run_id, full=False)
+
+ if run_log.status == defaults.SUCCESS:
+ progress.update(pipeline_execution_task, description="[green] Success", completed=True)
+ else:
+ progress.update(pipeline_execution_task, description="[red] Failed", completed=True)
+ except Exception as e: # noqa: E722
+ console.print(e, style=defaults.error_style)
+ progress.update(pipeline_execution_task, description="[red] Errored execution", completed=True)
 
  if run_context.executor._local:
  return run_context.run_log_store.get_run_log_by_id(run_id=run_context.run_id)
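 
Taken together, the sdk.py changes drop the explicit start_at field in favour of an ordered steps list (a nested list declares an on-failure path), add a metric() helper alongside pickled(), and wrap graph execution in a rich progress bar. A minimal sketch of how a pipeline might be declared against this 0.11.0 surface, assuming the usual top-level re-exports from runnable/__init__.py (not shown in this diff); the step names are illustrative and Stub stands in for real tasks:

    from runnable import Pipeline, Stub

    # Illustrative steps; Stub is a no-op placeholder node.
    prepare = Stub(name="prepare")
    train = Stub(name="train", terminate_with_success=True)
    cleanup = Stub(name="cleanup", terminate_with_success=True)  # on-failure path

    # model_post_init chains prepare -> train, checks that every path has exactly
    # one success terminator, and uses the first step as the start of the graph.
    pipeline = Pipeline(steps=[prepare, train, [cleanup]])
    pipeline.execute()
 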
runnable/tasks.py CHANGED
@@ -15,8 +15,14 @@ from pydantic import BaseModel, ConfigDict, Field, ValidationInfo, field_validat
  from stevedore import driver
 
  import runnable.context as context
- from runnable import defaults, parameters, utils
- from runnable.datastore import JsonParameter, ObjectParameter, Parameter, StepAttempt
+ from runnable import console, defaults, exceptions, parameters, utils
+ from runnable.datastore import (
+ JsonParameter,
+ MetricParameter,
+ ObjectParameter,
+ Parameter,
+ StepAttempt,
+ )
  from runnable.defaults import TypeMapVariable
 
  logger = logging.getLogger(defaults.LOGGER_NAME)
@@ -28,7 +34,7 @@ logging.getLogger("stevedore").setLevel(logging.CRITICAL)
 
  class TaskReturns(BaseModel):
  name: str
- kind: Literal["json", "object"] = Field(default="json")
+ kind: Literal["json", "object", "metric"] = Field(default="json")
 
 
  class BaseTaskType(BaseModel):
@@ -41,6 +47,9 @@ class BaseTaskType(BaseModel):
 
  model_config = ConfigDict(extra="forbid")
 
+ def get_summary(self) -> Dict[str, Any]:
+ return self.model_dump(by_alias=True, exclude_none=True)
+
  @property
  def _context(self):
  return context.run_context
@@ -98,12 +107,15 @@ class BaseTaskType(BaseModel):
  self.set_secrets_as_env_variables()
  try:
  yield
+ except Exception as e: # pylint: disable=broad-except
+ logger.exception(e)
  finally:
  self.delete_secrets_from_env_variables()
 
  @contextlib.contextmanager
  def execution_context(self, map_variable: TypeMapVariable = None, allow_complex: bool = True):
  params = self._context.run_log_store.get_parameters(run_id=self._context.run_id).copy()
+ logger.info(f"Parameters available for the execution: {params}")
 
  for param_name, param in params.items():
  # Any access to unreduced param should be replaced.
@@ -118,6 +130,8 @@ class BaseTaskType(BaseModel):
  if context_param in params:
  params[param_name].value = params[context_param].value
 
+ logger.debug(f"Resolved parameters: {params}")
+
  if not allow_complex:
  params = {key: value for key, value in params.items() if isinstance(value, JsonParameter)}
 
@@ -132,6 +146,8 @@ class BaseTaskType(BaseModel):
  try:
  with contextlib.redirect_stdout(f):
  yield params
+ except Exception as e: # pylint: disable=broad-except
+ logger.exception(e)
  finally:
  print(f.getvalue()) # print to console
  log_file.write(f.getvalue()) # Print to file
@@ -140,15 +156,12 @@ class BaseTaskType(BaseModel):
  log_file.close()
 
  # Put the log file in the catalog
- catalog_handler = context.run_context.catalog_handler
- catalog_handler.put(name=log_file.name, run_id=context.run_context.run_id)
+ # self._context.catalog_handler.put(name=log_file.name, run_id=context.run_context.run_id)
  os.remove(log_file.name)
 
  # Update parameters
  self._context.run_log_store.set_parameters(parameters=params, run_id=self._context.run_id)
 
- return True # To suppress exceptions
-
 
  def task_return_to_parameter(task_return: TaskReturns, value: Any) -> Parameter:
  # implicit support for pydantic models
@@ -161,6 +174,9 @@ def task_return_to_parameter(task_return: TaskReturns, value: Any) -> Parameter:
  if task_return.kind == "json":
  return JsonParameter(kind="json", value=value)
 
+ if task_return.kind == "metric":
+ return MetricParameter(kind="metric", value=value)
+
  if task_return.kind == "object":
  obj = ObjectParameter(value=task_return.name, kind="object")
  obj.put_object(data=value)
@@ -197,13 +213,23 @@ class PythonTaskType(BaseTaskType): # pylint: disable=too-few-public-methods
  imported_module = importlib.import_module(module)
  f = getattr(imported_module, func)
 
- filtered_parameters = parameters.filter_arguments_for_func(f, params.copy(), map_variable)
- logger.info(f"Calling {func} from {module} with {filtered_parameters}")
-
  try:
- user_set_parameters = f(**filtered_parameters) # This is a tuple or single value
+ try:
+ filtered_parameters = parameters.filter_arguments_for_func(f, params.copy(), map_variable)
+ logger.info(f"Calling {func} from {module} with {filtered_parameters}")
+ user_set_parameters = f(**filtered_parameters) # This is a tuple or single value
+ except Exception as e:
+ logger.exception(e)
+ console.print(e, style=defaults.error_style)
+ raise exceptions.CommandCallError(f"Function call: {self.command} did not succeed.\n") from e
+
  attempt_log.input_parameters = params.copy()
 
+ if map_variable:
+ attempt_log.input_parameters.update(
+ {k: JsonParameter(value=v, kind="json") for k, v in map_variable.items()}
+ )
+
  if self.returns:
  if not isinstance(user_set_parameters, tuple): # make it a tuple
  user_set_parameters = (user_set_parameters,)
@@ -212,6 +238,7 @@ class PythonTaskType(BaseTaskType): # pylint: disable=too-few-public-methods
  raise ValueError("Returns task signature does not match the function returns")
 
  output_parameters: Dict[str, Parameter] = {}
+ metrics: Dict[str, Parameter] = {}
 
  for i, task_return in enumerate(self.returns):
  output_parameter = task_return_to_parameter(
@@ -219,6 +246,9 @@ class PythonTaskType(BaseTaskType): # pylint: disable=too-few-public-methods
  value=user_set_parameters[i],
  )
 
+ if task_return.kind == "metric":
+ metrics[task_return.name] = output_parameter
+
  param_name = task_return.name
  if map_variable:
  for _, v in map_variable.items():
@@ -227,14 +257,15 @@ class PythonTaskType(BaseTaskType): # pylint: disable=too-few-public-methods
  output_parameters[param_name] = output_parameter
 
  attempt_log.output_parameters = output_parameters
+ attempt_log.user_defined_metrics = metrics
  params.update(output_parameters)
 
  attempt_log.status = defaults.SUCCESS
  except Exception as _e:
- msg = f"Call to the function {self.command} with {filtered_parameters} did not succeed.\n"
- logger.exception(msg)
+ msg = f"Call to the function {self.command} did not succeed.\n"
  logger.exception(_e)
- attempt_log.status = defaults.FAIL
+ attempt_log.message = msg
+ console.print(_e, style=defaults.error_style)
 
  attempt_log.end_time = str(datetime.now())
 
@@ -296,7 +327,6 @@ class NotebookTaskType(BaseTaskType):
  if map_variable:
  for key, value in map_variable.items():
  notebook_output_path += "_" + str(value)
-
  params[key] = value
 
  notebook_params = {k: v.get_value() for k, v in params.items()}
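 
In tasks.py, TaskReturns gains a third kind, "metric", which is materialised as a MetricParameter and copied onto attempt_log.user_defined_metrics as well as the parameter store. A small sketch of the kind-to-parameter mapping, using only names visible in this diff (the values are illustrative; "object" returns are left out because put_object needs an active run context):

    from runnable.datastore import JsonParameter, MetricParameter
    from runnable.tasks import TaskReturns, task_return_to_parameter

    accuracy = TaskReturns(name="accuracy", kind="metric")  # new kind in this release
    threshold = TaskReturns(name="threshold")               # kind defaults to "json"

    # task_return_to_parameter picks the Parameter subclass from the declared kind.
    assert isinstance(task_return_to_parameter(task_return=accuracy, value=0.93), MetricParameter)
    assert isinstance(task_return_to_parameter(task_return=threshold, value=0.5), JsonParameter)
 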
runnable/utils.py CHANGED
@@ -538,7 +538,7 @@ def get_provider_by_name_and_type(service_type: str, service_details: defaults.S
  if "config" in service_details:
  service_config = service_details.get("config", {})
 
- logger.info(f"Trying to get a service of {service_type} of the name {service_name} with config: {service_config}")
+ logger.debug(f"Trying to get a service of {service_type} of the name {service_name} with config: {service_config}")
  try:
  mgr = driver.DriverManager(
  namespace=namespace,
@@ -548,6 +548,7 @@ def get_provider_by_name_and_type(service_type: str, service_details: defaults.S
  )
  return mgr.driver
  except Exception as _e:
+ logger.exception(f"Could not find the service of type: {service_type} with config: {service_details}")
  raise Exception(f"Could not find the service of type: {service_type} with config: {service_details}") from _e
 
 
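 
For context, get_provider_by_name_and_type resolves executors, catalogs, run log stores and secrets handlers through stevedore entry points (the groups listed in entry_points.txt further down); the change above only demotes the lookup message to debug and logs the traceback before re-raising. A hedged sketch of that lookup pattern; the namespace and plugin name are illustrative, not taken from this diff:

    from stevedore import driver

    # Load one plugin registered under an entry-point group, mirroring the
    # DriverManager call in get_provider_by_name_and_type.
    mgr = driver.DriverManager(
        namespace="executor",  # entry-point group, e.g. [executor] in entry_points.txt
        name="local",          # plugin name within that group
        invoke_on_load=True,   # instantiate the plugin on load
        invoke_kwds={},        # forwarded service config
    )
    service = mgr.driver
 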
{runnable-0.9.1.dist-info → runnable-0.11.0.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: runnable
- Version: 0.9.1
+ Version: 0.11.0
  Summary: A Compute agnostic pipelining software
  Home-page: https://github.com/vijayvammi/runnable
  License: Apache-2.0
{runnable-0.9.1.dist-info → runnable-0.11.0.dist-info}/RECORD RENAMED
@@ -1,49 +1,45 @@
- runnable/__init__.py,sha256=MEJVptRwgHJyQa07gpc0sGcdxoxwX-crhoyAZUEYjBs,619
- runnable/catalog.py,sha256=OUaQ73DWfTsMmq2sKlBn0aDz031mupladNGVuF3pWm0,3985
- runnable/cli.py,sha256=AZiZf2eRV7zMA7APg6dyTHWqK1--bQwdLiYP8olaKis,9589
- runnable/context.py,sha256=GOp-dRPMgYsbgjGy39yVsaWgu5l4RgUxN9-4nggoPmg,1048
- runnable/datastore.py,sha256=xtO6atOh1Zf_olvdzh3W_XisAviL26uZPR491N7tn-E,25354
- runnable/defaults.py,sha256=lN9HUjTaGGYBC8OPCGTNZXJ4xlOOzqUt-zDjY7GLnLI,4770
- runnable/entrypoints.py,sha256=tDQNSX3yx7zU5tqa1hxn3j4yHXfJ9Svg9CpPmEdBQ2o,15359
- runnable/exceptions.py,sha256=R__RzUWs7Ow7m7yqawi2w09XXI4OqmA67yeXkECM0xw,2419
+ runnable/__init__.py,sha256=BzuufxKGqgYvd-v4fwhH7lgYGCzOnwY7ca5pZZasgx8,720
+ runnable/catalog.py,sha256=22OECi5TrpHErxYIhfx-lJ2vgBUi4-5V9CaYEVm98hE,4138
+ runnable/cli.py,sha256=gDKk-eg1Vlf0NAWEFqdFEUfcDJdBawRPgSMpNqpbsOc,9590
+ runnable/context.py,sha256=QhiXJHRcEBfSKB1ijvL5yB9w44x0HCe7VEiwK1cUJ9U,1124
+ runnable/datastore.py,sha256=EgKi4_b5g6KbInpjMyw8Xwr-EgcSGi1Lx2u5vp4amSQ,27672
+ runnable/defaults.py,sha256=MOX7I2S6yO4FphZaZREFQca94a20oO8uvzXLd6GLKQs,4703
+ runnable/entrypoints.py,sha256=-GbwFCUPkLfXpvslD1abXtlfwZevSocqc6QK8TPL40Q,16197
+ runnable/exceptions.py,sha256=6NIYoTAzdKyGQ9PvW1Hu7b80OS746395KiGDhM7ThH8,2526
  runnable/executor.py,sha256=xfBighQ5t_vejohip000XfxLwsgechUE1ZMIJWrZbUA,14484
- runnable/experiment_tracker.py,sha256=bX2Vr73f3bsdnWqxjMSSiKA-WwqkUHfUzJQqZoQBpvY,3668
  runnable/extensions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  runnable/extensions/catalog/__init__.py,sha256=uXZ6D-Myr_J4HnBA4F5Hd7LZ0IAjQiFQYxRhMzejhQc,761
  runnable/extensions/catalog/file_system/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- runnable/extensions/catalog/file_system/implementation.py,sha256=UNrJFV_tyMpknFKoCMXbBt-CWL4UpDxcwnMh43zVfpc,8958
+ runnable/extensions/catalog/file_system/implementation.py,sha256=9j920o9SULdcVp1Mr8FgeuV-Sv5bR3w5tcohChxHnak,9130
  runnable/extensions/catalog/k8s_pvc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  runnable/extensions/catalog/k8s_pvc/implementation.py,sha256=oJDDI0APT7lrtjWmzYJRDHLGn3Vhbn2MdFSRYvFBUpY,436
  runnable/extensions/catalog/k8s_pvc/integration.py,sha256=OfrHbNFN8sR-wsVa4os3ajmWJFSd5H4KOHGVAmjRZTQ,1850
- runnable/extensions/executor/__init__.py,sha256=tIWUBWu5MNqHrfbOD83AgP59OkM2aJh3JUUQItIP6i4,24187
+ runnable/extensions/executor/__init__.py,sha256=FTXtI_etk_eyKRIDpCiTrYfV5yCkfJMl-cuXVDzEqY8,26649
  runnable/extensions/executor/argo/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- runnable/extensions/executor/argo/implementation.py,sha256=wlDSD5RfZmrdQ65abXTZSdh4KUGv-IzQtbHVtDXNUgQ,43795
+ runnable/extensions/executor/argo/implementation.py,sha256=_BfxCe742S6uV-7PuQ53KjzwY-8Rq-5y9txOXMYf20U,43670
  runnable/extensions/executor/argo/specification.yaml,sha256=wXQcm2gOQYqy-IOQIhucohS32ZrHKCfGA5zZ0RraPYc,1276
  runnable/extensions/executor/k8s_job/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  runnable/extensions/executor/k8s_job/implementation_FF.py,sha256=1IfVG1GRcJcVFzQ-WhkJsmzdJuj51QMxXylY9UrWM0U,10259
  runnable/extensions/executor/k8s_job/integration_FF.py,sha256=pG6HKhPMgCRIgu1PAnBvsfJQE1FxcjuSiC2I-Hn5sWo,2165
  runnable/extensions/executor/local/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- runnable/extensions/executor/local/implementation.py,sha256=r9dSf2lSBGHihbGNhq_GPe3fHKLjf4KI3l-B4w2b5Ls,2468
+ runnable/extensions/executor/local/implementation.py,sha256=e8Tzv-FgQmJeUXVut96jeNERTR83JVG_zkQZMEjCVAs,2469
  runnable/extensions/executor/local_container/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  runnable/extensions/executor/local_container/implementation.py,sha256=6kYMgdgE5JxZkVAidxsBSpqkHvyKMfEctgZWSZQEpXA,13979
  runnable/extensions/executor/mocked/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  runnable/extensions/executor/mocked/implementation.py,sha256=ChdUyUsiXXjG_v80d0uLp76Nz4jqqGEry36gs9gNn9k,5082
  runnable/extensions/executor/retry/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  runnable/extensions/executor/retry/implementation.py,sha256=ZBSYpxSiAIt-SXPD-qIPP-MMo8b7sQ6UKOTJemAjXlI,6625
- runnable/extensions/experiment_tracker/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- runnable/extensions/experiment_tracker/mlflow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- runnable/extensions/experiment_tracker/mlflow/implementation.py,sha256=sc1Wm1LCf7wBX0BYVx3YVdwsR72AE0qIrzl7cEfIl58,3045
- runnable/extensions/nodes.py,sha256=Em-vs21ZfhyvFh-s6NZVhcUydSrX_mY8mb8-NUAO_w8,29883
+ runnable/extensions/nodes.py,sha256=5soHRhfT8FY2vnQa4kvRqeVphTq_t-GSw-ExNZfgB30,31965
  runnable/extensions/run_log_store/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  runnable/extensions/run_log_store/chunked_file_system/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- runnable/extensions/run_log_store/chunked_file_system/implementation.py,sha256=koHNG-Cv7mpt-rTNC3tiLBR8HcjnQ9L-EvQ4dtKkGRA,3170
+ runnable/extensions/run_log_store/chunked_file_system/implementation.py,sha256=wtOeREr9QyIuMHLCT7o_eDCJVCDsBvwmk89kos3dhfQ,3326
  runnable/extensions/run_log_store/chunked_k8s_pvc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  runnable/extensions/run_log_store/chunked_k8s_pvc/implementation.py,sha256=iGzy-s1eT_kAJP7XgzDLmEMOGrBLvACIiGE_wM62jGE,579
  runnable/extensions/run_log_store/chunked_k8s_pvc/integration.py,sha256=atzdTy5HJ-bZsd6AzDP8kYRI1TshKxviBKeqY359TUs,1979
  runnable/extensions/run_log_store/db/implementation_FF.py,sha256=oEiG5ASWYYbwlBbnryKarQENB-L_yOsnZahbj2U0GdQ,5155
  runnable/extensions/run_log_store/db/integration_FF.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  runnable/extensions/run_log_store/file_system/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- runnable/extensions/run_log_store/file_system/implementation.py,sha256=PcaM8IKcj-b2iNE9Zup2eC6Y2-987uQzzb0skdd1QX4,4114
+ runnable/extensions/run_log_store/file_system/implementation.py,sha256=WxxfGCaDAB5zHMM3zv9aeDwXZ4DhtyzjXOjfjvyDoZ4,4288
  runnable/extensions/run_log_store/generic_chunked.py,sha256=rcY5f-MIYUUiM5iQnDHICOh7cKiOUSCeaxcBG9_fz-U,19390
  runnable/extensions/run_log_store/k8s_pvc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  runnable/extensions/run_log_store/k8s_pvc/implementation.py,sha256=tLgXy9HUB_vlFVQ0Itk6PpNU3GlCOILN4vA3fm80jXI,542
@@ -53,18 +49,18 @@ runnable/extensions/secrets/dotenv/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQe
  runnable/extensions/secrets/dotenv/implementation.py,sha256=3J5pofWahdZbnwnETwpspE5-PKyvmZF_vkfwA1X_bkA,3365
  runnable/extensions/secrets/env_secrets/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  runnable/extensions/secrets/env_secrets/implementation.py,sha256=5XiHdJvIr0-jkl4fGfEf26UsgE5Q2Z4oCc0RwjlJdJA,1236
- runnable/graph.py,sha256=w7qEnTuh6lvfeVSAbqNlwMufPqWL9espJI5s_G2prHM,15871
- runnable/integration.py,sha256=_jEm5PXl8pbvCmUGKjtssfZ2YtYnMzuRnjr2NySMqzs,7176
+ runnable/graph.py,sha256=18IpGYw5kgHP32m12WwXscx-kG5Kx-AuWS1LFbMfBLg,16202
+ runnable/integration.py,sha256=eb9qJVZR7Ehg0N1UnGPuyjJvoA-xQ1-xP7AlZHUXHqM,6705
  runnable/names.py,sha256=vn92Kv9ANROYSZX6Z4z1v_WA3WiEdIYmG6KEStBFZug,8134
- runnable/nodes.py,sha256=-vHtjyeRPfmKCmWjuYQItwdZIG3Rpv-IJlGQNbqj99c,16387
- runnable/parameters.py,sha256=-DtO-3LnsOt-wa9vqk3Mpq8OB-WVZf5xgEhtaointy0,5091
+ runnable/nodes.py,sha256=UqR-bJx0Hi7uLSUw_saB7VsNdFh3POKtdgsEPsasHfE,16576
+ runnable/parameters.py,sha256=KGGW8_uoIK2hd3EwzzBmoHBOrai3fh-SESNPpJRTfj4,5161
  runnable/pickler.py,sha256=5SDNf0miMUJ3ZauhQdzwk8_t-9jeOqaTjP5bvRnu9sU,2685
- runnable/sdk.py,sha256=b5Dyo5MZ--YEPnQ3920zhxiunPZOHIjPq6JxiV2MPQQ,22931
+ runnable/sdk.py,sha256=hx29PEDYjJIWaTZp3ZhyLDwuulG1HIQ2q7A4HVE1WkM,26998
  runnable/secrets.py,sha256=dakb7WRloWVo-KpQp6Vy4rwFdGi58BTlT4OifQY106I,2324
- runnable/tasks.py,sha256=LSc6Un-vB0bGTayuxcR0VY6D7gilg8WQKIDEgP6Ge90,17663
- runnable/utils.py,sha256=mnxLzGjJKht5z46gz5EumNJaaY33dec26F8wRQQRsGY,19337
- runnable-0.9.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- runnable-0.9.1.dist-info/METADATA,sha256=7pyhajhfVmr3tk4QUpTQQ8Xb3YuAaUnDn-tfmywLrzo,17062
- runnable-0.9.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- runnable-0.9.1.dist-info/entry_points.txt,sha256=_elJX0RSR4u9IWIl8fwYNL-YBXoYaanYd415TJBmTRE,1710
- runnable-0.9.1.dist-info/RECORD,,
+ runnable/tasks.py,sha256=T8vVLo-yWHanrXKHFJvNdjQXKuWmJ13_-lxZzmf1mQM,18908
+ runnable/utils.py,sha256=okZFGbJWqStl5Rq5vLhNUQZDv_vhcT58bq9MDrTVxhc,19449
+ runnable-0.11.0.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ runnable-0.11.0.dist-info/METADATA,sha256=M7YR0Y_j7zMnXCKHFWGk4mlWKYZTdkOyZa3M0v3NokI,17063
+ runnable-0.11.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ runnable-0.11.0.dist-info/entry_points.txt,sha256=Wy-dimdD2REO2a36Ri84fqGqA5iwGy2RIbdgRNtCNdM,1540
+ runnable-0.11.0.dist-info/RECORD,,
{runnable-0.9.1.dist-info → runnable-0.11.0.dist-info}/entry_points.txt RENAMED
@@ -12,10 +12,6 @@ local-container=runnable.extensions.executor.local_container.implementation:Loca
  mocked=runnable.extensions.executor.mocked.implementation:MockedExecutor
  retry=runnable.extensions.executor.retry.implementation:RetryExecutor
 
- [experiment_tracker]
- do-nothing=runnable.experiment_tracker:DoNothingTracker
- mlflow=runnable.extensions.experiment_tracker.mlflow.implementation:MLFlowExperimentTracker
-
  [nodes]
  dag=runnable.extensions.nodes:DagNode
  fail=runnable.extensions.nodes:FailNode
runnable/experiment_tracker.py DELETED
@@ -1,139 +0,0 @@
- import contextlib
- import json
- import logging
- import os
- from abc import ABC, abstractmethod
- from collections import defaultdict
- from typing import Any, ContextManager, Dict, Tuple, Union
-
- from pydantic import BaseModel, ConfigDict
-
- import runnable.context as context
- from runnable import defaults
- from runnable.utils import remove_prefix
-
- logger = logging.getLogger(defaults.LOGGER_NAME)
-
-
- def retrieve_step_details(key: str) -> Tuple[str, int]:
- key = remove_prefix(key, defaults.TRACK_PREFIX)
- data = key.split(defaults.STEP_INDICATOR)
-
- key = data[0].lower()
- step = 0
-
- if len(data) > 1:
- step = int(data[1])
-
- return key, step
-
-
- def get_tracked_data() -> Dict[str, Any]:
- tracked_data: Dict[str, Any] = defaultdict(dict)
- for env_var, value in os.environ.items():
- if env_var.startswith(defaults.TRACK_PREFIX):
- key, step = retrieve_step_details(env_var)
-
- # print(value, type(value))
- try:
- value = json.loads(value)
- except json.decoder.JSONDecodeError:
- logger.warning(f"Tracker {key} could not be JSON decoded, adding the literal value")
-
- tracked_data[key][step] = value
- del os.environ[env_var]
-
- for key, value in tracked_data.items():
- if len(value) == 1:
- tracked_data[key] = value[0]
-
- return tracked_data
-
-
- # --8<-- [start:docs]
-
-
- class BaseExperimentTracker(ABC, BaseModel):
- """
- Base Experiment tracker class definition.
- """
-
- service_name: str = ""
- service_type: str = "experiment_tracker"
-
- @property
- def _context(self):
- return context.run_context
-
- model_config = ConfigDict(extra="forbid")
-
- @property
- def client_context(self) -> ContextManager:
- """
- Returns the client context.
- """
- return contextlib.nullcontext()
-
- def publish_data(self, tracked_data: Dict[str, Any]):
- for key, value in tracked_data.items():
- if isinstance(value, dict):
- for key2, value2 in value.items():
- self.log_metric(key, value2, step=key2)
- continue
- self.log_metric(key, value)
-
- @abstractmethod
- def log_metric(self, key: str, value: Union[int, float], step: int = 0):
- """
- Sets the metric in the experiment tracking.
-
- Args:
- key (str): The key against you want to store the value
- value (float): The value of the metric
- step (int): Optional step at which it was recorded
-
- Raises:
- NotImplementedError: Base class, hence not implemented
- """
- raise NotImplementedError
-
- @abstractmethod
- def log_parameter(self, key: str, value: Any):
- """
- Logs a parameter in the experiment tracking.
-
- Args:
- key (str): The key against you want to store the value
- value (any): The value of the metric
-
- Raises:
- NotImplementedError: Base class, hence not implemented
- """
- pass
-
-
- # --8<-- [end:docs]
-
-
- class DoNothingTracker(BaseExperimentTracker):
- """
- A Do nothing tracker
- """
-
- service_name: str = "do-nothing"
-
- def log_metric(self, key: str, value: Union[int, float], step: int = 0):
- """
- Sets the metric in the experiment tracking.
-
- Args:
- key (str): The key against you want to store the value
- value (float): The value of the metric
- """
- ...
-
- def log_parameter(self, key: str, value: Any):
- """
- Since this is a Do nothing tracker, we don't need to log anything.
- """
- ...