runnable 0.34.0a1__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of runnable might be problematic.

Files changed (49)
  1. extensions/catalog/any_path.py +13 -2
  2. extensions/job_executor/__init__.py +7 -5
  3. extensions/job_executor/emulate.py +106 -0
  4. extensions/job_executor/k8s.py +8 -8
  5. extensions/job_executor/local_container.py +13 -14
  6. extensions/nodes/__init__.py +0 -0
  7. extensions/nodes/conditional.py +243 -0
  8. extensions/nodes/fail.py +72 -0
  9. extensions/nodes/map.py +350 -0
  10. extensions/nodes/parallel.py +159 -0
  11. extensions/nodes/stub.py +89 -0
  12. extensions/nodes/success.py +72 -0
  13. extensions/nodes/task.py +92 -0
  14. extensions/pipeline_executor/__init__.py +27 -27
  15. extensions/pipeline_executor/argo.py +52 -46
  16. extensions/pipeline_executor/emulate.py +112 -0
  17. extensions/pipeline_executor/local.py +4 -4
  18. extensions/pipeline_executor/local_container.py +19 -79
  19. extensions/pipeline_executor/mocked.py +5 -9
  20. extensions/pipeline_executor/retry.py +6 -10
  21. runnable/__init__.py +2 -11
  22. runnable/catalog.py +6 -23
  23. runnable/cli.py +145 -48
  24. runnable/context.py +520 -28
  25. runnable/datastore.py +51 -54
  26. runnable/defaults.py +12 -34
  27. runnable/entrypoints.py +82 -440
  28. runnable/exceptions.py +35 -34
  29. runnable/executor.py +13 -20
  30. runnable/gantt.py +1141 -0
  31. runnable/graph.py +1 -1
  32. runnable/names.py +1 -1
  33. runnable/nodes.py +20 -16
  34. runnable/parameters.py +108 -51
  35. runnable/sdk.py +125 -204
  36. runnable/tasks.py +62 -85
  37. runnable/utils.py +6 -268
  38. runnable-1.0.0.dist-info/METADATA +122 -0
  39. runnable-1.0.0.dist-info/RECORD +73 -0
  40. {runnable-0.34.0a1.dist-info → runnable-1.0.0.dist-info}/entry_points.txt +9 -8
  41. extensions/nodes/nodes.py +0 -778
  42. extensions/nodes/torch.py +0 -273
  43. extensions/nodes/torch_config.py +0 -76
  44. extensions/tasks/torch.py +0 -286
  45. extensions/tasks/torch_config.py +0 -76
  46. runnable-0.34.0a1.dist-info/METADATA +0 -267
  47. runnable-0.34.0a1.dist-info/RECORD +0 -67
  48. {runnable-0.34.0a1.dist-info → runnable-1.0.0.dist-info}/WHEEL +0 -0
  49. {runnable-0.34.0a1.dist-info → runnable-1.0.0.dist-info}/licenses/LICENSE +0 -0
runnable/tasks.py CHANGED
@@ -5,7 +5,6 @@ import io
 import json
 import logging
 import os
-import runpy
 import subprocess
 import sys
 from datetime import datetime
@@ -26,7 +25,7 @@ from runnable.datastore import (
     Parameter,
     StepAttempt,
 )
-from runnable.defaults import TypeMapVariable
+from runnable.defaults import MapVariableType

 logger = logging.getLogger(defaults.LOGGER_NAME)

@@ -49,7 +48,29 @@ class TeeIO(io.StringIO):
         self.output_stream.flush()


-sys.stdout = TeeIO()
+@contextlib.contextmanager
+def redirect_output():
+    # Set the stream handlers to use the custom TeeIO class
+
+    # Backup the original stdout and stderr
+    original_stdout = sys.stdout
+    original_stderr = sys.stderr
+
+    # Redirect stdout and stderr to custom TeeStream objects
+    sys.stdout = TeeIO(sys.stdout)
+    sys.stderr = TeeIO(sys.stderr)
+
+    # Replace stream for all StreamHandlers to use the new sys.stdout
+    for handler in logging.getLogger().handlers:
+        if isinstance(handler, logging.StreamHandler):
+            handler.stream = sys.stdout
+
+    try:
+        yield sys.stdout, sys.stderr
+    finally:
+        # Restore the original stdout and stderr
+        sys.stdout = original_stdout
+        sys.stderr = original_stderr


 class TaskReturns(BaseModel):
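The removed line was a module-level `sys.stdout = TeeIO()`, which hijacked stdout for any process that merely imported the module; the new `redirect_output` context manager scopes the redirection to a single task execution and restores the original streams afterwards. A minimal self-contained sketch of the same tee-and-restore pattern (this `TeeIO` is a stand-in, since the real class body sits outside this hunk):

```python
import contextlib
import io
import sys


class TeeIO(io.StringIO):
    """Stand-in for runnable's TeeIO: buffer writes and echo them to the wrapped stream."""

    def __init__(self, output_stream):
        super().__init__()
        self.output_stream = output_stream

    def write(self, s: str) -> int:
        self.output_stream.write(s)  # echo to the real terminal immediately
        return super().write(s)      # and keep a copy for later logging


@contextlib.contextmanager
def redirect_output():
    original_stdout, original_stderr = sys.stdout, sys.stderr
    sys.stdout, sys.stderr = TeeIO(original_stdout), TeeIO(original_stderr)
    try:
        yield sys.stdout, sys.stderr
    finally:
        sys.stdout, sys.stderr = original_stdout, original_stderr


with redirect_output() as (out, err):
    print("hello")  # visible on the terminal *and* captured

assert out.getvalue() == "hello\n"
```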
@@ -80,7 +101,7 @@ class BaseTaskType(BaseModel):
     def set_secrets_as_env_variables(self):
         # Preparing the environment for the task execution
         for key in self.secrets:
-            secret_value = context.run_context.secrets_handler.get(key)
+            secret_value = context.run_context.secrets.get(key)
             os.environ[key] = secret_value

     def delete_secrets_from_env_variables(self):
@@ -91,7 +112,7 @@ class BaseTaskType(BaseModel):

     def execute_command(
         self,
-        map_variable: TypeMapVariable = None,
+        map_variable: MapVariableType = None,
     ) -> StepAttempt:
         """The function to execute the command.

@@ -131,7 +152,7 @@ class BaseTaskType(BaseModel):
         finally:
             self.delete_secrets_from_env_variables()

-    def resolve_unreduced_parameters(self, map_variable: TypeMapVariable = None):
+    def resolve_unreduced_parameters(self, map_variable: MapVariableType = None):
         """Resolve the unreduced parameters."""
         params = self._context.run_log_store.get_parameters(
             run_id=self._context.run_id
@@ -154,7 +175,7 @@ class BaseTaskType(BaseModel):

     @contextlib.contextmanager
     def execution_context(
-        self, map_variable: TypeMapVariable = None, allow_complex: bool = True
+        self, map_variable: MapVariableType = None, allow_complex: bool = True
     ):
         params = self.resolve_unreduced_parameters(map_variable=map_variable)
         logger.info(f"Parameters available for the execution: {params}")
@@ -268,7 +289,7 @@ class PythonTaskType(BaseTaskType):  # pylint: disable=too-few-public-methods

     def execute_command(
         self,
-        map_variable: TypeMapVariable = None,
+        map_variable: MapVariableType = None,
     ) -> StepAttempt:
         """Execute the notebook as defined by the command."""
         attempt_log = StepAttempt(status=defaults.FAIL, start_time=str(datetime.now()))
@@ -290,13 +311,21 @@ class PythonTaskType(BaseTaskType):  # pylint: disable=too-few-public-methods
                 logger.info(
                     f"Calling {func} from {module} with {filtered_parameters}"
                 )
-
-                out_file = TeeIO()
-                with contextlib.redirect_stdout(out_file):
+                context.progress.stop()  # redirecting stdout clashes with rich progress
+                with redirect_output() as (buffer, stderr_buffer):
                     user_set_parameters = f(
                         **filtered_parameters
                     )  # This is a tuple or single value
-                task_console.print(out_file.getvalue())
+
+                print(
+                    stderr_buffer.getvalue()
+                )  # To print the logging statements
+
+                # TODO: Avoid double print!!
+                with task_console.capture():
+                    task_console.log(buffer.getvalue())
+                    task_console.log(stderr_buffer.getvalue())
+                context.progress.start()
             except Exception as e:
                 raise exceptions.CommandCallError(
                     f"Function call: {self.command} did not succeed.\n"
@@ -355,69 +384,6 @@ class PythonTaskType(BaseTaskType):  # pylint: disable=too-few-public-methods
         return attempt_log


-class TorchTaskType(BaseTaskType):
-    task_type: str = Field(default="torch", serialization_alias="command_type")
-
-    entrypoint: str = Field(default="torch.distributed.run", frozen=True)
-    args_to_torchrun: dict[str, str | bool] = Field(default_factory=dict)  # For example
-    # {"nproc_per_node": 2, "nnodes": 1,}
-
-    script_to_call: str  # For example train/script.py
-
-    def execute_command(
-        self, map_variable: Dict[str, str | int | float] | None = None
-    ) -> StepAttempt:
-        attempt_log = StepAttempt(status=defaults.FAIL, start_time=str(datetime.now()))
-
-        with (
-            self.execution_context(
-                map_variable=map_variable, allow_complex=False
-            ) as params,
-            self.expose_secrets() as _,
-        ):
-            try:
-                entry_point_args = [self.entrypoint]
-
-                for key, value in self.args_to_torchrun.items():
-                    entry_point_args.append(f"--{key}")
-                    if type(value) is not bool:
-                        entry_point_args.append(str(value))
-
-                entry_point_args.append(self.script_to_call)
-                for key, value in params.items():
-                    entry_point_args.append(f"--{key}")
-                    if type(value.value) is not bool:  # type: ignore
-                        entry_point_args.append(str(value.value))  # type: ignore
-
-                # TODO: Check the typing here
-
-                logger.info("Calling the user script with the following parameters:")
-                logger.info(entry_point_args)
-                out_file = TeeIO()
-                try:
-                    with contextlib.redirect_stdout(out_file):
-                        sys.argv = entry_point_args
-                        runpy.run_module(self.entrypoint, run_name="__main__")
-                    task_console.print(out_file.getvalue())
-                except Exception as e:
-                    raise exceptions.CommandCallError(
-                        f"Call to entrypoint {self.entrypoint} with {self.script_to_call} did not succeed."
-                    ) from e
-                finally:
-                    sys.argv = sys.argv[:1]
-
-                attempt_log.status = defaults.SUCCESS
-            except Exception as _e:
-                msg = f"Call to entrypoint {self.entrypoint} with {self.script_to_call} did not succeed."
-                attempt_log.message = msg
-                task_console.print_exception(show_locals=False)
-                task_console.log(_e, style=defaults.error_style)
-
-        attempt_log.end_time = str(datetime.now())
-
-        return attempt_log
-
-
 class NotebookTaskType(BaseTaskType):
     """
     --8<-- [start:notebook_reference]
@@ -482,14 +448,15 @@ class NotebookTaskType(BaseTaskType):

         return command

-    def get_notebook_output_path(self, map_variable: TypeMapVariable = None) -> str:
+    def get_notebook_output_path(self, map_variable: MapVariableType = None) -> str:
         tag = ""
         map_variable = map_variable or {}
         for key, value in map_variable.items():
             tag += f"{key}_{value}_"

-        if hasattr(self._context.executor, "_context_node"):
-            tag += self._context.executor._context_node.name
+        if isinstance(self._context, context.PipelineContext):
+            assert self._context.pipeline_executor._context_node
+            tag += self._context.pipeline_executor._context_node.name

         tag = "".join(x for x in tag if x.isalnum()).strip("-")

@@ -500,7 +467,7 @@ class NotebookTaskType(BaseTaskType):

     def execute_command(
         self,
-        map_variable: TypeMapVariable = None,
+        map_variable: MapVariableType = None,
     ) -> StepAttempt:
         """Execute the python notebook as defined by the command.

@@ -555,12 +522,20 @@ class NotebookTaskType(BaseTaskType):
             }
             kwds.update(ploomber_optional_args)

-            out_file = TeeIO()
-            with contextlib.redirect_stdout(out_file):
+            context.progress.stop()  # redirecting stdout clashes with rich progress
+
+            with redirect_output() as (buffer, stderr_buffer):
                 pm.execute_notebook(**kwds)
-            task_console.print(out_file.getvalue())

-            context.run_context.catalog_handler.put(name=notebook_output_path)
+            print(stderr_buffer.getvalue())  # To print the logging statements
+
+            with task_console.capture():
+                task_console.log(buffer.getvalue())
+                task_console.log(stderr_buffer.getvalue())
+
+            context.progress.start()
+
+            context.run_context.catalog.put(name=notebook_output_path)

             client = PloomberClient.from_path(path=notebook_output_path)
             namespace = client.get_namespace()
@@ -678,7 +653,7 @@ class ShellTaskType(BaseTaskType):

     def execute_command(
         self,
-        map_variable: TypeMapVariable = None,
+        map_variable: MapVariableType = None,
     ) -> StepAttempt:
         # Using shell=True as we want to have chained commands to be executed in the same shell.
         """Execute the shell command as defined by the command.
@@ -702,7 +677,7 @@ class ShellTaskType(BaseTaskType):
         # Expose secrets as environment variables
         if self.secrets:
             for key in self.secrets:
-                secret_value = context.run_context.secrets_handler.get(key)
+                secret_value = context.run_context.secrets.get(key)
                 subprocess_env[key] = secret_value

         try:
@@ -728,6 +703,7 @@ class ShellTaskType(BaseTaskType):
            capture = False
            return_keys = {x.name: x for x in self.returns}

+            context.progress.stop()  # redirecting stdout clashes with rich progress
            proc = subprocess.Popen(
                command,
                shell=True,
@@ -751,6 +727,7 @@ class ShellTaskType(BaseTaskType):
                    continue
                task_console.print(line, style=defaults.warning_style)

+            context.progress.start()
            output_parameters: Dict[str, Parameter] = {}
            metrics: Dict[str, Parameter] = {}

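Both ShellTaskType hunks bracket the same region: the child process's output is streamed line by line to `task_console` between the `stop()` and `start()` calls. A minimal sketch of that line-streaming pattern (the command and names here are illustrative, not runnable's):

```python
import subprocess

# shell=True so chained commands run in one shell, as the diff's comment notes
proc = subprocess.Popen(
    "echo one && echo two",
    shell=True,
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,  # fold stderr into the same stream
    text=True,
)

assert proc.stdout is not None
for line in proc.stdout:  # consume output as it is produced
    print(line.rstrip())  # runnable routes these through task_console.print

proc.wait()
```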
runnable/utils.py CHANGED
@@ -11,17 +11,16 @@ from collections import OrderedDict
 from datetime import datetime
 from pathlib import Path
 from string import Template as str_template
-from typing import TYPE_CHECKING, Any, Dict, Mapping, Tuple, Union
+from typing import TYPE_CHECKING, Any, Dict, Tuple, Union

 from ruamel.yaml import YAML
-from stevedore import driver

 import runnable.context as context
 from runnable import console, defaults, names
-from runnable.defaults import TypeMapVariable
+from runnable.defaults import MapVariableType

 if TYPE_CHECKING:  # pragma: no cover
-    from runnable.nodes import BaseNode
+    pass


 logger = logging.getLogger(defaults.LOGGER_NAME)
@@ -66,6 +65,7 @@ def safe_make_dir(directory: Union[str, Path]):
     Path(directory).mkdir(parents=True, exist_ok=True)


+# TODO: remove this
 def generate_run_id(run_id: str = "") -> str:
     """Generate a new run_id.

@@ -147,19 +147,6 @@ def get_module_and_attr_names(command: str) -> Tuple[str, str]:
     return module, func


-def get_dag_hash(dag: Dict[str, Any]) -> str:
-    """Generates the hash of the dag definition.
-
-    Args:
-        dag (dict): The dictionary object containing the dag definition
-
-    Returns:
-        str: The hash of the dag definition
-    """
-    dag_str = json.dumps(dag, sort_keys=True, ensure_ascii=True)
-    return hashlib.sha1(dag_str.encode("utf-8")).hexdigest()
-
-
 def load_yaml(file_path: str, load_type: str = "safe") -> Dict[str, Any]:
     """Loads an yaml and returns the dictionary.

@@ -314,29 +301,6 @@ def remove_prefix(text: str, prefix: str) -> str:
     return text  # or whatever is given


-def get_tracked_data() -> Dict[str, str]:
-    """Scans the environment variables to find any user tracked variables that have a prefix runnable_TRACK_
-    Removes the environment variable to prevent any clashes in the future steps.
-
-    Returns:
-        dict: A dictionary of user tracked data
-    """
-    tracked_data = {}
-    for env_var, value in os.environ.items():
-        if env_var.startswith(defaults.TRACK_PREFIX):
-            key = remove_prefix(env_var, defaults.TRACK_PREFIX)
-            try:
-                tracked_data[key.lower()] = json.loads(value)
-            except json.decoder.JSONDecodeError:
-                logger.warning(
-                    f"Tracker {key} could not be JSON decoded, adding the literal value"
-                )
-                tracked_data[key.lower()] = value
-
-            del os.environ[env_var]
-    return tracked_data
-
-
 def diff_dict(d1: Dict[str, Any], d2: Dict[str, Any]) -> Dict[str, Any]:
     """
     Given two dicts d1 and d2, return a new dict that has upsert items from d1.
@@ -359,25 +323,6 @@ def diff_dict(d1: Dict[str, Any], d2: Dict[str, Any]) -> Dict[str, Any]:
     return diff


-# def hash_bytestr_iter(bytesiter, hasher, ashexstr=True):  # pylint: disable=C0116
-#     """Hashes the given bytesiter using the given hasher."""
-#     for block in bytesiter:  # pragma: no cover
-#         hasher.update(block)
-#     return hasher.hexdigest() if ashexstr else hasher.digest()  # pragma: no cover
-
-
-# def file_as_blockiter(afile, blocksize=65536):  # pylint: disable=C0116
-#     """From a StackOverflow answer: that is used to generate a MD5 hash of a large files.
-#     # https://stackoverflow.com/questions/3431825/generating-an-md5-checksum-of-a-file.
-
-#     """
-#     with afile:  # pragma: no cover
-#         block = afile.read(blocksize)
-#         while len(block) > 0:
-#             yield block
-#             block = afile.read(blocksize)
-
-
 def get_data_hash(file_name: str) -> str:
     """Returns the hash of the data file.

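The deleted comment block was a StackOverflow-derived block iterator for checksumming large files. The surviving `get_data_hash` streams the file for the same reason; a sketch of that block-wise pattern (the function name, algorithm, and block size here are illustrative, runnable's actual implementation may differ):

```python
import hashlib


def hash_file(file_name: str, block_size: int = 65536) -> str:
    # Stream the file in fixed-size blocks so large files never
    # have to be loaded into memory at once.
    digest = hashlib.sha256()
    with open(file_name, "rb") as f:
        for block in iter(lambda: f.read(block_size), b""):
            digest.update(block)
    return digest.hexdigest()
```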
@@ -397,193 +342,7 @@ def get_data_hash(file_name: str) -> str:
     return file_hash.hexdigest()


-# TODO: This is not the right place for this.
-def get_node_execution_command(
-    node: BaseNode,
-    map_variable: TypeMapVariable = None,
-    over_write_run_id: str = "",
-    log_level: str = "",
-) -> str:
-    """A utility function to standardize execution call to a node via command line.
-
-    Args:
-        executor (object): The executor class.
-        node (object): The Node to execute
-        map_variable (str, optional): If the node belongs to a map step. Defaults to None.
-
-    Returns:
-        str: The execution command to run a node via command line.
-    """
-    run_id = context.run_context.run_id
-
-    if over_write_run_id:
-        run_id = over_write_run_id
-
-    log_level = log_level or logging.getLevelName(logger.getEffectiveLevel())
-
-    action = (
-        f"runnable execute-single-node {run_id} "
-        f"{context.run_context.pipeline_file} "
-        f"{node._command_friendly_name()} "
-        f"--log-level {log_level} "
-    )
-
-    if context.run_context.from_sdk:
-        action = action + "--mode python "
-
-    if map_variable:
-        action = action + f"--map-variable '{json.dumps(map_variable)}' "
-
-    if context.run_context.configuration_file:
-        action = action + f"--config {context.run_context.configuration_file} "
-
-    if context.run_context.parameters_file:
-        action = action + f"--parameters-file {context.run_context.parameters_file} "
-
-    if context.run_context.tag:
-        action = action + f"--tag {context.run_context.tag}"
-
-    return action
-
-
-# TODO: This is not the right place for this.
-def get_fan_command(
-    mode: str,
-    node: BaseNode,
-    run_id: str,
-    map_variable: TypeMapVariable = None,
-    log_level: str = "",
-) -> str:
-    """
-    An utility function to return the fan "in or out" command
-
-    Args:
-        executor (BaseExecutor): The executor class
-        mode (str): in or out
-        node (BaseNode): The composite node that we are fanning in or out
-        run_id (str): The run id.
-        map_variable (dict, optional): If the node is a map, we have the map variable. Defaults to None.
-
-    Returns:
-        str: The fan in or out command
-    """
-    log_level = log_level or logging.getLevelName(logger.getEffectiveLevel())
-    action = (
-        f"runnable fan {run_id} "
-        f"{node._command_friendly_name()} "  # step name
-        f"{context.run_context.pipeline_file} "  # yaml or python
-        f"{mode} "  # in or out
-        f"--log-level {log_level} "
-    )
-    if context.run_context.configuration_file:
-        action = action + f" --config-file {context.run_context.configuration_file} "
-
-    if context.run_context.parameters_file:
-        action = action + f" --parameters-file {context.run_context.parameters_file}"
-
-    if map_variable:
-        action = action + f" --map-variable '{json.dumps(map_variable)}'"
-
-    if context.run_context.from_sdk:  # execution mode
-        action = action + " --mode python "
-
-    return action
-
-
-# TODO: This is not the right place for this.
-def get_job_execution_command(over_write_run_id: str = "") -> str:
-    """Get the execution command to run a job via command line.
-
-    This function should be used by all executors to submit jobs in remote environment
-    """
-
-    run_id = context.run_context.run_id
-
-    if over_write_run_id:
-        run_id = over_write_run_id
-
-    log_level = logging.getLevelName(logger.getEffectiveLevel())
-
-    action = (
-        f"runnable execute-job {context.run_context.job_definition_file} {run_id} "
-        f" --log-level {log_level}"
-    )
-
-    if context.run_context.configuration_file:
-        action = action + f" --config {context.run_context.configuration_file}"
-
-    if context.run_context.parameters_file:
-        action = action + f" --parameters {context.run_context.parameters_file}"
-
-    if context.run_context.from_sdk:
-        action = action + " --mode python "
-
-    if context.run_context.tag:
-        action = action + f" --tag {context.run_context.tag}"
-
-    return action
-
-
-def get_provider_by_name_and_type(
-    service_type: str, service_details: defaults.ServiceConfig
-):
-    """Given a service type, one of executor, run_log_store, catalog, secrets and the config
-    return the exact child class implementing the service.
-    We use stevedore to do the work for us.
-
-    Args:
-        service_type (str): One of executor, run_log_store, catalog, secrets
-        service_details (dict): The config used to instantiate the service.
-
-    Raises:
-        Exception: If the service by that name does not exist
-
-    Returns:
-        object: A service object
-    """
-
-    namespace = service_type
-
-    service_name = service_details["type"]
-    service_config: Mapping[str, Any] = {}
-    if "config" in service_details:
-        service_config = service_details.get("config", {})
-
-    logger.debug(
-        f"Trying to get a service of {service_type} of the name {service_name} with config: {service_config}"
-    )
-    try:
-        mgr = driver.DriverManager(
-            namespace=namespace,
-            name=service_name,
-            invoke_on_load=True,
-            invoke_kwds={**service_config},
-        )
-        return mgr.driver
-    except Exception as _e:
-        logger.exception(
-            f"Could not find the service of type: {service_type} with config: {service_details}"
-        )
-        raise Exception(
-            f"Could not find the service of type: {service_type} with config: {service_details}"
-        ) from _e
-
-
-def get_run_config() -> dict:
-    """Given an executor with assigned services, return the run_config.
-
-    Args:
-        executor (object): The executor with all the services assigned.
-
-    Returns:
-        dict: The run_config.
-    """
-
-    run_config = context.run_context.model_dump(by_alias=True)
-    return run_config
-
-
-def json_to_ordered_dict(json_str: str) -> TypeMapVariable:
+def json_to_ordered_dict(json_str: str) -> MapVariableType:
     """Decode a JSON str into OrderedDict.

     Args:
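With `get_provider_by_name_and_type` gone, nothing in this module resolves plugins any more, which is why the `stevedore` import disappears in the first hunk (the lookup presumably moved into `runnable/context.py`, the file with the largest additions in this release). For reference, the stevedore pattern the removed helper used: entry-point namespace plus name in, instantiated driver out. The namespace and name below are illustrative values, not guaranteed entry points in 1.0.0:

```python
from stevedore import driver

# Resolve a class registered under a setuptools entry-point group
# and instantiate it with its config in a single call.
mgr = driver.DriverManager(
    namespace="catalog",   # entry-point group (illustrative)
    name="file-system",    # entry-point name within the group (illustrative)
    invoke_on_load=True,   # call the loaded class/factory on load...
    invoke_kwds={},        # ...with these keyword arguments
)
service = mgr.driver       # the instantiated plugin object
```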
@@ -598,27 +357,6 @@ def json_to_ordered_dict(json_str: str) -> MapVariableType:
     return OrderedDict()


-def set_runnable_environment_variables(
-    run_id: str = "", configuration_file: str = "", tag: str = ""
-) -> None:
-    """Set the environment variables used by runnable. This function should be called during the prepare configurations
-    by all executors.
-
-    Args:
-        run_id (str, optional): The run id of the execution. Defaults to None.
-        configuration_file (str, optional): The configuration file if used. Defaults to None.
-        tag (str, optional): The tag associated with a run. Defaults to None.
-    """
-    if run_id:
-        os.environ[defaults.ENV_RUN_ID] = run_id
-
-    if configuration_file:
-        os.environ[defaults.RUNNABLE_CONFIG_FILE] = configuration_file
-
-    if tag:
-        os.environ[defaults.RUNNABLE_RUN_TAG] = tag
-
-
 def gather_variables() -> Dict[str, str]:
     """Gather all the environment variables used by runnable. All the variables start with runnable_VAR_.

@@ -635,7 +373,7 @@ def gather_variables() -> Dict[str, str]:
     return variables


-def make_log_file_name(name: str, map_variable: TypeMapVariable) -> str:
+def make_log_file_name(name: str, map_variable: MapVariableType) -> str:
    random_tag = "".join(random.choices(string.ascii_uppercase + string.digits, k=3))
    log_file_name = name