runnable-0.50.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (72)
  1. extensions/README.md +0 -0
  2. extensions/__init__.py +0 -0
  3. extensions/catalog/README.md +0 -0
  4. extensions/catalog/any_path.py +214 -0
  5. extensions/catalog/file_system.py +52 -0
  6. extensions/catalog/minio.py +72 -0
  7. extensions/catalog/pyproject.toml +14 -0
  8. extensions/catalog/s3.py +11 -0
  9. extensions/job_executor/README.md +0 -0
  10. extensions/job_executor/__init__.py +236 -0
  11. extensions/job_executor/emulate.py +70 -0
  12. extensions/job_executor/k8s.py +553 -0
  13. extensions/job_executor/k8s_job_spec.yaml +37 -0
  14. extensions/job_executor/local.py +35 -0
  15. extensions/job_executor/local_container.py +161 -0
  16. extensions/job_executor/pyproject.toml +16 -0
  17. extensions/nodes/README.md +0 -0
  18. extensions/nodes/__init__.py +0 -0
  19. extensions/nodes/conditional.py +301 -0
  20. extensions/nodes/fail.py +78 -0
  21. extensions/nodes/loop.py +394 -0
  22. extensions/nodes/map.py +477 -0
  23. extensions/nodes/parallel.py +281 -0
  24. extensions/nodes/pyproject.toml +15 -0
  25. extensions/nodes/stub.py +93 -0
  26. extensions/nodes/success.py +78 -0
  27. extensions/nodes/task.py +156 -0
  28. extensions/pipeline_executor/README.md +0 -0
  29. extensions/pipeline_executor/__init__.py +871 -0
  30. extensions/pipeline_executor/argo.py +1266 -0
  31. extensions/pipeline_executor/emulate.py +119 -0
  32. extensions/pipeline_executor/local.py +226 -0
  33. extensions/pipeline_executor/local_container.py +369 -0
  34. extensions/pipeline_executor/mocked.py +159 -0
  35. extensions/pipeline_executor/pyproject.toml +16 -0
  36. extensions/run_log_store/README.md +0 -0
  37. extensions/run_log_store/__init__.py +0 -0
  38. extensions/run_log_store/any_path.py +100 -0
  39. extensions/run_log_store/chunked_fs.py +122 -0
  40. extensions/run_log_store/chunked_minio.py +141 -0
  41. extensions/run_log_store/file_system.py +91 -0
  42. extensions/run_log_store/generic_chunked.py +549 -0
  43. extensions/run_log_store/minio.py +114 -0
  44. extensions/run_log_store/pyproject.toml +15 -0
  45. extensions/secrets/README.md +0 -0
  46. extensions/secrets/dotenv.py +62 -0
  47. extensions/secrets/pyproject.toml +15 -0
  48. runnable/__init__.py +108 -0
  49. runnable/catalog.py +141 -0
  50. runnable/cli.py +484 -0
  51. runnable/context.py +730 -0
  52. runnable/datastore.py +1058 -0
  53. runnable/defaults.py +159 -0
  54. runnable/entrypoints.py +390 -0
  55. runnable/exceptions.py +137 -0
  56. runnable/executor.py +561 -0
  57. runnable/gantt.py +1646 -0
  58. runnable/graph.py +501 -0
  59. runnable/names.py +546 -0
  60. runnable/nodes.py +593 -0
  61. runnable/parameters.py +217 -0
  62. runnable/pickler.py +96 -0
  63. runnable/sdk.py +1277 -0
  64. runnable/secrets.py +92 -0
  65. runnable/tasks.py +1268 -0
  66. runnable/telemetry.py +142 -0
  67. runnable/utils.py +423 -0
  68. runnable-0.50.0.dist-info/METADATA +189 -0
  69. runnable-0.50.0.dist-info/RECORD +72 -0
  70. runnable-0.50.0.dist-info/WHEEL +4 -0
  71. runnable-0.50.0.dist-info/entry_points.txt +53 -0
  72. runnable-0.50.0.dist-info/licenses/LICENSE +201 -0
runnable/defaults.py ADDED
@@ -0,0 +1,159 @@
+ from typing import Any, Dict, Optional, OrderedDict, Union
+
+ from pydantic import BaseModel, Field, field_validator
+ from rich.style import Style
+ from typing_extensions import TypeAlias
+
+ NAME = "runnable"
+ LOGGER_NAME = "runnable"
+
+ # CLI settings
+ LOG_LEVEL = "WARNING"
+
+
+ MapVariableType: TypeAlias = Optional[Dict[str, Union[str, int, float]]]
+
+ # Config file environment variable
+ RUNNABLE_CONFIGURATION_FILE = "RUNNABLE_CONFIGURATION_FILE"
+ RUNNABLE_RUN_TAG = "RUNNABLE_RUN_TAG"
+ RUNNABLE_PARAMETERS_FILE = "RUNNABLE_PARAMETERS_FILE"
+
+ # Interaction settings
+ PARAMETER_PREFIX = "RUNNABLE_PRM_"
+ MAP_VARIABLE = "RUNNABLE_MAP_VARIABLE"
+ VARIABLE_PREFIX = "RUNNABLE_VAR_"
+ ENV_RUN_ID = "RUNNABLE_RUN_ID"
+ RETRY_RUN_ID = "RUNNABLE_RETRY_RUN_ID"
+ RETRY_INDICATOR = "RUNNABLE_RETRY_INDICATOR"
+ ATTEMPT_NUMBER = "RUNNABLE_STEP_ATTEMPT"
+
+
+ class MapVariableModel(BaseModel):
+     value: Any
+
+     @field_validator("value")
+     @classmethod
+     def validate_json_serializable(cls, v):
+         """Ensure the value is JSON serializable"""
+         import json
+
+         try:
+             json.dumps(v)
+             return v
+         except (TypeError, ValueError) as e:
+             raise ValueError(f"Value must be JSON serializable: {e}") from e
+
+
+ class LoopIndexModel(BaseModel):
+     value: int
+
+
+ class IterableParameterModel(BaseModel):
+     # {i1: {value: v1}, i2: {value: v2}} where i1 is outside map and i2 is nested map
+     map_variable: OrderedDict[str, MapVariableModel] | None = Field(
+         default_factory=OrderedDict
+     )
+     # [ {value: v1}, {value: v2} ] for index based iteration,
+     # i1 is outside loop and i2 is nested loop
+     loop_variable: list[LoopIndexModel] | None = Field(default_factory=lambda: [])
+
+
+ ## Generated pipeline file
+ GENERATED_PIPELINE_FILE = "generated_pipeline.yaml"
+
+ # STATUS progression
+ # For Branch, CREATED -> PROCESSING -> SUCCESS OR FAIL
+ # For a step, CREATED -> TRIGGERED -> PROCESSING -> SUCCESS OR FAIL
+ CREATED = "CREATED"
+ PROCESSING = "PROCESSING"
+ SUCCESS = "SUCCESS"
+ FAIL = "FAIL"
+
+ # Node and Command settings
+ COMMAND_TYPE = "python"
+ COMMAND_FRIENDLY_CHARACTER = "%"
+
+ # Default services
+ DEFAULT_SERVICES: dict[str, Any] = {
+     "pipeline_executor": {"type": "local", "config": {}},
+     "job_executor": {"type": "local", "config": {}},
+     "run_log_store": {"type": "file-system", "config": {}},
+     "catalog": {"type": "file-system", "config": {}},
+     "pickler": {"type": "pickle", "config": {}},
+     "secrets": {"type": "env-secrets", "config": {}},
+ }
+
+ # Map state
+ MAP_PLACEHOLDER = "map_variable_placeholder"
+ LOOP_PLACEHOLDER = "loop_variable_placeholder"
+
+ # Dag node
+ DAG_BRANCH_NAME = "dag"
+
+ # RUN settings
+ RANDOM_RUN_ID_LEN = 6
+ MAX_TIME = 86400  # 1 day in seconds
+
+ # User extensions
+ USER_CONFIG_FILE = "runnable-config.yaml"
+
+
+ # RUN log store settings
+ LOG_LOCATION_FOLDER = ".run_log_store"
+
+ # Dag node
+ DAG_BRANCH_NAME = "dag"
+
+ # Data catalog settings
+ CATALOG_LOCATION_FOLDER = ".catalog"
+ COMPUTE_DATA_FOLDER = "."
+
+ # Secrets settings
+ DOTENV_FILE_LOCATION = ".env"
+
+ LEN_SHA_FOR_TAG = 8
+
+ # JOB CONFIG
+ DEFAULT_JOB_NAME = "job"
+
+ ## Logging settings
+
+ LOGGING_CONFIG = {
+     "version": 1,
+     "disable_existing_loggers": True,
+     "formatters": {
+         "standard": {"format": "%(asctime)s [%(levelname)s] %(name)s: %(message)s"},
+         "runnable_formatter": {"format": "%(message)s", "datefmt": "[%X]"},
+     },
+     "handlers": {
+         "default": {
+             "formatter": "standard",
+             "class": "logging.StreamHandler",
+             "stream": "ext://sys.stdout",  # Default is stderr
+         },
+         "runnable_handler": {
+             "formatter": "runnable_formatter",
+             "class": "rich.logging.RichHandler",
+             "rich_tracebacks": True,
+         },
+     },
+     "loggers": {
+         "": {
+             "handlers": ["default"],
+             "propagate": True,
+         },  # Root logger
+         LOGGER_NAME: {"handlers": ["runnable_handler"], "propagate": False},
+     },
+ }
+
+
+ # styles
+ error_style = Style(color="red", bold=True)
+ warning_style = Style(color="yellow", bold=True)
+ success_style = Style(color="green", bold=True)
+ info_style = Style(color="blue", bold=True)
+
+ # Hash computation settings
+ HASH_ALGORITHM = "sha256"  # More secure and faster than MD5
+ LARGE_FILE_THRESHOLD_BYTES = 1024 * 1024 * 1024  # 1GB
+ HASH_CHUNK_SIZE = 1024 * 1024  # 1MB chunks for fingerprint hashing
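The IterableParameterModel defined above is what the entrypoints in the next file pass between processes as a JSON string. A minimal sketch of that round trip, assuming pydantic v2 (implied by the field_validator import); the key "chunk" and the values are illustrative only:

from collections import OrderedDict

from runnable.defaults import IterableParameterModel, LoopIndexModel, MapVariableModel

# Iteration state for one map key and one loop index (illustrative values).
state = IterableParameterModel(
    map_variable=OrderedDict(chunk=MapVariableModel(value="a")),
    loop_variable=[LoopIndexModel(value=0)],
)

# Serialise to JSON; this string is the shape the entrypoints accept as `iter_variable`.
payload = state.model_dump_json()

# Rebuild the model, mirroring what execute_single_node and fan do internally.
restored = IterableParameterModel.model_validate_json(payload)
print(restored.map_variable["chunk"].value)  # -> a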
runnable/entrypoints.py ADDED
@@ -0,0 +1,390 @@
+ import json
+ import logging
+ from typing import Optional
+
+ import runnable.context as context
+ from runnable import console, defaults, graph, nodes
+ from runnable.defaults import IterableParameterModel
+
+ logger = logging.getLogger(defaults.LOGGER_NAME)
+
+
+ def execute_pipeline_yaml_spec(
+     pipeline_file: str,
+     configuration_file: str = "",
+     tag: str = "",
+     run_id: str = "",
+     parameters_file: str = "",
+ ):
+     # pylint: disable=R0914,R0913
+     """
+     The entry point to runnable execution for any YAML-based spec.
+     The result is either:
+     - execution of the pipeline, if the executor is local
+     - rendering of the spec, in the case of a non-local executor
+     """
+
+     service_configurations = context.ServiceConfigurations(
+         configuration_file=configuration_file,
+         execution_context=context.ExecutionContext.PIPELINE,
+     )
+     configurations = {
+         "pipeline_definition_file": pipeline_file,
+         "parameters_file": parameters_file,
+         "tag": tag,
+         "run_id": run_id,
+         "execution_mode": context.ExecutionMode.YAML,
+         "configuration_file": configuration_file,
+         **service_configurations.services,
+     }
+
+     logger.info("Resolved configurations:")
+     logger.info(json.dumps(configurations, indent=4))
+
+     run_context = context.PipelineContext.model_validate(configurations)
+
+     run_context.execute()
+
+     run_context.pipeline_executor.send_return_code()
+
+
+ def execute_single_node(
+     configuration_file: str,
+     pipeline_file: str,
+     step_name: str,
+     iter_variable: str,
+     mode: str,
+     run_id: str,
+     tag: str = "",
+     parameters_file: str = "",
+ ):
+     """
+     This entry point is triggered during the execution of the pipeline
+     in non-local execution environments.
+
+     The mode defines how the pipeline spec is provided to runnable:
+     - yaml
+     - python
+     """
+
+     service_configurations = context.ServiceConfigurations(
+         configuration_file=configuration_file,
+         execution_context=context.ExecutionContext.PIPELINE,
+     )
+     configurations = {
+         "pipeline_definition_file": pipeline_file,
+         "parameters_file": parameters_file,
+         "tag": tag,
+         "run_id": run_id,
+         "execution_mode": mode,
+         "configuration_file": configuration_file,
+         **service_configurations.services,
+     }
+
+     logger.info("Resolved configurations:")
+     logger.info(json.dumps(configurations, indent=4))
+
+     run_context = context.PipelineContext.model_validate(configurations)
+     context.set_run_context(run_context)
+     assert run_context.dag
+
+     iteration_variable: Optional[IterableParameterModel] = None
+     if iter_variable:
+         iteration_variable = IterableParameterModel.model_validate_json(iter_variable)
+
+     step_internal_name = nodes.BaseNode._get_internal_name_from_command_name(step_name)
+     node_to_execute, _ = graph.search_node_by_internal_name(
+         run_context.dag, step_internal_name
+     )
+
+     logger.info("Executing the single node: %s", node_to_execute)
+
+     run_context.pipeline_executor.execute_node(
+         node=node_to_execute, iter_variable=iteration_variable
+     )
+
+     # run_context.pipeline_executor.send_return_code()
+
+
+ def execute_single_branch(
+     branch_name: str,
+     branch: graph.Graph,
+     run_context: context.PipelineContext,
+     iter_variable: str | None = None,
+ ):
+     """
+     Execute a single branch in a separate process for parallel execution.
+
+     This function is designed to be called by multiprocessing to execute
+     individual branches of parallel and map nodes.
+
+     Args:
+         branch_name (str): The name/identifier of the branch
+         branch (Graph): The graph object representing the branch to execute
+         run_context (PipelineContext): The pipeline execution context
+         iter_variable (str, optional): JSON-encoded iteration variables for the execution
+     """
+     # Set up branch-specific logging
+     _setup_branch_logging(branch_name)
+
+     logger.info(f"Executing single branch: {branch_name}")
+
+     try:
+         context.set_run_context(run_context)
+
+         # Convert to IterableParameterModel
+         iteration_variable: Optional[IterableParameterModel] = None
+         if iter_variable:
+             iteration_variable = IterableParameterModel.model_validate_json(
+                 iter_variable
+             )
+         # Execute the branch using the pipeline executor
+         run_context.pipeline_executor.execute_graph(
+             branch, iter_variable=iteration_variable
+         )
+         logger.info(f"Branch {branch_name} completed successfully")
+         return True
+     except Exception as e:
+         logger.error(f"Branch {branch_name} failed with error: {e}")
+         return False
+
+
+ def _setup_branch_logging(branch_name: str):
+     """
+     Set up branch-specific logging with prefixes to organize parallel execution logs.
+
+     Args:
+         branch_name (str): The name of the branch to use as a prefix
+     """
+     import logging
+     import sys
+
+     # Create a custom formatter that includes the branch name
+     class BranchFormatter(logging.Formatter):
+         def __init__(self, branch_name: str):
+             self.branch_name = branch_name
+             # Extract just the meaningful part of the branch name for cleaner display
+             self.display_name = self._get_display_name(branch_name)
+             super().__init__()
+
+         def _get_display_name(self, branch_name: str) -> str:
+             """Extract a clean display name from the full branch name."""
+             # For parallel branches like 'parallel_step.branch1', use 'branch1'
+             # For map branches like 'map_state.1', use 'iter:1'
+             if "." in branch_name:
+                 parts = branch_name.split(".")
+                 if len(parts) >= 2:
+                     last_part = parts[-1]
+                     # Check if it looks like a map iteration (numeric)
+                     if last_part.isdigit():
+                         return f"iter:{last_part}"
+                     else:
+                         return last_part
+             return branch_name
+
+         def format(self, record):
+             # Add branch prefix to the message
+             original_msg = record.getMessage()
+             record.msg = f"[{self.display_name}] {original_msg}"
+             record.args = ()
+
+             # Use a simple format for clarity
+             return f"{record.levelname}:{record.msg}"
+
+     # Get the root logger and add our custom formatter
+     root_logger = logging.getLogger()
+
+     # Remove existing handlers to avoid duplicate logs
+     for handler in root_logger.handlers[:]:
+         if hasattr(handler, "_branch_handler"):
+             root_logger.removeHandler(handler)
+
+     # Create a new handler with branch-specific formatting
+     handler = logging.StreamHandler(sys.stdout)
+     handler.setFormatter(BranchFormatter(branch_name))
+     handler._branch_handler = True  # type: ignore  # Mark it as our custom handler
+     handler.setLevel(logging.INFO)
+
+     # Add the handler to the root logger
+     root_logger.addHandler(handler)
+     root_logger.setLevel(logging.INFO)
+
+
+ def execute_job_non_local(
+     job_definition_file: str,
+     configuration_file: str = "",
+     tag: str = "",
+     run_id: str = "",
+     parameters_file: str = "",
+ ):
+     service_configurations = context.ServiceConfigurations(
+         configuration_file=configuration_file,
+         execution_context=context.ExecutionContext.JOB,
+     )
+     configurations = {
+         "job_definition_file": job_definition_file,
+         "parameters_file": parameters_file,
+         "tag": tag,
+         "run_id": run_id,
+         "configuration_file": configuration_file,
+         **service_configurations.services,
+     }
+
+     logger.info("Resolved configurations:")
+     logger.info(json.dumps(configurations, indent=4))
+
+     run_context = context.JobContext.model_validate(configurations)
+     context.set_run_context(run_context)
+     assert run_context.job
+
+     logger.info("Executing the job in non-local mode")
+     logger.info("Job to execute: %s", run_context.job)
+
+     try:
+         run_context.job_executor.execute_job(
+             run_context.job,
+             catalog_settings=run_context.catalog_settings,
+         )
+     finally:
+         console.print("Job execution completed. Sending return code...")
+
+         run_context.job_executor.send_return_code()
+
+
+ def fan(
+     configuration_file: str,
+     pipeline_file: str,
+     step_name: str,
+     mode: str,
+     in_or_out: str,
+     iter_variable: str,
+     run_id: str,
+     tag: str = "",
+     parameters_file: str = "",
+ ):
+     """
+     The entry point to either fan in or out for a composite node. Only third-party orchestrators should use this.
+
+     It needs a similar configuration set up to execute, because orchestrator modes can initiate the execution.
+
+     Args:
+         configuration_file (str): The configuration file.
+         in_or_out (str): Whether to fan "in" or "out"
+         step_name (str): The name of the step to execute, in dot path convention
+         pipeline_file (str): The config/dag file
+         run_id (str): The run id of the run.
+         tag (str): An optional tag provided at run time
+         parameters_file (str): The parameters being sent in to the application
+
+     """
+     service_configurations = context.ServiceConfigurations(
+         configuration_file=configuration_file,
+         execution_context=context.ExecutionContext.PIPELINE,
+     )
+     configurations = {
+         "pipeline_definition_file": pipeline_file,
+         "parameters_file": parameters_file,
+         "tag": tag,
+         "run_id": run_id,
+         "execution_mode": mode,
+         "configuration_file": configuration_file,
+         **service_configurations.services,
+     }
+
+     logger.info("Resolved configurations:")
+     logger.info(json.dumps(configurations, indent=4))
+
+     run_context = context.PipelineContext.model_validate(configurations)
+     context.set_run_context(run_context)
+     assert run_context.dag
+
+     step_internal_name = nodes.BaseNode._get_internal_name_from_command_name(step_name)
+     node_to_execute, _ = graph.search_node_by_internal_name(
+         run_context.dag, step_internal_name
+     )
+
+     iteration_variable: Optional[IterableParameterModel] = None
+     if iter_variable:
+         iteration_variable = IterableParameterModel.model_validate_json(iter_variable)
+
+     if in_or_out == "in":
+         logger.info("Fanning in for: %s", node_to_execute)
+         run_context.pipeline_executor.fan_in(
+             node=node_to_execute, iter_variable=iteration_variable
+         )
+     elif in_or_out == "out":
+         logger.info("Fanning out for: %s", node_to_execute)
+         run_context.pipeline_executor.fan_out(
+             node=node_to_execute, iter_variable=iteration_variable
+         )
+     else:
+         raise ValueError(f"Invalid in_or_out value: {in_or_out}")
+
+
+ def retry_pipeline(
+     run_id: str,
+     configuration_file: str = "",
+     tag: str = "",
+ ):
+     """
+     Retry a failed pipeline run from the point of failure.
+
+     This entrypoint:
+     1. Loads the run log for the given run_id
+     2. Extracts pipeline_definition_file from run_config
+     3. Sets the RUNNABLE_RETRY_RUN_ID env var
+     4. Re-executes the pipeline via context
+
+     Args:
+         run_id: The run_id of the failed run to retry
+         configuration_file: Optional config file (defaults to local execution)
+         tag: Optional tag for the retry run
+     """
+     import os
+
+     # Set up service configurations
+     service_configurations = context.ServiceConfigurations(
+         configuration_file=configuration_file,
+         execution_context=context.ExecutionContext.PIPELINE,
+     )
+
+     # Instantiate the run log store to query the original run
+     run_log_store_config = service_configurations.services["run_log_store"]
+     store_instance = context.get_service_by_name(
+         "run_log_store", run_log_store_config, None
+     )
+     run_log = store_instance.get_run_log_by_id(run_id=run_id, full=False)
+
+     run_config = run_log.run_config
+     pipeline_definition_file = run_config.get("pipeline_definition_file", "")
+
+     if not pipeline_definition_file:
+         raise ValueError(f"No pipeline_definition_file found in run log for {run_id}")
+
+     logger.info(f"Retrying run {run_id}")
+     logger.info(f"Pipeline definition: {pipeline_definition_file}")
+
+     # Set the retry environment variable
+     os.environ[defaults.RETRY_RUN_ID] = run_id
+
+     # Create the full pipeline context and execute
+     configurations = {
+         "pipeline_definition_file": pipeline_definition_file,
+         "parameters_file": "",
+         "tag": tag,
+         "run_id": run_id,
+         "execution_mode": context.ExecutionMode.PYTHON,
+         "configuration_file": configuration_file,
+         **service_configurations.services,
+     }
+
+     run_context = context.PipelineContext.model_validate(configurations)
+     context.set_run_context(run_context)
+     run_context.execute()
+     # run_context.pipeline_executor.send_return_code()
+
+
+ if __name__ == "__main__":
+     # This is only for perf testing purposes.
+     # execute_single_branch()  # Missing required arguments
+     pass
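To make the fan/execute_single_node contract concrete, here is a hedged sketch of how a third-party orchestrator might drive one iteration of a map node: fan out, execute the mapped step for that iteration, then fan in. Only the function signatures come from the diff above; the file names, step names, run id, and iteration values are hypothetical.

import json

from runnable import entrypoints

# JSON form of IterableParameterModel for a single map iteration (hypothetical values).
iter_variable = json.dumps(
    {"map_variable": {"chunk": {"value": "a"}}, "loop_variable": []}
)

# Arguments shared by fan and execute_single_node (all file names are hypothetical).
common = dict(
    configuration_file="config.yaml",
    pipeline_file="pipeline.yaml",
    mode="yaml",
    iter_variable=iter_variable,
    run_id="run-0001",
)

# Fan out the composite node, run the mapped step for this iteration, then fan in.
entrypoints.fan(step_name="map_step", in_or_out="out", **common)
entrypoints.execute_single_node(step_name="map_step.process", **common)
entrypoints.fan(step_name="map_step", in_or_out="in", **common)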
runnable/exceptions.py ADDED
@@ -0,0 +1,137 @@
+ class RunLogExistsError(Exception):  # pragma: no cover
+     """
+     Exception class
+     Args:
+         Exception ([type]): [description]
+     """
+
+     def __init__(self, run_id):
+         self.run_id = run_id
+         message = f"Run log for {run_id} already exists in the datastore"
+         super().__init__(message)
+
+
+ class JobLogNotFoundError(Exception):
+     """
+     Exception class
+     Args:
+         Exception ([type]): [description]
+     """
+
+     def __init__(self, run_id):
+         self.run_id = run_id
+         message = f"Job for {run_id} is not found in the datastore"
+         super().__init__(message)
+
+
+ class RunLogNotFoundError(Exception):  # pragma: no cover
+     """
+     Exception class
+     Args:
+         Exception ([type]): [description]
+     """
+
+     def __init__(self, run_id):
+         self.run_id = run_id
+         message = f"Run log for {run_id} is not found in the datastore"
+         super().__init__(message)
+
+
+ class StepLogNotFoundError(Exception):  # pragma: no cover
+     """
+     Exception class
+     Args:
+         Exception ([type]): [description]
+     """
+
+     def __init__(self, run_id, step_name):
+         self.run_id = run_id
+         self.step_name = step_name
+         message = f"Step log for {step_name} is not found in the datastore for Run id: {run_id}"
+         super().__init__(message)
+
+
+ class BranchLogNotFoundError(Exception):  # pragma: no cover
+     """
+     Exception class
+     Args:
+         Exception ([type]): [description]
+     """
+
+     def __init__(self, run_id, branch_name):
+         self.run_id = run_id
+         self.branch_name = branch_name
+         message = f"Branch log for {branch_name} is not found in the datastore for Run id: {run_id}"
+         super().__init__(message)
+
+
+ class NodeNotFoundError(Exception):  # pragma: no cover
+     """
+     Exception class
+     Args:
+         Exception ([type]): [description]
+     """
+
+     def __init__(self, name):
+         self.name = name
+         message = f"Node of name {name} is not found in the graph"
+         super().__init__(message)
+
+
+ class BranchNotFoundError(Exception):  # pragma: no cover
+     """
+     Exception class
+     Args:
+         Exception ([type]): [description]
+     """
+
+     def __init__(self, name):
+         self.name = name
+         message = f"Branch of name {name} is not found in the graph"
+         super().__init__(message)
+
+
+ class NodeMethodCallError(Exception):
+     """
+     Exception class
+     """
+
+     def __init__(self, message):
+         super().__init__(message)
+
+
+ class TerminalNodeError(Exception):  # pragma: no cover
+     def __init__(self):
+         message = "Terminal nodes do not have a next node"
+         super().__init__(message)
+
+
+ class SecretNotFoundError(Exception):  # pragma: no cover
+     def __init__(self, secret_name, secret_setting):
+         self.secret_name = secret_name
+         self.secret_setting = secret_setting
+         message = f"No secret found by name: {secret_name} in {secret_setting}"
+         super().__init__(message)
+
+
+ class ExecutionFailedError(Exception):  # pragma: no cover
+     def __init__(self, run_id: str):
+         self.run_id = run_id
+         message = f"Execution failed for run id: {run_id}"
+         super().__init__(message)
+
+
+ class CommandCallError(Exception):  # pragma: no cover
+     """An exception raised during the call of the command."""
+
+
+ class RetryValidationError(Exception):
+     """Raised when retry validation fails (missing run log, DAG mismatch, etc.)"""
+
+     def __init__(self, message: str, run_id: str = ""):
+         self.run_id = run_id
+         super().__init__(message)
+
+
+ class EntityNotFoundError(Exception):
+     pass
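As a usage note, these are the exceptions callers are expected to catch around the entrypoints. A minimal sketch of guarding a retry against a missing run log, assuming (as the class name suggests) that the configured run log store raises RunLogNotFoundError for an unknown run id; the run id is made up.

from runnable import entrypoints
from runnable.exceptions import RunLogNotFoundError

try:
    # Retry a previous run by id (hypothetical run id).
    entrypoints.retry_pipeline(run_id="run-0001")
except RunLogNotFoundError as err:
    print(f"Nothing to retry: {err}")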