runnable 0.14.0-py3-none-any.whl → 0.17.0-py3-none-any.whl

runnable/entrypoints.py CHANGED
@@ -9,12 +9,16 @@ from rich.progress import BarColumn, Progress, TextColumn, TimeElapsedColumn
 from rich.table import Column
 
 import runnable.context as context
-from runnable import console, defaults, graph, task_console, utils
+from runnable import console, defaults, graph, task_console, tasks, utils
 from runnable.defaults import RunnableConfig, ServiceConfig
+from runnable.executor import BaseJobExecutor, BasePipelineExecutor
 
 logger = logging.getLogger(defaults.LOGGER_NAME)
 
 
+print("") # removes the buffer print
+
+
 def get_default_configs() -> RunnableConfig:
     """
     User can provide extensions as part of their code base, runnable-config.yaml provides the place to put them.
@@ -29,10 +33,9 @@ def get_default_configs() -> RunnableConfig:
 def prepare_configurations(
     run_id: str,
     configuration_file: str = "",
-    pipeline_file: str = "",
     tag: str = "",
     parameters_file: str = "",
-    force_local_executor: bool = False,
+    is_job: bool = False,
 ) -> context.Context:
     """
     Sets up everything needed
@@ -42,7 +45,6 @@ def prepare_configurations(
 
     Args:
         variables_file (str): The variables file, if used or None
-        pipeline_file (str): The config/dag file
         run_id (str): The run id of the run.
         tag (str): If a tag is provided at the run time
 
@@ -59,22 +61,26 @@ def prepare_configurations(
     )
 
     if configuration_file:
-        templated_configuration = utils.load_yaml(configuration_file) or {}
+        templated_configuration = utils.load_yaml(configuration_file)
+
+    # apply variables
+    configuration = cast(
+        RunnableConfig, utils.apply_variables(templated_configuration, variables)
+    )
 
     # Since all the services (run_log_store, catalog, secrets, executor) are
     # dynamically loaded via stevedore, we cannot validate the configuration
     # before they are passed to the service.
 
-    configuration: RunnableConfig = cast(RunnableConfig, templated_configuration)
-
     logger.info(f"Resolved configurations: {configuration}")
 
     # Run log settings, configuration over-rides everything
-    run_log_config: Optional[ServiceConfig] = configuration.get("run_log_store", None)
+    # The user config has run-log-store while internally we use run_log_store
+    run_log_config: Optional[ServiceConfig] = configuration.get("run-log-store", None) # type: ignore
     if not run_log_config:
         run_log_config = cast(
             ServiceConfig,
-            runnable_defaults.get("run_log_store", defaults.DEFAULT_RUN_LOG_STORE),
+            runnable_defaults.get("run-log-store", defaults.DEFAULT_RUN_LOG_STORE),
         )
     run_log_store = utils.get_provider_by_name_and_type("run_log_store", run_log_config)
 
@@ -100,18 +106,37 @@ def prepare_configurations(
     )
     pickler_handler = utils.get_provider_by_name_and_type("pickler", pickler_config)
 
-    # executor configurations, configuration over rides everything
-    executor_config: Optional[ServiceConfig] = configuration.get("executor", None)
-    if force_local_executor:
-        executor_config = ServiceConfig(type="local", config={})
+    if not is_job:
+        # executor configurations, configuration over rides everything
+        executor_config: Optional[ServiceConfig] = configuration.get(
+            "pipeline-executor", None
+        ) # type: ignore
+        # as pipeline-executor is not a valid key
+        if not executor_config:
+            executor_config = cast(
+                ServiceConfig,
+                runnable_defaults.get(
+                    "pipeline-executor", defaults.DEFAULT_PIPELINE_EXECUTOR
+                ),
+            )
+        configured_executor = utils.get_provider_by_name_and_type(
+            "pipeline_executor", executor_config
+        )
+    else:
+        # executor configurations, configuration over rides everything
+        job_executor_config: Optional[ServiceConfig] = configuration.get(
+            "job-executor", None
+        ) # type: ignore
+        if not job_executor_config:
+            executor_config = cast(
+                ServiceConfig,
+                runnable_defaults.get("job-executor", defaults.DEFAULT_JOB_EXECUTOR),
+            )
 
-    if not executor_config:
-        executor_config = cast(
-            ServiceConfig, runnable_defaults.get("executor", defaults.DEFAULT_EXECUTOR)
+        assert job_executor_config, "Job executor is not provided"
+        configured_executor = utils.get_provider_by_name_and_type(
+            "job_executor", job_executor_config
         )
-    configured_executor = utils.get_provider_by_name_and_type(
-        "executor", executor_config
-    )
 
     # Construct the context
     run_context = context.Context(
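Note: the hunk above replaces the single `executor` service with two configuration keys, `pipeline-executor` and `job-executor`, selected by the new `is_job` flag, each falling back to `runnable-config.yaml` defaults and then to a packaged default. A minimal sketch of that lookup order, with illustrative dictionaries standing in for the real `ServiceConfig` values:

```python
from typing import Optional

# Illustrative fallback only; the real value is defaults.DEFAULT_PIPELINE_EXECUTOR.
DEFAULT_PIPELINE_EXECUTOR = {"type": "local", "config": {}}


def resolve_pipeline_executor_config(
    configuration: dict, runnable_defaults: dict
) -> dict:
    # Precedence mirrored from the hunk above:
    # 1. the user's configuration file ("pipeline-executor" key)
    # 2. runnable-config.yaml defaults
    # 3. the packaged default
    executor_config: Optional[dict] = configuration.get("pipeline-executor", None)
    if not executor_config:
        executor_config = runnable_defaults.get(
            "pipeline-executor", DEFAULT_PIPELINE_EXECUTOR
        )
    return executor_config


print(resolve_pipeline_executor_config({}, {}))  # -> {'type': 'local', 'config': {}}
```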
@@ -127,38 +152,45 @@ def prepare_configurations(
         parameters_file=parameters_file,
     )
 
-    if pipeline_file:
-        if pipeline_file.endswith(".py"):
-            # converting a pipeline defined in python to a dag in yaml
-            module_file = pipeline_file.strip(".py")
-            module, func = utils.get_module_and_attr_names(module_file)
-            sys.path.insert(0, os.getcwd()) # Need to add the current directory to path
-            imported_module = importlib.import_module(module)
+    context.run_context = run_context
 
-            os.environ["RUNNABLE_PY_TO_YAML"] = "true"
-            dag = getattr(imported_module, func)().return_dag()
+    return run_context
 
-        else:
-            pipeline_config = utils.load_yaml(pipeline_file)
 
-            logger.info("The input pipeline:")
-            logger.info(json.dumps(pipeline_config, indent=4))
+def set_pipeline_spec_from_yaml(run_context: context.Context, pipeline_file: str):
+    """
+    Reads the pipeline file from a YAML file and sets the pipeline spec in the run context
+    """
+    pipeline_config = utils.load_yaml(pipeline_file)
+    logger.info("The input pipeline:")
+    logger.info(json.dumps(pipeline_config, indent=4))
 
-            dag_config = pipeline_config["dag"]
+    dag_config = pipeline_config["dag"]
 
-            dag_hash = utils.get_dag_hash(dag_config)
-            dag = graph.create_graph(dag_config)
-            run_context.dag_hash = dag_hash
+    dag_hash = utils.get_dag_hash(dag_config)
+    dag = graph.create_graph(dag_config)
+    run_context.dag_hash = dag_hash
 
-        run_context.pipeline_file = pipeline_file
-        run_context.dag = dag
+    run_context.pipeline_file = pipeline_file
+    run_context.dag = dag
 
-    context.run_context = run_context
 
-    return run_context
+def set_pipeline_spec_from_python(run_context: context.Context, python_module: str):
+    # Call the SDK to get the dag
+    # Import the module and call the function to get the dag
+    module_file = python_module.strip(".py")
+    module, func = utils.get_module_and_attr_names(module_file)
+    sys.path.insert(0, os.getcwd()) # Need to add the current directory to path
+    imported_module = importlib.import_module(module)
+
+    run_context.from_sdk = True
+    dag = getattr(imported_module, func)().return_dag()
+
+    run_context.pipeline_file = python_module
+    run_context.dag = dag
 
 
-def execute(
+def execute_pipeline_yaml_spec(
     pipeline_file: str,
     configuration_file: str = "",
     tag: str = "",
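Note: the pipeline spec is now attached to the run context by one of the two helpers above rather than inside `prepare_configurations`. A hedged usage sketch (the YAML path reuses `examples/mocking.yaml` from the diff; the Python-module argument format is an assumption and is left commented out):

```python
from runnable.entrypoints import prepare_configurations, set_pipeline_spec_from_yaml

# Hypothetical driver code, not part of the diff.
run_context = prepare_configurations(run_id="demo-run")

# YAML spec: parses the dag, records its hash and attaches it to the context.
set_pipeline_spec_from_yaml(run_context, "examples/mocking.yaml")

# Python/SDK spec: imports the module, calls the named function and uses its
# return_dag(); the "module.function" style argument here is an assumption.
# set_pipeline_spec_from_python(run_context, "examples.pipeline.main")

assert run_context.dag is not None
```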
@@ -167,39 +199,35 @@ def execute(
 ):
     # pylint: disable=R0914,R0913
     """
-    The entry point to runnable execution. This method would prepare the configurations and delegates traversal to the
-    executor
-
-    Args:
-        pipeline_file (str): The config/dag file
-        run_id (str): The run id of the run.
-        tag (str): If a tag is provided at the run time
-        parameters_file (str): The parameters being sent in to the application
+    The entry point to runnable execution for any YAML based spec.
+    The result could:
+        - Execution of the pipeline if its local executor
+        - Rendering of the spec in the case of non local executor
     """
     run_id = utils.generate_run_id(run_id=run_id)
 
     run_context = prepare_configurations(
         configuration_file=configuration_file,
-        pipeline_file=pipeline_file,
         run_id=run_id,
         tag=tag,
         parameters_file=parameters_file,
     )
 
-    console.print("Working with context:")
-    console.print(run_context)
-    console.rule(style="[dark orange]")
+    assert isinstance(run_context.executor, BasePipelineExecutor)
 
+    set_pipeline_spec_from_yaml(run_context, pipeline_file)
     executor = run_context.executor
 
-    run_context.execution_plan = defaults.EXECUTION_PLAN.CHAINED.value
-
     utils.set_runnable_environment_variables(
         run_id=run_id, configuration_file=configuration_file, tag=tag
     )
 
     # Prepare for graph execution
-    executor.prepare_for_graph_execution()
+    executor._set_up_run_log(exists_ok=False)
+
+    console.print("Working with context:")
+    console.print(run_context)
+    console.rule(style="[dark orange]")
 
     logger.info(f"Executing the graph: {run_context.dag}")
     with Progress(
@@ -218,8 +246,8 @@ def execute(
             run_context.progress = progress
             executor.execute_graph(dag=run_context.dag) # type: ignore
 
-            # Non local executors have no run logs
-            if not executor._local:
+            if not executor._is_local:
+                # Non local executors only traverse the graph and do not execute the nodes
                 executor.send_return_code(stage="traversal")
                 return
 
@@ -259,24 +287,18 @@ def execute_single_node(
     pipeline_file: str,
     step_name: str,
     map_variable: str,
+    mode: str,
     run_id: str,
     tag: str = "",
     parameters_file: str = "",
 ):
     """
-    The entry point into executing a single node of runnable. Orchestration modes should extensively use this
-    entry point.
-
-    It should have similar set up of configurations to execute because orchestrator modes can initiate the execution.
-
-    Args:
-        variables_file (str): The variables file, if used or None
-        step_name : The name of the step to execute in dot path convention
-        pipeline_file (str): The config/dag file
-        run_id (str): The run id of the run.
-        tag (str): If a tag is provided at the run time
-        parameters_file (str): The parameters being sent in to the application
+    This entry point is triggered during the execution of the pipeline
+        - non local execution environments
 
+    The mode defines how the pipeline spec is provided to the runnable
+        - yaml
+        - python
     """
     from runnable import nodes
 
@@ -290,30 +312,30 @@ def execute_single_node(
 
     run_context = prepare_configurations(
         configuration_file=configuration_file,
-        pipeline_file=pipeline_file,
         run_id=run_id,
         tag=tag,
         parameters_file=parameters_file,
     )
+    assert isinstance(run_context.executor, BasePipelineExecutor)
+
+    if mode == "yaml":
+        # Load the yaml file
+        set_pipeline_spec_from_yaml(run_context, pipeline_file)
+    elif mode == "python":
+        # Call the SDK to get the dag
+        set_pipeline_spec_from_python(run_context, pipeline_file)
+
+    assert run_context.dag
+
     task_console.print("Working with context:")
     task_console.print(run_context)
     task_console.rule(style="[dark orange]")
 
     executor = run_context.executor
-    run_context.execution_plan = defaults.EXECUTION_PLAN.CHAINED.value
     utils.set_runnable_environment_variables(
         run_id=run_id, configuration_file=configuration_file, tag=tag
     )
 
-    executor.prepare_for_node_execution()
-
-    # TODO: may be make its own entry point
-    # if not run_context.dag:
-    #     # There are a few entry points that make graph dynamically and do not have a dag defined statically.
-    #     run_log = run_context.run_log_store.get_run_log_by_id(run_id=run_id, full=False)
-    #     run_context.dag = graph.create_graph(run_log.run_config["pipeline"])
-    assert run_context.dag
-
     map_variable_dict = utils.json_to_ordered_dict(map_variable)
 
     step_internal_name = nodes.BaseNode._get_internal_name_from_command_name(step_name)
@@ -322,7 +344,7 @@ def execute_single_node(
     )
 
     logger.info("Executing the single node of : %s", node_to_execute)
-    ## This step is where we save the log file
+    ## This step is where we save output of the function/shell command
     try:
         executor.execute_node(node=node_to_execute, map_variable=map_variable_dict)
     finally:
@@ -336,23 +358,15 @@ def execute_single_node(
         run_context.catalog_handler.put(name=log_file_name, run_id=run_context.run_id)
         os.remove(log_file_name)
 
-    # executor.send_return_code(stage="execution")
-
 
-def execute_notebook(
-    entrypoint: str,
-    notebook_file: str,
-    catalog_config: dict,
-    configuration_file: str,
-    notebook_output_path: str = "",
+def execute_job_yaml_spec(
+    job_definition_file: str,
+    configuration_file: str = "",
     tag: str = "",
     run_id: str = "",
     parameters_file: str = "",
 ):
-    """
-    The entry point to runnable execution of a notebook. This method would prepare the configurations and
-    delegates traversal to the executor
-    """
+    # A job and task are internally the same.
     run_id = utils.generate_run_id(run_id=run_id)
 
     run_context = prepare_configurations(
@@ -360,71 +374,87 @@ def execute_notebook(
         run_id=run_id,
         tag=tag,
         parameters_file=parameters_file,
+        is_job=True,
     )
 
+    assert isinstance(run_context.executor, BaseJobExecutor)
+
     executor = run_context.executor
-    run_context.execution_plan = defaults.EXECUTION_PLAN.UNCHAINED.value
     utils.set_runnable_environment_variables(
         run_id=run_id, configuration_file=configuration_file, tag=tag
     )
 
+    run_context.job_definition_file = job_definition_file
+
+    job_config = utils.load_yaml(job_definition_file)
+    logger.info(
+        "Executing the job from the user."
+        f"job definition: {job_definition_file}, config: {job_config}"
+    )
+    assert job_config.get("type"), "Job type is not provided"
+
     console.print("Working with context:")
     console.print(run_context)
     console.rule(style="[dark orange]")
 
-    step_config = {
-        "command": notebook_file,
-        "command_type": "notebook",
-        "notebook_output_path": notebook_output_path,
-        "type": "task",
-        "next": "success",
-        "catalog": catalog_config,
-    }
-    node = graph.create_node(name="executing job", step_config=step_config)
-
-    if entrypoint == defaults.ENTRYPOINT.USER.value:
-        # Prepare for graph execution
-        executor.prepare_for_graph_execution()
-
-        logger.info(
-            "Executing the job from the user. We are still in the caller's compute environment"
-        )
-        executor.execute_job(node=node)
+    # A hack where we create a task node and get our job/catalog settings
+    catalog_config: list[str] = job_config.pop("catalog", {})
 
-    elif entrypoint == defaults.ENTRYPOINT.SYSTEM.value:
-        executor.prepare_for_node_execution()
-        logger.info(
-            "Executing the job from the system. We are in the config's compute environment"
-        )
-        executor.execute_node(node=node)
+    # rename the type to command_type of task
+    job_config["command_type"] = job_config.pop("type")
+    job = tasks.create_task(job_config)
 
-        # Update the status of the run log
-        step_log = run_context.run_log_store.get_step_log(
-            node._get_step_log_name(), run_id
-        )
-        run_context.run_log_store.update_run_log_status(
-            run_id=run_id, status=step_log.status
-        )
+    logger.info(
+        "Executing the job from the user. We are still in the caller's compute environment"
+    )
 
-    else:
-        raise ValueError(f"Invalid entrypoint {entrypoint}")
+    assert isinstance(executor, BaseJobExecutor)
+    executor.submit_job(job, catalog_settings=catalog_config)
 
     executor.send_return_code()
 
 
-def execute_function(
-    entrypoint: str,
-    command: str,
-    catalog_config: dict,
-    configuration_file: str,
+def set_job_spec_from_yaml(run_context: context.Context, job_definition_file: str):
+    """
+    Reads the pipeline file from a YAML file and sets the pipeline spec in the run context
+    """
+    job_config = utils.load_yaml(job_definition_file)
+    logger.info("The input job definition file:")
+    logger.info(json.dumps(job_config, indent=4))
+
+    catalog_config: list[str] = job_config.pop("catalog", {})
+
+    job_config["command_type"] = job_config.pop("type")
+
+    run_context.job_definition_file = job_definition_file
+    run_context.job = tasks.create_task(job_config)
+    run_context.job_catalog_settings = catalog_config
+
+
+def set_job_spec_from_python(run_context: context.Context, python_module: str):
+    # Import the module and call the function to get the task
+    module_file = python_module.strip(".py")
+    module, func = utils.get_module_and_attr_names(module_file)
+    sys.path.insert(0, os.getcwd()) # Need to add the current directory to path
+    imported_module = importlib.import_module(module)
+
+    run_context.from_sdk = True
+    task = getattr(imported_module, func)().return_task()
+    catalog_settings = getattr(imported_module, func)().return_catalog_settings()
+
+    run_context.job_definition_file = python_module
+    run_context.job = task
+    run_context.job_catalog_settings = catalog_settings
+
+
+def execute_job_non_local(
+    job_definition_file: str,
+    configuration_file: str = "",
+    mode: str = "yaml",
     tag: str = "",
     run_id: str = "",
     parameters_file: str = "",
 ):
-    """
-    The entry point to runnable execution of a function. This method would prepare the configurations and
-    delegates traversal to the executor
-    """
     run_id = utils.generate_run_id(run_id=run_id)
 
     run_context = prepare_configurations(
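Note: both job paths normalise the loaded job definition the same way before handing it to `tasks.create_task`: pop the optional `catalog` block and rename `type` to `command_type`. A small sketch of that reshaping on a plain dict; only the `type` key is asserted by the diff, the `command` and `catalog` values are illustrative:

```python
# Assumed shape of a loaded job definition (illustrative values).
job_config = {
    "type": "python",
    "command": "examples.functions.hello",
    "catalog": ["data/output.csv"],
}

catalog_config = job_config.pop("catalog", {})        # becomes job_catalog_settings
job_config["command_type"] = job_config.pop("type")   # task factory expects command_type
# job = tasks.create_task(job_config)                 # as in the hunks above

print(job_config)      # {'command': 'examples.functions.hello', 'command_type': 'python'}
print(catalog_config)  # ['data/output.csv']
```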
@@ -432,57 +462,33 @@ def execute_function(
         run_id=run_id,
         tag=tag,
         parameters_file=parameters_file,
+        is_job=True,
     )
 
-    executor = run_context.executor
+    assert isinstance(run_context.executor, BaseJobExecutor)
 
-    run_context.execution_plan = defaults.EXECUTION_PLAN.UNCHAINED.value
-    utils.set_runnable_environment_variables(
-        run_id=run_id, configuration_file=configuration_file, tag=tag
-    )
+    if mode == "yaml":
+        # Load the yaml file
+        set_job_spec_from_yaml(run_context, job_definition_file)
+    elif mode == "python":
+        # Call the SDK to get the task
+        set_job_spec_from_python(run_context, job_definition_file)
+
+    assert run_context.job
 
     console.print("Working with context:")
     console.print(run_context)
     console.rule(style="[dark orange]")
 
-    # Prepare the graph with a single node
-    step_config = {
-        "command": command,
-        "command_type": "python",
-        "type": "task",
-        "next": "success",
-        "catalog": catalog_config,
-    }
-    node = graph.create_node(name="executing job", step_config=step_config)
-
-    if entrypoint == defaults.ENTRYPOINT.USER.value:
-        # Prepare for graph execution
-        executor.prepare_for_graph_execution()
-
-        logger.info(
-            "Executing the job from the user. We are still in the caller's compute environment"
-        )
-        executor.execute_job(node=node)
-
-    elif entrypoint == defaults.ENTRYPOINT.SYSTEM.value:
-        executor.prepare_for_node_execution()
-        logger.info(
-            "Executing the job from the system. We are in the config's compute environment"
-        )
-        executor.execute_node(node=node)
-
-        # Update the status of the run log
-        step_log = run_context.run_log_store.get_step_log(
-            node._get_step_log_name(), run_id
-        )
-        run_context.run_log_store.update_run_log_status(
-            run_id=run_id, status=step_log.status
-        )
+    logger.info(
+        "Executing the job from the user. We are still in the caller's compute environment"
+    )
 
-    else:
-        raise ValueError(f"Invalid entrypoint {entrypoint}")
+    run_context.executor.execute_job(
+        run_context.job, catalog_settings=run_context.job_catalog_settings
+    )
 
-    executor.send_return_code()
+    run_context.executor.send_return_code()
 
 
 def fan(
@@ -518,23 +524,29 @@ def fan(
 
     run_context = prepare_configurations(
         configuration_file=configuration_file,
-        pipeline_file=pipeline_file,
         run_id=run_id,
         tag=tag,
         parameters_file=parameters_file,
     )
+
+    assert isinstance(run_context.executor, BasePipelineExecutor)
+
+    if mode == "yaml":
+        # Load the yaml file
+        set_pipeline_spec_from_yaml(run_context, pipeline_file)
+    elif mode == "python":
+        # Call the SDK to get the dag
+        set_pipeline_spec_from_python(run_context, pipeline_file)
+
     console.print("Working with context:")
     console.print(run_context)
     console.rule(style="[dark orange]")
 
     executor = run_context.executor
-    run_context.execution_plan = defaults.EXECUTION_PLAN.CHAINED.value
     utils.set_runnable_environment_variables(
         run_id=run_id, configuration_file=configuration_file, tag=tag
     )
 
-    executor.prepare_for_node_execution()
-
     step_internal_name = nodes.BaseNode._get_internal_name_from_command_name(step_name)
     node_to_execute, _ = graph.search_node_by_internal_name(
         run_context.dag, # type: ignore
@@ -553,6 +565,6 @@ def fan(
         raise ValueError(f"Invalid mode {mode}")
 
 
-if __name__ == "__main__":
-    # This is only for perf testing purposes.
-    prepare_configurations(run_id="abc", pipeline_file="examples/mocking.yaml")
+# if __name__ == "__main__":
+#     # This is only for perf testing purposes.
+#     prepare_configurations(run_id="abc", pipeline_file="examples/mocking.yaml")
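Note: taken together, entrypoints.py now routes pipelines through a `BasePipelineExecutor` and jobs through a `BaseJobExecutor`, with `is_job` deciding which one `prepare_configurations` resolves. A hedged sketch of that contract, with placeholder run ids; the isinstance checks mirror the asserts in the diff:

```python
from runnable.entrypoints import prepare_configurations
from runnable.executor import BaseJobExecutor, BasePipelineExecutor

# Pipelines: is_job defaults to False and resolves the "pipeline-executor" config.
pipeline_context = prepare_configurations(run_id="demo-pipeline")
assert isinstance(pipeline_context.executor, BasePipelineExecutor)

# Jobs: is_job=True resolves the "job-executor" configuration instead.
job_context = prepare_configurations(run_id="demo-job", is_job=True)
assert isinstance(job_context.executor, BaseJobExecutor)
```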
runnable/exceptions.py CHANGED
@@ -10,6 +10,18 @@ class RunLogExistsError(Exception): # pragma: no cover
         self.message = f"Run id for {run_id} is already found in the datastore"
 
 
+class JobLogNotFoundError(Exception):
+    """
+    Exception class
+    Args:
+        Exception ([type]): [description]
+    """
+
+    def __init__(self, run_id):
+        super().__init__()
+        self.message = f"Job for {run_id} is not found in the datastore"
+
+
 class RunLogNotFoundError(Exception): # pragma: no cover
     """
     Exception class
@@ -74,6 +86,16 @@ class BranchNotFoundError(Exception): # pragma: no cover
         self.message = f"Branch of name {name} is not found the graph"
 
 
+class NodeMethodCallError(Exception):
+    """
+    Exception class
+    """
+
+    def __init__(self, message):
+        super().__init__()
+        self.message = message
+
+
 class TerminalNodeError(Exception): # pragma: no cover
     def __init__(self):
         super().__init__()
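Note: both new exception classes follow the existing pattern in exceptions.py of carrying a `message` attribute. A minimal sketch of how calling code might surface `JobLogNotFoundError`; the `store.get_job_log` accessor is hypothetical and used only for illustration:

```python
from runnable import exceptions


def load_job_log(store, run_id: str):
    # `store.get_job_log` is a hypothetical accessor, not part of the diff.
    try:
        return store.get_job_log(run_id=run_id)
    except exceptions.JobLogNotFoundError as err:
        # e.g. "Job for <run_id> is not found in the datastore"
        print(err.message)
        raise
```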