runnable-0.35.0-py3-none-any.whl → runnable-0.36.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. extensions/job_executor/__init__.py +3 -4
  2. extensions/job_executor/emulate.py +106 -0
  3. extensions/job_executor/k8s.py +8 -8
  4. extensions/job_executor/local_container.py +13 -14
  5. extensions/nodes/__init__.py +0 -0
  6. extensions/nodes/conditional.py +7 -5
  7. extensions/nodes/fail.py +72 -0
  8. extensions/nodes/map.py +350 -0
  9. extensions/nodes/parallel.py +159 -0
  10. extensions/nodes/stub.py +89 -0
  11. extensions/nodes/success.py +72 -0
  12. extensions/nodes/task.py +92 -0
  13. extensions/pipeline_executor/__init__.py +24 -26
  14. extensions/pipeline_executor/argo.py +18 -15
  15. extensions/pipeline_executor/emulate.py +112 -0
  16. extensions/pipeline_executor/local.py +4 -4
  17. extensions/pipeline_executor/local_container.py +19 -79
  18. extensions/pipeline_executor/mocked.py +4 -4
  19. extensions/pipeline_executor/retry.py +6 -10
  20. extensions/tasks/torch.py +1 -1
  21. runnable/__init__.py +0 -8
  22. runnable/catalog.py +1 -21
  23. runnable/cli.py +0 -59
  24. runnable/context.py +519 -28
  25. runnable/datastore.py +51 -54
  26. runnable/defaults.py +12 -34
  27. runnable/entrypoints.py +82 -440
  28. runnable/exceptions.py +35 -34
  29. runnable/executor.py +13 -20
  30. runnable/names.py +1 -1
  31. runnable/nodes.py +16 -15
  32. runnable/parameters.py +2 -2
  33. runnable/sdk.py +66 -163
  34. runnable/tasks.py +62 -21
  35. runnable/utils.py +6 -268
  36. {runnable-0.35.0.dist-info → runnable-0.36.0.dist-info}/METADATA +1 -1
  37. runnable-0.36.0.dist-info/RECORD +74 -0
  38. {runnable-0.35.0.dist-info → runnable-0.36.0.dist-info}/entry_points.txt +8 -7
  39. extensions/nodes/nodes.py +0 -778
  40. runnable-0.35.0.dist-info/RECORD +0 -66
  41. {runnable-0.35.0.dist-info → runnable-0.36.0.dist-info}/WHEEL +0 -0
  42. {runnable-0.35.0.dist-info → runnable-0.36.0.dist-info}/licenses/LICENSE +0 -0
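The headline change in this release shows up in runnable/entrypoints.py below: the hand-rolled prepare_configurations() / set_pipeline_spec_from_yaml() / set_pipeline_spec_from_python() plumbing is removed, and each entrypoint instead builds a plain configurations dict and validates it into a context object from runnable/context.py. A minimal sketch of that new flow, using only names that appear verbatim in the added lines of the diff (file paths are placeholder examples; the exact signatures live in runnable/context.py and are assumed, not documented here):

    import runnable.context as context

    # Presumably resolves the configured services (run log store, catalog, secrets,
    # executor); inferred from the prepare_configurations() code this replaces.
    service_configurations = context.ServiceConfigurations(
        configuration_file="config.yaml",  # placeholder path
        execution_context=context.ExecutionContext.PIPELINE,
    )

    # User inputs merged with the resolved services into one dict ...
    configurations = {
        "pipeline_definition_file": "pipeline.yaml",  # placeholder path
        "parameters_file": "",
        "tag": "",
        "run_id": "",
        "execution_mode": context.ExecutionMode.YAML,
        "configuration_file": "config.yaml",
        **service_configurations.services,
    }

    # ... and validated (model_validate) into a context that owns execution.
    run_context = context.PipelineContext.model_validate(configurations)
    run_context.execute()
    run_context.pipeline_executor.send_return_code()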
runnable/entrypoints.py CHANGED
@@ -1,192 +1,12 @@
-import importlib
 import json
 import logging
-import os
-import sys
-from typing import Optional, cast
-
-from rich.progress import BarColumn, Progress, TextColumn, TimeElapsedColumn
-from rich.table import Column
 
 import runnable.context as context
-from runnable import console, defaults, graph, task_console, tasks, utils
-from runnable.defaults import RunnableConfig, ServiceConfig
-from runnable.executor import BaseJobExecutor, BasePipelineExecutor
+from runnable import defaults, graph, nodes, utils
 
 logger = logging.getLogger(defaults.LOGGER_NAME)
 
 
-def get_default_configs() -> RunnableConfig:
-    """
-    User can provide extensions as part of their code base, runnable-config.yaml provides the place to put them.
-    """
-    user_configs: RunnableConfig = {}
-    if utils.does_file_exist(defaults.USER_CONFIG_FILE):
-        user_configs = cast(RunnableConfig, utils.load_yaml(defaults.USER_CONFIG_FILE))
-
-    return user_configs
-
-
-def prepare_configurations(
-    run_id: str,
-    configuration_file: str = "",
-    tag: str = "",
-    parameters_file: str = "",
-    is_job: bool = False,
-) -> context.Context:
-    """
-    Sets up everything needed
-    Replace the placeholders in the dag/config against the variables file.
-
-    Attach the secrets_handler, run_log_store, catalog_handler to the executor and return it.
-
-    Args:
-        variables_file (str): The variables file, if used or None
-        run_id (str): The run id of the run.
-        tag (str): If a tag is provided at the run time
-
-    Returns:
-        executor.BaseExecutor : A prepared executor as per the dag/config
-    """
-    runnable_defaults = get_default_configs()
-
-    variables = utils.gather_variables()
-
-    templated_configuration = {}
-    configuration_file = os.environ.get(
-        "RUNNABLE_CONFIGURATION_FILE", configuration_file
-    )
-
-    if configuration_file:
-        templated_configuration = utils.load_yaml(configuration_file)
-
-    # apply variables
-    configuration = cast(
-        RunnableConfig, utils.apply_variables(templated_configuration, variables)
-    )
-
-    # Since all the services (run_log_store, catalog, secrets, executor) are
-    # dynamically loaded via stevedore, we cannot validate the configuration
-    # before they are passed to the service.
-
-    logger.info(f"Resolved configurations: {configuration}")
-
-    # Run log settings, configuration over-rides everything
-    # The user config has run-log-store while internally we use run_log_store
-    run_log_config: Optional[ServiceConfig] = configuration.get("run-log-store", None)  # type: ignore
-    if not run_log_config:
-        run_log_config = cast(
-            ServiceConfig,
-            runnable_defaults.get("run-log-store", defaults.DEFAULT_RUN_LOG_STORE),
-        )
-    run_log_store = utils.get_provider_by_name_and_type("run_log_store", run_log_config)
-
-    # Catalog handler settings, configuration over-rides everything
-    catalog_config: Optional[ServiceConfig] = configuration.get("catalog", None)
-    if not catalog_config:
-        catalog_config = cast(
-            ServiceConfig, runnable_defaults.get("catalog", defaults.DEFAULT_CATALOG)
-        )
-    catalog_handler = utils.get_provider_by_name_and_type("catalog", catalog_config)
-
-    # Secret handler settings, configuration over-rides everything
-    secrets_config: Optional[ServiceConfig] = configuration.get("secrets", None)
-    if not secrets_config:
-        secrets_config = cast(
-            ServiceConfig, runnable_defaults.get("secrets", defaults.DEFAULT_SECRETS)
-        )
-    secrets_handler = utils.get_provider_by_name_and_type("secrets", secrets_config)
-
-    # pickler
-    pickler_config = cast(
-        ServiceConfig, runnable_defaults.get("pickler", defaults.DEFAULT_PICKLER)
-    )
-    pickler_handler = utils.get_provider_by_name_and_type("pickler", pickler_config)
-
-    if not is_job:
-        # executor configurations, configuration over rides everything
-        executor_config: Optional[ServiceConfig] = configuration.get(
-            "pipeline-executor", None
-        )  # type: ignore
-        # as pipeline-executor is not a valid key
-        if not executor_config:
-            executor_config = cast(
-                ServiceConfig,
-                runnable_defaults.get(
-                    "pipeline-executor", defaults.DEFAULT_PIPELINE_EXECUTOR
-                ),
-            )
-        configured_executor = utils.get_provider_by_name_and_type(
-            "pipeline_executor", executor_config
-        )
-    else:
-        # executor configurations, configuration over rides everything
-        job_executor_config: Optional[ServiceConfig] = configuration.get(
-            "job-executor", None
-        )  # type: ignore
-        if not job_executor_config:
-            job_executor_config = cast(
-                ServiceConfig,
-                runnable_defaults.get("job-executor", defaults.DEFAULT_JOB_EXECUTOR),
-            )
-
-        assert job_executor_config, "Job executor is not provided"
-        configured_executor = utils.get_provider_by_name_and_type(
-            "job_executor", job_executor_config
-        )
-
-    # Construct the context
-    run_context = context.Context(
-        executor=configured_executor,
-        run_log_store=run_log_store,
-        catalog_handler=catalog_handler,
-        secrets_handler=secrets_handler,
-        pickler=pickler_handler,
-        variables=variables,
-        tag=tag,
-        run_id=run_id,
-        configuration_file=configuration_file,
-        parameters_file=parameters_file,
-    )
-
-    context.run_context = run_context
-
-    return run_context
-
-
-def set_pipeline_spec_from_yaml(run_context: context.Context, pipeline_file: str):
-    """
-    Reads the pipeline file from a YAML file and sets the pipeline spec in the run context
-    """
-    pipeline_config = utils.load_yaml(pipeline_file)
-    logger.info("The input pipeline:")
-    logger.info(json.dumps(pipeline_config, indent=4))
-
-    dag_config = pipeline_config["dag"]
-
-    dag_hash = utils.get_dag_hash(dag_config)
-    dag = graph.create_graph(dag_config)
-    run_context.dag_hash = dag_hash
-
-    run_context.pipeline_file = pipeline_file
-    run_context.dag = dag
-
-
-def set_pipeline_spec_from_python(run_context: context.Context, python_module: str):
-    # Call the SDK to get the dag
-    # Import the module and call the function to get the dag
-    module_file = python_module.rstrip(".py")
-    module, func = utils.get_module_and_attr_names(module_file)
-    sys.path.insert(0, os.getcwd())  # Need to add the current directory to path
-    imported_module = importlib.import_module(module)
-
-    run_context.from_sdk = True
-    dag = getattr(imported_module, func)().return_dag()
-
-    run_context.pipeline_file = python_module
-    run_context.dag = dag
-
-
 def execute_pipeline_yaml_spec(
     pipeline_file: str,
     configuration_file: str = "",
@@ -201,82 +21,29 @@ def execute_pipeline_yaml_spec(
     - Execution of the pipeline if its local executor
     - Rendering of the spec in the case of non local executor
     """
-    run_id = utils.generate_run_id(run_id=run_id)
 
-    run_context = prepare_configurations(
+    service_configurations = context.ServiceConfigurations(
         configuration_file=configuration_file,
-        run_id=run_id,
-        tag=tag,
-        parameters_file=parameters_file,
+        execution_context=context.ExecutionContext.PIPELINE,
     )
+    configurations = {
+        "pipeline_definition_file": pipeline_file,
+        "parameters_file": parameters_file,
+        "tag": tag,
+        "run_id": run_id,
+        "execution_mode": context.ExecutionMode.YAML,
+        "configuration_file": configuration_file,
+        **service_configurations.services,
+    }
 
-    assert isinstance(run_context.executor, BasePipelineExecutor)
+    logger.info("Resolved configurations:")
+    logger.info(json.dumps(configurations, indent=4))
 
-    set_pipeline_spec_from_yaml(run_context, pipeline_file)
-    executor = run_context.executor
+    run_context = context.PipelineContext.model_validate(configurations)
 
-    utils.set_runnable_environment_variables(
-        run_id=run_id, configuration_file=configuration_file, tag=tag
-    )
+    run_context.execute()
 
-    # Prepare for graph execution
-    executor._set_up_run_log(exists_ok=False)
-
-    console.print("Working with context:")
-    console.print(run_context)
-    console.rule(style="[dark orange]")
-
-    logger.info(f"Executing the graph: {run_context.dag}")
-    with Progress(
-        TextColumn(
-            "[progress.description]{task.description}", table_column=Column(ratio=2)
-        ),
-        BarColumn(table_column=Column(ratio=1), style="dark_orange"),
-        TimeElapsedColumn(table_column=Column(ratio=1)),
-        console=console,
-        expand=True,
-    ) as progress:
-        pipeline_execution_task = progress.add_task(
-            "[dark_orange] Starting execution .. ", total=1
-        )
-        try:
-            run_context.progress = progress
-            executor.execute_graph(dag=run_context.dag)  # type: ignore
-
-            if not executor._is_local:
-                # Non local executors only traverse the graph and do not execute the nodes
-                executor.send_return_code(stage="traversal")
-                return
-
-            run_log = run_context.run_log_store.get_run_log_by_id(
-                run_id=run_context.run_id, full=False
-            )
-
-            if run_log.status == defaults.SUCCESS:
-                progress.update(
-                    pipeline_execution_task,
-                    description="[green] Success",
-                    completed=True,
-                )
-            else:
-                progress.update(
-                    pipeline_execution_task, description="[red] Failed", completed=True
-                )
-        except Exception as e:  # noqa: E722
-            console.print(e, style=defaults.error_style)
-            progress.update(
-                pipeline_execution_task,
-                description="[red] Errored execution",
-                completed=True,
-            )
-            run_log = run_context.run_log_store.get_run_log_by_id(
-                run_id=run_context.run_id, full=False
-            )
-            run_log.status = defaults.FAIL
-            run_context.run_log_store.add_branch_log(run_log, run_context.run_id)
-            raise e
-
-    executor.send_return_code()
+    run_context.pipeline_executor.send_return_code()
 
 
 def execute_single_node(
@@ -297,43 +64,27 @@ def execute_single_node(
     - yaml
     - python
     """
-    from runnable import nodes
-
-    task_console.print(
-        f"Executing the single node: {step_name} with map variable: {map_variable}"
-    )
-
-    configuration_file = os.environ.get(
-        "RUNNABLE_CONFIGURATION_FILE", configuration_file
-    )
 
-    run_context = prepare_configurations(
+    service_configurations = context.ServiceConfigurations(
         configuration_file=configuration_file,
-        run_id=run_id,
-        tag=tag,
-        parameters_file=parameters_file,
-    )
-    assert isinstance(run_context.executor, BasePipelineExecutor)
-
-    if mode == "yaml":
-        # Load the yaml file
-        set_pipeline_spec_from_yaml(run_context, pipeline_file)
-    elif mode == "python":
-        # Call the SDK to get the dag
-        set_pipeline_spec_from_python(run_context, pipeline_file)
-
+        execution_context=context.ExecutionContext.PIPELINE,
+    )
+    configurations = {
+        "pipeline_definition_file": pipeline_file,
+        "parameters_file": parameters_file,
+        "tag": tag,
+        "run_id": run_id,
+        "execution_mode": mode,
+        "configuration_file": configuration_file,
+        **service_configurations.services,
+    }
+
+    logger.info("Resolved configurations:")
+    logger.info(json.dumps(configurations, indent=4))
+
+    run_context = context.PipelineContext.model_validate(configurations)
     assert run_context.dag
 
-    task_console.print("Working with context:")
-    task_console.print(run_context)
-    task_console.rule(style="[dark orange]")
-
-    executor = run_context.executor
-    utils.set_runnable_environment_variables(
-        run_id=run_id, configuration_file=configuration_file, tag=tag
-    )
-
-    # TODO: Is it useful to make it get from an environment variable
     map_variable_dict = utils.json_to_ordered_dict(map_variable)
 
     step_internal_name = nodes.BaseNode._get_internal_name_from_command_name(step_name)
@@ -342,153 +93,52 @@
     )
 
     logger.info("Executing the single node of : %s", node_to_execute)
-    ## This step is where we save output of the function/shell command
-    try:
-        executor.execute_node(node=node_to_execute, map_variable=map_variable_dict)
-    finally:
-        run_context.executor.add_task_log_to_catalog(
-            name=node_to_execute.internal_name, map_variable=map_variable_dict
-        )
-
-    executor.send_return_code()
 
-
-def execute_job_yaml_spec(
-    job_definition_file: str,
-    configuration_file: str = "",
-    tag: str = "",
-    run_id: str = "",
-    parameters_file: str = "",
-):
-    # A job and task are internally the same.
-    run_id = utils.generate_run_id(run_id=run_id)
-
-    run_context = prepare_configurations(
-        configuration_file=configuration_file,
-        run_id=run_id,
-        tag=tag,
-        parameters_file=parameters_file,
-        is_job=True,
-    )
-
-    assert isinstance(run_context.executor, BaseJobExecutor)
-
-    executor = run_context.executor
-    utils.set_runnable_environment_variables(
-        run_id=run_id, configuration_file=configuration_file, tag=tag
+    run_context.pipeline_executor.execute_node(
+        node=node_to_execute, map_variable=map_variable_dict
     )
 
-    run_context.job_definition_file = job_definition_file
-
-    job_config = utils.load_yaml(job_definition_file)
-    logger.info(
-        "Executing the job from the user."
-        f"job definition: {job_definition_file}, config: {job_config}"
-    )
-    assert job_config.get("type"), "Job type is not provided"
-
-    console.print("Working with context:")
-    console.print(run_context)
-    console.rule(style="[dark orange]")
-
-    # A hack where we create a task node and get our job/catalog settings
-    catalog_config: list[str] = job_config.pop("catalog", {})
-
-    # rename the type to command_type of task
-    job_config["command_type"] = job_config.pop("type")
-    job = tasks.create_task(job_config)
-
-    logger.info(
-        "Executing the job from the user. We are still in the caller's compute environment"
-    )
-
-    assert isinstance(executor, BaseJobExecutor)
-    try:
-        executor.submit_job(job, catalog_settings=catalog_config)
-    finally:
-        run_context.executor.add_task_log_to_catalog("job")
-
-    executor.send_return_code()
-
-
-def set_job_spec_from_yaml(run_context: context.Context, job_definition_file: str):
-    """
-    Reads the pipeline file from a YAML file and sets the pipeline spec in the run context
-    """
-    job_config = utils.load_yaml(job_definition_file)
-    logger.info("The input job definition file:")
-    logger.info(json.dumps(job_config, indent=4))
-
-    catalog_config: list[str] = job_config.pop("catalog", {})
-
-    job_config["command_type"] = job_config.pop("type")
-
-    run_context.job_definition_file = job_definition_file
-    run_context.job = tasks.create_task(job_config)
-    run_context.job_catalog_settings = catalog_config
-
-
-def set_job_spec_from_python(run_context: context.Context, python_module: str):
-    # Import the module and call the function to get the task
-    module_file = python_module.rstrip(".py")
-    module, func = utils.get_module_and_attr_names(module_file)
-    sys.path.insert(0, os.getcwd())  # Need to add the current directory to path
-    imported_module = importlib.import_module(module)
-
-    run_context.from_sdk = True
-    task = getattr(imported_module, func)().get_task()
-    catalog_settings = getattr(imported_module, func)().return_catalog_settings()
-
-    run_context.job_definition_file = python_module
-    run_context.job = task
-    run_context.job_catalog_settings = catalog_settings
+    run_context.pipeline_executor.send_return_code()
 
 
 def execute_job_non_local(
     job_definition_file: str,
     configuration_file: str = "",
-    mode: str = "yaml",
     tag: str = "",
     run_id: str = "",
     parameters_file: str = "",
 ):
-    run_id = utils.generate_run_id(run_id=run_id)
-
-    run_context = prepare_configurations(
+    service_configurations = context.ServiceConfigurations(
         configuration_file=configuration_file,
-        run_id=run_id,
-        tag=tag,
-        parameters_file=parameters_file,
-        is_job=True,
+        execution_context=context.ExecutionContext.JOB,
     )
+    configurations = {
+        "job_definition_file": job_definition_file,
+        "parameters_file": parameters_file,
+        "tag": tag,
+        "run_id": run_id,
+        "configuration_file": configuration_file,
+        **service_configurations.services,
+    }
 
-    assert isinstance(run_context.executor, BaseJobExecutor)
-
-    if mode == "yaml":
-        # Load the yaml file
-        set_job_spec_from_yaml(run_context, job_definition_file)
-    elif mode == "python":
-        # Call the SDK to get the task
-        set_job_spec_from_python(run_context, job_definition_file)
+    logger.info("Resolved configurations:")
+    logger.info(json.dumps(configurations, indent=4))
 
+    run_context = context.JobContext.model_validate(configurations)
     assert run_context.job
 
-    console.print("Working with context:")
-    console.print(run_context)
-    console.rule(style="[dark orange]")
-
-    logger.info(
-        "Executing the job from the user. We are still in the caller's compute environment"
-    )
+    logger.info("Executing the job in non-local mode")
+    logger.info("Job to execute: %s", run_context.job)
 
     try:
-        run_context.executor.execute_job(
-            run_context.job, catalog_settings=run_context.job_catalog_settings
+        run_context.job_executor.execute_job(
+            run_context.job,
+            catalog_settings=run_context.catalog_settings,
         )
     finally:
-        run_context.executor.add_task_log_to_catalog("job")
+        run_context.job_executor.add_task_log_to_catalog("job")
 
-    run_context.executor.send_return_code()
+    run_context.job_executor.send_return_code()
 
 
 def fan(
@@ -517,51 +167,43 @@ def fan(
         parameters_file (str): The parameters being sent in to the application
 
     """
-    from runnable import nodes
-
-    configuration_file = os.environ.get(
-        "RUNNABLE_CONFIGURATION_FILE", configuration_file
-    )
-
-    run_context = prepare_configurations(
+    service_configurations = context.ServiceConfigurations(
        configuration_file=configuration_file,
-        run_id=run_id,
-        tag=tag,
-        parameters_file=parameters_file,
-    )
-
-    assert isinstance(run_context.executor, BasePipelineExecutor)
-
-    if mode == "yaml":
-        # Load the yaml file
-        set_pipeline_spec_from_yaml(run_context, pipeline_file)
-    elif mode == "python":
-        # Call the SDK to get the dag
-        set_pipeline_spec_from_python(run_context, pipeline_file)
-
-    console.print("Working with context:")
-    console.print(run_context)
-    console.rule(style="[dark orange]")
-
-    executor = run_context.executor
-    utils.set_runnable_environment_variables(
-        run_id=run_id, configuration_file=configuration_file, tag=tag
-    )
+        execution_context=context.ExecutionContext.PIPELINE,
+    )
+    configurations = {
+        "pipeline_definition_file": pipeline_file,
+        "parameters_file": parameters_file,
+        "tag": tag,
+        "run_id": run_id,
+        "execution_mode": mode,
+        "configuration_file": configuration_file,
+        **service_configurations.services,
+    }
+
+    logger.info("Resolved configurations:")
+    logger.info(json.dumps(configurations, indent=4))
+
+    run_context = context.PipelineContext.model_validate(configurations)
+    assert run_context.dag
 
     step_internal_name = nodes.BaseNode._get_internal_name_from_command_name(step_name)
     node_to_execute, _ = graph.search_node_by_internal_name(
-        run_context.dag,  # type: ignore
-        step_internal_name,
+        run_context.dag, step_internal_name
     )
 
     map_variable_dict = utils.json_to_ordered_dict(map_variable)
 
     if in_or_out == "in":
         logger.info("Fanning in for : %s", node_to_execute)
-        executor.fan_in(node=node_to_execute, map_variable=map_variable_dict)
+        run_context.pipeline_executor.fan_in(
+            node=node_to_execute, map_variable=map_variable_dict
+        )
     elif in_or_out == "out":
         logger.info("Fanning out for : %s", node_to_execute)
-        executor.fan_out(node=node_to_execute, map_variable=map_variable_dict)
+        run_context.pipeline_executor.fan_out(
+            node=node_to_execute, map_variable=map_variable_dict
+        )
     else:
         raise ValueError(f"Invalid mode {mode}")
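For completeness, the job-side entrypoint follows the same shape after this change. A sketch assembled from the added lines of execute_job_non_local above; names are taken verbatim from the diff, file paths are placeholder examples, and this is not an authoritative reference for runnable/context.py:

    import runnable.context as context

    service_configurations = context.ServiceConfigurations(
        configuration_file="config.yaml",  # placeholder path
        execution_context=context.ExecutionContext.JOB,
    )
    configurations = {
        "job_definition_file": "job.yaml",  # placeholder path
        "parameters_file": "",
        "tag": "",
        "run_id": "",
        "configuration_file": "config.yaml",
        **service_configurations.services,
    }

    run_context = context.JobContext.model_validate(configurations)

    try:
        # The executor now hangs off the context as job_executor rather than executor.
        run_context.job_executor.execute_job(
            run_context.job, catalog_settings=run_context.catalog_settings
        )
    finally:
        run_context.job_executor.add_task_log_to_catalog("job")

    run_context.job_executor.send_return_code()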