griptape-nodes 0.64.10__py3-none-any.whl → 0.65.0__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (55)
  1. griptape_nodes/app/app.py +25 -5
  2. griptape_nodes/cli/commands/init.py +65 -54
  3. griptape_nodes/cli/commands/libraries.py +92 -85
  4. griptape_nodes/cli/commands/self.py +121 -0
  5. griptape_nodes/common/node_executor.py +2142 -101
  6. griptape_nodes/exe_types/base_iterative_nodes.py +1004 -0
  7. griptape_nodes/exe_types/connections.py +114 -19
  8. griptape_nodes/exe_types/core_types.py +225 -7
  9. griptape_nodes/exe_types/flow.py +3 -3
  10. griptape_nodes/exe_types/node_types.py +681 -225
  11. griptape_nodes/exe_types/param_components/README.md +414 -0
  12. griptape_nodes/exe_types/param_components/api_key_provider_parameter.py +200 -0
  13. griptape_nodes/exe_types/param_components/huggingface/huggingface_model_parameter.py +2 -0
  14. griptape_nodes/exe_types/param_components/huggingface/huggingface_repo_file_parameter.py +79 -5
  15. griptape_nodes/exe_types/param_types/parameter_button.py +443 -0
  16. griptape_nodes/machines/control_flow.py +77 -38
  17. griptape_nodes/machines/dag_builder.py +148 -70
  18. griptape_nodes/machines/parallel_resolution.py +61 -35
  19. griptape_nodes/machines/sequential_resolution.py +11 -113
  20. griptape_nodes/retained_mode/events/app_events.py +1 -0
  21. griptape_nodes/retained_mode/events/base_events.py +16 -13
  22. griptape_nodes/retained_mode/events/connection_events.py +3 -0
  23. griptape_nodes/retained_mode/events/execution_events.py +35 -0
  24. griptape_nodes/retained_mode/events/flow_events.py +15 -2
  25. griptape_nodes/retained_mode/events/library_events.py +347 -0
  26. griptape_nodes/retained_mode/events/node_events.py +48 -0
  27. griptape_nodes/retained_mode/events/os_events.py +86 -3
  28. griptape_nodes/retained_mode/events/project_events.py +15 -1
  29. griptape_nodes/retained_mode/events/workflow_events.py +48 -1
  30. griptape_nodes/retained_mode/griptape_nodes.py +6 -2
  31. griptape_nodes/retained_mode/managers/config_manager.py +10 -8
  32. griptape_nodes/retained_mode/managers/event_manager.py +168 -0
  33. griptape_nodes/retained_mode/managers/fitness_problems/libraries/__init__.py +2 -0
  34. griptape_nodes/retained_mode/managers/fitness_problems/libraries/old_xdg_location_warning_problem.py +43 -0
  35. griptape_nodes/retained_mode/managers/flow_manager.py +664 -123
  36. griptape_nodes/retained_mode/managers/library_manager.py +1143 -139
  37. griptape_nodes/retained_mode/managers/model_manager.py +2 -3
  38. griptape_nodes/retained_mode/managers/node_manager.py +148 -25
  39. griptape_nodes/retained_mode/managers/object_manager.py +3 -1
  40. griptape_nodes/retained_mode/managers/operation_manager.py +3 -1
  41. griptape_nodes/retained_mode/managers/os_manager.py +1158 -122
  42. griptape_nodes/retained_mode/managers/secrets_manager.py +2 -3
  43. griptape_nodes/retained_mode/managers/settings.py +21 -1
  44. griptape_nodes/retained_mode/managers/sync_manager.py +2 -3
  45. griptape_nodes/retained_mode/managers/workflow_manager.py +358 -104
  46. griptape_nodes/retained_mode/retained_mode.py +3 -3
  47. griptape_nodes/traits/button.py +44 -2
  48. griptape_nodes/traits/file_system_picker.py +2 -2
  49. griptape_nodes/utils/file_utils.py +101 -0
  50. griptape_nodes/utils/git_utils.py +1226 -0
  51. griptape_nodes/utils/library_utils.py +122 -0
  52. {griptape_nodes-0.64.10.dist-info → griptape_nodes-0.65.0.dist-info}/METADATA +2 -1
  53. {griptape_nodes-0.64.10.dist-info → griptape_nodes-0.65.0.dist-info}/RECORD +55 -47
  54. {griptape_nodes-0.64.10.dist-info → griptape_nodes-0.65.0.dist-info}/WHEEL +1 -1
  55. {griptape_nodes-0.64.10.dist-info → griptape_nodes-0.65.0.dist-info}/entry_points.txt +0 -0
griptape_nodes/machines/parallel_resolution.py

@@ -5,11 +5,10 @@ import logging
 from enum import StrEnum
 from typing import TYPE_CHECKING

+from griptape_nodes.exe_types.base_iterative_nodes import BaseIterativeEndNode, BaseIterativeStartNode
 from griptape_nodes.exe_types.connections import Direction
 from griptape_nodes.exe_types.core_types import Parameter, ParameterTypeBuiltin
 from griptape_nodes.exe_types.node_types import (
-    CONTROL_INPUT_PARAMETER,
-    LOCAL_EXECUTION,
     BaseNode,
     NodeResolutionState,
 )
@@ -117,7 +116,7 @@ class ParallelResolutionContext:

 class ExecuteDagState(State):
     @staticmethod
-    async def handle_done_nodes(context: ParallelResolutionContext, done_node: DagNode, network_name: str) -> None:
+    async def handle_done_nodes(context: ParallelResolutionContext, done_node: DagNode, network_name: str) -> None:  # noqa: C901
         current_node = done_node.node_reference

         # Check if node was already resolved (shouldn't happen)
@@ -128,12 +127,23 @@ class ExecuteDagState(State):
                 network_name,
             )
             return
+
+        # Special handling for BaseIterativeStartNode
+        # Remove it from the network so the end node can process control flow
+        if isinstance(current_node, BaseIterativeStartNode):
+            current_node.state = NodeResolutionState.RESOLVED
+
+            # Remove start node from ALL networks where it appears
+            for network in list(context.networks.values()):
+                if current_node.name in network.nodes():
+                    network.remove_node(current_node.name)
+
+            return
+
         # Publish all parameter updates.
         current_node.state = NodeResolutionState.RESOLVED
         # Track this as the last resolved node
         context.last_resolved_node = current_node
-        # Track this as the last resolved node
-        context.last_resolved_node = current_node
         # Serialization can be slow so only do it if the user wants debug details.
         if logger.level <= logging.DEBUG:
             logger.debug(
@@ -184,22 +194,13 @@ class ExecuteDagState(State):
            )
        # Now the final thing to do, is to take their directed graph and update it.
        ExecuteDagState.get_next_control_graph(context, current_node, network_name)
-
-    @staticmethod
-    def get_next_control_output_for_non_local_execution(node: BaseNode) -> Parameter | None:
-        for param_name, value in node.parameter_output_values.items():
-            parameter = node.get_parameter_by_name(param_name)
-            if (
-                parameter is not None
-                and parameter.type == ParameterTypeBuiltin.CONTROL_TYPE.value
-                and value == CONTROL_INPUT_PARAMETER
-            ):
-                # This is the parameter
-                logger.debug(
-                    "Parallel Resolution: Found control output parameter '%s' for non-local execution", param_name
-                )
-                return parameter
-        return None
+        graph = context.networks[network_name]
+        if len(graph.nodes()) == 0 and context.dag_builder is not None:
+            # remove from dependencies. This is so we can potentially queue the data node.
+            data_start_nodes = context.dag_builder.remove_graph_from_dependencies()
+            for data_start_node_name in data_start_nodes:
+                data_start_node = GriptapeNodes.NodeManager().get_node_by_name(data_start_node_name)
+                context.dag_builder.add_node_with_dependencies(data_start_node, network_name)

     @staticmethod
     def get_next_control_graph(context: ParallelResolutionContext, node: BaseNode, network_name: str) -> None:
@@ -209,15 +210,7 @@ class ExecuteDagState(State):
         # Early returns for various conditions
         if ExecuteDagState._should_skip_control_flow(context, node, network_name, flow_manager):
             return
-        from griptape_nodes.exe_types.node_types import NodeGroupNode
-
-        if (
-            isinstance(node, NodeGroupNode)
-            and node.get_parameter_value(node.execution_environment.name) != LOCAL_EXECUTION
-        ):
-            next_output = ExecuteDagState.get_next_control_output_for_non_local_execution(node)
-        else:
-            next_output = node.get_next_control_output()
+        next_output = node.get_next_control_output()
         if next_output is not None:
             ExecuteDagState._process_next_control_node(context, node, next_output, network_name, flow_manager)

@@ -252,7 +245,7 @@ class ExecuteDagState(State):
         flow_manager: FlowManager,
     ) -> None:
         """Process the next control node in the flow."""
-        node_connection = flow_manager.get_connections().get_connected_node(node, next_output)
+        node_connection = flow_manager.get_connections().get_connected_node(node, next_output, include_internal=False)
         if node_connection is not None:
             next_node, next_parameter = node_connection
             # Set entry control parameter
@@ -374,7 +367,8 @@ class ExecuteDagState(State):
             # Reinitialize leaf nodes since maybe we changed things up.
             # We removed nodes from the network. There may be new leaf nodes.
             # Add all leaf nodes from all networks (using set union to avoid duplicates)
-            leaf_nodes.update([n for n in network.nodes() if network.in_degree(n) == 0])
+            network_leaf_nodes = [n for n in network.nodes() if network.in_degree(n) == 0]
+            leaf_nodes.update(network_leaf_nodes)
         canceled_nodes = set()
         queued_nodes = set()
         for node in leaf_nodes:
@@ -434,7 +428,9 @@ class ExecuteDagState(State):
         for node in context.node_to_reference.values():
             # Only queue nodes that are waiting - preserve state of already processed nodes.
             if node.node_state == NodeState.WAITING:
-                node.node_state = NodeState.QUEUED
+                # Use proper queueing method that checks can_queue_control_node()
+                # This prevents premature queueing of nodes with multiple control connections
+                ExecuteDagState._try_queue_waiting_node(context, node.node_reference.name)

         context.workflow_state = WorkflowState.NO_ERROR

@@ -443,7 +439,7 @@ class ExecuteDagState(State):
         return None

     @staticmethod
-    async def on_update(context: ParallelResolutionContext) -> type[State] | None:  # noqa: C901, PLR0911
+    async def on_update(context: ParallelResolutionContext) -> type[State] | None:  # noqa: C901, PLR0911, PLR0912, PLR0915
         # Check if execution is paused
         if context.paused:
             return None
@@ -466,7 +462,9 @@ class ExecuteDagState(State):
         for node in queued_nodes:
             # Process all queued nodes - the async semaphore will handle concurrency limits
             node_reference = context.node_to_reference[node]
-
+            # Skip BaseIterativeEndNode as it's handled by loop execution flow
+            if isinstance(node_reference.node_reference, BaseIterativeEndNode):
+                continue
             # Collect parameter values from upstream nodes before executing
             try:
                 await ExecuteDagState.collect_values_from_upstream_nodes(node_reference)
@@ -487,6 +485,34 @@ class ExecuteDagState(State):
                 logger.error(msg)
                 return ErrorState

+            # We've set up the node for success completely. Now we check and handle accordingly if it's a for-each-start node
+            # if False:
+            if isinstance(node_reference.node_reference, BaseIterativeStartNode):
+                # Call handle_done_state to clear it from everything
+                end_loop_node = node_reference.node_reference.end_node
+                # Set start node to DONE! even if it isn't truly done lolllll.
+                node_reference.node_state = NodeState.DONE
+                if end_loop_node is None:
+                    msg = (
+                        f"Cannot have a Start Loop Node without an End Loop Node: {node_reference.node_reference.name}"
+                    )
+                    logger.error(msg)
+                    return ErrorState
+                # We're going to skip straight to the end node here instead.
+                # Set end node to node reference
+                if context.dag_builder is not None:
+                    # Check if BaseIterativeEndNode is already in DAG (from pre-building phase)
+                    if end_loop_node.name in context.dag_builder.node_to_reference:
+                        # BaseIterativeEndNode already exists in DAG, just get reference and queue it
+                        end_node_reference = context.dag_builder.node_to_reference[end_loop_node.name]
+                        end_node_reference.node_state = NodeState.QUEUED
+                        node_reference = end_node_reference
+                    else:
+                        # BaseIterativeEndNode not in DAG yet (backwards compatibility), add it
+                        end_node_reference = context.dag_builder.add_node(end_loop_node)
+                        end_node_reference.node_state = NodeState.QUEUED
+                        node_reference = end_node_reference
+
         def on_task_done(task: asyncio.Task) -> None:
             if task in context.task_to_node:
                 node = context.task_to_node[task]
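The leaf bookkeeping above is the heart of the parallel scheduler: a node whose in-degree is zero has no unresolved upstream dependencies and can be queued. A minimal sketch of that invariant, assuming the networks behave like a networkx.DiGraph (the exact graph type in griptape-nodes may differ):

    import networkx as nx

    # Hypothetical dependency network for illustration: a -> c, b -> c.
    network = nx.DiGraph()
    network.add_edges_from([("a", "c"), ("b", "c")])

    # Mirrors the patched update: gather nodes with no unresolved upstream
    # dependencies (in-degree zero), de-duplicating across networks via a set.
    leaf_nodes: set[str] = set()
    network_leaf_nodes = [n for n in network.nodes() if network.in_degree(n) == 0]
    leaf_nodes.update(network_leaf_nodes)
    print(leaf_nodes)  # {'a', 'b'}

    # Removing finished nodes (as handle_done_nodes now does for iterative
    # start nodes) can expose new leaves on the next pass.
    network.remove_node("a")
    network.remove_node("b")
    print([n for n in network.nodes() if network.in_degree(n) == 0])  # ['c']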
griptape_nodes/machines/sequential_resolution.py

@@ -7,7 +7,7 @@ from typing import Any

 from griptape_nodes.exe_types.connections import Direction
 from griptape_nodes.exe_types.core_types import ParameterTypeBuiltin
-from griptape_nodes.exe_types.node_types import LOCAL_EXECUTION, BaseNode, NodeGroupNode, NodeResolutionState
+from griptape_nodes.exe_types.node_types import BaseNode, NodeResolutionState
 from griptape_nodes.exe_types.type_validator import TypeValidator
 from griptape_nodes.machines.fsm import FSM, State
 from griptape_nodes.node_library.library_registry import LibraryRegistry
@@ -79,42 +79,6 @@ class InitializeSpotlightState(State):
            return CompleteState
        current_node = context.current_node

-        # If this node has a non-LOCAL parent group, redirect to parent instead
-        # Handle nested groups recursively - keep redirecting until we find the top-level parent
-        from griptape_nodes.exe_types.node_types import LOCAL_EXECUTION, NodeGroupNode
-
-        while current_node.parent_group is not None and isinstance(current_node.parent_group, NodeGroupNode):
-            execution_env = current_node.parent_group.get_parameter_value(
-                current_node.parent_group.execution_environment.name
-            )
-            if execution_env != LOCAL_EXECUTION:
-                # Replace current node with parent group
-                parent_group = current_node.parent_group
-                logger.info(
-                    "Sequential Resolution: Redirecting from child node '%s' to parent node group '%s' at InitializeSpotlight",
-                    current_node.name,
-                    parent_group.name,
-                )
-                # Update the focus stack to use parent instead
-                context.focus_stack[-1] = Focus(node=parent_group)
-                current_node = parent_group
-                # Continue loop to check if this parent also has a parent
-            else:
-                # Parent is LOCAL_EXECUTION, stop redirecting
-                break
-
-        # For NodeGroups, check external connections for unresolved dependencies
-        if isinstance(current_node, NodeGroupNode):
-            unresolved_dependency = EvaluateParameterState._check_node_group_external_dependencies(current_node)
-            if unresolved_dependency:
-                logger.info(
-                    "Sequential Resolution: NodeGroup '%s' has unresolved external dependency on '%s', queuing dependency first",
-                    current_node.name,
-                    unresolved_dependency.name,
-                )
-                context.focus_stack.append(Focus(node=unresolved_dependency))
-                return InitializeSpotlightState
-
        if current_node.state == NodeResolutionState.UNRESOLVED:
            # Mark all future nodes unresolved.
            # TODO: https://github.com/griptape-ai/griptape-nodes/issues/862
@@ -161,7 +125,7 @@ class EvaluateParameterState(State):
     @staticmethod
     def _get_next_node(current_node: BaseNode, current_parameter: Any, connections: Any) -> BaseNode | None:
         """Get the next node connected to the current parameter."""
-        next_node = connections.get_connected_node(current_node, current_parameter)
+        next_node = connections.get_connected_node(current_node, current_parameter, include_internal=False)
         if next_node:
             next_node, _ = next_node
         return next_node
@@ -173,73 +137,6 @@ class EvaluateParameterState(State):
            msg = f"Cycle detected between node '{current_node.name}' and '{next_node.name}'."
            raise RuntimeError(msg)

-    @staticmethod
-    def _handle_parent_already_resolved(current_node: BaseNode) -> type[State]:
-        """Handle case where parent node group is already resolved."""
-        if current_node.advance_parameter():
-            return InitializeSpotlightState
-        return ExecuteNodeState
-
-    @staticmethod
-    def _check_node_group_external_dependencies(node_group: BaseNode) -> BaseNode | None:
-        """Check if NodeGroup has unresolved external incoming connections.
-
-        Returns the first unresolved source node (or its parent if applicable) if found, None otherwise.
-        """
-        if not isinstance(node_group, NodeGroupNode):
-            return None
-
-        for conn in node_group.stored_connections.external_connections.incoming_connections:
-            source_node = conn.source_node
-            if source_node.state == NodeResolutionState.UNRESOLVED:
-                # Check if source has a parent group to use instead
-                if source_node.parent_group is not None and isinstance(source_node.parent_group, NodeGroupNode):
-                    execution_env = source_node.parent_group.get_parameter_value(
-                        source_node.parent_group.execution_environment.name
-                    )
-                    if execution_env != LOCAL_EXECUTION:
-                        return source_node.parent_group
-                return source_node
-        return None
-
-    @staticmethod
-    def _determine_node_to_queue(
-        next_node: BaseNode, current_node: BaseNode, focus_stack_names: set[str]
-    ) -> BaseNode | None:
-        """Determine which node to queue - the next node or its parent group.
-
-        Returns None if the parent node group is already resolved.
-        """
-        if next_node.parent_group is None or not isinstance(next_node.parent_group, NodeGroupNode):
-            return next_node
-
-        parent_group = next_node.parent_group
-        execution_env = parent_group.get_parameter_value(parent_group.execution_environment.name)
-        if execution_env == LOCAL_EXECUTION:
-            return next_node
-
-        if parent_group.state == NodeResolutionState.RESOLVED:
-            logger.info(
-                "Sequential Resolution: Parent node group '%s' is already resolved, skipping child node '%s' (execution environment: %s)",
-                parent_group.name,
-                next_node.name,
-                execution_env,
-            )
-            return None
-
-        if parent_group.name in focus_stack_names:
-            msg = f"Cycle detected: parent node group '{parent_group.name}' is already in focus stack while processing dependency for '{current_node.name}'."
-            raise RuntimeError(msg)
-
-        logger.info(
-            "Sequential Resolution: Queuing parent node group '%s' instead of child node '%s' (execution environment: %s) - child is a dependency of '%s'",
-            parent_group.name,
-            next_node.name,
-            execution_env,
-            current_node.name,
-        )
-        return parent_group
-
     @staticmethod
     async def on_update(context: ResolutionContext) -> type[State] | None:
         from griptape_nodes.retained_mode.griptape_nodes import GriptapeNodes
@@ -258,11 +155,7 @@ class EvaluateParameterState(State):
            focus_stack_names = {focus.node.name for focus in context.focus_stack}
            EvaluateParameterState._check_for_cycle(next_node, current_node, focus_stack_names)

-            node_to_queue = EvaluateParameterState._determine_node_to_queue(next_node, current_node, focus_stack_names)
-            if node_to_queue is None:
-                return EvaluateParameterState._handle_parent_already_resolved(current_node)
-
-            context.focus_stack.append(Focus(node=node_to_queue))
+            context.focus_stack.append(Focus(node=next_node))
            return InitializeSpotlightState

        if current_node.advance_parameter():
@@ -290,7 +183,9 @@ class ExecuteNodeState(State):

        for parameter in current_node.parameters:
            # Get the connected upstream node for this parameter
-            upstream_connection = connections.get_connected_node(current_node, parameter, direction=Direction.UPSTREAM)
+            upstream_connection = connections.get_connected_node(
+                current_node, parameter, direction=Direction.UPSTREAM, include_internal=False
+            )
            if upstream_connection:
                upstream_node, upstream_parameter = upstream_connection

@@ -413,7 +308,7 @@ class ExecuteNodeState(State):
                return CompleteState
            except Exception as e:
                logger.exception("Error processing node '%s", current_node.name)
-                msg = f"Canceling flow run. Node '{current_node.name}' encountered a problem: {e}"
+                msg = f"Node '{current_node.name}' encountered a problem: {e}"
                # Mark the node as unresolved, broadcasting to everyone.
                current_node.make_node_unresolved(
                    current_states_to_trigger_change_event=set(
@@ -423,7 +318,10 @@ class ExecuteNodeState(State):

                from griptape_nodes.retained_mode.griptape_nodes import GriptapeNodes

-                await GriptapeNodes.FlowManager().cancel_flow_run()
+                # Do NOT call cancel_flow_run() here - that cancels the global main flow!
+                # When executing in a subflow (e.g., for-each loop iteration), we want to
+                # terminate only the current subflow, not the entire main workflow.
+                # The exception will propagate up to the caller, which will handle it appropriately.

                GriptapeNodes.EventManager().put_event(
                    ExecutionGriptapeNodeEvent(
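The removed cancel_flow_run() call narrows the blast radius of a node failure: the exception now propagates to whatever started the subflow (e.g., a for-each iteration) instead of cancelling the main flow. A sketch of that error-scoping pattern in plain asyncio; the names here are illustrative, not the griptape-nodes API:

    import asyncio


    async def run_iteration(i: int) -> str:
        # Stand-in for resolving one subflow iteration.
        if i == 2:
            msg = f"node failed in iteration {i}"
            raise RuntimeError(msg)
        return f"iteration {i} ok"


    async def main() -> None:
        # return_exceptions=True lets a failing iteration surface as a value
        # while its siblings (and the caller, the "main flow") keep running.
        results = await asyncio.gather(*(run_iteration(i) for i in range(4)), return_exceptions=True)
        for result in results:
            print(result)


    asyncio.run(main())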
griptape_nodes/retained_mode/events/app_events.py

@@ -109,6 +109,7 @@ class AppGetSessionResultFailure(WorkflowNotAlteredMixin, ResultPayloadFailure):
 class AppInitializationComplete(AppPayload):
     """Application initialization completed successfully. All subsystems ready."""

+    libraries_to_download: list[str] = field(default_factory=list)
     libraries_to_register: list[str] = field(default_factory=list)
     workflows_to_register: list[str] = field(default_factory=list)
     models_to_download: list[str] = field(default_factory=list)
griptape_nodes/retained_mode/events/base_events.py

@@ -35,7 +35,6 @@ class ResultDetails:
         *result_details: ResultDetail,
         message: str | None = None,
         level: int | None = None,
-        logger: logging.Logger | str | None = "griptape_nodes",
     ):
         """Initialize with ResultDetail objects or create a single one from message/level.

@@ -43,7 +42,6 @@ class ResultDetails:
             *result_details: Variable number of ResultDetail objects
             message: If provided, creates a single ResultDetail with this message
             level: Logging level for the single ResultDetail (required if message is provided)
-            logger: Logger to use for auto-logging. String for logger name, Logger object, or None to skip
         """
         # Handle single message/level convenience
         if message is not None:
@@ -60,18 +58,13 @@ class ResultDetails:
                raise ValueError(err_msg)
        self.result_details = list(result_details)

-        # Auto-log if logger is provided
-        if logger is not None:
-            try:
-                if isinstance(logger, str):
-                    logger = logging.getLogger(logger)
+    def __str__(self) -> str:
+        """String representation of ResultDetails.

-                for detail in self.result_details:
-                    # Handle both string and int levels
-                    logger.log(detail.level, detail.message)
-            except Exception:  # noqa: S110
-                # If logging fails for any reason, don't let it break the ResultDetails creation
-                pass
+        Returns:
+            str: Concatenated messages of all ResultDetail objects
+        """
+        return "\n".join(detail.message for detail in self.result_details)


 # The Payload class is a marker interface
@@ -82,7 +75,17 @@ class Payload(ABC):  # noqa: B024
 # Request payload base class with optional request ID
 @dataclass(kw_only=True)
 class RequestPayload(Payload, ABC):
+    """Base class for all request payloads.
+
+    Args:
+        request_id: Optional request ID for tracking.
+        failure_log_level: If set, override the log level for failure results.
+            Use logging.DEBUG (10) or logging.INFO (20) to suppress error toasts.
+            Default: None (use handler's default, typically ERROR).
+    """
+
     request_id: int | None = None
+    failure_log_level: int | None = None


 # Result payload base class with abstract succeeded/failed methods, and indicator whether the current workflow was altered.
griptape_nodes/retained_mode/events/connection_events.py

@@ -24,6 +24,7 @@ class CreateConnectionRequest(RequestPayload):
         source_node_name: Name of the source node (None for current context)
         target_node_name: Name of the target node (None for current context)
         initial_setup: Skip setup work when loading from file
+        is_node_group_internal: Mark this connection as internal to a node group (for DAG building)

     Results: CreateConnectionResultSuccess | CreateConnectionResultFailure (incompatible types, invalid nodes/parameters)
     """
@@ -35,6 +36,8 @@ class CreateConnectionRequest(RequestPayload):
     target_node_name: str | None = None
     # initial_setup prevents unnecessary work when we are loading a workflow from a file.
     initial_setup: bool = False
+    # Mark this connection as internal to a node group proxy parameter
+    is_node_group_internal: bool = False


 @dataclass
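This flag pairs with the include_internal=False lookups threaded through the resolution machines above: connections internal to a node group stay in the graph but are skipped when walking the user-visible flow. A stand-in sketch of that filtering idea; Connection and connected_targets are illustrative, not the actual Connections API:

    from dataclasses import dataclass


    @dataclass
    class Connection:
        source: str
        target: str
        is_node_group_internal: bool = False  # mirrors the new request field


    def connected_targets(connections: list[Connection], source: str, *, include_internal: bool = True) -> list[str]:
        """Targets reachable from source, optionally skipping group-internal links."""
        return [
            c.target
            for c in connections
            if c.source == source and (include_internal or not c.is_node_group_internal)
        ]


    conns = [
        Connection("group_proxy", "inner_node", is_node_group_internal=True),
        Connection("group_proxy", "downstream_node"),
    ]
    print(connected_targets(conns, "group_proxy"))  # ['inner_node', 'downstream_node']
    print(connected_targets(conns, "group_proxy", include_internal=False))  # ['downstream_node']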
griptape_nodes/retained_mode/events/execution_events.py

@@ -95,6 +95,41 @@ class StartFlowResultFailure(ResultPayloadFailure):
     validation_exceptions: list[Exception]


+@dataclass
+@PayloadRegistry.register
+class StartLocalSubflowRequest(RequestPayload):
+    """Start an independent local subflow that runs concurrently with the main flow.
+
+    Use when: Running loop iterations or other independent subflows that need their own
+    execution context and should not interfere with the main flow's state.
+
+    This creates a separate ControlFlowMachine with its own DagBuilder to ensure full isolation.
+
+    Args:
+        flow_name: Name of the flow to start as a subflow
+        start_node: The node to start execution from (None to auto-detect start node)
+        pickle_control_flow_result: Whether to pickle the result for subprocess retrieval
+
+    Results: StartLocalSubflowResultSuccess | StartLocalSubflowResultFailure
+    """
+
+    flow_name: str
+    start_node: str | None = None
+    pickle_control_flow_result: bool = False
+
+
+@dataclass
+@PayloadRegistry.register
+class StartLocalSubflowResultSuccess(WorkflowAlteredMixin, ResultPayloadSuccess):
+    """Local subflow started successfully and is running independently."""
+
+
+@dataclass
+@PayloadRegistry.register
+class StartLocalSubflowResultFailure(ResultPayloadFailure):
+    """Local subflow failed to start. Check result_details for error information."""
+
+
 @dataclass
 @PayloadRegistry.register
 class StartFlowFromNodeRequest(RequestPayload):
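A hedged sketch of issuing the new request. It assumes the usual retained-mode dispatch through GriptapeNodes.handle_request(); the flow and node names are placeholders:

    from griptape_nodes.retained_mode.events.execution_events import StartLocalSubflowRequest
    from griptape_nodes.retained_mode.griptape_nodes import GriptapeNodes

    # Run one loop iteration as an isolated subflow with its own
    # ControlFlowMachine and DagBuilder.
    request = StartLocalSubflowRequest(
        flow_name="iteration_flow",        # placeholder flow name
        start_node="ForEachStart_1",       # None would auto-detect the start node
        pickle_control_flow_result=False,  # True only when a subprocess retrieves the result
    )

    result = GriptapeNodes.handle_request(request)
    if result.failed():
        print(f"Subflow did not start: {result.result_details}")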
griptape_nodes/retained_mode/events/flow_events.py

@@ -238,6 +238,7 @@ class SerializedFlowCommands:
     sub_flows_commands: list[SerializedFlowCommands]
     node_dependencies: NodeDependencies
     node_types_used: set[LibraryNameAndNodeType]
+    flow_name: str | None = None


 @dataclass
@@ -278,6 +279,7 @@ class DeserializeFlowFromCommandsRequest(RequestPayload):
 @PayloadRegistry.register
 class DeserializeFlowFromCommandsResultSuccess(WorkflowAlteredMixin, ResultPayloadSuccess):
     flow_name: str
+    node_name_mappings: dict[str, str] = field(default_factory=dict)  # original_name -> deserialized_name


 @dataclass
@@ -394,6 +396,7 @@ class SetFlowMetadataResultFailure(WorkflowNotAlteredMixin, ResultPayloadFailure
 SanitizedParameterName = str  # What appears in the serialized flow
 OriginalNodeName = str  # Original node name (can have spaces, dots, etc.)
 OriginalParameterName = str  # Original parameter name
+PackagedNodeName = str  # Name of Start/End node in packaged flow


 class OriginalNodeParameter(NamedTuple):
@@ -403,6 +406,13 @@ class OriginalNodeParameter(NamedTuple):
     parameter_name: OriginalParameterName


+class PackagedNodeParameterMapping(NamedTuple):
+    """Parameter mappings for a packaged node (Start or End)."""
+
+    node_name: PackagedNodeName  # Name of the packaged node (e.g., "Start_Package_MultiNode")
+    parameter_mappings: dict[SanitizedParameterName, OriginalNodeParameter]  # Parameter name -> original node/param
+
+
 class ParameterNameMapping(NamedTuple):
     """Maps a sanitized parameter name back to its original node and parameter."""

@@ -446,6 +456,7 @@ class PackageNodesAsSerializedFlowRequest(RequestPayload):
     entry_control_node_name: str | None = None
     entry_control_parameter_name: str | None = None
     output_parameter_prefix: str = "packaged_node_"
+    node_group_name: str | None = None  # Name of the NodeGroupNode if packaging a group


 @dataclass
@@ -457,13 +468,15 @@ class PackageNodesAsSerializedFlowResultSuccess(WorkflowNotAlteredMixin, ResultP
         serialized_flow_commands: The complete serialized flow with StartFlow, selected nodes with preserved connections, and EndFlow
         workflow_shape: The workflow shape defining inputs and outputs for external callers
         packaged_node_names: List of node names that were included in the package
-        parameter_name_mappings: Dict mapping sanitized parameter names to original node and parameter names for O(1) lookup
+        parameter_name_mappings: List of parameter mappings for packaged nodes.
+            Index 0 = Start node mappings, Index 1 = End node mappings.
+            Each entry contains the node name and its parameter mappings.
     """

     serialized_flow_commands: SerializedFlowCommands
     workflow_shape: WorkflowShape
     packaged_node_names: list[str]
-    parameter_name_mappings: dict[SanitizedParameterName, OriginalNodeParameter]
+    parameter_name_mappings: list[PackagedNodeParameterMapping]


 @dataclass
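The mapping result is now positional rather than a flat dict. A sketch of consuming it, assuming OriginalNodeParameter is a (node_name, parameter_name) NamedTuple as the hunks above suggest; all concrete names are illustrative:

    from griptape_nodes.retained_mode.events.flow_events import (
        OriginalNodeParameter,
        PackagedNodeParameterMapping,
    )

    # Index 0 = Start node mappings, index 1 = End node mappings.
    parameter_name_mappings = [
        PackagedNodeParameterMapping(
            node_name="Start_Package_MultiNode",  # example name from the docstring above
            parameter_mappings={
                "My_Node_input": OriginalNodeParameter(node_name="My Node", parameter_name="input"),
            },
        ),
        PackagedNodeParameterMapping(
            node_name="End_Package_MultiNode",  # hypothetical End-node counterpart
            parameter_mappings={
                "packaged_node_output": OriginalNodeParameter(node_name="My Node", parameter_name="output"),
            },
        ),
    ]

    start_mappings, end_mappings = parameter_name_mappings
    original = start_mappings.parameter_mappings["My_Node_input"]
    print(original.node_name, original.parameter_name)  # My Node input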