soe-ai 0.1.1__py3-none-any.whl → 0.1.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (134):
  1. soe/builtin_tools/__init__.py +39 -0
  2. soe/builtin_tools/soe_add_signal.py +82 -0
  3. soe/builtin_tools/soe_call_tool.py +111 -0
  4. soe/builtin_tools/soe_copy_context.py +80 -0
  5. soe/builtin_tools/soe_explore_docs.py +290 -0
  6. soe/builtin_tools/soe_get_available_tools.py +42 -0
  7. soe/builtin_tools/soe_get_context.py +50 -0
  8. soe/builtin_tools/soe_get_workflows.py +63 -0
  9. soe/builtin_tools/soe_inject_node.py +86 -0
  10. soe/builtin_tools/soe_inject_workflow.py +105 -0
  11. soe/builtin_tools/soe_list_contexts.py +73 -0
  12. soe/builtin_tools/soe_remove_node.py +72 -0
  13. soe/builtin_tools/soe_remove_workflow.py +62 -0
  14. soe/builtin_tools/soe_update_context.py +54 -0
  15. soe/docs/_config.yml +10 -0
  16. soe/docs/advanced_patterns/guide_fanout_and_aggregations.md +318 -0
  17. soe/docs/advanced_patterns/guide_inheritance.md +435 -0
  18. soe/docs/advanced_patterns/hybrid_intelligence.md +237 -0
  19. soe/docs/advanced_patterns/index.md +49 -0
  20. soe/docs/advanced_patterns/operational.md +781 -0
  21. soe/docs/advanced_patterns/self_evolving_workflows.md +385 -0
  22. soe/docs/advanced_patterns/swarm_intelligence.md +211 -0
  23. soe/docs/builtins/context.md +164 -0
  24. soe/docs/builtins/explore_docs.md +135 -0
  25. soe/docs/builtins/tools.md +164 -0
  26. soe/docs/builtins/workflows.md +199 -0
  27. soe/docs/guide_00_getting_started.md +341 -0
  28. soe/docs/guide_01_tool.md +206 -0
  29. soe/docs/guide_02_llm.md +143 -0
  30. soe/docs/guide_03_router.md +146 -0
  31. soe/docs/guide_04_patterns.md +475 -0
  32. soe/docs/guide_05_agent.md +159 -0
  33. soe/docs/guide_06_schema.md +397 -0
  34. soe/docs/guide_07_identity.md +540 -0
  35. soe/docs/guide_08_child.md +612 -0
  36. soe/docs/guide_09_ecosystem.md +690 -0
  37. soe/docs/guide_10_infrastructure.md +427 -0
  38. soe/docs/guide_11_builtins.md +118 -0
  39. soe/docs/index.md +104 -0
  40. soe/docs/primitives/backends.md +281 -0
  41. soe/docs/primitives/context.md +256 -0
  42. soe/docs/primitives/node_reference.md +259 -0
  43. soe/docs/primitives/primitives.md +331 -0
  44. soe/docs/primitives/signals.md +865 -0
  45. soe/docs_index.py +1 -1
  46. soe/lib/__init__.py +0 -0
  47. soe/lib/child_context.py +46 -0
  48. soe/lib/context_fields.py +51 -0
  49. soe/lib/inheritance.py +172 -0
  50. soe/lib/jinja_render.py +113 -0
  51. soe/lib/operational.py +51 -0
  52. soe/lib/parent_sync.py +71 -0
  53. soe/lib/register_event.py +75 -0
  54. soe/lib/schema_validation.py +134 -0
  55. soe/lib/yaml_parser.py +14 -0
  56. soe/local_backends/__init__.py +18 -0
  57. soe/local_backends/factory.py +124 -0
  58. soe/local_backends/in_memory/context.py +38 -0
  59. soe/local_backends/in_memory/conversation_history.py +60 -0
  60. soe/local_backends/in_memory/identity.py +52 -0
  61. soe/local_backends/in_memory/schema.py +40 -0
  62. soe/local_backends/in_memory/telemetry.py +38 -0
  63. soe/local_backends/in_memory/workflow.py +33 -0
  64. soe/local_backends/storage/context.py +57 -0
  65. soe/local_backends/storage/conversation_history.py +82 -0
  66. soe/local_backends/storage/identity.py +118 -0
  67. soe/local_backends/storage/schema.py +96 -0
  68. soe/local_backends/storage/telemetry.py +72 -0
  69. soe/local_backends/storage/workflow.py +56 -0
  70. soe/nodes/__init__.py +13 -0
  71. soe/nodes/agent/__init__.py +10 -0
  72. soe/nodes/agent/factory.py +134 -0
  73. soe/nodes/agent/lib/loop_handlers.py +150 -0
  74. soe/nodes/agent/lib/loop_state.py +157 -0
  75. soe/nodes/agent/lib/prompts.py +65 -0
  76. soe/nodes/agent/lib/tools.py +35 -0
  77. soe/nodes/agent/stages/__init__.py +12 -0
  78. soe/nodes/agent/stages/parameter.py +37 -0
  79. soe/nodes/agent/stages/response.py +54 -0
  80. soe/nodes/agent/stages/router.py +37 -0
  81. soe/nodes/agent/state.py +111 -0
  82. soe/nodes/agent/types.py +66 -0
  83. soe/nodes/agent/validation/__init__.py +11 -0
  84. soe/nodes/agent/validation/config.py +95 -0
  85. soe/nodes/agent/validation/operational.py +24 -0
  86. soe/nodes/child/__init__.py +3 -0
  87. soe/nodes/child/factory.py +61 -0
  88. soe/nodes/child/state.py +59 -0
  89. soe/nodes/child/validation/__init__.py +11 -0
  90. soe/nodes/child/validation/config.py +126 -0
  91. soe/nodes/child/validation/operational.py +28 -0
  92. soe/nodes/lib/conditions.py +71 -0
  93. soe/nodes/lib/context.py +24 -0
  94. soe/nodes/lib/conversation_history.py +77 -0
  95. soe/nodes/lib/identity.py +64 -0
  96. soe/nodes/lib/llm_resolver.py +142 -0
  97. soe/nodes/lib/output.py +68 -0
  98. soe/nodes/lib/response_builder.py +91 -0
  99. soe/nodes/lib/signal_emission.py +79 -0
  100. soe/nodes/lib/signals.py +54 -0
  101. soe/nodes/lib/tools.py +100 -0
  102. soe/nodes/llm/__init__.py +7 -0
  103. soe/nodes/llm/factory.py +103 -0
  104. soe/nodes/llm/state.py +76 -0
  105. soe/nodes/llm/types.py +12 -0
  106. soe/nodes/llm/validation/__init__.py +11 -0
  107. soe/nodes/llm/validation/config.py +89 -0
  108. soe/nodes/llm/validation/operational.py +23 -0
  109. soe/nodes/router/__init__.py +3 -0
  110. soe/nodes/router/factory.py +37 -0
  111. soe/nodes/router/state.py +32 -0
  112. soe/nodes/router/validation/__init__.py +11 -0
  113. soe/nodes/router/validation/config.py +58 -0
  114. soe/nodes/router/validation/operational.py +16 -0
  115. soe/nodes/tool/factory.py +66 -0
  116. soe/nodes/tool/lib/__init__.py +11 -0
  117. soe/nodes/tool/lib/conditions.py +35 -0
  118. soe/nodes/tool/lib/failure.py +28 -0
  119. soe/nodes/tool/lib/parameters.py +67 -0
  120. soe/nodes/tool/state.py +66 -0
  121. soe/nodes/tool/types.py +27 -0
  122. soe/nodes/tool/validation/__init__.py +15 -0
  123. soe/nodes/tool/validation/config.py +132 -0
  124. soe/nodes/tool/validation/operational.py +16 -0
  125. soe/validation/__init__.py +18 -0
  126. soe/validation/config.py +195 -0
  127. soe/validation/jinja.py +54 -0
  128. soe/validation/operational.py +110 -0
  129. {soe_ai-0.1.1.dist-info → soe_ai-0.1.3.dist-info}/METADATA +5 -5
  130. soe_ai-0.1.3.dist-info/RECORD +137 -0
  131. {soe_ai-0.1.1.dist-info → soe_ai-0.1.3.dist-info}/WHEEL +1 -1
  132. soe_ai-0.1.1.dist-info/RECORD +0 -10
  133. {soe_ai-0.1.1.dist-info → soe_ai-0.1.3.dist-info}/licenses/LICENSE +0 -0
  134. {soe_ai-0.1.1.dist-info → soe_ai-0.1.3.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,150 @@
1
+ """
2
+ Agent loop action handlers
3
+
4
+ Focused functions for handling each router action type.
5
+ Extracted from factory.py to keep the main loop clean.
6
+ """
7
+
8
+ from typing import Dict, Any, Callable, TYPE_CHECKING
9
+ from ..stages import (
10
+ execute_response_stage,
11
+ execute_parameter_stage,
12
+ FinalResponse,
13
+ )
14
+ from ...lib.tools import create_tool_schema
15
+ from ...lib.output import get_signal_options, get_output_model
16
+
17
+ if TYPE_CHECKING:
18
+ from ..types import CallLlm
19
+ from .loop_state import AgentLoopState
20
+ from ..state import AgentContext, AgentOperationalState
21
+ from ....types import Backends
22
+
23
+
24
def handle_finish_action(
    call_llm: "CallLlm",
    agent_context: "AgentContext",
    loop_state: "AgentLoopState",
    node_config: Dict[str, Any],
    backends: "Backends",
    operational_state: "AgentOperationalState",
) -> FinalResponse:
    """Run the final Response stage when the router chooses 'finish'.

    Resolves the allowed signal options and the output schema from the
    operational state, then delegates to execute_response_stage to build
    the agent's FinalResponse.
    """
    # Signals the response stage may select, derived from configured emissions.
    allowed_signals = get_signal_options(operational_state.event_emissions)
    # Schema for the configured output field, looked up on the root execution.
    schema = get_output_model(
        backends,
        operational_state.main_execution_id,
        operational_state.output_field,
    )

    return execute_response_stage(
        call_llm=call_llm,
        agent_context=agent_context,
        loop_state=loop_state,
        config=node_config,
        output_field=operational_state.output_field,
        output_schema=schema,
        signal_options=allowed_signals,
        max_retries=operational_state.max_retries,
    )
48
+
49
+
50
def handle_tool_call_action(
    call_llm: "CallLlm",
    tool_name: str,
    tools_registry: Dict[str, Dict[str, Any]],
    agent_context: "AgentContext",
    loop_state: "AgentLoopState",
    node_config: Dict[str, Any],
    operational_state: "AgentOperationalState",
    backends: "Backends",
    execution_id: str,
) -> bool:
    """Handle the 'call_tool' action from the router.

    Generates arguments via the parameter stage, executes the tool with
    its configured retry budget, records telemetry events, and appends
    the result (or error) to the loop state.

    Returns:
        False when the requested tool is unknown (a system error is
        recorded); True when a call was attempted, whether it succeeded
        or ended in a tool error.
    """
    from ....lib.register_event import register_event
    from ....types import EventTypes

    node_name = node_config.get("name", "unknown")

    # Guard: empty or unregistered tool names become a system error the
    # router can recover from on the next iteration.
    if not (tool_name and tool_name in tools_registry):
        register_event(
            backends=backends,
            execution_id=execution_id,
            event_type=EventTypes.AGENT_TOOL_NOT_FOUND,
            data={
                "node_name": node_name,
                "tool_name": tool_name,
                "available_tools": list(tools_registry.keys()),
            }
        )
        loop_state.add_system_error(f"Tool '{tool_name}' not found or not available.")
        return False

    registry_entry = tools_registry[tool_name]
    func = registry_entry["function"]
    exec_retries = registry_entry.get("max_retries", 0)
    schema = create_tool_schema(func)

    try:
        args = execute_parameter_stage(
            call_llm=call_llm,
            agent_context=agent_context,
            loop_state=loop_state,
            tool_name=tool_name,
            tool_schema=schema,
            config=node_config,
            max_retries=operational_state.max_retries,
        )

        # Serialize args for telemetry; tolerate non-pydantic mappings.
        if hasattr(args, 'model_dump'):
            args_payload = args.model_dump()
        else:
            args_payload = dict(args)

        register_event(
            backends=backends,
            execution_id=execution_id,
            event_type=EventTypes.AGENT_TOOL_CALL,
            data={
                "node_name": node_name,
                "tool_name": tool_name,
                "tool_args": args_payload,
            }
        )

        result = _execute_tool_with_retries(func, args, exec_retries)

        # Truncate long results for telemetry; full result still goes to
        # the loop state below.
        rendered = str(result)
        if len(rendered) > 1000:
            preview = rendered[:1000] + "..."
        else:
            preview = rendered

        register_event(
            backends=backends,
            execution_id=execution_id,
            event_type=EventTypes.AGENT_TOOL_RESULT,
            data={
                "node_name": node_name,
                "tool_name": tool_name,
                "result_preview": preview,
                "result_length": len(rendered),
            }
        )

        loop_state.add_tool_response(tool_name, result)

    except Exception as e:
        # Parameter-stage and execution failures are both surfaced to the
        # agent as a tool error so the router can retry or re-plan.
        loop_state.add_tool_error(tool_name, str(e))

    return True
131
+
132
+
133
def _execute_tool_with_retries(
    tool_func: Callable,
    tool_args: Any,
    max_retries: int,
) -> Any:
    """Execute a tool with retry logic.

    Args:
        tool_func: Tool callable; invoked with keyword arguments.
        tool_args: Either a pydantic model (``model_dump()`` is used) or a
            plain mapping of keyword arguments. The original version called
            ``model_dump()`` unconditionally, which crashed for plain dicts
            even though the caller tolerates them for telemetry.
        max_retries: Number of *additional* attempts after the first.
            Negative values are treated as zero (one attempt) instead of
            silently returning None as before.

    Returns:
        The return value of the first successful attempt.

    Raises:
        Exception: The last error raised when every attempt fails.
    """
    # Serialize once, outside the retry loop.
    if hasattr(tool_args, "model_dump"):
        kwargs = tool_args.model_dump()
    else:
        kwargs = dict(tool_args)

    attempts = max(max_retries, 0) + 1  # always attempt at least once
    last_error: Optional[Exception] = None

    for _ in range(attempts):
        try:
            return tool_func(**kwargs)
        except Exception as e:
            last_error = e

    # Every attempt failed; last_error is guaranteed to be set here.
    raise last_error


# Public alias (underscore-free) so external tooling/tests can reference
# the helper; internal callers keep using _execute_tool_with_retries.
execute_tool_with_retries = _execute_tool_with_retries
@@ -0,0 +1,157 @@
1
+ """
2
+ Agent loop state management
3
+
4
+ This module defines the internal state that persists across the agent's
5
+ execution loop. When an `identity` is configured, the conversation history
6
+ is persisted to the backend, allowing it to be shared across different
7
+ node executions.
8
+ """
9
+
10
+ from typing import Dict, Any, List, Optional, TYPE_CHECKING
11
+ from pydantic import BaseModel, ConfigDict, Field, PrivateAttr
12
+
13
+ if TYPE_CHECKING:
14
+ from ....types import Backends
15
+
16
+
17
class AgentLoopState(BaseModel):
    """
    Internal state for the agent execution loop.

    When `identity` is None, this state is local to a single agent node
    execution. When `identity` is set, conversation history is loaded from
    and saved to the conversation_history backend, enabling persistence
    across different node executions.
    """
    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Latest result (or error message) recorded per tool name.
    tool_responses: Dict[str, Any] = Field(default_factory=dict)
    # Ordered entries of shape {"role": ..., "content": ..., ["tool_name": ...]}.
    conversation_history: List[Dict[str, str]] = Field(default_factory=list)
    # All tool/system error messages accumulated so far.
    errors: List[str] = Field(default_factory=list)
    # Incremented on each tool or system error; compared against max_retries.
    retry_count: int = 0
    max_retries: int = 10
    # Backend persistence key; None keeps the history purely in-memory.
    history_key: Optional[str] = None
    # Private attr so pydantic neither validates nor serializes the backends.
    _backends: Optional["Backends"] = PrivateAttr(default=None)

    @classmethod
    def create(
        cls,
        history_key: Optional[str] = None,
        backends: Optional["Backends"] = None,
        max_retries: int = 10,
    ) -> "AgentLoopState":
        """
        Factory method to create AgentLoopState, optionally loading
        existing conversation history from backend.

        Args:
            history_key: Optional key for persistent conversation history (main_execution_id)
            backends: Backends container (required if history_key is set)
            max_retries: Maximum retry count

        Returns:
            AgentLoopState instance, with history loaded if history_key exists
        """
        state = cls(max_retries=max_retries, history_key=history_key)
        # Assigned after construction because _backends is a PrivateAttr.
        state._backends = backends

        if history_key and backends and backends.conversation_history:
            state.conversation_history = backends.conversation_history.get_conversation_history(history_key)

        return state

    def add_tool_response(self, tool_name: str, result: Any) -> None:
        """Record a successful tool response."""
        self.tool_responses[tool_name] = result
        entry = {
            "role": "tool",
            "tool_name": tool_name,
            "content": str(result),
        }
        self.conversation_history.append(entry)
        self._persist_entry(entry)

    def add_tool_error(self, tool_name: str, error: str) -> None:
        """Record a tool execution error."""
        error_msg = f"Error executing {tool_name}: {error}"
        # The error message doubles as the tool's "response" so later
        # iterations can see what went wrong.
        self.tool_responses[tool_name] = error_msg
        self.errors.append(error_msg)
        entry = {
            "role": "tool_error",
            "tool_name": tool_name,
            "content": error_msg,
        }
        self.conversation_history.append(entry)
        self._persist_entry(entry)
        self.retry_count += 1

    def add_system_error(self, error: str) -> None:
        """Record a system-level error (e.g., invalid tool name)."""
        self.errors.append(error)
        entry = {
            "role": "system_error",
            "content": error,
        }
        self.conversation_history.append(entry)
        self._persist_entry(entry)
        self.retry_count += 1

    def _persist_entry(self, entry: Dict[str, str]) -> None:
        """Persist a single entry to the backend if history_key is set."""
        if self.history_key and self._backends and self._backends.conversation_history:
            self._backends.conversation_history.append_to_conversation_history(
                self.history_key, entry
            )

    def can_retry(self) -> bool:
        """Check if we can still retry."""
        return self.retry_count < self.max_retries

    def get_execution_state(self) -> str:
        """
        Determine current execution state for prompt selection.

        Returns one of:
        - 'initial': No tool calls yet
        - 'tool_response': Has successful tool responses
        - 'tool_error': Has tool errors
        - 'retry': Has system errors (e.g., invalid tool name)
        """
        if not self.conversation_history:
            return "initial"

        # Only the most recent entry determines the state.
        last_entry = self.conversation_history[-1]
        role = last_entry.get("role", "")

        if role == "tool_error":
            return "tool_error"
        elif role == "system_error":
            return "retry"
        elif role == "tool":
            return "tool_response"

        return "initial"

    def get_context_for_llm(self) -> str:
        """
        Format the conversation history for inclusion in LLM prompts.
        """
        if not self.conversation_history:
            return ""

        parts = []
        for entry in self.conversation_history:
            role = entry.get("role", "unknown")
            content = entry.get("content", "")
            tool_name = entry.get("tool_name", "")

            if role == "tool":
                parts.append(f"[Tool: {tool_name}]\n{content}")
            elif role == "tool_error":
                parts.append(f"[Tool Error: {tool_name}]\n{content}")
            elif role == "system_error":
                parts.append(f"[System Error]\n{content}")
            else:
                parts.append(content)

        return "\n\n".join(parts)
@@ -0,0 +1,65 @@
1
+ """
2
+ Agent node prompt building utilities
3
+
4
+ Provides state-specific instructions for the agent's router stage.
5
+ """
6
+
7
+
8
def get_state_instructions(execution_state: str) -> str:
    """
    Get state-specific instructions for the router stage.

    The router decides between 'call_tool' and 'finish' actions.
    Instructions vary based on what happened in previous iterations;
    unknown states fall back to the bare decision prompt.
    """
    base_decision = "Decide the next action: 'call_tool' to use a tool, or 'finish' if task is complete."

    instructions_by_state = {
        "initial": f"""{base_decision}

INITIAL EXECUTION:
1. Analyze the task and available context
2. Determine if you need additional information from tools
3. If tools are needed, choose 'call_tool' and specify which tool
4. If you have enough information to complete the task, choose 'finish'

IMPORTANT: Only call tools that are NECESSARY. Be selective.""",
        "tool_response": f"""{base_decision}

TOOL RESPONSE RECEIVED:
Your previous tool call was successful. Review the results in conversation history.

NEXT STEPS:
1. Analyze if the tool response provides what you need
2. If more information is needed, call another tool
3. If task can now be completed, choose 'finish'

Do NOT re-call tools that already succeeded.""",
        "tool_error": f"""{base_decision}

TOOL ERROR OCCURRED:
Your previous tool call failed. Review the error in conversation history.

RECOVERY:
1. Understand what went wrong
2. Fix parameters and retry the failed tool, OR
3. Try a different approach with another tool
4. If task can be completed despite the error, choose 'finish'""",
        "retry": f"""{base_decision}

RETRY NEEDED:
A system error occurred (e.g., invalid tool name). Review the error.

RECOVERY:
1. Check that tool names match available tools exactly
2. Use a valid tool name and try again
3. If no tools are needed, choose 'finish'""",
    }

    return instructions_by_state.get(execution_state, base_decision)
@@ -0,0 +1,35 @@
1
+ """
2
+ Agent node tool loading utilities.
3
+ """
4
+
5
+ from typing import Dict, Any, List
6
+
7
+ from ...lib.tools import get_tool_signature, get_tool_from_registry
8
+
9
+
10
def load_tools_and_build_signatures(
    tool_names: List[str],
    tools_registry: Dict[str, Dict[str, Any]],
    execution_id: str,
    backends,
) -> str:
    """Load tools and build signature string for agent prompt.

    Args:
        tool_names: List of tool names to load
        tools_registry: Dict mapping tool name -> {function: callable, max_retries: int}
        execution_id: Current workflow execution ID
        backends: Backend services

    Returns:
        Formatted string with all tool signatures for the prompt
        (empty string when no tools are configured)
    """
    signatures: List[str] = []

    for name in tool_names:
        # Only the callable matters here; the registry lookup may also
        # emit validation events internally.
        func, _, _, _ = get_tool_from_registry(
            name, tools_registry, execution_id, backends
        )
        signatures.append(get_tool_signature(func))

    # "\n\n".join([]) is already "", so no empty-list special case needed.
    return "\n\n".join(signatures)
@@ -0,0 +1,12 @@
1
+ from .router import execute_router_stage
2
+ from .response import execute_response_stage
3
+ from .parameter import execute_parameter_stage
4
+ from ..types import RouterResponse, FinalResponse
5
+
6
+ __all__ = [
7
+ "execute_router_stage",
8
+ "RouterResponse",
9
+ "execute_response_stage",
10
+ "FinalResponse",
11
+ "execute_parameter_stage",
12
+ ]
@@ -0,0 +1,37 @@
1
+ from typing import Type, TypeVar, TYPE_CHECKING
2
+ from pydantic import BaseModel
3
+ from ...lib.llm_resolver import resolve_llm_call
4
+ from ..types import ParameterInput
5
+ from ....types import CallLlm
6
+
7
+ if TYPE_CHECKING:
8
+ from ..state import AgentContext
9
+ from ..lib.loop_state import AgentLoopState
10
+
11
+ T = TypeVar("T", bound=BaseModel)
12
+
13
+
14
def execute_parameter_stage(
    call_llm: CallLlm,
    agent_context: "AgentContext",
    loop_state: "AgentLoopState",
    tool_name: str,
    tool_schema: Type[T],
    config: dict,
    max_retries: int = 3,
) -> T:
    """Execute the Parameter Generation stage to generate arguments for a tool.

    The LLM response is constrained to the tool's pydantic schema, so the
    return value is a validated instance of ``tool_schema``.
    """
    stage_input = ParameterInput(
        task_description=agent_context.agent_prompt,
        context=agent_context.context_string,
        tool_name=tool_name,
        conversation_history=loop_state.get_context_for_llm(),
    )

    return resolve_llm_call(
        call_llm=call_llm,
        input_data=stage_input,
        config=config,
        response_model=tool_schema,
        max_retries=max_retries,
    )
@@ -0,0 +1,54 @@
1
+ from typing import List, Optional, Type, Any, TYPE_CHECKING
2
+ from pydantic import BaseModel, Field
3
+ from ...lib.llm_resolver import resolve_llm_call
4
+ from ...lib.response_builder import (
5
+ build_response_model,
6
+ extract_output_from_response,
7
+ extract_signal_from_response,
8
+ )
9
+ from ....types import CallLlm
10
+ from ..types import ResponseStageInput, FinalResponse
11
+
12
+ if TYPE_CHECKING:
13
+ from ..state import AgentContext
14
+ from ..lib.loop_state import AgentLoopState
15
+
16
+
17
def execute_response_stage(
    call_llm: CallLlm,
    agent_context: "AgentContext",
    loop_state: "AgentLoopState",
    config: dict,
    output_field: Optional[str] = None,
    output_schema: Optional[Type[BaseModel]] = None,
    signal_options: Optional[List[str]] = None,
    max_retries: int = 3,
) -> FinalResponse:
    """Execute the Response stage to generate the final output.

    Builds a dynamic response model (output field plus optional signal
    selection), calls the LLM, and unpacks the raw response into a
    FinalResponse.
    """
    stage_input = ResponseStageInput(
        task_description=agent_context.agent_prompt,
        context=agent_context.context_string,
        conversation_history=loop_state.get_context_for_llm(),
    )

    # Pydantic model shaped by the configured output field and signals.
    model = build_response_model(
        output_field=output_field,
        output_schema=output_schema,
        signal_options=signal_options,
    )

    raw = resolve_llm_call(
        call_llm=call_llm,
        input_data=stage_input,
        config=config,
        response_model=model,
        max_retries=max_retries,
    )

    return FinalResponse(
        output=extract_output_from_response(raw, output_field),
        selected_signal=extract_signal_from_response(raw),
    )
@@ -0,0 +1,37 @@
1
+ from typing import TYPE_CHECKING
2
+ from ...lib.llm_resolver import resolve_llm_call
3
+ from ..lib.prompts import get_state_instructions
4
+ from ..types import RouterInput, RouterResponse
5
+ from ....types import CallLlm
6
+
7
+ if TYPE_CHECKING:
8
+ from ..state import AgentContext
9
+ from ..lib.loop_state import AgentLoopState
10
+
11
+
12
def execute_router_stage(
    call_llm: CallLlm,
    agent_context: "AgentContext",
    loop_state: "AgentLoopState",
    tools_signature: str,
    config: dict,
    max_retries: int = 3,
) -> RouterResponse:
    """Execute the Router stage to decide the next action.

    Selects instructions matching the loop's current execution state and
    asks the LLM to choose between 'call_tool' and 'finish'.
    """
    stage_input = RouterInput(
        instructions=get_state_instructions(loop_state.get_execution_state()),
        task_description=agent_context.agent_prompt,
        context=agent_context.context_string,
        available_tools=tools_signature,
        conversation_history=loop_state.get_context_for_llm(),
    )

    return resolve_llm_call(
        call_llm=call_llm,
        input_data=stage_input,
        config=config,
        response_model=RouterResponse,
        max_retries=max_retries,
    )
@@ -0,0 +1,111 @@
1
+ """Agent node state retrieval."""
2
+
3
+ import json
4
+ from typing import Dict, Any, List, Optional
5
+ from pydantic import BaseModel, ConfigDict
6
+ from ...types import Backends
7
+ from ..lib.conversation_history import get_conversation_history
8
+ from ...lib.jinja_render import render_prompt, get_context_for_prompt
9
+
10
+
11
class AgentOperationalState(BaseModel):
    """All data needed for agent node execution."""
    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Full execution context, including the "__operational__" section.
    context: Dict[str, Any]
    # Root execution id, taken from context["__operational__"].
    main_execution_id: str
    # Raw (un-rendered) prompt template from the node config.
    prompt: str
    # Optional identity name from node config; passed to conversation
    # history lookup, enabling persistent history when set.
    identity: Optional[str]
    # Context field the final response should populate, if configured.
    output_field: Optional[str]
    # Configured event emissions for this node ("event_emissions" key).
    event_emissions: List[Dict[str, Any]]
    # LLM retry budget (node config "retries", default 3).
    max_retries: int
    # Names of the tools this agent may call ("tools" key).
    tools: List[str]
    # Signal to emit on LLM failure, if configured.
    llm_failure_signal: Optional[str]
    # Name of the workflow currently executing.
    current_workflow_name: str
    # Persistence key for conversation history; None when non-persistent.
    history_key: Optional[str]
    # Conversation entries loaded from the backend for this history_key.
    conversation_history: List[Dict[str, Any]]
27
+
28
+
29
class AgentContext(BaseModel):
    """Context data prepared for each agent loop iteration."""
    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Full execution context as returned by the context backend.
    context: Dict[str, Any]
    # Context filtered for the prompt via get_context_for_prompt
    # (presumably restricted to keys the template references — confirm).
    filtered_context: Dict[str, Any]
    # filtered_context serialized as pretty-printed JSON for LLM prompts.
    context_string: str
    workflows_registry: Dict[str, Any]
    # Name of the workflow currently executing.
    workflow_name: str
    # Warning text when previous tool calls had errors; "" otherwise.
    error_note: str
    # Prompt template rendered against the current context.
    agent_prompt: str
    # Tool names configured for this node ("tools" key).
    tool_names: List[str]
    # Configured event emissions ("event_emissions" key).
    event_emissions: List[Dict[str, Any]]
42
+
43
+
44
def get_operational_state(
    execution_id: str,
    node_config: Dict[str, Any],
    backends: Backends,
) -> AgentOperationalState:
    """Retrieve all state needed for agent node execution."""
    ctx = backends.context.get_context(execution_id)
    # The operational section is always present on a live execution.
    operational = ctx["__operational__"]
    identity = node_config.get("identity")
    workflow_name = backends.workflow.get_current_workflow_name(execution_id)
    # History key is derived from the identity; entries are loaded eagerly.
    history_key, history = get_conversation_history(
        execution_id, identity, backends
    )

    return AgentOperationalState(
        context=ctx,
        main_execution_id=operational["main_execution_id"],
        prompt=node_config["prompt"],
        identity=identity,
        output_field=node_config.get("output_field"),
        event_emissions=node_config.get("event_emissions", []),
        max_retries=node_config.get("retries", 3),
        tools=node_config.get("tools", []),
        llm_failure_signal=node_config.get("llm_failure_signal"),
        current_workflow_name=workflow_name,
        history_key=history_key,
        conversation_history=history,
    )
72
+
73
+
74
def _tool_responses_contain_errors(tool_responses: Dict[str, Any]) -> bool:
    """Return True if any recorded tool response looks like an error.

    AgentLoopState.add_tool_error stores messages of the form
    "Error executing <tool>: <detail>", so that prefix is matched in
    addition to the generic "Error:" marker used previously (which never
    matched those messages — "Error" is followed by a space, not a colon).
    """
    return any(
        "Error executing" in str(v) or "Error:" in str(v)
        for v in tool_responses.values()
    )


def prepare_agent_context(
    execution_id: str,
    node_config: Dict[str, Any],
    backends,
    tool_responses: Dict[str, Any],
) -> AgentContext:
    """Prepare all context data for agent execution.

    Args:
        execution_id: Current workflow execution ID.
        node_config: The agent node's configuration dict.
        backends: Backend services container.
        tool_responses: Results (or error strings) from prior tool calls.

    Returns:
        AgentContext with rendered prompt, filtered context, and an
        error note when previous tool calls failed.
    """
    context = backends.context.get_context(execution_id)
    workflows_registry = backends.workflow.get_workflows_registry(execution_id)
    workflow_name = backends.workflow.get_current_workflow_name(execution_id)

    prompt_template = node_config["prompt"]
    rendered_prompt, _ = render_prompt(prompt_template, context)
    filtered_context, _ = get_context_for_prompt(context, prompt_template)

    # BUG FIX: the previous check only looked for "Error:", which never
    # matches the "Error executing <tool>: ..." messages produced by
    # AgentLoopState.add_tool_error, so the warning was never shown.
    has_errors = bool(tool_responses) and _tool_responses_contain_errors(tool_responses)
    error_note = (
        "\n⚠️ Previous tool calls had errors. Please fix the parameters and try again."
        if has_errors
        else ""
    )

    return AgentContext(
        context=context,
        filtered_context=filtered_context,
        context_string=json.dumps(filtered_context, indent=2),
        workflows_registry=workflows_registry,
        workflow_name=workflow_name,
        error_note=error_note,
        agent_prompt=rendered_prompt,
        tool_names=node_config.get("tools", []),
        event_emissions=node_config.get("event_emissions", []),
    )