aiecs 1.3.8__py3-none-any.whl → 1.4.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of aiecs has been flagged as potentially problematic. See the package registry's advisory details for more information.
- aiecs/__init__.py +1 -1
- aiecs/domain/__init__.py +120 -0
- aiecs/domain/agent/__init__.py +184 -0
- aiecs/domain/agent/base_agent.py +691 -0
- aiecs/domain/agent/exceptions.py +99 -0
- aiecs/domain/agent/hybrid_agent.py +495 -0
- aiecs/domain/agent/integration/__init__.py +23 -0
- aiecs/domain/agent/integration/context_compressor.py +219 -0
- aiecs/domain/agent/integration/context_engine_adapter.py +258 -0
- aiecs/domain/agent/integration/retry_policy.py +228 -0
- aiecs/domain/agent/integration/role_config.py +217 -0
- aiecs/domain/agent/lifecycle.py +298 -0
- aiecs/domain/agent/llm_agent.py +309 -0
- aiecs/domain/agent/memory/__init__.py +13 -0
- aiecs/domain/agent/memory/conversation.py +216 -0
- aiecs/domain/agent/migration/__init__.py +15 -0
- aiecs/domain/agent/migration/conversion.py +171 -0
- aiecs/domain/agent/migration/legacy_wrapper.py +97 -0
- aiecs/domain/agent/models.py +263 -0
- aiecs/domain/agent/observability.py +443 -0
- aiecs/domain/agent/persistence.py +287 -0
- aiecs/domain/agent/prompts/__init__.py +25 -0
- aiecs/domain/agent/prompts/builder.py +164 -0
- aiecs/domain/agent/prompts/formatters.py +192 -0
- aiecs/domain/agent/prompts/template.py +264 -0
- aiecs/domain/agent/registry.py +261 -0
- aiecs/domain/agent/tool_agent.py +267 -0
- aiecs/domain/agent/tools/__init__.py +13 -0
- aiecs/domain/agent/tools/schema_generator.py +222 -0
- aiecs/main.py +2 -2
- aiecs/tools/search_tool/__init__.py +1 -0
- {aiecs-1.3.8.dist-info → aiecs-1.4.1.dist-info}/METADATA +1 -1
- {aiecs-1.3.8.dist-info → aiecs-1.4.1.dist-info}/RECORD +37 -10
- {aiecs-1.3.8.dist-info → aiecs-1.4.1.dist-info}/WHEEL +0 -0
- {aiecs-1.3.8.dist-info → aiecs-1.4.1.dist-info}/entry_points.txt +0 -0
- {aiecs-1.3.8.dist-info → aiecs-1.4.1.dist-info}/licenses/LICENSE +0 -0
- {aiecs-1.3.8.dist-info → aiecs-1.4.1.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Agent Domain Exceptions
|
|
3
|
+
|
|
4
|
+
Defines agent-specific exceptions for the base AI agent model.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from typing import Optional
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
class AgentException(Exception):
    """Root of the agent exception hierarchy.

    Carries an optional ``agent_id`` so handlers can tell which agent
    raised the error without parsing the message text.
    """

    def __init__(self, message: str, agent_id: Optional[str] = None):
        # Populate Exception.args so str(exc) renders the message.
        super().__init__(message)
        self.message = message
        self.agent_id = agent_id
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class AgentNotFoundError(AgentException):
    """Raised when an agent cannot be found."""

    def __init__(self, agent_id: str, message: Optional[str] = None):
        # Fall back to a standard lookup-failure message when none is given.
        super().__init__(
            message or f"Agent with ID '{agent_id}' not found",
            agent_id,
        )
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class AgentAlreadyRegisteredError(AgentException):
    """Raised when attempting to register an agent with an existing ID."""

    def __init__(self, agent_id: str):
        # The message is fixed; only the agent id varies.
        super().__init__(
            f"Agent with ID '{agent_id}' is already registered",
            agent_id,
        )
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class InvalidStateTransitionError(AgentException):
    """Raised when an invalid agent state transition is attempted.

    Exposes ``current_state`` and ``attempted_state`` so handlers can
    inspect the rejected transition programmatically.
    """

    def __init__(
        self,
        agent_id: str,
        current_state: str,
        attempted_state: str,
        message: Optional[str] = None,
    ):
        if not message:
            # Default message names both ends of the rejected transition.
            message = (
                f"Invalid state transition for agent '{agent_id}': "
                f"cannot transition from '{current_state}' to '{attempted_state}'"
            )
        super().__init__(message, agent_id)
        self.current_state = current_state
        self.attempted_state = attempted_state
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
class ConfigurationError(AgentException):
    """Raised when agent configuration is invalid.

    ``field`` optionally names the offending configuration key.
    """

    def __init__(self, message: str, agent_id: Optional[str] = None, field: Optional[str] = None):
        # Record the offending field before delegating to the base class.
        self.field = field
        super().__init__(message, agent_id)
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
class TaskExecutionError(AgentException):
    """Raised when task execution fails.

    Optionally records the failing task id and how many retries were
    attempted before giving up.
    """

    def __init__(
        self,
        message: str,
        agent_id: Optional[str] = None,
        task_id: Optional[str] = None,
        retry_count: Optional[int] = None,
    ):
        super().__init__(message, agent_id)
        # Extra diagnostics for retry-aware callers.
        self.retry_count = retry_count
        self.task_id = task_id
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
class ToolAccessDeniedError(AgentException):
    """Raised when an agent attempts to use a tool it doesn't have access to."""

    def __init__(self, agent_id: str, tool_name: str):
        # Remember which tool was denied for programmatic handling.
        self.tool_name = tool_name
        super().__init__(
            f"Agent '{agent_id}' does not have access to tool '{tool_name}'",
            agent_id,
        )
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
class SerializationError(AgentException):
    """Raised when agent serialization/deserialization fails."""

    def __init__(self, message: str, agent_id: Optional[str] = None):
        # No extra payload; simply forwards to the base exception.
        super().__init__(message, agent_id)
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
class AgentInitializationError(AgentException):
    """Raised when agent initialization fails."""

    def __init__(self, message: str, agent_id: Optional[str] = None):
        # No extra payload; simply forwards to the base exception.
        super().__init__(message, agent_id)
|
|
99
|
+
|
|
@@ -0,0 +1,495 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Hybrid Agent
|
|
3
|
+
|
|
4
|
+
Agent implementation combining LLM reasoning with tool execution capabilities.
|
|
5
|
+
Implements the ReAct (Reasoning + Acting) pattern.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import logging
|
|
9
|
+
from typing import Dict, List, Any, Optional
|
|
10
|
+
from datetime import datetime
|
|
11
|
+
|
|
12
|
+
from aiecs.llm import BaseLLMClient, LLMMessage, LLMResponse
|
|
13
|
+
from aiecs.tools import get_tool, BaseTool
|
|
14
|
+
|
|
15
|
+
from .base_agent import BaseAIAgent
|
|
16
|
+
from .models import AgentType, AgentConfiguration
|
|
17
|
+
from .exceptions import TaskExecutionError, ToolAccessDeniedError
|
|
18
|
+
|
|
19
|
+
logger = logging.getLogger(__name__)
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class HybridAgent(BaseAIAgent):
    """
    Hybrid agent combining LLM reasoning with tool execution.

    Implements ReAct pattern: Reason → Act → Observe loop.
    """

    def __init__(
        self,
        agent_id: str,
        name: str,
        llm_client: BaseLLMClient,
        tools: List[str],
        config: AgentConfiguration,
        description: Optional[str] = None,
        version: str = "1.0.0",
        max_iterations: int = 10,
    ):
        """
        Initialize Hybrid agent.

        Args:
            agent_id: Unique agent identifier
            name: Agent name
            llm_client: LLM client for reasoning
            tools: List of tool names this agent is allowed to use
            config: Agent configuration
            description: Optional description
            version: Agent version
            max_iterations: Maximum ReAct iterations per task
        """
        super().__init__(
            agent_id=agent_id,
            name=name,
            agent_type=AgentType.DEVELOPER,  # Can be adjusted based on use case
            config=config,
            description=description or "Hybrid agent with LLM reasoning and tool execution",
            version=version,
        )

        self.llm_client = llm_client
        # Defensive copy: get_available_tools() already hands out a copy, so the
        # internal allow-list must not alias the caller's list either — otherwise
        # a caller mutating its own list would silently change this agent's
        # tool access set.
        self._available_tools = list(tools)
        self._max_iterations = max_iterations
        # name -> loaded tool instance; populated in _initialize()
        self._tool_instances: Dict[str, BaseTool] = {}
        # Built lazily in _initialize()
        self._system_prompt: Optional[str] = None
        self._conversation_history: List[LLMMessage] = []

        logger.info(
            f"HybridAgent initialized: {agent_id} with LLM ({llm_client.provider_name}) "
            f"and tools: {', '.join(tools)}"
        )
|
|
73
|
+
|
|
74
|
+
async def _initialize(self) -> None:
    """Prepare the agent: build the system prompt and load tool instances.

    Tools that fail to load are logged and skipped rather than aborting
    initialization.
    """
    self._system_prompt = self._build_system_prompt()

    for name in self._available_tools:
        try:
            self._tool_instances[name] = get_tool(name)
            logger.debug(f"HybridAgent {self.agent_id} loaded tool: {name}")
        except Exception as exc:
            # Best-effort loading: a missing tool must not break the agent.
            logger.warning(f"Failed to load tool {name}: {exc}")

    logger.info(
        f"HybridAgent {self.agent_id} initialized with {len(self._tool_instances)} tools"
    )
|
|
90
|
+
|
|
91
|
+
async def _shutdown(self) -> None:
    """Release agent resources: conversation history, tools, and the LLM client."""
    # Drop in-memory state first so nothing references the client afterwards.
    self._conversation_history.clear()
    self._tool_instances.clear()

    # Close the LLM client only when it exposes a close() coroutine.
    if hasattr(self.llm_client, 'close'):
        await self.llm_client.close()

    logger.info(f"HybridAgent {self.agent_id} shut down")
|
|
100
|
+
|
|
101
|
+
def _build_system_prompt(self) -> str:
    """Assemble the system prompt: role info, ReAct protocol, tool list, domain knowledge."""
    sections: List[str] = []

    # Role framing from the agent configuration, when present.
    if self._config.goal:
        sections.append(f"Goal: {self._config.goal}")
    if self._config.backstory:
        sections.append(f"Background: {self._config.backstory}")

    # Core ReAct protocol the LLM must follow; the markers here
    # (TOOL:/OPERATION:/PARAMETERS:/FINAL ANSWER:) are what the
    # ReAct loop parses out of the model's replies.
    sections.append(
        "You are a reasoning agent that can use tools to complete tasks. "
        "Follow the ReAct pattern:\n"
        "1. THOUGHT: Analyze the task and decide what to do\n"
        "2. ACTION: Use a tool if needed, or provide final answer\n"
        "3. OBSERVATION: Review the tool result and continue reasoning\n\n"
        "When you need to use a tool, respond with:\n"
        "TOOL: <tool_name>\n"
        "OPERATION: <operation_name>\n"
        "PARAMETERS: <json_parameters>\n\n"
        "When you have the final answer, respond with:\n"
        "FINAL ANSWER: <your_answer>"
    )

    if self._available_tools:
        sections.append(f"\nAvailable tools: {', '.join(self._available_tools)}")
    if self._config.domain_knowledge:
        sections.append(f"\nDomain Knowledge: {self._config.domain_knowledge}")

    return "\n\n".join(sections)
|
|
135
|
+
|
|
136
|
+
async def execute_task(
    self,
    task: Dict[str, Any],
    context: Dict[str, Any]
) -> Dict[str, Any]:
    """
    Execute a task using the ReAct loop.

    Args:
        task: Task specification with 'description', 'prompt', or 'task'
            (and optionally 'task_id')
        context: Execution context

    Returns:
        Execution result with 'success', 'output', 'reasoning_steps',
        'tool_calls_count', 'iterations', 'execution_time', 'timestamp'

    Raises:
        TaskExecutionError: If the task is malformed or execution fails.
    """
    start_time = datetime.utcnow()

    try:
        # Accept any of the three conventional payload keys.
        task_description = task.get('description') or task.get('prompt') or task.get('task')
        if not task_description:
            raise TaskExecutionError(
                "Task must contain 'description', 'prompt', or 'task' field",
                agent_id=self.agent_id
            )

        # Mark the agent busy while the ReAct loop runs.
        self._transition_state(self.state.__class__.BUSY)
        self._current_task_id = task.get('task_id')

        # Execute ReAct loop
        result = await self._react_loop(task_description, context)

        execution_time = (datetime.utcnow() - start_time).total_seconds()

        self.update_metrics(
            execution_time=execution_time,
            success=True,
            tokens_used=result.get('total_tokens'),
            tool_calls=result.get('tool_calls_count', 0),
        )

        # Transition back to active
        self._transition_state(self.state.__class__.ACTIVE)
        self._current_task_id = None
        self.last_active_at = datetime.utcnow()

        return {
            "success": True,
            "output": result.get('final_answer'),
            "reasoning_steps": result.get('steps'),
            "tool_calls_count": result.get('tool_calls_count'),
            "iterations": result.get('iterations'),
            "execution_time": execution_time,
            "timestamp": datetime.utcnow().isoformat(),
        }

    except TaskExecutionError as e:
        # Already a domain error (e.g. the validation above): record the
        # failure but re-raise as-is instead of double-wrapping it in
        # another TaskExecutionError.
        logger.error(f"Task execution failed for {self.agent_id}: {e}")
        self._record_task_failure(start_time)
        raise

    except Exception as e:
        logger.error(f"Task execution failed for {self.agent_id}: {e}")
        self._record_task_failure(start_time)
        # Chain the original exception so the root cause stays visible
        # in tracebacks (raise ... from ...).
        raise TaskExecutionError(
            f"Task execution failed: {str(e)}",
            agent_id=self.agent_id,
            task_id=task.get('task_id')
        ) from e

def _record_task_failure(self, start_time: datetime) -> None:
    """Update metrics and state bookkeeping after a failed task."""
    execution_time = (datetime.utcnow() - start_time).total_seconds()
    self.update_metrics(execution_time=execution_time, success=False)
    self._transition_state(self.state.__class__.ERROR)
    self._current_task_id = None
|
|
214
|
+
|
|
215
|
+
async def process_message(
    self,
    message: str,
    sender_id: Optional[str] = None
) -> Dict[str, Any]:
    """
    Process an incoming message by running it through the ReAct task loop.

    Args:
        message: Message content
        sender_id: Optional sender identifier

    Returns:
        Response dictionary with 'response', 'reasoning_steps', 'timestamp'
    """
    try:
        # Wrap the message as an ad-hoc task with a timestamp-derived id
        # and delegate to the normal task-execution path.
        result = await self.execute_task(
            {
                "description": message,
                "task_id": f"msg_{datetime.utcnow().timestamp()}",
            },
            {"sender_id": sender_id},
        )
        return {
            "response": result.get("output"),
            "reasoning_steps": result.get("reasoning_steps"),
            "timestamp": result.get("timestamp"),
        }
    except Exception as e:
        logger.error(f"Message processing failed for {self.agent_id}: {e}")
        raise
|
|
249
|
+
|
|
250
|
+
async def _react_loop(
    self,
    task: str,
    context: Dict[str, Any]
) -> Dict[str, Any]:
    """
    Execute ReAct loop: Reason → Act → Observe.

    Each iteration asks the LLM for a "thought"; depending on the markers
    found in the reply the loop either returns a final answer, executes a
    tool and feeds the observation back, or treats the reply itself as the
    final answer.

    Args:
        task: Task description
        context: Context dictionary (folded into the initial messages)

    Returns:
        Result dictionary with 'final_answer', 'steps', 'iterations',
        'tool_calls_count' and 'total_tokens'; when the iteration budget
        runs out, 'max_iterations_reached' is also set.
    """
    steps = []              # ordered trace of thought/action/observation entries
    tool_calls_count = 0
    total_tokens = 0        # accumulated from responses exposing total_tokens

    # Build initial messages (system prompt, optional context, the task)
    messages = self._build_initial_messages(task, context)

    for iteration in range(self._max_iterations):
        logger.debug(f"HybridAgent {self.agent_id} - ReAct iteration {iteration + 1}")

        # THINK: LLM reasons about next action
        response = await self.llm_client.generate_text(
            messages=messages,
            model=self._config.llm_model,
            temperature=self._config.temperature,
            max_tokens=self._config.max_tokens,
        )

        thought = response.content
        # Some clients may not report token usage; default to 0 then.
        total_tokens += getattr(response, 'total_tokens', 0)

        steps.append({
            "type": "thought",
            "content": thought,
            "iteration": iteration + 1,
        })

        # Check if final answer; this marker wins over a tool call
        # when both appear in the same thought.
        if "FINAL ANSWER:" in thought:
            final_answer = self._extract_final_answer(thought)
            return {
                "final_answer": final_answer,
                "steps": steps,
                "iterations": iteration + 1,
                "tool_calls_count": tool_calls_count,
                "total_tokens": total_tokens,
            }

        # Check if tool call
        if "TOOL:" in thought:
            # ACT: Execute tool
            try:
                tool_info = self._parse_tool_call(thought)
                tool_result = await self._execute_tool(
                    tool_info['tool'],
                    tool_info.get('operation'),
                    tool_info.get('parameters', {})
                )
                tool_calls_count += 1

                steps.append({
                    "type": "action",
                    "tool": tool_info['tool'],
                    "operation": tool_info.get('operation'),
                    "parameters": tool_info.get('parameters'),
                    "iteration": iteration + 1,
                })

                # OBSERVE: Add tool result to conversation
                observation = f"OBSERVATION: Tool '{tool_info['tool']}' returned: {tool_result}"
                steps.append({
                    "type": "observation",
                    "content": observation,
                    "iteration": iteration + 1,
                })

                # Add to messages for next iteration
                messages.append(LLMMessage(role="assistant", content=thought))
                messages.append(LLMMessage(role="user", content=observation))

            except Exception as e:
                # Tool failures are fed back as observations so the LLM can
                # recover (e.g. retry with different parameters) instead of
                # aborting the whole loop.
                error_msg = f"OBSERVATION: Tool execution failed: {str(e)}"
                steps.append({
                    "type": "observation",
                    "content": error_msg,
                    "iteration": iteration + 1,
                    "error": True,
                })
                messages.append(LLMMessage(role="assistant", content=thought))
                messages.append(LLMMessage(role="user", content=error_msg))

        else:
            # LLM didn't provide clear action - treat as final answer
            return {
                "final_answer": thought,
                "steps": steps,
                "iterations": iteration + 1,
                "tool_calls_count": tool_calls_count,
                "total_tokens": total_tokens,
            }

    # Max iterations reached
    logger.warning(f"HybridAgent {self.agent_id} reached max iterations")
    return {
        "final_answer": "Max iterations reached. Unable to complete task fully.",
        "steps": steps,
        "iterations": self._max_iterations,
        "tool_calls_count": tool_calls_count,
        "total_tokens": total_tokens,
        "max_iterations_reached": True,
    }
|
|
366
|
+
|
|
367
|
+
def _build_initial_messages(
    self,
    task: str,
    context: Dict[str, Any]
) -> List[LLMMessage]:
    """Create the opening message list: system prompt, optional context, then the task."""
    messages: List[LLMMessage] = []

    if self._system_prompt:
        messages.append(LLMMessage(role="system", content=self._system_prompt))

    # Surface caller-provided context as a secondary system message.
    context_str = self._format_context(context) if context else ""
    if context_str:
        messages.append(LLMMessage(
            role="system",
            content=f"Additional Context:\n{context_str}"
        ))

    messages.append(LLMMessage(role="user", content=f"Task: {task}"))
    return messages
|
|
392
|
+
|
|
393
|
+
def _format_context(self, context: Dict[str, Any]) -> str:
    """Render context as 'key: value' lines, skipping private keys and None values."""
    rendered = [
        f"{key}: {value}"
        for key, value in context.items()
        if not key.startswith('_') and value is not None
    ]
    return "\n".join(rendered) if rendered else ""
|
|
400
|
+
|
|
401
|
+
def _extract_final_answer(self, thought: str) -> str:
    """Return the text after 'FINAL ANSWER:' if present, else the whole thought."""
    marker = "FINAL ANSWER:"
    if marker not in thought:
        return thought
    # Everything after the first occurrence of the marker is the answer.
    _, _, answer = thought.partition(marker)
    return answer.strip()
|
|
406
|
+
|
|
407
|
+
def _parse_tool_call(self, thought: str) -> Dict[str, Any]:
    """
    Parse a tool call from an LLM thought.

    Expected format:
        TOOL: <tool_name>
        OPERATION: <operation_name>
        PARAMETERS: <json_parameters>

    Markers that appear in the text but never start a line are ignored
    (the original indexed ``[0]`` into a possibly empty list, raising
    IndexError for e.g. "use TOOL: x" mid-sentence). Leading whitespace
    before a marker is tolerated.

    Args:
        thought: LLM thought containing the tool call

    Returns:
        Dictionary with 'tool' and, when present, 'operation' and
        'parameters'. Unparsable PARAMETERS JSON yields an empty dict.
    """
    import json

    result: Dict[str, Any] = {}
    # Split once and strip each line so indented markers still match.
    lines = [line.strip() for line in thought.split('\n')]

    for marker, key in (("TOOL:", "tool"), ("OPERATION:", "operation")):
        match = next((line for line in lines if line.startswith(marker)), None)
        if match is not None:
            result[key] = match.split(marker, 1)[1].strip()

    param_line = next((line for line in lines if line.startswith("PARAMETERS:")), None)
    if param_line is not None:
        param_str = param_line.split("PARAMETERS:", 1)[1].strip()
        try:
            result['parameters'] = json.loads(param_str)
        except json.JSONDecodeError:
            # Malformed JSON is not fatal: fall back to no parameters.
            logger.warning(f"Failed to parse parameters: {param_str}")
            result['parameters'] = {}

    return result
|
|
447
|
+
|
|
448
|
+
async def _execute_tool(
    self,
    tool_name: str,
    operation: Optional[str],
    parameters: Dict[str, Any]
) -> Any:
    """Run a tool operation after verifying this agent may use the tool.

    Raises:
        ToolAccessDeniedError: Tool is not in this agent's allow-list.
        ValueError: Tool was never loaded, or no operation was given for
            a tool without a usable run_async entry point.
    """
    # Access check against the configured allow-list.
    if tool_name not in self._available_tools:
        raise ToolAccessDeniedError(self.agent_id, tool_name)

    tool = self._tool_instances.get(tool_name)
    if not tool:
        raise ValueError(f"Tool {tool_name} not loaded")

    # With an operation name, dispatch through run_async(operation, ...).
    if operation:
        return await tool.run_async(operation, **parameters)

    # Without an operation, the tool itself must expose run_async directly.
    if hasattr(tool, 'run_async'):
        return await tool.run_async(**parameters)
    raise ValueError(f"Tool {tool_name} requires operation to be specified")
|
|
473
|
+
|
|
474
|
+
def get_available_tools(self) -> List[str]:
    """Return a copy of the tool allow-list (caller mutations don't affect the agent)."""
    return list(self._available_tools)
|
|
477
|
+
|
|
478
|
+
@classmethod
def from_dict(cls, data: Dict[str, Any]) -> "HybridAgent":
    """
    Deserialize HybridAgent from dictionary.

    Not supported: a live LLM client cannot be reconstructed from plain
    data, so hybrid agents must be created through the constructor.

    Args:
        data: Dictionary representation

    Raises:
        NotImplementedError: Always.
    """
    raise NotImplementedError(
        "HybridAgent.from_dict requires LLM client to be provided separately. "
        "Use constructor instead."
    )
|
|
495
|
+
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Integration Module
|
|
3
|
+
|
|
4
|
+
Integration adapters for external systems.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from .context_engine_adapter import ContextEngineAdapter
|
|
8
|
+
from .retry_policy import EnhancedRetryPolicy, ErrorClassifier, ErrorType
|
|
9
|
+
from .role_config import RoleConfiguration, load_role_config
|
|
10
|
+
from .context_compressor import ContextCompressor, compress_messages, CompressionStrategy
|
|
11
|
+
|
|
12
|
+
# Names re-exported as the integration package's public API.
# Keep this list in sync with the imports above.
__all__ = [
    "ContextEngineAdapter",
    "EnhancedRetryPolicy",
    "ErrorClassifier",
    "ErrorType",
    "RoleConfiguration",
    "load_role_config",
    "ContextCompressor",
    "compress_messages",
    "CompressionStrategy",
]
|
|
23
|
+
|