quantalogic 0.61.2__py3-none-any.whl → 0.80__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- quantalogic/agent.py +0 -1
- quantalogic/codeact/TODO.md +14 -0
- quantalogic/codeact/agent.py +400 -421
- quantalogic/codeact/cli.py +42 -224
- quantalogic/codeact/cli_commands/__init__.py +0 -0
- quantalogic/codeact/cli_commands/create_toolbox.py +45 -0
- quantalogic/codeact/cli_commands/install_toolbox.py +20 -0
- quantalogic/codeact/cli_commands/list_executor.py +15 -0
- quantalogic/codeact/cli_commands/list_reasoners.py +15 -0
- quantalogic/codeact/cli_commands/list_toolboxes.py +47 -0
- quantalogic/codeact/cli_commands/task.py +215 -0
- quantalogic/codeact/cli_commands/tool_info.py +24 -0
- quantalogic/codeact/cli_commands/uninstall_toolbox.py +43 -0
- quantalogic/codeact/config.yaml +21 -0
- quantalogic/codeact/constants.py +1 -1
- quantalogic/codeact/events.py +12 -5
- quantalogic/codeact/examples/README.md +342 -0
- quantalogic/codeact/examples/agent_sample.yaml +29 -0
- quantalogic/codeact/executor.py +186 -0
- quantalogic/codeact/history_manager.py +94 -0
- quantalogic/codeact/llm_util.py +3 -22
- quantalogic/codeact/plugin_manager.py +92 -0
- quantalogic/codeact/prompts/generate_action.j2 +65 -14
- quantalogic/codeact/prompts/generate_program.j2 +32 -19
- quantalogic/codeact/react_agent.py +318 -0
- quantalogic/codeact/reasoner.py +185 -0
- quantalogic/codeact/templates/toolbox/README.md.j2 +10 -0
- quantalogic/codeact/templates/toolbox/pyproject.toml.j2 +16 -0
- quantalogic/codeact/templates/toolbox/tools.py.j2 +6 -0
- quantalogic/codeact/templates.py +7 -0
- quantalogic/codeact/tools_manager.py +242 -119
- quantalogic/codeact/utils.py +16 -89
- quantalogic/codeact/xml_utils.py +126 -0
- quantalogic/flow/flow.py +151 -41
- quantalogic/flow/flow_extractor.py +61 -1
- quantalogic/flow/flow_generator.py +34 -6
- quantalogic/flow/flow_manager.py +64 -25
- quantalogic/flow/flow_manager_schema.py +32 -0
- quantalogic/tools/action_gen.py +1 -1
- quantalogic/tools/action_gen_safe.py +340 -0
- quantalogic/tools/tool.py +531 -109
- quantalogic/tools/write_file_tool.py +7 -8
- {quantalogic-0.61.2.dist-info → quantalogic-0.80.dist-info}/METADATA +3 -2
- {quantalogic-0.61.2.dist-info → quantalogic-0.80.dist-info}/RECORD +47 -42
- {quantalogic-0.61.2.dist-info → quantalogic-0.80.dist-info}/WHEEL +1 -1
- quantalogic-0.80.dist-info/entry_points.txt +3 -0
- quantalogic/python_interpreter/__init__.py +0 -23
- quantalogic/python_interpreter/assignment_visitors.py +0 -63
- quantalogic/python_interpreter/base_visitors.py +0 -20
- quantalogic/python_interpreter/class_visitors.py +0 -22
- quantalogic/python_interpreter/comprehension_visitors.py +0 -172
- quantalogic/python_interpreter/context_visitors.py +0 -59
- quantalogic/python_interpreter/control_flow_visitors.py +0 -88
- quantalogic/python_interpreter/exception_visitors.py +0 -109
- quantalogic/python_interpreter/exceptions.py +0 -39
- quantalogic/python_interpreter/execution.py +0 -202
- quantalogic/python_interpreter/function_utils.py +0 -386
- quantalogic/python_interpreter/function_visitors.py +0 -209
- quantalogic/python_interpreter/import_visitors.py +0 -28
- quantalogic/python_interpreter/interpreter_core.py +0 -358
- quantalogic/python_interpreter/literal_visitors.py +0 -74
- quantalogic/python_interpreter/misc_visitors.py +0 -148
- quantalogic/python_interpreter/operator_visitors.py +0 -108
- quantalogic/python_interpreter/scope.py +0 -10
- quantalogic/python_interpreter/visit_handlers.py +0 -110
- quantalogic-0.61.2.dist-info/entry_points.txt +0 -6
- {quantalogic-0.61.2.dist-info → quantalogic-0.80.dist-info}/LICENSE +0 -0
quantalogic/codeact/agent.py
CHANGED
@@ -1,472 +1,447 @@
+"""High-level interface for the Quantalogic Agent with modular configuration."""
+
 import asyncio
-import
-from
+import os
+from dataclasses import dataclass, field
+from pathlib import Path
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
 
-
+import yaml
+from jinja2 import Environment
 from loguru import logger
 from lxml import etree
 
-from quantalogic.python_interpreter import execute_async
 from quantalogic.tools import Tool
 
-from .constants import
-from .
-    ActionExecutedEvent,
-    ActionGeneratedEvent,
-    ErrorOccurredEvent,
-    StepCompletedEvent,
-    StepStartedEvent,
-    TaskCompletedEvent,
-    TaskStartedEvent,
-    ThoughtGeneratedEvent,
-    ToolExecutionCompletedEvent,
-    ToolExecutionErrorEvent,
-    ToolExecutionStartedEvent,
-)
+from .constants import MAX_HISTORY_TOKENS, MAX_TOKENS
+from .executor import BaseExecutor, Executor
 from .llm_util import litellm_completion
+from .plugin_manager import PluginManager
+from .react_agent import ReActAgent
+from .reasoner import BaseReasoner, Reasoner
+from .templates import jinja_env as default_jinja_env
 from .tools_manager import RetrieveStepTool, get_default_tools
-from .utils import
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                    {"role": "user", "content": prompt}
-                ],
-                max_tokens=max_tokens,
-                temperature=0.3,
-                stream=streaming, # Use streaming flag
-                step=step,
-                notify_event=notify_event
-            )
-            code = response.strip()
-            return code[9:-3].strip() if code.startswith("```python") and code.endswith("```") else code
-        except Exception as e:
-            if attempt < 2:
-                await asyncio.sleep(2 ** attempt)
-            else:
-                raise Exception(f"Code generation failed with {model}: {e}")
-
-class Reasoner:
-    """Handles action generation using the language model."""
-    def __init__(self, model: str, tools: List[Tool]):
-        self.model = model
-        self.tools = tools
-
-    async def generate_action(
-        self,
-        task: str,
-        history_str: str,
-        step: int,
-        max_iterations: int,
-        system_prompt: Optional[str] = None,
-        notify_event: Callable = None,
-        streaming: bool = False # New parameter for streaming
-    ) -> str:
-        """Generate an action based on task and history with streaming support."""
-        try:
-            task_prompt = jinja_env.get_template("generate_action.j2").render(
-                task=task if not system_prompt else f"{system_prompt}\nTask: {task}",
-                history_str=history_str,
-                current_step=step,
-                max_iterations=max_iterations
-            )
-            program = await generate_program(task_prompt, self.tools, self.model, MAX_GENERATE_PROGRAM_TOKENS, step, notify_event, streaming=streaming)
-            response = jinja_env.get_template("response_format.j2").render(
-                task=task,
-                history_str=history_str,
-                program=program,
-                current_step=step,
-                max_iterations=max_iterations
-            )
-            if not validate_xml(response):
-                raise ValueError("Invalid XML generated")
-            return response
-        except Exception as e:
-            return jinja_env.get_template("error_format.j2").render(error=str(e))
-
-class Executor:
-    """Manages action execution and context updates."""
-    def __init__(self, tools: List[Tool], notify_event: Callable):
-        self.tools = tools
-        self.notify_event = notify_event # Callback to notify observers
-        self.tool_namespace = self._build_tool_namespace()
-
-    def _build_tool_namespace(self) -> Dict:
-        """Build the namespace with wrapped tool functions that trigger events."""
-        def wrap_tool(tool):
-            async def wrapped_tool(**kwargs):
-                # Get the current step from the namespace
-                current_step = self.tool_namespace.get('current_step', None)
-                # Summarize parameters to keep events lightweight
-                parameters_summary = {
-                    k: str(v)[:100] + "..." if len(str(v)) > 100 else str(v)
-                    for k, v in kwargs.items()
-                }
-                # Trigger start event
-                await self.notify_event(ToolExecutionStartedEvent(
-                    event_type="ToolExecutionStarted",
-                    step_number=current_step,
-                    tool_name=tool.name,
-                    parameters_summary=parameters_summary
-                ))
-                try:
-                    result = await tool.async_execute(**kwargs)
-                    # Summarize result
-                    result_summary = str(result)[:100] + "..." if len(str(result)) > 100 else str(result)
-                    # Trigger completion event
-                    await self.notify_event(ToolExecutionCompletedEvent(
-                        event_type="ToolExecutionCompleted",
-                        step_number=current_step,
-                        tool_name=tool.name,
-                        result_summary=result_summary
-                    ))
-                    return result
-                except Exception as e:
-                    # Trigger error event
-                    await self.notify_event(ToolExecutionErrorEvent(
-                        event_type="ToolExecutionError",
-                        step_number=current_step,
-                        tool_name=tool.name,
-                        error=str(e)
-                    ))
-                    raise
-            return wrapped_tool
-
-        return {
-            "asyncio": asyncio,
-            "context_vars": {}, # Updated dynamically
-            **{tool.name: wrap_tool(tool) for tool in self.tools}
-        }
-
-    async def execute_action(self, code: str, context_vars: Dict, step: int, timeout: int = 300) -> str:
-        """Execute the generated code and return the result, setting the step number."""
-        self.tool_namespace["context_vars"] = context_vars
-        self.tool_namespace['current_step'] = step # Set step for tools to access
-        if not validate_code(code):
-            return etree.tostring(
-                etree.Element("ExecutionResult", status="Error", message="Code lacks async main()"),
-                encoding="unicode"
-            )
-
-        try:
-            result = await execute_async(
-                code=code, timeout=timeout, entry_point="main",
-                allowed_modules=["asyncio"], namespace=self.tool_namespace
-            )
-            if result.local_variables:
-                context_vars.update({
-                    k: v for k, v in result.local_variables.items()
-                    if not k.startswith('__') and not callable(v)
-                })
-            return XMLResultHandler.format_execution_result(result)
-        except Exception as e:
-            return etree.tostring(
-                etree.Element("ExecutionResult", status="Error", message=f"Execution error: {e}"),
-                encoding="unicode"
-            )
-
-class ReActAgent:
-    """Core agent implementing the ReAct framework with modular components."""
-    def __init__(self, model: str, tools: List[Tool], max_iterations: int = 5, max_history_tokens: int = 2000):
-        self.reasoner = Reasoner(model, tools)
-        self.executor = Executor(tools, notify_event=self._notify_observers)
-        self.max_iterations = max_iterations
-        self.max_history_tokens = max_history_tokens # Limit history token size
-        self.context_vars: Dict = {}
-        self._observers: List[Tuple[Callable, List[str]]] = []
-        self.history_store: List[Dict] = [] # Persistent storage for all steps
-
-    def add_observer(self, observer: Callable, event_types: List[str]) -> 'ReActAgent':
-        """Add an observer for specific event types."""
-        self._observers.append((observer, event_types))
-        return self
-
-    async def _notify_observers(self, event):
-        """Notify all subscribed observers of an event."""
-        await asyncio.gather(
-            *(observer(event) for observer, types in self._observers if event.event_type in types),
-            return_exceptions=True
-        )
-
-    async def generate_action(
-        self,
-        task: str,
-        history: List[Dict],
-        step: int,
-        max_iterations: int,
-        system_prompt: Optional[str] = None,
-        streaming: bool = False # New parameter for streaming
-    ) -> str:
-        """Generate an action using the Reasoner."""
-        history_str = self._format_history(history, max_iterations)
-        start = time.perf_counter()
-        response = await self.reasoner.generate_action(task, history_str, step, max_iterations, system_prompt, self._notify_observers, streaming=streaming)
-        thought, code = XMLResultHandler.parse_response(response)
-        gen_time = time.perf_counter() - start
-        await self._notify_observers(ThoughtGeneratedEvent(
-            event_type="ThoughtGenerated", step_number=step, thought=thought, generation_time=gen_time
-        ))
-        await self._notify_observers(ActionGeneratedEvent(
-            event_type="ActionGenerated", step_number=step, action_code=code, generation_time=gen_time
-        ))
-        if not response.endswith("</Code>"):
-            logger.warning(f"Response might be truncated at step {step}")
-        return response
-
-    async def execute_action(self, code: str, step: int, timeout: int = 300) -> str:
-        """Execute an action using the Executor, passing the step number."""
-        start = time.perf_counter()
-        result_xml = await self.executor.execute_action(code, self.context_vars, step, timeout)
-        execution_time = time.perf_counter() - start
-        await self._notify_observers(ActionExecutedEvent(
-            event_type="ActionExecuted", step_number=step, result_xml=result_xml, execution_time=execution_time
-        ))
-        return result_xml
-
-    def _format_history(self, history: List[Dict], max_iterations: int) -> str:
-        """Format the history with available variables, truncating to fit within max_history_tokens."""
-        included_steps = []
-        total_tokens = 0
-        for step in reversed(history): # Start from most recent
-            # Extract variables from context_vars updated after this step
-            try:
-                root = etree.fromstring(step['result'])
-                vars_elem = root.find("Variables")
-                available_vars = (
-                    [var.get('name') for var in vars_elem.findall("Variable")]
-                    if vars_elem is not None else []
-                )
-            except etree.XMLSyntaxError:
-                available_vars = []
-
-            step_str = (
-                f"===== Step {step['step_number']} of {max_iterations} max =====\n"
-                f"Thought:\n{step['thought']}\n\n"
-                f"Action:\n{step['action']}\n\n"
-                f"Result:\n{XMLResultHandler.format_result_summary(step['result'])}\n"
-                f"Available variables: {', '.join(available_vars) or 'None'}"
-            )
-            step_tokens = len(step_str.split()) # Approximate token count
-            if total_tokens + step_tokens > self.max_history_tokens:
-                break
-            included_steps.append(step_str)
-            total_tokens += step_tokens
-        return "\n".join(reversed(included_steps)) or "No previous steps"
-
-    async def is_task_complete(self, task: str, history: List[Dict], result: str, success_criteria: Optional[str]) -> Tuple[bool, str]:
-        """Check if the task is complete based on the result."""
-        try:
-            root = etree.fromstring(result)
-            if root.findtext("Completed") == "true":
-                final_answer = root.findtext("FinalAnswer") or ""
-                verification = await litellm_completion(
-                    model=self.reasoner.model,
-                    messages=[{
-                        "role": "user",
-                        "content": f"Does '{final_answer}' solve '{task}' given history:\n{self._format_history(history, self.max_iterations)}?"
-                    }],
-                    max_tokens=100,
-                    temperature=0.1,
-                    stream=False # Non-streaming for quick verification
-                )
-                if "yes" in verification.lower():
-                    return True, final_answer
-                return True, final_answer
-        except etree.XMLSyntaxError:
-            pass
-
-        if success_criteria and (result_value := XMLResultHandler.extract_result_value(result)) and success_criteria in result_value:
-            return True, result_value
-        return False, ""
-
-    async def solve(
-        self,
-        task: str,
-        success_criteria: Optional[str] = None,
-        system_prompt: Optional[str] = None,
-        max_iterations: Optional[int] = None,
-        streaming: bool = False # New parameter for streaming
-    ) -> List[Dict]:
-        """Solve a task using the ReAct framework."""
-        max_iters = max_iterations if max_iterations is not None else self.max_iterations
-        history = []
-        self.history_store = [] # Reset for each new task
-        await self._notify_observers(TaskStartedEvent(event_type="TaskStarted", task_description=task))
-
-        for step in range(1, max_iters + 1):
-            await self._notify_observers(StepStartedEvent(event_type="StepStarted", step_number=step))
-            try:
-                response = await self.generate_action(task, history, step, max_iters, system_prompt, streaming=streaming)
-                thought, code = XMLResultHandler.parse_response(response)
-                result = await self.execute_action(code, step)
-                step_data = {"step_number": step, "thought": thought, "action": code, "result": result}
-                history.append(step_data)
-                self.history_store.append(step_data) # Store every step persistently
-
-                is_complete, final_answer = await self.is_task_complete(task, history, result, success_criteria)
-                if is_complete:
-                    history[-1]["result"] += f"\n<FinalAnswer><![CDATA[\n{final_answer}\n]]></FinalAnswer>"
+from .utils import process_tools
+
+
+@dataclass
+class AgentConfig:
+    """Comprehensive configuration for the Agent, loadable from a YAML file or direct arguments."""
+    model: str = "gemini/gemini-2.0-flash"
+    max_iterations: int = 5
+    tools: Optional[List[Union[Tool, Callable]]] = None
+    max_history_tokens: int = MAX_HISTORY_TOKENS
+    toolbox_directory: str = "toolboxes"
+    enabled_toolboxes: Optional[List[str]] = None
+    reasoner_name: str = "default"
+    executor_name: str = "default"
+    personality: Optional[str] = None
+    backstory: Optional[str] = None
+    sop: Optional[str] = None
+    jinja_env: Optional[Environment] = None
+    name: Optional[str] = None
+    tools_config: Optional[List[Dict[str, Any]]] = None
+    reasoner: Optional[Dict[str, Any]] = field(default_factory=lambda: {"name": "default"})
+    executor: Optional[Dict[str, Any]] = field(default_factory=lambda: {"name": "default"})
+    profile: Optional[str] = None
+    customizations: Optional[Dict[str, Any]] = None
+    agent_tool_model: str = "gemini/gemini-2.0-flash" # Configurable model for AgentTool
+    agent_tool_timeout: int = 30 # Configurable timeout for AgentTool
 
-                await self._notify_observers(StepCompletedEvent(
-                    event_type="StepCompleted", step_number=step, thought=thought,
-                    action=code, result=history[-1]["result"], is_complete=is_complete,
-                    final_answer=final_answer if is_complete else None
-                ))
-
-                if is_complete:
-                    await self._notify_observers(TaskCompletedEvent(
-                        event_type="TaskCompleted", final_answer=final_answer, reason="success"
-                    ))
-                    break
-            except Exception as e:
-                await self._notify_observers(ErrorOccurredEvent(
-                    event_type="ErrorOccurred", error_message=str(e), step_number=step
-                ))
-                break
-
-        if not any("<FinalAnswer>" in step["result"] for step in history):
-            await self._notify_observers(TaskCompletedEvent(
-                event_type="TaskCompleted", final_answer=None,
-                reason="max_iterations_reached" if len(history) == max_iters else "error"
-            ))
-        return history
-
-class Agent:
-    """High-level interface for the Quantalogic Agent, providing chat and solve functionalities."""
     def __init__(
         self,
         model: str = "gemini/gemini-2.0-flash",
-        tools: Optional[List[Tool]] = None,
         max_iterations: int = 5,
+        tools: Optional[List[Union[Tool, Callable]]] = None,
+        max_history_tokens: int = MAX_HISTORY_TOKENS,
+        toolbox_directory: str = "toolboxes",
+        enabled_toolboxes: Optional[List[str]] = None,
+        reasoner_name: str = "default",
+        executor_name: str = "default",
        personality: Optional[str] = None,
         backstory: Optional[str] = None,
         sop: Optional[str] = None,
-
-
+        jinja_env: Optional[Environment] = None,
+        config_file: Optional[str] = None,
+        name: Optional[str] = None,
+        tools_config: Optional[List[Dict[str, Any]]] = None,
+        reasoner: Optional[Dict[str, Any]] = None,
+        executor: Optional[Dict[str, Any]] = None,
+        profile: Optional[str] = None,
+        customizations: Optional[Dict[str, Any]] = None,
+        agent_tool_model: str = "gemini/gemini-2.0-flash",
+        agent_tool_timeout: int = 30
+    ) -> None:
+        """Initialize configuration from arguments or a YAML file."""
+        if config_file:
+            try:
+                with open(Path(__file__).parent / config_file) as f:
+                    config: Dict = yaml.safe_load(f) or {}
+                self._load_from_config(config, model, max_iterations, max_history_tokens, toolbox_directory,
+                                       tools, enabled_toolboxes, reasoner_name, executor_name, personality,
+                                       backstory, sop, jinja_env, name, tools_config, reasoner, executor,
+                                       profile, customizations, agent_tool_model, agent_tool_timeout)
+            except FileNotFoundError as e:
+                logger.warning(f"Config file {config_file} not found: {e}. Using defaults.")
+                self._set_defaults(model, max_iterations, max_history_tokens, toolbox_directory,
+                                   tools, enabled_toolboxes, reasoner_name, executor_name, personality,
+                                   backstory, sop, jinja_env, name, tools_config, reasoner, executor,
+                                   profile, customizations, agent_tool_model, agent_tool_timeout)
+            except yaml.YAMLError as e:
+                logger.error(f"Error parsing YAML config {config_file}: {e}. Falling back to defaults.")
+                self._set_defaults(model, max_iterations, max_history_tokens, toolbox_directory,
+                                   tools, enabled_toolboxes, reasoner_name, executor_name, personality,
+                                   backstory, sop, jinja_env, name, tools_config, reasoner, executor,
+                                   profile, customizations, agent_tool_model, agent_tool_timeout)
+        else:
+            self._set_defaults(model, max_iterations, max_history_tokens, toolbox_directory,
+                               tools, enabled_toolboxes, reasoner_name, executor_name, personality,
+                               backstory, sop, jinja_env, name, tools_config, reasoner, executor,
+                               profile, customizations, agent_tool_model, agent_tool_timeout)
+        self.__post_init__()
+
+    def _load_from_config(self, config: Dict, *args) -> None:
+        """Load configuration from a dictionary, overriding with explicit arguments if provided."""
+        model, max_iterations, max_history_tokens, toolbox_directory, tools, enabled_toolboxes, \
+            reasoner_name, executor_name, personality, backstory, sop, jinja_env, name, tools_config, \
+            reasoner, executor, profile, customizations, agent_tool_model, agent_tool_timeout = args
+
+        self.model = config.get("model", model)
+        self.max_iterations = config.get("max_iterations", max_iterations)
+        self.max_history_tokens = config.get("max_history_tokens", max_history_tokens)
+        self.toolbox_directory = config.get("toolbox_directory", toolbox_directory)
+        self.tools = tools if tools is not None else config.get("tools")
+        self.enabled_toolboxes = config.get("enabled_toolboxes", enabled_toolboxes)
+        self.reasoner = config.get("reasoner", {"name": config.get("reasoner_name", reasoner_name)})
+        self.executor = config.get("executor", {"name": config.get("executor_name", executor_name)})
+        self.personality = config.get("personality", personality)
+        self.backstory = config.get("backstory", backstory)
+        self.sop = config.get("sop", sop)
+        self.jinja_env = jinja_env or default_jinja_env
+        self.name = config.get("name", name)
+        self.tools_config = config.get("tools_config", tools_config)
+        self.profile = config.get("profile", profile)
+        self.customizations = config.get("customizations", customizations)
+        self.agent_tool_model = config.get("agent_tool_model", agent_tool_model)
+        self.agent_tool_timeout = config.get("agent_tool_timeout", agent_tool_timeout)
+
+    def _set_defaults(self, model, max_iterations, max_history_tokens, toolbox_directory,
+                      tools, enabled_toolboxes, reasoner_name, executor_name, personality,
+                      backstory, sop, jinja_env, name, tools_config, reasoner, executor,
+                      profile, customizations, agent_tool_model, agent_tool_timeout) -> None:
+        """Set default values for all configuration fields."""
         self.model = model
-        self.default_tools = tools if tools is not None else get_default_tools(model)
         self.max_iterations = max_iterations
+        self.max_history_tokens = max_history_tokens
+        self.toolbox_directory = toolbox_directory
+        self.tools = tools
+        self.enabled_toolboxes = enabled_toolboxes
+        self.reasoner = reasoner if reasoner is not None else {"name": reasoner_name}
+        self.executor = executor if executor is not None else {"name": executor_name}
         self.personality = personality
         self.backstory = backstory
         self.sop = sop
-        self.
+        self.jinja_env = jinja_env or default_jinja_env
+        self.name = name
+        self.tools_config = tools_config
+        self.profile = profile
+        self.customizations = customizations
+        self.agent_tool_model = agent_tool_model
+        self.agent_tool_timeout = agent_tool_timeout
+
+    def __post_init__(self) -> None:
+        """Apply profile defaults and customizations after initialization."""
+        profiles = {
+            "math_expert": {
+                "personality": {"traits": ["precise", "logical"]},
+                "tools_config": [{"name": "math_tools", "enabled": True}],
+                "sop": "Focus on accuracy and clarity in mathematical solutions."
+            },
+            "creative_writer": {
+                "personality": {"traits": ["creative", "expressive"]},
+                "tools_config": [{"name": "text_tools", "enabled": True}],
+                "sop": "Generate engaging and imaginative content."
+            }
+        }
+        if self.profile and self.profile in profiles:
+            base_config = profiles[self.profile]
+            for key, value in base_config.items():
+                if not getattr(self, key) or (key == "personality" and isinstance(getattr(self, key), str)):
+                    setattr(self, key, value)
+        if self.customizations:
+            for key, value in self.customizations.items():
+                if hasattr(self, key):
+                    current = getattr(self, key)
+                    if isinstance(current, dict):
+                        current.update(value)
+                    elif current is None or (key in ["personality", "backstory"] and isinstance(current, str)):
+                        setattr(self, key, value)
+
+class Agent:
+    """High-level interface for the Quantalogic Agent with unified configuration."""
+    def __init__(
+        self,
+        config: Union[AgentConfig, str, None] = None
+    ) -> None:
+        """Initialize the agent with a configuration."""
+        try:
+            if isinstance(config, str):
+                config = AgentConfig(config_file=config)
+            elif config is None:
+                config = AgentConfig()
+            elif not isinstance(config, AgentConfig):
+                raise ValueError("Config must be an AgentConfig instance or a string path to a config file.")
+        except Exception as e:
+            logger.error(f"Failed to initialize config: {e}. Using default configuration.")
+            config = AgentConfig()
+
+        self.config = config
+        self.plugin_manager = PluginManager()
+        try:
+            self.plugin_manager.load_plugins()
+        except Exception as e:
+            logger.error(f"Failed to load plugins: {e}")
+        self.model: str = config.model
+        self.default_tools: List[Tool] = self._get_tools()
+        self.max_iterations: int = config.max_iterations
+        self.personality = config.personality
+        self.backstory = config.backstory
+        self.sop: Optional[str] = config.sop
+        self.name: Optional[str] = config.name
+        self.max_history_tokens: int = config.max_history_tokens
+        self.jinja_env: Environment = config.jinja_env
         self._observers: List[Tuple[Callable, List[str]]] = []
-        # New attribute to store context_vars from the last solve call
         self.last_solve_context_vars: Dict = {}
+        self.default_reasoner_name: str = config.reasoner.get("name", config.reasoner_name)
+        self.default_executor_name: str = config.executor.get("name", config.executor_name)
+
+    def _get_tools(self) -> List[Tool]:
+        """Load tools, applying tools_config if provided."""
+        try:
+            base_tools = (
+                process_tools(self.config.tools)
+                if self.config.tools is not None
+                else get_default_tools(self.model, enabled_toolboxes=self.config.enabled_toolboxes)
+            )
+            if not self.config.tools_config:
+                return base_tools
+
+            self._resolve_secrets(self.config.tools_config)
+            filtered_tools = []
+            processed_names = set()
+            for tool_conf in self.config.tools_config:
+                tool_name = tool_conf.get("name")
+                if tool_conf.get("enabled", True):
+                    tool = next((t for t in base_tools if t.name == tool_name or t.toolbox_name == tool_name), None)
+                    if tool and tool.name not in processed_names:
+                        for key, value in tool_conf.items():
+                            if key not in ["name", "enabled"]:
+                                setattr(tool, key, value)
+                        filtered_tools.append(tool)
+                        processed_names.add(tool.name)
+            for tool in base_tools:
+                if tool.name not in processed_names:
+                    filtered_tools.append(tool)
+            logger.info(f"Loaded {len(filtered_tools)} tools successfully.")
+            return filtered_tools
+        except Exception as e:
+            logger.error(f"Error loading tools: {e}. Returning empty toolset.")
+            return []
+
+    def _resolve_secrets(self, config_dict: List[Dict[str, Any]]) -> None:
+        """Resolve environment variable placeholders in tools_config."""
+        try:
+            for item in config_dict:
+                for key, value in item.items():
+                    if isinstance(value, str) and "{{ env." in value:
+                        env_var = value.split("{{ env.")[1].split("}}")[0]
+                        item[key] = os.getenv(env_var, value)
+                    elif isinstance(value, dict):
+                        self._resolve_secrets([value])
+        except Exception as e:
+            logger.error(f"Error resolving secrets in tools_config: {e}")
 
     def _build_system_prompt(self) -> str:
-        """
-
-
-
-
-
-
-
-
+        """Build a system prompt based on name, personality, backstory, and SOP."""
+        try:
+            prompt = f"I am {self.name}, an AI assistant." if self.name else "You are an AI assistant."
+            if self.personality:
+                if isinstance(self.personality, str):
+                    prompt += f" I have a {self.personality} personality."
+                elif isinstance(self.personality, dict):
+                    traits = self.personality.get("traits", [])
+                    if traits:
+                        prompt += f" I have the following personality traits: {', '.join(traits)}."
+                    tone = self.personality.get("tone")
+                    if tone:
+                        prompt += f" My tone is {tone}."
+                    humor = self.personality.get("humor_level")
+                    if humor:
+                        prompt += f" My humor level is {humor}."
+            if self.backstory:
+                if isinstance(self.backstory, str):
+                    prompt += f" My backstory is: {self.backstory}"
+                elif isinstance(self.backstory, dict):
+                    origin = self.backstory.get("origin")
+                    if origin:
+                        prompt += f" I was created by {origin}."
+                    purpose = self.backstory.get("purpose")
+                    if purpose:
+                        prompt += f" My purpose is {purpose}."
+                    experience = self.backstory.get("experience")
+                    if experience:
+                        prompt += f" My experience includes: {experience}"
+            if self.sop:
+                prompt += f" Follow this standard operating procedure: {self.sop}"
+            return prompt
+        except Exception as e:
+            logger.error(f"Error building system prompt: {e}. Using default.")
+            return "You are an AI assistant."
 
     async def chat(
         self,
         message: str,
         use_tools: bool = False,
-        tools: Optional[List[Tool]] = None,
+        tools: Optional[List[Union[Tool, Callable]]] = None,
         timeout: int = 30,
         max_tokens: int = MAX_TOKENS,
         temperature: float = 0.7,
-        streaming: bool = False
+        streaming: bool = False,
+        reasoner_name: Optional[str] = None,
+        executor_name: Optional[str] = None
     ) -> str:
         """Single-step interaction with optional custom tools and streaming."""
-        system_prompt = self._build_system_prompt()
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        system_prompt: str = self._build_system_prompt()
+        try:
+            if use_tools:
+                chat_tools: List[Tool] = process_tools(tools) if tools is not None else self.default_tools
+                reasoner_name = reasoner_name or self.default_reasoner_name
+                executor_name = executor_name or self.default_executor_name
+                reasoner_cls = self.plugin_manager.reasoners.get(reasoner_name, Reasoner)
+                executor_cls = self.plugin_manager.executors.get(executor_name, Executor)
+                reasoner_config = self.config.reasoner.get("config", {})
+                executor_config = self.config.executor.get("config", {})
+                chat_agent = ReActAgent(
+                    model=self.model,
+                    tools=chat_tools,
+                    max_iterations=1,
+                    max_history_tokens=self.max_history_tokens,
+                    reasoner=reasoner_cls(self.model, chat_tools, **reasoner_config),
+                    executor=executor_cls(chat_tools, self._notify_observers, **executor_config)
+                )
+                chat_agent.executor.register_tool(RetrieveStepTool(chat_agent.history_manager.store))
+                for observer, event_types in self._observers:
+                    chat_agent.add_observer(observer, event_types)
+                history: List[Dict] = await chat_agent.solve(message, system_prompt=system_prompt, streaming=streaming)
+                return self._extract_response(history)
+            else:
+                response: str = await litellm_completion(
+                    model=self.model,
+                    messages=[
+                        {"role": "system", "content": system_prompt},
+                        {"role": "user", "content": message}
+                    ],
+                    max_tokens=max_tokens,
+                    temperature=temperature,
+                    stream=streaming,
+                    notify_event=self._notify_observers if streaming else None
+                )
+                return response.strip()
+        except Exception as e:
+            logger.error(f"Chat failed: {e}")
+            return f"Error: Unable to process chat request due to {str(e)}"
 
     def sync_chat(self, message: str, timeout: int = 30) -> str:
         """Synchronous wrapper for chat."""
-
+        try:
+            return asyncio.run(self.chat(message, timeout=timeout))
+        except Exception as e:
+            logger.error(f"Synchronous chat failed: {e}")
+            return f"Error: {str(e)}"
 
     async def solve(
         self,
         task: str,
         success_criteria: Optional[str] = None,
         max_iterations: Optional[int] = None,
-        tools: Optional[List[Tool]] = None,
+        tools: Optional[List[Union[Tool, Callable]]] = None,
         timeout: int = 300,
-        streaming: bool = False
+        streaming: bool = False,
+        reasoner_name: Optional[str] = None,
+        executor_name: Optional[str] = None
     ) -> List[Dict]:
         """Multi-step task solving with optional custom tools, max_iterations, and streaming."""
-        system_prompt = self._build_system_prompt()
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        system_prompt: str = self._build_system_prompt()
+        try:
+            solve_tools: List[Tool] = process_tools(tools) if tools is not None else self.default_tools
+            reasoner_name = reasoner_name or self.default_reasoner_name
+            executor_name = executor_name or self.default_executor_name
+            reasoner_cls = self.plugin_manager.reasoners.get(reasoner_name, Reasoner)
+            executor_cls = self.plugin_manager.executors.get(executor_name, Executor)
+            reasoner_config = self.config.reasoner.get("config", {})
+            executor_config = self.config.executor.get("config", {})
+            solve_agent = ReActAgent(
+                model=self.model,
+                tools=solve_tools,
+                max_iterations=max_iterations if max_iterations is not None else self.max_iterations,
+                max_history_tokens=self.max_history_tokens,
+                reasoner=reasoner_cls(self.model, solve_tools, **reasoner_config),
+                executor=executor_cls(solve_tools, self._notify_observers, **executor_config)
+            )
+            solve_agent.executor.register_tool(RetrieveStepTool(solve_agent.history_manager.store))
+            for observer, event_types in self._observers:
+                solve_agent.add_observer(observer, event_types)
+
+            history: List[Dict] = await solve_agent.solve(
+                task,
+                success_criteria,
+                system_prompt=system_prompt,
+                max_iterations=max_iterations,
+                streaming=streaming
+            )
+            self.last_solve_context_vars = solve_agent.context_vars.copy()
+            return history
+        except Exception as e:
+            logger.error(f"Solve failed: {e}")
+            return [{"error": f"Failed to solve task: {str(e)}"}]
 
     def sync_solve(self, task: str, success_criteria: Optional[str] = None, timeout: int = 300) -> List[Dict]:
         """Synchronous wrapper for solve."""
-
+        try:
+            return asyncio.run(self.solve(task, success_criteria, timeout=timeout))
+        except Exception as e:
+            logger.error(f"Synchronous solve failed: {e}")
+            return [{"error": f"Failed to solve task synchronously: {str(e)}"}]
 
     def add_observer(self, observer: Callable, event_types: List[str]) -> 'Agent':
         """Add an observer to be applied to agents created in chat and solve."""
         self._observers.append((observer, event_types))
         return self
 
+    def register_tool(self, tool: Tool) -> None:
+        """Register a new tool dynamically at runtime."""
+        try:
+            if tool.name in [t.name for t in self.default_tools]:
+                raise ValueError(f"Tool '{tool.name}' is already registered")
+            self.default_tools.append(tool)
+            self.plugin_manager.tools.register(tool)
+        except Exception as e:
+            logger.error(f"Failed to register tool {tool.name}: {e}")
+
+    def register_reasoner(self, reasoner: BaseReasoner, name: str) -> None:
+        """Register a new reasoner dynamically at runtime."""
+        try:
+            self.plugin_manager.reasoners[name] = reasoner.__class__
+        except Exception as e:
+            logger.error(f"Failed to register reasoner {name}: {e}")
+
+    def register_executor(self, executor: BaseExecutor, name: str) -> None:
+        """Register a new executor dynamically at runtime."""
+        try:
+            self.plugin_manager.executors[name] = executor.__class__
+        except Exception as e:
+            logger.error(f"Failed to register executor {name}: {e}")
+
     def list_tools(self) -> List[str]:
         """Return a list of available tool names."""
         return [tool.name for tool in self.default_tools]
@@ -479,21 +454,25 @@ class Agent:
         """Extract a clean response from the history."""
         if not history:
             return "No response generated."
-        last_result = history[-1]
+        last_result: str = history[-1].get("result", "")
         try:
             root = etree.fromstring(last_result)
             if root.findtext("Status") == "Success":
-                value = root.findtext("Value") or ""
-                final_answer = root.findtext("FinalAnswer")
+                value: str = root.findtext("Value") or ""
+                final_answer: Optional[str] = root.findtext("FinalAnswer")
                 return final_answer.strip() if final_answer else value.strip()
             else:
                 return f"Error: {root.findtext('Value') or 'Unknown error'}"
-        except etree.XMLSyntaxError:
+        except etree.XMLSyntaxError as e:
+            logger.error(f"Failed to parse response XML: {e}")
             return last_result
 
-    async def _notify_observers(self, event):
+    async def _notify_observers(self, event: object) -> None:
         """Notify all subscribed observers of an event."""
-
-
-
-
+        try:
+            await asyncio.gather(
+                *(observer(event) for observer, types in self._observers if event.event_type in types),
+                return_exceptions=True
+            )
+        except Exception as e:
+            logger.error(f"Error notifying observers: {e}")
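
For orientation, the sketch below exercises the reworked configuration-driven Agent API shown in the quantalogic/codeact/agent.py diff above. It is a minimal, untested sketch based only on that diff: the toolbox name math_tools, the api_key field, and the MATH_API_KEY environment variable are illustrative placeholders rather than shipped names, and a config_file path such as "config.yaml" is resolved relative to the installed quantalogic/codeact package directory.

# Example usage (not part of the diff): minimal sketch of the 0.80 Agent API.
from quantalogic.codeact.agent import Agent, AgentConfig

config = AgentConfig(
    model="gemini/gemini-2.0-flash",
    max_iterations=5,
    profile="math_expert",  # built-in profile applied by AgentConfig.__post_init__
    tools_config=[
        # "{{ env.MATH_API_KEY}}" is substituted from the environment by _resolve_secrets()
        {"name": "math_tools", "enabled": True, "api_key": "{{ env.MATH_API_KEY}}"},
    ],
)

agent = Agent(config=config)  # also accepts Agent("config.yaml") or Agent()
print(agent.list_tools())

reply = agent.sync_chat("What is 2 + 2?")                    # single-step chat
steps = agent.sync_solve("Integrate x**2 between 0 and 1")   # multi-step ReAct loop
print(steps[-1].get("result", steps[-1]) if steps else "no steps")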