kite-agent 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kite/__init__.py +46 -0
- kite/ab_testing.py +384 -0
- kite/agent.py +556 -0
- kite/agents/__init__.py +3 -0
- kite/agents/plan_execute.py +191 -0
- kite/agents/react_agent.py +509 -0
- kite/agents/reflective_agent.py +90 -0
- kite/agents/rewoo.py +119 -0
- kite/agents/tot.py +151 -0
- kite/conversation.py +125 -0
- kite/core.py +974 -0
- kite/data_loaders.py +111 -0
- kite/embedding_providers.py +372 -0
- kite/llm_providers.py +1278 -0
- kite/memory/__init__.py +6 -0
- kite/memory/advanced_rag.py +333 -0
- kite/memory/graph_rag.py +719 -0
- kite/memory/session_memory.py +423 -0
- kite/memory/vector_memory.py +579 -0
- kite/monitoring.py +611 -0
- kite/observers.py +107 -0
- kite/optimization/__init__.py +9 -0
- kite/optimization/resource_router.py +80 -0
- kite/persistence.py +42 -0
- kite/pipeline/__init__.py +5 -0
- kite/pipeline/deterministic_pipeline.py +323 -0
- kite/pipeline/reactive_pipeline.py +171 -0
- kite/pipeline_manager.py +15 -0
- kite/routing/__init__.py +6 -0
- kite/routing/aggregator_router.py +325 -0
- kite/routing/llm_router.py +149 -0
- kite/routing/semantic_router.py +228 -0
- kite/safety/__init__.py +6 -0
- kite/safety/circuit_breaker.py +360 -0
- kite/safety/guardrails.py +82 -0
- kite/safety/idempotency_manager.py +304 -0
- kite/safety/kill_switch.py +75 -0
- kite/tool.py +183 -0
- kite/tool_registry.py +87 -0
- kite/tools/__init__.py +21 -0
- kite/tools/code_execution.py +53 -0
- kite/tools/contrib/__init__.py +19 -0
- kite/tools/contrib/calculator.py +26 -0
- kite/tools/contrib/datetime_utils.py +20 -0
- kite/tools/contrib/linkedin.py +428 -0
- kite/tools/contrib/web_search.py +30 -0
- kite/tools/mcp/__init__.py +31 -0
- kite/tools/mcp/database_mcp.py +267 -0
- kite/tools/mcp/gdrive_mcp_server.py +503 -0
- kite/tools/mcp/gmail_mcp_server.py +601 -0
- kite/tools/mcp/postgres_mcp_server.py +490 -0
- kite/tools/mcp/slack_mcp_server.py +538 -0
- kite/tools/mcp/stripe_mcp_server.py +219 -0
- kite/tools/search.py +90 -0
- kite/tools/system_tools.py +54 -0
- kite/tools_manager.py +27 -0
- kite_agent-0.1.0.dist-info/METADATA +621 -0
- kite_agent-0.1.0.dist-info/RECORD +61 -0
- kite_agent-0.1.0.dist-info/WHEEL +5 -0
- kite_agent-0.1.0.dist-info/licenses/LICENSE +21 -0
- kite_agent-0.1.0.dist-info/top_level.txt +1 -0
kite/agent.py
ADDED
|
@@ -0,0 +1,556 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import asyncio
|
|
3
|
+
import re
|
|
4
|
+
import time
|
|
5
|
+
from typing import List, Dict, Optional, Callable
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class Agent:
    """
    High-reliability agent with native tool calling and direct extraction fallback.
    """

    def __init__(self,
                 name: str,
                 system_prompt: str,
                 tools: List,
                 framework,
                 llm=None,
                 max_iterations: int = 10,
                 knowledge_sources: Optional[List[str]] = None,
                 verbose: bool = False,
                 agent_type: str = "simple"):
        """Create an agent bound to a framework.

        Args:
            name: Display/registry name of the agent.
            system_prompt: Base system prompt prepended to every run.
            tools: Tool objects; each must expose a unique ``name`` attribute.
            framework: Host framework (supplies event bus, metrics, and
                optionally a default LLM).
            llm: Explicit LLM provider. Falls back to ``framework.llm``.
            max_iterations: Cap on ReAct loop iterations.
            knowledge_sources: Names of knowledge-base sources to inject.
            verbose: Print diagnostic output when True.
            agent_type: "simple" for the fast single-pass loop, anything
                else runs the full ReAct loop.

        Raises:
            ValueError: If no LLM is available from either argument.
        """
        self.name = name
        self.system_prompt = system_prompt
        # Logic: Explicit LLM > Framework LLM > Error
        self.llm = llm or getattr(framework, 'llm', None)
        if not self.llm:
            raise ValueError("Agent requires an LLM. Pass 'llm' explicitly or provide a 'framework' with an initialized LLM.")
        # Index tools by name for O(1) dispatch during tool execution.
        self.tools = {tool.name: tool for tool in tools}
        self.framework = framework
        self.max_iterations = max_iterations
        self.knowledge_sources = knowledge_sources or []
        self.verbose = verbose
        self.agent_type = agent_type

        # Stats
        self.call_count = 0
        self.success_count = 0
        self.metadata = {
            # Prefer a provider 'name', fall back to 'model', else 'unknown'.
            "llm": getattr(self.llm, 'name', getattr(self.llm, 'model', 'unknown'))
        }

        # Log creation
        if self.verbose:
            print(f" Agent '{self.name}' initialized (LLM: {self.metadata['llm']})")
|
|
46
|
+
|
|
47
|
+
def record_outcome(self, outcome_type: str):
    """Record a domain-specific outcome (e.g., 'lead', 'reject')."""
    # Forward to the framework-wide metrics collector, tagged with this agent.
    metrics = self.framework.metrics
    metrics.record_outcome(self.name, outcome_type)
|
|
50
|
+
|
|
51
|
+
async def run(self, user_input: str, context: Optional[Dict] = None) -> Dict:
    """
    Run agent on input using Native Tool Calling (if supported) or ReAct fallback.

    Args:
        user_input: The user's request.
        context: Optional extra context passed through to the strategy.

    Returns:
        A result dict produced by ``_run_simple`` or ``_run_react``.
    """
    self.call_count += 1
    self.framework.event_bus.emit("agent:run:start", {"agent": self.name, "input": user_input})

    # Dispatch on configured strategy. Timing/metrics bookkeeping lives in
    # the strategy implementations themselves (previously dead locals here
    # — start_time/success/error_type — were set but never used).
    if self.agent_type == "simple":
        return await self._run_simple(user_input, context)
    return await self._run_react(user_input, context)
|
|
65
|
+
|
|
66
|
+
async def _run_simple(self, user_input: str, context: Optional[Dict] = None) -> Dict:
|
|
67
|
+
"""Single-pass/Fast-pass version for maximum speed with tool support."""
|
|
68
|
+
system_p = self.system_prompt
|
|
69
|
+
if context:
|
|
70
|
+
system_p += f"\n\nContext: {context}"
|
|
71
|
+
|
|
72
|
+
if self.tools:
|
|
73
|
+
system_p += "\n\n### AVAILABLE TOOLS:\n"
|
|
74
|
+
for tool in self.tools.values():
|
|
75
|
+
# Provide simplified arg names for better LLM adherence in simple mode
|
|
76
|
+
defn = tool.get_definition()
|
|
77
|
+
params = list(defn.get('parameters', {}).keys())
|
|
78
|
+
system_p += f"- {tool.name}: {tool.description} (Expected Arguments: {params})\n"
|
|
79
|
+
|
|
80
|
+
system_p += """
|
|
81
|
+
### OPERATIONAL RULES:
|
|
82
|
+
1. **Tool Use**: If you need information from a tool, you MUST use an Action.
|
|
83
|
+
2. **Format**: Use the following format:
|
|
84
|
+
Action: [{"name": "...", "args": {...}}]
|
|
85
|
+
3. **No Placeholders**: Never guess or hallucinate data. If you don't know, use a Tool or say you don't know.
|
|
86
|
+
4. **Final Answer**: Once you have all info, provide it after 'Final Answer:'.
|
|
87
|
+
"""
|
|
88
|
+
|
|
89
|
+
messages = [
|
|
90
|
+
{"role": "system", "content": system_p},
|
|
91
|
+
{"role": "user", "content": user_input}
|
|
92
|
+
]
|
|
93
|
+
|
|
94
|
+
try:
|
|
95
|
+
last_response = ""
|
|
96
|
+
iterations = 0
|
|
97
|
+
# Simple mode allows up to 3 steps (e.g. Search -> Process -> Final Answer)
|
|
98
|
+
for i in range(3):
|
|
99
|
+
iterations = i + 1
|
|
100
|
+
response = await self._call_llm(messages)
|
|
101
|
+
last_response = response.strip()
|
|
102
|
+
messages.append({"role": "assistant", "content": last_response})
|
|
103
|
+
|
|
104
|
+
# Simple visibility
|
|
105
|
+
if "Action:" in last_response or "Final Answer:" in last_response:
|
|
106
|
+
border = "-" * 40
|
|
107
|
+
print(f"\n {border}\n [ {self.name} (SIMPLE) ] Step {iterations}\n {border}")
|
|
108
|
+
for line in last_response.split('\n'):
|
|
109
|
+
if line.strip(): print(f" | {line.strip()}")
|
|
110
|
+
print(f" {border}\n")
|
|
111
|
+
|
|
112
|
+
# Tool extraction
|
|
113
|
+
tool_calls = await self._extract_tool_calls(last_response, context=context)
|
|
114
|
+
if not tool_calls:
|
|
115
|
+
break
|
|
116
|
+
|
|
117
|
+
# Execute tools
|
|
118
|
+
observation_text = "Observations:\n"
|
|
119
|
+
for tool_name, tool_args in tool_calls:
|
|
120
|
+
if tool_name in self.tools:
|
|
121
|
+
try:
|
|
122
|
+
cleaned_args = {str(k): v for k, v in tool_args.items()} if isinstance(tool_args, dict) else {}
|
|
123
|
+
print(f" | [ACTION] {tool_name}({tool_args})")
|
|
124
|
+
# Simple mode uses basic execute
|
|
125
|
+
# Handle both sync and async tools
|
|
126
|
+
if asyncio.iscoroutinefunction(self.tools[tool_name].execute):
|
|
127
|
+
result = await self.tools[tool_name].execute(**cleaned_args)
|
|
128
|
+
else:
|
|
129
|
+
result = await asyncio.to_thread(self.tools[tool_name].execute, **cleaned_args)
|
|
130
|
+
|
|
131
|
+
observation_text += f"- Tool '{tool_name}' returned: {result}\n"
|
|
132
|
+
except Exception as e:
|
|
133
|
+
observation_text += f"- Tool '{tool_name}' FAILED: {str(e)}\n"
|
|
134
|
+
|
|
135
|
+
messages.append({"role": "user", "content": observation_text})
|
|
136
|
+
# Maximum 2 action interactions in 'simple' mode to keep it fast
|
|
137
|
+
if i == 1: continue
|
|
138
|
+
|
|
139
|
+
# Extract final answer
|
|
140
|
+
clean_answer = last_response
|
|
141
|
+
final_answer_match = re.search(r"Final Answer:\s*([\s\S]+)", last_response, re.IGNORECASE)
|
|
142
|
+
if final_answer_match:
|
|
143
|
+
clean_answer = final_answer_match.group(1).strip()
|
|
144
|
+
|
|
145
|
+
self.success_count += 1
|
|
146
|
+
return {
|
|
147
|
+
"success": True,
|
|
148
|
+
"response": clean_answer,
|
|
149
|
+
"agent": self.name,
|
|
150
|
+
"type": "simple",
|
|
151
|
+
"iterations": iterations
|
|
152
|
+
}
|
|
153
|
+
except Exception as e:
|
|
154
|
+
return {"success": False, "error": str(e), "response": f"Error: {str(e)}", "agent": self.name}
|
|
155
|
+
|
|
156
|
+
async def _call_llm(self, messages: List[Dict]) -> str:
|
|
157
|
+
"""Centralized LLM call handles both Chat and Completion APIs."""
|
|
158
|
+
if hasattr(self.llm, 'chat_async'):
|
|
159
|
+
return await self.llm.chat_async(messages)
|
|
160
|
+
|
|
161
|
+
# Fallback to complete (build text prompt)
|
|
162
|
+
prompt = "\n".join([f"{m['role'].upper()}: {m['content']}" for m in messages]) + "\nASSISTANT:"
|
|
163
|
+
|
|
164
|
+
if hasattr(self.llm, 'complete_async'):
|
|
165
|
+
return await self.llm.complete_async(prompt)
|
|
166
|
+
|
|
167
|
+
# Sync fallbacks
|
|
168
|
+
if hasattr(self.llm, 'chat'):
|
|
169
|
+
return await asyncio.to_thread(self.llm.chat, messages)
|
|
170
|
+
|
|
171
|
+
return await asyncio.to_thread(self.llm.complete, prompt)
|
|
172
|
+
|
|
173
|
+
async def _run_react(self, user_input: str, context: Optional[Dict] = None) -> Dict:
|
|
174
|
+
"""Multi-iteration loop version with tool-calling and self-healing."""
|
|
175
|
+
try:
|
|
176
|
+
native_tools = [t.to_schema() for t in self.tools.values()] if self.tools else []
|
|
177
|
+
messages = [{"role": "system", "content": self.system_prompt}, {"role": "user", "content": user_input}]
|
|
178
|
+
|
|
179
|
+
# Context Injection
|
|
180
|
+
if context:
|
|
181
|
+
messages[0]["content"] += f"\n\nContext: {context}"
|
|
182
|
+
|
|
183
|
+
# Knowledge Retrieval
|
|
184
|
+
knowledge_context = ""
|
|
185
|
+
if self.knowledge_sources and hasattr(self.framework, 'knowledge'):
|
|
186
|
+
for source_name in self.knowledge_sources:
|
|
187
|
+
data = self.framework.knowledge.data.get(source_name)
|
|
188
|
+
if data:
|
|
189
|
+
matched = []
|
|
190
|
+
categories = []
|
|
191
|
+
for key, val in data.items():
|
|
192
|
+
categories.append(key)
|
|
193
|
+
if key.lower() in user_input.lower():
|
|
194
|
+
matched.append(f"- [{source_name}] {key}: {val}")
|
|
195
|
+
|
|
196
|
+
if matched:
|
|
197
|
+
knowledge_context += f"\n### KNOWLEDGE CONTEXT ({source_name}):\n"
|
|
198
|
+
knowledge_context += "\n".join(matched) + "\n"
|
|
199
|
+
else:
|
|
200
|
+
knowledge_context += f"\n### AVAILABLE KNOWLEDGE CATEGORIES in {source_name}:\n"
|
|
201
|
+
knowledge_context += f"You have expert expertise in: {', '.join(categories)}.\n"
|
|
202
|
+
|
|
203
|
+
# Vector Memory
|
|
204
|
+
if hasattr(self.framework, 'vector_memory'):
|
|
205
|
+
try:
|
|
206
|
+
mem_results = self.framework.vector_memory.search(user_input, k=3)
|
|
207
|
+
for _, text, dist in mem_results:
|
|
208
|
+
if dist < 0.5:
|
|
209
|
+
knowledge_context += f"- [Memory:{source_name}] {text[:200]}...\n"
|
|
210
|
+
except: pass
|
|
211
|
+
|
|
212
|
+
if knowledge_context:
|
|
213
|
+
messages[0]["content"] += f"\n\n{knowledge_context}"
|
|
214
|
+
self.framework.event_bus.emit("knowledge:retrieved", {
|
|
215
|
+
"agent": self.name,
|
|
216
|
+
"context": knowledge_context,
|
|
217
|
+
"message": "Domain expertise injected from Knowledge Base"
|
|
218
|
+
})
|
|
219
|
+
|
|
220
|
+
# Append Tool Info for Legacy mode anyway (some models need it even with native tools, or as backup)
|
|
221
|
+
if self.tools:
|
|
222
|
+
tool_info = f"\n\nAVAILABLE TOOLS:\n"
|
|
223
|
+
for tool in self.tools.values():
|
|
224
|
+
tool_info += f"- {tool.name}: {tool.description}\n"
|
|
225
|
+
# Add ReAct instructions slightly modified to stay compatible
|
|
226
|
+
tool_info += "\nNOTE: You can use tools natively if supported, OR output 'Action: [{...}]' JSON."
|
|
227
|
+
|
|
228
|
+
# Upstream prompt logic
|
|
229
|
+
memory_text = ""
|
|
230
|
+
# Could read memory here if we had logic, but assuming upstream conflict block had it:
|
|
231
|
+
|
|
232
|
+
tool_info += f"""
|
|
233
|
+
|
|
234
|
+
### CRITICAL OPERATIONAL RULES:
|
|
235
|
+
1. **Mental Lock**: If a fact is in CURRENT MEMORY, you MUST NOT ask for it again. PROPOSING A REDUNDANT TOOL IS A FAILURE.
|
|
236
|
+
2. **True Termination**: You MUST provide a "Final Answer: [result]" and STOP ONLY when the user's request is 100% fulfilled.
|
|
237
|
+
3. **Evidence-Based Checklist**: When marking a task as [Done], you MUST include the SPECIFIC data retrieved.
|
|
238
|
+
- Good: [Done] Found order ORD-001: Status is Shipped, delivery tomorrow.
|
|
239
|
+
4. **Action Format**: Action: [{{"name": "...", "args": {{...}}}}] (Only if NEW data is needed)
|
|
240
|
+
5. **No Placeholders**: Never use "Final Answer: None" or "Final Answer: Not yet". If you aren't done, ONLY provide an Action or Thought.
|
|
241
|
+
|
|
242
|
+
### Reasoning Format:
|
|
243
|
+
Thought:
|
|
244
|
+
Goal: [objective]
|
|
245
|
+
Checklist:
|
|
246
|
+
- [status] task 1 (include specific data if Done)
|
|
247
|
+
Reasoning: [Analysis of Memory vs Goal]
|
|
248
|
+
Action: [{{"name": "...", "args": {{...}}}}] (OMIT IF GOAL IS REACHED)
|
|
249
|
+
Final Answer: [The final response to the user]
|
|
250
|
+
"""
|
|
251
|
+
messages[0]["content"] += tool_info
|
|
252
|
+
|
|
253
|
+
all_data = {}
|
|
254
|
+
last_valid_response = ""
|
|
255
|
+
response = ""
|
|
256
|
+
|
|
257
|
+
for i in range(self.max_iterations):
|
|
258
|
+
if i > 0:
|
|
259
|
+
print(f" [DEBUG] [{self.name}] Iteration {i+1}/{self.max_iterations}...")
|
|
260
|
+
|
|
261
|
+
# Call LLM
|
|
262
|
+
response_data = None
|
|
263
|
+
try:
|
|
264
|
+
# Optimized Fallback Logic: Don't retry native tools if they already failed once
|
|
265
|
+
should_try_native = (
|
|
266
|
+
native_tools
|
|
267
|
+
and hasattr(self.llm, 'chat_async')
|
|
268
|
+
and not getattr(self, '_native_tools_failed', False)
|
|
269
|
+
)
|
|
270
|
+
|
|
271
|
+
try:
|
|
272
|
+
if should_try_native:
|
|
273
|
+
response_data = await self.llm.chat_async(messages, tools=native_tools)
|
|
274
|
+
elif hasattr(self.llm, 'chat_async'):
|
|
275
|
+
response_data = await self.llm.chat_async(messages)
|
|
276
|
+
elif hasattr(self.llm, 'complete_async'):
|
|
277
|
+
# Fallback for completion only models
|
|
278
|
+
prompt = "\n".join([f"{m['role'].upper()}: {m['content']}" for m in messages]) + "\nASSISTANT:"
|
|
279
|
+
response_data = await self.llm.complete_async(prompt)
|
|
280
|
+
else:
|
|
281
|
+
# Sync fallback
|
|
282
|
+
if hasattr(self.llm, 'chat'):
|
|
283
|
+
response_data = await asyncio.to_thread(self.llm.chat, messages)
|
|
284
|
+
else:
|
|
285
|
+
prompt = "\n".join([f"{m['role'].upper()}: {m['content']}" for m in messages]) + "\nASSISTANT:"
|
|
286
|
+
response_data = await asyncio.to_thread(self.llm.complete, prompt)
|
|
287
|
+
except Exception as e:
|
|
288
|
+
if should_try_native and "tool" in str(e).lower():
|
|
289
|
+
print(f" [DEBUG] Native tool call failed ({e}), falling back to text for this session...")
|
|
290
|
+
self._native_tools_failed = True
|
|
291
|
+
response_data = await self.llm.chat_async(messages)
|
|
292
|
+
else:
|
|
293
|
+
raise e
|
|
294
|
+
|
|
295
|
+
except Exception as e:
|
|
296
|
+
print(f" [LLM ERROR] {e}")
|
|
297
|
+
# Retry once?
|
|
298
|
+
time.sleep(1)
|
|
299
|
+
continue
|
|
300
|
+
|
|
301
|
+
# Handle Response
|
|
302
|
+
tool_calls = []
|
|
303
|
+
content = ""
|
|
304
|
+
|
|
305
|
+
if isinstance(response_data, dict):
|
|
306
|
+
# Native Tool Call
|
|
307
|
+
content = response_data.get("content", "")
|
|
308
|
+
tool_calls_raw = response_data.get("tool_calls", [])
|
|
309
|
+
|
|
310
|
+
# Convert raw API tool calls to internal format
|
|
311
|
+
for tc in tool_calls_raw:
|
|
312
|
+
if isinstance(tc, dict):
|
|
313
|
+
# OpenAI/Ollama format: function: {name, arguments}
|
|
314
|
+
func = tc.get("function", {})
|
|
315
|
+
name = func.get("name")
|
|
316
|
+
args = func.get("arguments")
|
|
317
|
+
tool_id = tc.get("id")
|
|
318
|
+
if isinstance(args, str):
|
|
319
|
+
try: args = json.loads(args)
|
|
320
|
+
except: pass
|
|
321
|
+
if name:
|
|
322
|
+
tool_calls.append((name, args, tool_id))
|
|
323
|
+
|
|
324
|
+
else:
|
|
325
|
+
# Text Response (Legacy ReAct check)
|
|
326
|
+
content = str(response_data)
|
|
327
|
+
last_valid_response = content
|
|
328
|
+
tool_calls_raw = await self._extract_tool_calls(content, context)
|
|
329
|
+
# Normalize legacy extraction to include None ID
|
|
330
|
+
tool_calls = [(name, args, None) for name, args in tool_calls_raw]
|
|
331
|
+
|
|
332
|
+
# Update history
|
|
333
|
+
if content:
|
|
334
|
+
messages.append({"role": "assistant", "content": content})
|
|
335
|
+
last_valid_response = content
|
|
336
|
+
|
|
337
|
+
if tool_calls and not content:
|
|
338
|
+
messages.append({"role": "assistant", "content": "", "tool_calls": response_data.get("tool_calls")} if isinstance(response_data, dict) else {"role": "assistant", "content": "Executing tools..."})
|
|
339
|
+
|
|
340
|
+
# EMIT THOUGHT
|
|
341
|
+
if content:
|
|
342
|
+
self.framework.event_bus.emit("agent:thought", {
|
|
343
|
+
"agent": self.name,
|
|
344
|
+
"thought": content,
|
|
345
|
+
"iteration": i + 1,
|
|
346
|
+
"task_id": context.get('task_id') if isinstance(context, dict) else None
|
|
347
|
+
})
|
|
348
|
+
|
|
349
|
+
# Check Termination
|
|
350
|
+
if not tool_calls:
|
|
351
|
+
if "Final Answer:" in content:
|
|
352
|
+
clean_answer = content.split("Final Answer:")[-1].strip()
|
|
353
|
+
self.success_count += 1
|
|
354
|
+
return {
|
|
355
|
+
"success": True,
|
|
356
|
+
"response": clean_answer,
|
|
357
|
+
"full_log": content,
|
|
358
|
+
"agent": self.name,
|
|
359
|
+
"data": all_data,
|
|
360
|
+
"iterations": i + 1,
|
|
361
|
+
"type": "react"
|
|
362
|
+
}
|
|
363
|
+
|
|
364
|
+
if content and (not native_tools or not "Action:" in content):
|
|
365
|
+
self.success_count += 1
|
|
366
|
+
return {
|
|
367
|
+
"success": True,
|
|
368
|
+
"response": content,
|
|
369
|
+
"full_log": content,
|
|
370
|
+
"agent": self.name,
|
|
371
|
+
"data": all_data,
|
|
372
|
+
"iterations": i + 1,
|
|
373
|
+
"type": "react"
|
|
374
|
+
}
|
|
375
|
+
|
|
376
|
+
# Execute Tools
|
|
377
|
+
for item in tool_calls:
|
|
378
|
+
if len(item) == 3:
|
|
379
|
+
name, args, tool_id = item
|
|
380
|
+
else:
|
|
381
|
+
name, args = item
|
|
382
|
+
tool_id = None
|
|
383
|
+
|
|
384
|
+
if name in self.tools:
|
|
385
|
+
if self.verbose:
|
|
386
|
+
print(f" | [ACTION] {name}({args})")
|
|
387
|
+
try:
|
|
388
|
+
# Pass framework to tool execution if needed (e.g. for accessing llm inside tool)
|
|
389
|
+
cleaned_args = {str(k): v for k, v in args.items()} if isinstance(args, dict) else {}
|
|
390
|
+
|
|
391
|
+
self.framework.event_bus.emit("agent:tool_call", {"agent": self.name, "tool": name, "args": args})
|
|
392
|
+
# Using self.framework as context
|
|
393
|
+
result = await self.tools[name].execute(**cleaned_args, framework=self.framework)
|
|
394
|
+
self.framework.event_bus.emit("agent:tool_result", {"agent": self.name, "tool": name, "result": result, "success": True})
|
|
395
|
+
|
|
396
|
+
all_data[name] = result
|
|
397
|
+
|
|
398
|
+
# Append Result
|
|
399
|
+
if tool_id:
|
|
400
|
+
messages.append({
|
|
401
|
+
"role": "tool",
|
|
402
|
+
"tool_call_id": tool_id,
|
|
403
|
+
"name": name,
|
|
404
|
+
"content": str(result)
|
|
405
|
+
})
|
|
406
|
+
else:
|
|
407
|
+
messages.append({
|
|
408
|
+
"role": "user",
|
|
409
|
+
"content": f"Tool '{name}' Output: {result}"
|
|
410
|
+
})
|
|
411
|
+
|
|
412
|
+
except Exception as e:
|
|
413
|
+
if self.verbose:
|
|
414
|
+
print(f" [{self.name} FAILED] {name}: {e}")
|
|
415
|
+
self.framework.event_bus.emit("agent:tool_result", {"agent": self.name, "tool": name, "error": str(e), "success": False})
|
|
416
|
+
|
|
417
|
+
if tool_id:
|
|
418
|
+
messages.append({
|
|
419
|
+
"role": "tool",
|
|
420
|
+
"tool_call_id": tool_id,
|
|
421
|
+
"name": name,
|
|
422
|
+
"content": f"Error: {e}"
|
|
423
|
+
})
|
|
424
|
+
else:
|
|
425
|
+
messages.append({
|
|
426
|
+
"role": "user",
|
|
427
|
+
"content": f"Tool '{name}' Failed: {e}"
|
|
428
|
+
})
|
|
429
|
+
|
|
430
|
+
# Max iterations reached
|
|
431
|
+
return {
|
|
432
|
+
"success": False,
|
|
433
|
+
"response": "Max iterations reached without Final Answer.",
|
|
434
|
+
"full_log": last_valid_response,
|
|
435
|
+
"agent": self.name,
|
|
436
|
+
"data": all_data,
|
|
437
|
+
"iterations": self.max_iterations,
|
|
438
|
+
"type": "react"
|
|
439
|
+
}
|
|
440
|
+
|
|
441
|
+
except Exception as e:
|
|
442
|
+
import traceback
|
|
443
|
+
traceback.print_exc()
|
|
444
|
+
success = False
|
|
445
|
+
error_type = type(e).__name__
|
|
446
|
+
return {"success": False, "error": str(e), "response": f"Error: {str(e)}", "agent": self.name}
|
|
447
|
+
finally:
|
|
448
|
+
duration = time.time() - start_time
|
|
449
|
+
self.framework.metrics.record_request(self.name, "run", duration, success, error_type)
|
|
450
|
+
|
|
451
|
+
def run_sync(self, user_input: str, context: Optional[Dict] = None) -> Dict:
    """
    Synchronous wrapper for run.

    Uses ``asyncio.run`` when no event loop is active. When called from
    inside a running loop (e.g. a Jupyter notebook), falls back to
    ``nest_asyncio`` so the nested ``run_until_complete`` is legal.
    """
    # FIX: asyncio.get_event_loop() is deprecated outside a running loop
    # (Python 3.10+), and the old new_event_loop path leaked the created
    # loop; get_running_loop() is the supported way to detect nesting.
    try:
        loop = asyncio.get_running_loop()
    except RuntimeError:
        loop = None

    if loop is not None:
        import nest_asyncio  # third-party; only required for nested loops
        nest_asyncio.apply()
        return loop.run_until_complete(self.run(user_input, context))
    return asyncio.run(self.run(user_input, context))
|
|
468
|
+
|
|
469
|
+
async def _extract_tool_calls(self, response: str, context: Optional[Dict] = None) -> List:
|
|
470
|
+
"""Robust tool extraction combining direct regex and LLM fallback."""
|
|
471
|
+
if not self.tools: return []
|
|
472
|
+
|
|
473
|
+
# 1. OPTIMIZATION: Direct Regex Extraction from response text
|
|
474
|
+
action_match = re.search(r"Action:\s*(\[.*?\])", response, re.DOTALL)
|
|
475
|
+
if action_match:
|
|
476
|
+
try:
|
|
477
|
+
raw_json = action_match.group(1).strip()
|
|
478
|
+
raw_json = re.sub(r'\}\s*\{', '}, {', raw_json)
|
|
479
|
+
calls = json.loads(raw_json)
|
|
480
|
+
if isinstance(calls, list):
|
|
481
|
+
valid = self._sanitize_calls(calls)
|
|
482
|
+
if valid:
|
|
483
|
+
print(f" [DEBUG] [{self.name}] Direct regex extraction successful: {len(valid)} calls")
|
|
484
|
+
return valid
|
|
485
|
+
except Exception as e:
|
|
486
|
+
print(f" [DEBUG] [{self.name}] Direct regex parsing failed: {e}. Falling back to LLM...")
|
|
487
|
+
|
|
488
|
+
# 2. FALLBACK: Use LLM for extraction
|
|
489
|
+
tool_defs = [tool.get_definition() for tool in self.tools.values()]
|
|
490
|
+
prompt = f"""Extract tool calls from this text.
|
|
491
|
+
Available tools: {json.dumps(tool_defs, indent=2)}
|
|
492
|
+
Text: {response}
|
|
493
|
+
|
|
494
|
+
### Instructions:
|
|
495
|
+
1. ONLY extract NEW action calls.
|
|
496
|
+
2. Output a valid JSON list: [{{"name": "...", "args": {{...}}}}]
|
|
497
|
+
3. Return [] if no new action is needed.
|
|
498
|
+
"""
|
|
499
|
+
try:
|
|
500
|
+
extractor = self.llm
|
|
501
|
+
if hasattr(extractor, 'complete_async'):
|
|
502
|
+
raw = await extractor.complete_async(prompt)
|
|
503
|
+
else:
|
|
504
|
+
raw = await asyncio.to_thread(extractor.complete, prompt)
|
|
505
|
+
|
|
506
|
+
blocks = re.findall(r'(\[[\s\S]*?\]|\{[\s\S]*?\})', raw.strip())
|
|
507
|
+
all_calls = []
|
|
508
|
+
for block in blocks:
|
|
509
|
+
try:
|
|
510
|
+
parsed = json.loads(re.sub(r'\}\s*\{', '}, {', block))
|
|
511
|
+
if isinstance(parsed, list): all_calls.extend(parsed)
|
|
512
|
+
elif isinstance(parsed, dict) and 'name' in parsed: all_calls.append(parsed)
|
|
513
|
+
except: pass
|
|
514
|
+
|
|
515
|
+
valid = self._sanitize_calls(all_calls)
|
|
516
|
+
return valid
|
|
517
|
+
except: return []
|
|
518
|
+
|
|
519
|
+
def _sanitize_calls(self, calls: List) -> List:
|
|
520
|
+
"""Sanitize and validate raw tool call objects."""
|
|
521
|
+
valid = []
|
|
522
|
+
for c in calls:
|
|
523
|
+
if not isinstance(c, dict): continue
|
|
524
|
+
name = c.get('name')
|
|
525
|
+
if name in self.tools:
|
|
526
|
+
allowed = self.tools[name].get_definition().get('parameters', {}).keys()
|
|
527
|
+
|
|
528
|
+
# Resolve args
|
|
529
|
+
args = {}
|
|
530
|
+
for k in ['args', 'arguments', 'parameters', 'params', 'input']:
|
|
531
|
+
if k in c and isinstance(c[k], dict):
|
|
532
|
+
args = c[k]
|
|
533
|
+
break
|
|
534
|
+
if not args: args = {k: v for k, v in c.items() if k not in ['name', 'args', 'arguments']}
|
|
535
|
+
|
|
536
|
+
# Resilient mapping
|
|
537
|
+
sanitized = {k: v for k, v in args.items() if k in allowed}
|
|
538
|
+
if not sanitized and args and len(allowed) == 1:
|
|
539
|
+
target_key = list(allowed)[0]
|
|
540
|
+
# Map the most likely candidate from the provided dict
|
|
541
|
+
best_val = next((v for v in args.values() if isinstance(v, (str, int, float))), None)
|
|
542
|
+
if best_val:
|
|
543
|
+
sanitized = {target_key: best_val}
|
|
544
|
+
|
|
545
|
+
# Placeholder filter
|
|
546
|
+
if not any(isinstance(v, str) and (v.startswith('<') or '[' in v or v == 'id') for v in sanitized.values()):
|
|
547
|
+
valid.append((name, sanitized))
|
|
548
|
+
return valid
|
|
549
|
+
|
|
550
|
+
def get_metrics(self) -> Dict:
    """Return this agent's call counters and success rate (percent)."""
    calls = self.call_count
    wins = self.success_count
    # Guard against division by zero before any runs have happened.
    rate = (wins / calls * 100) if calls > 0 else 0
    return {
        "name": self.name,
        "calls": calls,
        "success": wins,
        "success_rate": rate
    }
|
kite/agents/__init__.py
ADDED