webagents 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webagents/__init__.py +18 -0
- webagents/__main__.py +55 -0
- webagents/agents/__init__.py +13 -0
- webagents/agents/core/__init__.py +19 -0
- webagents/agents/core/base_agent.py +1834 -0
- webagents/agents/core/handoffs.py +293 -0
- webagents/agents/handoffs/__init__.py +0 -0
- webagents/agents/interfaces/__init__.py +0 -0
- webagents/agents/lifecycle/__init__.py +0 -0
- webagents/agents/skills/__init__.py +109 -0
- webagents/agents/skills/base.py +136 -0
- webagents/agents/skills/core/__init__.py +8 -0
- webagents/agents/skills/core/guardrails/__init__.py +0 -0
- webagents/agents/skills/core/llm/__init__.py +0 -0
- webagents/agents/skills/core/llm/anthropic/__init__.py +1 -0
- webagents/agents/skills/core/llm/litellm/__init__.py +10 -0
- webagents/agents/skills/core/llm/litellm/skill.py +538 -0
- webagents/agents/skills/core/llm/openai/__init__.py +1 -0
- webagents/agents/skills/core/llm/xai/__init__.py +1 -0
- webagents/agents/skills/core/mcp/README.md +375 -0
- webagents/agents/skills/core/mcp/__init__.py +15 -0
- webagents/agents/skills/core/mcp/skill.py +731 -0
- webagents/agents/skills/core/memory/__init__.py +11 -0
- webagents/agents/skills/core/memory/long_term_memory/__init__.py +10 -0
- webagents/agents/skills/core/memory/long_term_memory/memory_skill.py +639 -0
- webagents/agents/skills/core/memory/short_term_memory/__init__.py +9 -0
- webagents/agents/skills/core/memory/short_term_memory/skill.py +341 -0
- webagents/agents/skills/core/memory/vector_memory/skill.py +447 -0
- webagents/agents/skills/core/planning/__init__.py +9 -0
- webagents/agents/skills/core/planning/planner.py +343 -0
- webagents/agents/skills/ecosystem/__init__.py +0 -0
- webagents/agents/skills/ecosystem/crewai/__init__.py +1 -0
- webagents/agents/skills/ecosystem/database/__init__.py +1 -0
- webagents/agents/skills/ecosystem/filesystem/__init__.py +0 -0
- webagents/agents/skills/ecosystem/google/__init__.py +0 -0
- webagents/agents/skills/ecosystem/google/calendar/__init__.py +6 -0
- webagents/agents/skills/ecosystem/google/calendar/skill.py +306 -0
- webagents/agents/skills/ecosystem/n8n/__init__.py +0 -0
- webagents/agents/skills/ecosystem/openai_agents/__init__.py +0 -0
- webagents/agents/skills/ecosystem/web/__init__.py +0 -0
- webagents/agents/skills/ecosystem/zapier/__init__.py +0 -0
- webagents/agents/skills/robutler/__init__.py +11 -0
- webagents/agents/skills/robutler/auth/README.md +63 -0
- webagents/agents/skills/robutler/auth/__init__.py +17 -0
- webagents/agents/skills/robutler/auth/skill.py +354 -0
- webagents/agents/skills/robutler/crm/__init__.py +18 -0
- webagents/agents/skills/robutler/crm/skill.py +368 -0
- webagents/agents/skills/robutler/discovery/README.md +281 -0
- webagents/agents/skills/robutler/discovery/__init__.py +16 -0
- webagents/agents/skills/robutler/discovery/skill.py +230 -0
- webagents/agents/skills/robutler/kv/__init__.py +6 -0
- webagents/agents/skills/robutler/kv/skill.py +80 -0
- webagents/agents/skills/robutler/message_history/__init__.py +9 -0
- webagents/agents/skills/robutler/message_history/skill.py +270 -0
- webagents/agents/skills/robutler/messages/__init__.py +0 -0
- webagents/agents/skills/robutler/nli/__init__.py +13 -0
- webagents/agents/skills/robutler/nli/skill.py +687 -0
- webagents/agents/skills/robutler/notifications/__init__.py +5 -0
- webagents/agents/skills/robutler/notifications/skill.py +141 -0
- webagents/agents/skills/robutler/payments/__init__.py +41 -0
- webagents/agents/skills/robutler/payments/exceptions.py +255 -0
- webagents/agents/skills/robutler/payments/skill.py +610 -0
- webagents/agents/skills/robutler/storage/__init__.py +10 -0
- webagents/agents/skills/robutler/storage/files/__init__.py +9 -0
- webagents/agents/skills/robutler/storage/files/skill.py +445 -0
- webagents/agents/skills/robutler/storage/json/__init__.py +9 -0
- webagents/agents/skills/robutler/storage/json/skill.py +336 -0
- webagents/agents/skills/robutler/storage/kv/skill.py +88 -0
- webagents/agents/skills/robutler/storage.py +389 -0
- webagents/agents/tools/__init__.py +0 -0
- webagents/agents/tools/decorators.py +426 -0
- webagents/agents/tracing/__init__.py +0 -0
- webagents/agents/workflows/__init__.py +0 -0
- webagents/scripts/__init__.py +0 -0
- webagents/server/__init__.py +28 -0
- webagents/server/context/__init__.py +0 -0
- webagents/server/context/context_vars.py +121 -0
- webagents/server/core/__init__.py +0 -0
- webagents/server/core/app.py +843 -0
- webagents/server/core/middleware.py +69 -0
- webagents/server/core/models.py +98 -0
- webagents/server/core/monitoring.py +59 -0
- webagents/server/endpoints/__init__.py +0 -0
- webagents/server/interfaces/__init__.py +0 -0
- webagents/server/middleware.py +330 -0
- webagents/server/models.py +92 -0
- webagents/server/monitoring.py +659 -0
- webagents/utils/__init__.py +0 -0
- webagents/utils/logging.py +359 -0
- webagents-0.1.0.dist-info/METADATA +230 -0
- webagents-0.1.0.dist-info/RECORD +94 -0
- webagents-0.1.0.dist-info/WHEEL +4 -0
- webagents-0.1.0.dist-info/entry_points.txt +2 -0
- webagents-0.1.0.dist-info/licenses/LICENSE +20 -0
@@ -0,0 +1,1834 @@
"""
BaseAgent - WebAgents V2.0 Core Agent Implementation

Central agent implementation with automatic decorator registration,
unified context management, and comprehensive tool/hook/handoff execution.

Key Features:
- Agentic Loop: Automatically continues conversation after internal tool execution
- Internal tools: Executed server-side, results fed back to LLM for continuation
- External tools: Loop breaks, returns tool calls to client for execution
- Mixed scenario: Internal tools executed first, then returns external tools
- Streaming support with proper chunk handling for tool calls
- Unified context management across all operations
"""

import asyncio
import os
import inspect
import json
import threading
import time
import uuid
from typing import Dict, Any, List, Optional, Callable, Union, AsyncGenerator
from datetime import datetime

from ..skills.base import Skill, Handoff, HandoffResult
from ..tools.decorators import tool, hook, handoff, http
from ...server.context.context_vars import Context, set_context, get_context, create_context
from webagents.utils.logging import get_logger

class BaseAgent:
    """
    BaseAgent - Core agent implementation with unified capabilities

    Features:
    - Automatic decorator registration (@tool, @hook, @handoff, @http)
    - Direct registration of tools, hooks, handoffs, and HTTP handlers via __init__
    - Unified context management
    - Streaming and non-streaming execution
    - Scope-based access control
    - Comprehensive tool/handoff/HTTP execution
    - OpenAI-compatible tool call handling
    - Thread-safe central registry for all capabilities
    - FastAPI-style direct registration (@agent.tool, @agent.http, etc.)

    Initialization supports:
    - Tools: List of callable functions (with or without @tool decorator)
    - Hooks: Dict mapping events to hook functions or configurations
    - Handoffs: List of Handoff objects or @handoff decorated functions
    - HTTP handlers: List of @http decorated functions for custom endpoints
    - Capabilities: List of any decorated functions (auto-categorized)
    - Skills: Dict of skill instances with automatic capability registration

    HTTP Integration:
    - Custom endpoints: /{agent_name}/{subpath}
    - Conflict detection with core paths
    - FastAPI request handling
    """

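    # A minimal construction sketch (hypothetical usage, not part of the released
    # file); assumes the OpenAI skill resolves from the "provider/model" string:
    #
    #     def get_time() -> str:
    #         """Return the current UTC time."""
    #         from datetime import datetime, timezone
    #         return datetime.now(timezone.utc).isoformat()
    #
    #     agent = BaseAgent(
    #         name="demo-agent",
    #         instructions="You are a concise assistant.",
    #         model="openai/gpt-4o",   # creates skills["primary_llm"]
    #         tools=[get_time],        # undecorated, so it inherits agent scopes
    #     )
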
    def __init__(
        self,
        name: str,
        instructions: str = "",
        model: Optional[Union[str, Any]] = None,
        skills: Optional[Dict[str, Skill]] = None,
        scopes: Optional[List[str]] = None,
        tools: Optional[List[Callable]] = None,
        hooks: Optional[Dict[str, List[Union[Callable, Dict[str, Any]]]]] = None,
        handoffs: Optional[List[Union[Handoff, Callable]]] = None,
        http_handlers: Optional[List[Callable]] = None,
        capabilities: Optional[List[Callable]] = None
    ):
        """Initialize BaseAgent with comprehensive configuration

        Args:
            name: Agent identifier (URL-safe)
            instructions: System instructions/prompt for the agent
            model: LLM model specification (string like "openai/gpt-4o" or skill instance)
            skills: Dictionary of skill instances to attach to agent
            scopes: List of access scopes for agent capabilities (e.g., ["all"], ["owner", "admin"]).
                If None, defaults to ["all"]. Common scopes: "all", "owner", "admin"
            tools: List of tool functions (with or without @tool decorator)
            hooks: Dict mapping event names to lists of hook functions or configurations
            handoffs: List of Handoff objects or functions with @handoff decorator
            http_handlers: List of HTTP handler functions (with @http decorator)
            capabilities: List of decorated functions that will be auto-registered based on their decorator type

        Tools can be:
        - Functions decorated with @tool
        - Plain functions (will auto-generate schema)

        Hooks format:
            {
                "on_request": [hook_func, {"handler": hook_func, "priority": 10}],
                "on_chunk": [hook_func],
                ...
            }

        Handoffs can be:
        - Handoff objects
        - Functions decorated with @handoff

        HTTP handlers can be:
        - Functions decorated with @http
        - Receive FastAPI request arguments directly

        Capabilities auto-registration:
        - Functions decorated with @tool, @hook, @handoff, @http
        - Automatically categorized and registered based on decorator type

        Scopes system:
        - Agent can have multiple scopes: ["owner", "admin"]
        - Capabilities inherit agent scopes unless explicitly overridden
        - Use scope management methods: add_scope(), remove_scope(), has_scope()
        """
        self.name = name
        self.instructions = instructions
        self.scopes = scopes if scopes is not None else ["all"]

        # Central registries (thread-safe)
        self._registered_tools: List[Dict[str, Any]] = []
        self._registered_hooks: Dict[str, List[Dict[str, Any]]] = {}
        self._registered_handoffs: List[Dict[str, Any]] = []
        self._registered_prompts: List[Dict[str, Any]] = []
        self._registered_http_handlers: List[Dict[str, Any]] = []
        self._registration_lock = threading.Lock()

        # Track tools overridden by external tools (per request)
        self._overridden_tools: set = set()

        # Skills management
        self.skills: Dict[str, Skill] = {}

        # Structured logger setup (align with DynamicAgentFactory style)
        self.logger = get_logger('base_agent', 'core')
        self._ensure_logger_handler()

        # Process model parameter and initialize skills
        skills = skills or {}
        if model:
            skills = self._process_model_parameter(model, skills)

        # Initialize all skills
        self._initialize_skills(skills)
        self.logger.debug(f"🧩 Initialized skills for agent='{name}' count={len(self.skills)}")

        # Register agent-level tools, hooks, handoffs, HTTP handlers, and capabilities
        self._register_agent_capabilities(tools, hooks, handoffs, http_handlers, capabilities)
        self.logger.info(f"🤖 BaseAgent created name='{self.name}' scopes={self.scopes}")

    def _ensure_logger_handler(self) -> None:
        """Ensure logger emits even in background contexts without adding duplicate handlers."""
        import logging
        log_level = os.getenv('LOG_LEVEL', 'INFO').upper()
        level = getattr(logging, log_level, logging.INFO)
        # If using a LoggerAdapter (e.g., AgentContextAdapter), operate on the underlying logger
        base_logger = self.logger.logger if isinstance(self.logger, logging.LoggerAdapter) else self.logger
        # Set desired level and let it propagate to the 'webagents' logger configured by setup_logging
        base_logger.setLevel(level)
        base_logger.propagate = True

    def _process_model_parameter(self, model: Union[str, Any], skills: Dict[str, Skill]) -> Dict[str, Skill]:
        """Process model parameter - if string, create appropriate LLM skill"""
        if isinstance(model, str) and "/" in model:
            # Format: "skill_type/model_name" (e.g., "openai/gpt-4o")
            skill_type, model_name = model.split("/", 1)

            if skill_type == "openai":
                from ..skills.core.llm.openai import OpenAISkill
                skills["primary_llm"] = OpenAISkill({"model": model_name})
                self.logger.debug(f"🧠 Model configured via skill=openai model='{model_name}'")
            elif skill_type == "litellm":
                from ..skills.core.llm.litellm import LiteLLMSkill
                skills["primary_llm"] = LiteLLMSkill({"model": model_name})
                self.logger.debug(f"🧠 Model configured via skill=litellm model='{model_name}'")
            elif skill_type == "anthropic":
                from ..skills.core.llm.anthropic import AnthropicSkill
                skills["primary_llm"] = AnthropicSkill({"model": model_name})
                self.logger.debug(f"🧠 Model configured via skill=anthropic model='{model_name}'")

        return skills

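    # Model-string parsing sketch: only the first "/" is split on, so provider-prefixed
    # model names pass through intact (the example values are hypothetical):
    #
    #     BaseAgent(name="a", model="litellm/anthropic/claude-3-opus")
    #     # -> skills["primary_llm"] = LiteLLMSkill({"model": "anthropic/claude-3-opus"})
    #
    # Unrecognized prefixes fall through silently and no "primary_llm" skill is created.
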
    def _initialize_skills(self, skills: Dict[str, Skill]) -> None:
        """Initialize all skills and register their decorators"""
        self.skills = skills

        for skill_name, skill in skills.items():
            # Auto-register decorators from skill
            self._auto_register_skill_decorators(skill, skill_name)

        # Note: Actual skill initialization (with agent reference) is deferred until needed.
        # This avoids event loop issues during testing.

    async def _ensure_skills_initialized(self) -> None:
        """Ensure all skills are initialized with agent reference"""
        for skill_name, skill in self.skills.items():
            # Check if skill needs initialization (most skills will have this method)
            if hasattr(skill, 'initialize') and callable(skill.initialize):
                # Check if already initialized by looking for agent attribute
                if not hasattr(skill, 'agent') or skill.agent is None:
                    self.logger.debug(f"🧪 Initializing skill='{skill_name}' for agent='{self.name}'")
                    await skill.initialize(self)
                    self.logger.debug(f"✅ Skill initialized skill='{skill_name}'")

    def _register_agent_capabilities(self, tools: Optional[List[Callable]] = None,
                                     hooks: Optional[Dict[str, List[Union[Callable, Dict[str, Any]]]]] = None,
                                     handoffs: Optional[List[Union[Handoff, Callable]]] = None,
                                     http_handlers: Optional[List[Callable]] = None,
                                     capabilities: Optional[List[Callable]] = None) -> None:
        """Register agent-level tools, hooks, and handoffs"""

        # Register tools
        if tools:
            for tool_func in tools:
                # For agent-level tools, inheritance logic:
                # - Decorated tools (@tool) keep their own scope (even default "all")
                # - Undecorated tools inherit agent scopes
                if hasattr(tool_func, '_webagents_is_tool') and tool_func._webagents_is_tool:
                    # Tool is decorated - keep its own scope
                    scope = tool_func._tool_scope
                else:
                    # Tool is undecorated - inherit agent scopes
                    scope = self.scopes
                self.register_tool(tool_func, source="agent", scope=scope)
                self.logger.debug(f"🛠️ Registered agent-level tool name='{getattr(tool_func, '_tool_name', tool_func.__name__)}' scope={scope}")

        # Register hooks
        if hooks:
            for event, hook_list in hooks.items():
                for hook_item in hook_list:
                    if callable(hook_item):
                        # Simple function - use default priority and inherit agent scopes
                        priority = getattr(hook_item, '_hook_priority', 50)
                        scope = getattr(hook_item, '_hook_scope', self.scopes)
                        self.register_hook(event, hook_item, priority, source="agent", scope=scope)
                        self.logger.debug(f"🪝 Registered agent-level hook event='{event}' priority={priority} scope={scope}")
                    elif isinstance(hook_item, dict):
                        # Configuration dict
                        handler = hook_item.get('handler')
                        priority = hook_item.get('priority', 50)
                        scope = hook_item.get('scope', self.scopes)
                        if handler and callable(handler):
                            self.register_hook(event, handler, priority, source="agent", scope=scope)
                            self.logger.debug(f"🪝 Registered agent-level hook (dict) event='{event}' priority={priority} scope={scope}")

        # Register handoffs
        if handoffs:
            for handoff_item in handoffs:
                if isinstance(handoff_item, Handoff):
                    # Direct Handoff object
                    self.register_handoff(handoff_item, source="agent")
                    self.logger.debug(f"📨 Registered handoff target='{handoff_item.target}' type='{handoff_item.handoff_type}'")
                elif callable(handoff_item) and hasattr(handoff_item, '_webagents_is_handoff'):
                    # Function with @handoff decorator
                    handoff_config = Handoff(
                        target=getattr(handoff_item, '_handoff_name', handoff_item.__name__),
                        handoff_type=getattr(handoff_item, '_handoff_type', 'agent'),
                        description=getattr(handoff_item, '_handoff_description', ''),
                        scope=getattr(handoff_item, '_handoff_scope', self.scopes)
                    )
                    handoff_config.metadata = {'function': handoff_item}
                    self.register_handoff(handoff_config, source="agent")
                    self.logger.debug(f"📨 Registered handoff target='{handoff_config.target}' type='{handoff_config.handoff_type}'")

        # Register HTTP handlers
        if http_handlers:
            for handler_func in http_handlers:
                if callable(handler_func):
                    self.register_http_handler(handler_func)
                    self.logger.debug(f"🌐 Registered HTTP handler subpath='{getattr(handler_func, '_http_subpath', '<unknown>')}' method='{getattr(handler_func, '_http_method', 'get')}'")

        # Register capabilities (decorated functions)
        if capabilities:
            for capability_func in capabilities:
                if callable(capability_func):
                    # Attempt to determine decorator type
                    if hasattr(capability_func, '_webagents_is_tool') and capability_func._webagents_is_tool:
                        self.register_tool(capability_func, source="agent")
                    elif hasattr(capability_func, '_webagents_is_hook') and capability_func._webagents_is_hook:
                        priority = getattr(capability_func, '_hook_priority', 50)
                        scope = getattr(capability_func, '_hook_scope', self.scopes)
                        self.register_hook(getattr(capability_func, '_hook_event_type', 'on_request'), capability_func, priority, source="agent", scope=scope)
                    elif hasattr(capability_func, '_webagents_is_handoff') and capability_func._webagents_is_handoff:
                        handoff_config = Handoff(
                            target=getattr(capability_func, '_handoff_name', capability_func.__name__),
                            handoff_type=getattr(capability_func, '_handoff_type', 'agent'),
                            description=getattr(capability_func, '_handoff_description', ''),
                            scope=getattr(capability_func, '_handoff_scope', self.scopes)
                        )
                        handoff_config.metadata = {'function': capability_func}
                        self.register_handoff(handoff_config, source="agent")
                    elif hasattr(capability_func, '_webagents_is_http') and capability_func._webagents_is_http:
                        self.register_http_handler(capability_func)
                        self.logger.debug(f"🌐 Registered HTTP capability subpath='{getattr(capability_func, '_http_subpath', '<unknown>')}' method='{getattr(capability_func, '_http_method', 'get')}'")

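    # Auto-categorization sketch (hypothetical decorator arguments; the exact
    # @tool/@hook signatures live in webagents.agents.tools.decorators):
    #
    #     @tool(scope="owner")
    #     def delete_account(user_id: str) -> str: ...
    #
    #     @hook("on_request", priority=10)
    #     async def audit(context): ...
    #
    #     agent = BaseAgent(name="a", capabilities=[delete_account, audit])
    #     # delete_account is registered as a tool, audit as an on_request hook
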
    # ===== SCOPE MANAGEMENT METHODS =====

    def add_scope(self, scope: str) -> None:
        """Add a scope to the agent if not already present

        Args:
            scope: Scope to add (e.g., "owner", "admin")
        """
        if scope not in self.scopes:
            self.scopes.append(scope)

    def remove_scope(self, scope: str) -> None:
        """Remove a scope from the agent

        Args:
            scope: Scope to remove
        """
        if scope in self.scopes:
            self.scopes.remove(scope)

    def has_scope(self, scope: str) -> bool:
        """Check if the agent has a specific scope

        Args:
            scope: Scope to check for

        Returns:
            True if agent has the scope, False otherwise
        """
        return scope in self.scopes

    def get_scopes(self) -> List[str]:
        """Get all scopes for this agent

        Returns:
            List of scope strings
        """
        return self.scopes.copy()

    def set_scopes(self, scopes: List[str]) -> None:
        """Set the agent's scopes list

        Args:
            scopes: New list of scopes
        """
        self.scopes = scopes.copy()

    def clear_scopes(self) -> None:
        """Clear all scopes from the agent"""
        self.scopes = []

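    # Scope bookkeeping is plain list manipulation, e.g.:
    #
    #     agent = BaseAgent(name="a", scopes=["all"])
    #     agent.add_scope("owner")
    #     assert agent.has_scope("owner")
    #     agent.remove_scope("owner")
    #     assert agent.get_scopes() == ["all"]
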
    def _auto_register_skill_decorators(self, skill: Any, skill_name: str) -> None:
        """Auto-discover and register @hook, @tool, @prompt, @handoff, and @http decorated methods"""
        for attr_name in dir(skill):
            if attr_name.startswith('_') and not attr_name.startswith('__'):
                continue

            attr = getattr(skill, attr_name)
            if not inspect.ismethod(attr) and not inspect.isfunction(attr):
                continue

            # Check for @hook decorator
            if hasattr(attr, '_webagents_is_hook') and attr._webagents_is_hook:
                event_type = attr._hook_event_type
                priority = getattr(attr, '_hook_priority', 50)
                scope = getattr(attr, '_hook_scope', None)
                self.register_hook(event_type, attr, priority, source=skill_name, scope=scope)

            # Check for @tool decorator
            elif hasattr(attr, '_webagents_is_tool') and attr._webagents_is_tool:
                scope = getattr(attr, '_tool_scope', None)
                self.register_tool(attr, source=skill_name, scope=scope)

            # Check for @prompt decorator
            elif hasattr(attr, '_webagents_is_prompt') and attr._webagents_is_prompt:
                priority = getattr(attr, '_prompt_priority', 50)
                scope = getattr(attr, '_prompt_scope', None)
                self.register_prompt(attr, priority, source=skill_name, scope=scope)

            # Check for @handoff decorator
            elif hasattr(attr, '_webagents_is_handoff') and attr._webagents_is_handoff:
                handoff_config = Handoff(
                    target=getattr(attr, '_handoff_name', attr_name),
                    handoff_type=getattr(attr, '_handoff_type', 'agent'),
                    description=getattr(attr, '_handoff_description', ''),
                    scope=getattr(attr, '_handoff_scope', None)
                )
                handoff_config.metadata = {'function': attr}
                self.register_handoff(handoff_config, source=skill_name)

            # Check for @http decorator
            elif hasattr(attr, '_webagents_is_http') and attr._webagents_is_http:
                self.register_http_handler(attr, source=skill_name)

    # Central registration methods (thread-safe)
    def register_tool(self, tool_func: Callable, source: str = "manual", scope: Optional[Union[str, List[str]]] = None):
        """Register a tool function"""
        with self._registration_lock:
            tool_config = {
                'function': tool_func,
                'source': source,
                'scope': scope,
                'name': getattr(tool_func, '_tool_name', tool_func.__name__),
                'description': getattr(tool_func, '_tool_description', tool_func.__doc__ or ''),
                'definition': getattr(tool_func, '_webagents_tool_definition', {})
            }
            self._registered_tools.append(tool_config)
            self.logger.debug(f"🛠️ Tool registered name='{tool_config['name']}' source='{source}' scope={scope}")

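    # Direct-registration sketch: plain callables work too, with the name and
    # description falling back to __name__ and __doc__ (hypothetical example):
    #
    #     def add(a: int, b: int) -> int:
    #         """Add two integers."""
    #         return a + b
    #
    #     agent.register_tool(add, source="manual", scope=["all"])
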
    def register_hook(self, event: str, handler: Callable, priority: int = 50, source: str = "manual", scope: Optional[Union[str, List[str]]] = None):
        """Register a hook handler for an event"""
        with self._registration_lock:
            if event not in self._registered_hooks:
                self._registered_hooks[event] = []

            hook_config = {
                'handler': handler,
                'priority': priority,
                'source': source,
                'scope': scope,
                'event': event
            }
            self._registered_hooks[event].append(hook_config)
            # Sort by priority (lower numbers execute first)
            self._registered_hooks[event].sort(key=lambda x: x['priority'])
            self.logger.debug(f"🪝 Hook registered event='{event}' priority={priority} source='{source}' scope={scope}")

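    # Ordering sketch: hooks for one event run in ascending priority order
    # (handler names are hypothetical):
    #
    #     agent.register_hook("on_request", authenticate, priority=10)    # runs first
    #     agent.register_hook("on_request", enrich_context, priority=50)  # runs second
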
    def register_handoff(self, handoff_config: Handoff, source: str = "manual"):
        """Register a handoff configuration"""
        with self._registration_lock:
            self._registered_handoffs.append({
                'config': handoff_config,
                'source': source
            })
            self.logger.debug(f"📨 Handoff registered target='{handoff_config.target}' type='{handoff_config.handoff_type}' source='{source}'")

    def register_prompt(self, prompt_func: Callable, priority: int = 50, source: str = "manual", scope: Optional[Union[str, List[str]]] = None):
        """Register a prompt provider function"""
        with self._registration_lock:
            prompt_config = {
                'function': prompt_func,
                'priority': priority,
                'source': source,
                'scope': scope,
                'name': getattr(prompt_func, '__name__', 'unnamed_prompt')
            }
            self._registered_prompts.append(prompt_config)
            # Sort by priority (lower numbers execute first)
            self._registered_prompts.sort(key=lambda x: x['priority'])
            self.logger.debug(f"🧾 Prompt registered name='{prompt_config['name']}' priority={priority} source='{source}' scope={scope}")

    def register_http_handler(self, handler_func: Callable, source: str = "manual"):
        """Register an HTTP handler function with conflict detection"""
        if not hasattr(handler_func, '_webagents_is_http'):
            raise ValueError(f"Function {handler_func.__name__} is not decorated with @http")

        subpath = getattr(handler_func, '_http_subpath')
        method = getattr(handler_func, '_http_method')
        scope = getattr(handler_func, '_http_scope')
        description = getattr(handler_func, '_http_description')

        # Check for conflicts with core handlers
        core_paths = ['/chat/completions', '/info', '/capabilities']
        if subpath in core_paths:
            raise ValueError(f"HTTP subpath '{subpath}' conflicts with core handler. Core paths: {core_paths}")

        with self._registration_lock:
            # Check for conflicts with existing handlers
            for existing_handler in self._registered_http_handlers:
                existing_subpath = existing_handler.get('subpath')
                existing_method = existing_handler.get('method')
                if existing_subpath == subpath and existing_method == method:
                    raise ValueError(f"HTTP handler conflict: {method.upper()} {subpath} already registered")

            handler_config = {
                'function': handler_func,
                'source': source,
                'subpath': subpath,
                'method': method,
                'scope': scope,
                'description': description,
                'name': getattr(handler_func, '__name__', 'unnamed_handler')
            }
            self._registered_http_handlers.append(handler_config)
            self.logger.debug(f"🌐 HTTP handler registered method='{method}' subpath='{subpath}' scope={scope} source='{source}'")

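    # Custom-endpoint sketch (hypothetical @http arguments): the handler below would
    # be served as GET /{agent_name}/status and rejected if it collided with a core path.
    #
    #     @http("/status", method="get")
    #     async def status(request):
    #         return {"ok": True}
    #
    #     agent.register_http_handler(status)
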
    def get_all_hooks(self, event: str) -> List[Dict[str, Any]]:
        """Get all hooks for a specific event"""
        return self._registered_hooks.get(event, [])

    def get_prompts_for_scope(self, auth_scope: str) -> List[Dict[str, Any]]:
        """Get prompt providers filtered by user scope"""
        scope_hierarchy = {"admin": 3, "owner": 2, "all": 1}
        user_level = scope_hierarchy.get(auth_scope, 1)

        available_prompts = []
        with self._registration_lock:
            for prompt_config in self._registered_prompts:
                prompt_scope = prompt_config.get('scope', 'all')
                if isinstance(prompt_scope, list):
                    # If scope is a list, check if auth_scope is in it
                    if auth_scope in prompt_scope or 'all' in prompt_scope:
                        available_prompts.append(prompt_config)
                else:
                    # Single scope - check hierarchy
                    required_level = scope_hierarchy.get(prompt_scope, 1)
                    if user_level >= required_level:
                        available_prompts.append(prompt_config)

        return available_prompts

    def get_tools_for_scope(self, auth_scope: str) -> List[Dict[str, Any]]:
        """Get tools filtered by single user scope

        Args:
            auth_scope: Single scope to check against (e.g., "owner", "admin")

        Returns:
            List of tool configurations accessible to the user scope
        """
        return self.get_tools_for_scopes([auth_scope])

    def get_tools_for_scopes(self, auth_scopes: List[str]) -> List[Dict[str, Any]]:
        """Get tools filtered by multiple user scopes

        Args:
            auth_scopes: List of scopes to check against (e.g., ["owner", "admin"])

        Returns:
            List of tool configurations accessible to any of the user scopes
        """
        scope_hierarchy = {"admin": 3, "owner": 2, "all": 1}
        user_levels = [scope_hierarchy.get(scope, 1) for scope in auth_scopes]
        max_user_level = max(user_levels) if user_levels else 1

        available_tools = []
        with self._registration_lock:
            for tool_config in self._registered_tools:
                tool_scope = tool_config.get('scope', 'all')
                if isinstance(tool_scope, list):
                    # If scope is a list, check if any user scope is in it
                    if any(scope in tool_scope for scope in auth_scopes) or 'all' in tool_scope:
                        available_tools.append(tool_config)
                else:
                    # Single scope - check hierarchy against max user level
                    required_level = scope_hierarchy.get(tool_scope, 1)
                    if max_user_level >= required_level:
                        available_tools.append(tool_config)

        return available_tools

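    # Filtering sketch: with the hierarchy {"admin": 3, "owner": 2, "all": 1}, an
    # "owner" caller sees "all"- and "owner"-scoped tools but not "admin"-scoped ones
    # (function names are hypothetical):
    #
    #     agent.register_tool(public_fn, scope="all")
    #     agent.register_tool(admin_fn, scope="admin")
    #     visible = agent.get_tools_for_scopes(["owner"])  # includes public_fn only
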
    def get_all_tools(self) -> List[Dict[str, Any]]:
        """Get all registered tools regardless of scope"""
        with self._registration_lock:
            return self._registered_tools.copy()

    def get_all_http_handlers(self) -> List[Dict[str, Any]]:
        """Get all registered HTTP handlers"""
        with self._registration_lock:
            return self._registered_http_handlers.copy()

    def get_http_handlers_for_scope(self, auth_scope: str) -> List[Dict[str, Any]]:
        """Get HTTP handlers filtered by single user scope"""
        return self.get_http_handlers_for_scopes([auth_scope])

    def get_http_handlers_for_scopes(self, auth_scopes: List[str]) -> List[Dict[str, Any]]:
        """Get HTTP handlers filtered by multiple user scopes"""
        scope_hierarchy = {"admin": 3, "owner": 2, "all": 1}
        user_levels = [scope_hierarchy.get(scope, 1) for scope in auth_scopes]
        max_user_level = max(user_levels) if user_levels else 1

        available_handlers = []
        with self._registration_lock:
            for handler_config in self._registered_http_handlers:
                handler_scope = handler_config.get('scope', 'all')
                if isinstance(handler_scope, list):
                    # If scope is a list, check if any user scope is in it
                    if any(scope in handler_scope for scope in auth_scopes) or 'all' in handler_scope:
                        available_handlers.append(handler_config)
                else:
                    # Single scope - check hierarchy against max user level
                    required_level = scope_hierarchy.get(handler_scope, 1)
                    if max_user_level >= required_level:
                        available_handlers.append(handler_config)

        return available_handlers

    # Hook execution
    async def _execute_hooks(self, event: str, context: Context) -> Context:
        """Execute all hooks for a given event"""
        hooks = self.get_all_hooks(event)

        for hook_config in hooks:
            handler = hook_config['handler']
            try:
                if inspect.iscoroutinefunction(handler):
                    context = await handler(context)
                else:
                    context = handler(context)
            except Exception as e:
                # Re-raise structured errors (e.g., payment errors) immediately to halt execution.
                # We duck-type on common attributes set by our error classes.
                if hasattr(e, 'status_code') or hasattr(e, 'error_code') or hasattr(e, 'detail'):
                    raise e

                # Log other hook execution errors but continue
                self.logger.warning(f"⚠️ Hook execution error handler='{getattr(handler, '__name__', str(handler))}' error='{e}'")

        return context

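    # Hook contract sketch: a handler takes the Context and returns it (possibly
    # mutated); sync and async handlers are both supported (names hypothetical):
    #
    #     async def stamp_request(context):
    #         context.set("received_at", time.time())
    #         return context
    #
    #     agent.register_hook("on_request", stamp_request)
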
    # Prompt execution
    async def _execute_prompts(self, context: Context) -> str:
        """Execute all prompt providers and combine their outputs"""
        # Get user scope from context for filtering
        auth_scope = getattr(context, 'auth_scope', 'all')
        prompts = self.get_prompts_for_scope(auth_scope)
        self.logger.debug(f"🧾 Executing prompts scope='{auth_scope}' count={len(prompts)}")

        prompt_parts = []

        for prompt_config in prompts:
            handler = prompt_config['function']
            try:
                # Don't pass context explicitly - let the decorator wrapper handle it
                if inspect.iscoroutinefunction(handler):
                    prompt_part = await handler()
                else:
                    prompt_part = handler()

                if prompt_part and isinstance(prompt_part, str):
                    prompt_parts.append(prompt_part.strip())
            except Exception as e:
                # Log prompt execution error but continue
                self.logger.warning(f"⚠️ Prompt execution error handler='{getattr(handler, '__name__', str(handler))}' error='{e}'")

        prompt_parts.append(f"Your name is {self.name}, you are an AI agent in the Internet of Agents. Current time: {datetime.now().isoformat()}")

        # Combine all prompt parts with newlines
        return "\n\n".join(prompt_parts) if prompt_parts else ""

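    # Prompt-provider sketch (hypothetical @prompt usage on a skill): each provider
    # returns a string, and the parts are joined with blank lines into the system prompt.
    #
    #     class WeatherSkill(Skill):
    #         @prompt(priority=20, scope="all")
    #         def weather_hint(self) -> str:
    #             return "You can fetch weather via the get_weather tool."
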
    async def _enhance_messages_with_prompts(self, messages: List[Dict[str, Any]], context: Context) -> List[Dict[str, Any]]:
        """Enhance messages by adding dynamic prompts to the system message"""
        # Execute all prompt providers to get dynamic content
        dynamic_prompts = await self._execute_prompts(context)

        # Debug logging
        self.logger.debug(f"🔍 Enhance messages agent='{self.name}' incoming_count={len(messages)} has_instructions={bool(self.instructions)} has_dynamic_prompts={bool(dynamic_prompts)}")

        # If no dynamic prompts, still ensure agent instructions are in a system message
        if not dynamic_prompts:
            base_instructions = self.instructions or ""
            if not base_instructions:
                return messages

            # Find first system message
            system_index = next((i for i, m in enumerate(messages) if m.get("role") == "system"), -1)
            if system_index >= 0:
                self.logger.debug("🔧 Merging agent instructions into existing system message")
                existing = messages[system_index].get("content", "")
                merged = f"{base_instructions}\n\n{existing}".strip()
                enhanced_messages = messages.copy()
                enhanced_messages[system_index] = {**messages[system_index], "content": merged}
                return enhanced_messages
            else:
                self.logger.debug("🔧 Prepending new system message with agent instructions")
                enhanced_messages = [{
                    "role": "system",
                    "content": base_instructions
                }] + messages
                return enhanced_messages

        # Create enhanced messages list
        enhanced_messages = []
        system_message_found = False

        for message in messages:
            if message.get("role") == "system":
                # Enhance existing system message with agent instructions + prompts
                system_message_found = True
                original_content = message.get("content", "")
                base_instructions = self.instructions or ""
                parts = []
                if base_instructions:
                    parts.append(base_instructions)
                if original_content:
                    parts.append(original_content)
                if dynamic_prompts:
                    parts.append(dynamic_prompts)
                enhanced_content = "\n\n".join(parts).strip()
                enhanced_messages.append({
                    **message,
                    "content": enhanced_content
                })
                self.logger.debug("🔧 Enhanced existing system message")
            else:
                enhanced_messages.append(message)

        # If no system message exists, create one with agent instructions + dynamic prompts
        if not system_message_found:
            base_instructions = self.instructions if self.instructions else "You are a helpful AI assistant."
            system_content = f"{base_instructions}\n\n{dynamic_prompts}".strip()

            # Insert system message at the beginning
            enhanced_messages.insert(0, {
                "role": "system",
                "content": system_content
            })
            self.logger.debug("🔧 Created new system message with base instructions + dynamic prompts")

        self.logger.debug(f"📦 Enhanced messages count={len(enhanced_messages)}")

        return enhanced_messages

    # Tool execution methods
    def _get_tool_function_by_name(self, function_name: str) -> Optional[Callable]:
        """Get a registered tool function by name, respecting external tool overrides"""
        # If this tool was overridden by an external tool, don't return the internal function
        if function_name in self._overridden_tools:
            return None

        with self._registration_lock:
            for tool_config in self._registered_tools:
                if tool_config['name'] == function_name:
                    return tool_config['function']
        return None

    async def _execute_single_tool(self, tool_call: Dict[str, Any]) -> Dict[str, Any]:
        """Execute a single agent tool call (NOT external tools - those are executed by the client)"""
        function_name = tool_call["function"]["name"]
        function_args_str = tool_call["function"]["arguments"]
        original_tool_call_id = tool_call.get("id")
        tool_call_id = original_tool_call_id or f"call_{uuid.uuid4().hex[:8]}"

        # Enhanced debugging: Log tool call ID handling
        if not original_tool_call_id:
            self.logger.debug(f"🔧 Generated new tool_call_id '{tool_call_id}' for {function_name} (original was missing)")
        else:
            self.logger.debug(f"🔧 Using existing tool_call_id '{tool_call_id}' for {function_name}")

        try:
            # Parse function arguments
            function_args = json.loads(function_args_str)
        except json.JSONDecodeError as e:
            return {
                "tool_call_id": tool_call_id,
                "role": "tool",
                "content": f"Error parsing tool arguments: {str(e)}"
            }

        # Find the tool function (only for the agent's internal @tool functions)
        tool_func = self._get_tool_function_by_name(function_name)
        if not tool_func:
            # This might be an external tool - the client should handle it
            return {
                "tool_call_id": tool_call_id,
                "role": "tool",
                "content": f"Tool '{function_name}' should be executed by client (external tool)"
            }

        try:
            self.logger.debug(f"🛠️ Executing tool name='{function_name}' call_id='{tool_call_id}'")
            # Execute the agent's internal tool function
            if inspect.iscoroutinefunction(tool_func):
                result = await tool_func(**function_args)
            else:
                result = tool_func(**function_args)

            # If the tool returned (result, usage_info), log usage and unwrap the result
            try:
                if isinstance(result, tuple) and len(result) == 2 and isinstance(result[1], dict):
                    result_value, usage_payload = result
                    # Append unified usage record
                    context = get_context()
                    if context and hasattr(context, 'usage'):
                        usage_record = {
                            'type': 'tool',
                            'timestamp': time.time(),
                            'tool': function_name,
                        }
                        try:
                            usage_record.update(usage_payload or {})
                        except Exception:
                            pass
                        context.usage.append(usage_record)
                    # Use only the actual result for the tool response content
                    result = result_value
            except Exception:
                # Never fail execution due to logging issues
                pass

            # Format successful result
            result_str = str(result)
            self.logger.debug(f"🛠️ Tool success name='{function_name}' call_id='{tool_call_id}' result_preview='{result_str[:100]}...' (len={len(result_str)})")
            return {
                "tool_call_id": tool_call_id,
                "role": "tool",
                "content": result_str
            }

        except Exception as e:
            # Format error result
            self.logger.error(f"🛠️ Tool execution error name='{function_name}' call_id='{tool_call_id}' error='{e}'")
            return {
                "tool_call_id": tool_call_id,
                "role": "tool",
                "content": f"Tool execution error: {str(e)}"
            }

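    # Usage-reporting sketch: a tool may return a (result, usage_dict) tuple; the dict
    # is merged into a unified record on context.usage before the result is stringified
    # (the payload keys below are hypothetical):
    #
    #     def search(query: str):
    #         hits = ["..."]
    #         return hits, {"credits": 1, "provider": "example"}
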
    def _has_tool_calls(self, llm_response: Dict[str, Any]) -> bool:
        """Check if the LLM response contains tool calls"""
        return (llm_response.get("choices", [{}])[0]
                .get("message", {})
                .get("tool_calls") is not None)

    # Main execution methods
    async def run(
        self,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        stream: bool = False,
        **kwargs
    ) -> Dict[str, Any]:
        """Run agent with messages (non-streaming) - implements the agentic loop for tool calling"""

        # Get existing context or create a new one
        context = get_context()
        if context:
            # Update existing context with new data
            context.messages = messages
            context.stream = stream
            context.agent = self
        else:
            # Create new context if none exists
            context = create_context(
                messages=messages,
                stream=stream,
                agent=self
            )
            set_context(context)

        try:
            # Ensure all skills are initialized with agent reference
            await self._ensure_skills_initialized()

            # Execute on_connection hooks
            context = await self._execute_hooks("on_connection", context)

            # Merge external tools with agent tools
            all_tools = self._merge_tools(tools or [])

            # Find primary LLM skill
            llm_skill = self.skills.get("primary_llm")
            if not llm_skill:
                raise ValueError("No LLM skill configured")

            # Enhance messages with dynamic prompts before the first LLM call
            enhanced_messages = await self._enhance_messages_with_prompts(messages, context)

            # Maintain conversation history for the agentic loop
            conversation_messages = enhanced_messages.copy()

            # Agentic loop - continue until no more tool calls or max iterations
            max_tool_iterations = 10  # Prevent infinite loops
            tool_iterations = 0
            response = None

            while tool_iterations < max_tool_iterations:
                tool_iterations += 1

                # Debug logging for LLM call
                self.logger.debug(f"🚀 Calling LLM for agent '{self.name}' (iteration {tool_iterations}) with {len(all_tools)} tools")

                # Enhanced debugging: Log conversation history before LLM call
                self.logger.debug(f"📝 ITERATION {tool_iterations} - Conversation history ({len(conversation_messages)} messages):")
                for i, msg in enumerate(conversation_messages):
                    role = msg.get('role', 'unknown')
                    content_preview = str(msg.get('content', ''))[:100] + ('...' if len(str(msg.get('content', ''))) > 100 else '')
                    tool_calls = msg.get('tool_calls', [])
                    tool_call_id = msg.get('tool_call_id', '')

                    if role == 'system':
                        self.logger.debug(f"  [{i}] SYSTEM: {content_preview}")
                    elif role == 'user':
                        self.logger.debug(f"  [{i}] USER: {content_preview}")
                    elif role == 'assistant':
                        if tool_calls:
                            tool_names = [tc.get('function', {}).get('name', 'unknown') for tc in tool_calls]
                            self.logger.debug(f"  [{i}] ASSISTANT: {content_preview} | TOOL_CALLS: {tool_names}")
                        else:
                            self.logger.debug(f"  [{i}] ASSISTANT: {content_preview}")
                    elif role == 'tool':
                        self.logger.debug(f"  [{i}] TOOL[{tool_call_id}]: {content_preview}")
                    else:
                        self.logger.debug(f"  [{i}] {role.upper()}: {content_preview}")

                # Call LLM with current conversation history
                response = await llm_skill.chat_completion(conversation_messages, tools=all_tools, stream=False)

                # Store LLM response in context for cost tracking
                context.set('llm_response', response)

                # Log LLM token usage
                self._log_llm_usage(response, streaming=False)

                # Enhanced debugging: Log LLM response details
                self.logger.debug(f"📤 ITERATION {tool_iterations} - LLM Response:")
                if hasattr(response, 'choices') or (isinstance(response, dict) and 'choices' in response):
                    choices = response.choices if hasattr(response, 'choices') else response['choices']
                    if choices:
                        choice = choices[0]
                        message = choice.message if hasattr(choice, 'message') else choice['message']
                        finish_reason = choice.finish_reason if hasattr(choice, 'finish_reason') else choice.get('finish_reason')

                        content = message.content if hasattr(message, 'content') else message.get('content', '')
                        tool_calls = message.tool_calls if hasattr(message, 'tool_calls') else message.get('tool_calls', [])

                        content_preview = str(content)[:100] + ('...' if len(str(content)) > 100 else '') if content else '[None]'
                        self.logger.debug(f"  Content: {content_preview}")
                        self.logger.debug(f"  Finish reason: {finish_reason}")

                        if tool_calls:
                            self.logger.debug(f"  Tool calls ({len(tool_calls)}):")
                            for j, tc in enumerate(tool_calls):
                                tc_id = tc.id if hasattr(tc, 'id') else tc.get('id', 'unknown')
                                tc_func = tc.function if hasattr(tc, 'function') else tc.get('function', {})
                                tc_name = tc_func.name if hasattr(tc_func, 'name') else tc_func.get('name', 'unknown')
                                tc_args = tc_func.arguments if hasattr(tc_func, 'arguments') else tc_func.get('arguments', '{}')
                                args_preview = tc_args[:100] + ('...' if len(tc_args) > 100 else '') if tc_args else '{}'
                                self.logger.debug(f"    [{j}] {tc_name}[{tc_id}]: {args_preview}")
                        else:
                            self.logger.debug(f"  No tool calls")

                # Check if the response has tool calls
                if not self._has_tool_calls(response):
                    # No tool calls - LLM is done
                    self.logger.debug(f"✅ LLM finished (no tool calls) after {tool_iterations} iteration(s)")
                    break

                # Extract tool calls from response
                assistant_message = response["choices"][0]["message"]
                tool_calls = assistant_message.get("tool_calls", [])

                # More detailed logging to help diagnose tool calling loops
                tool_details = []
                for tc in tool_calls:
                    func_name = tc['function']['name']
                    func_args = tc['function'].get('arguments', '{}')
                    try:
                        args_preview = func_args[:100] if len(func_args) > 100 else func_args
                    except Exception:
                        args_preview = str(func_args)[:100]
                    tool_details.append(f"{func_name}(args={args_preview})")
                self.logger.debug(f"🔧 Iteration {tool_iterations}: Processing {len(tool_calls)} tool call(s): {tool_details}")

                # Separate internal and external tools
                internal_tools = []
                external_tools = []

                for tool_call in tool_calls:
                    function_name = tool_call["function"]["name"]
                    if self._get_tool_function_by_name(function_name):
                        internal_tools.append(tool_call)
                    else:
                        external_tools.append(tool_call)

                # If there are ANY external tools, we need to return to the client
                if external_tools:
                    self.logger.debug(f"🔄 Found {len(external_tools)} external tool(s), breaking loop to return to client")

                    # First execute any internal tools
                    if internal_tools:
                        self.logger.debug(f"⚡ Executing {len(internal_tools)} internal tool(s) first")
                        for tool_call in internal_tools:
                            # Execute hooks
                            context.set("tool_call", tool_call)
                            context = await self._execute_hooks("before_toolcall", context)
                            tool_call = context.get("tool_call", tool_call)

                            # Execute tool
                            result = await self._execute_single_tool(tool_call)

                            # Execute hooks
                            context.set("tool_result", result)
                            context = await self._execute_hooks("after_toolcall", context)

                    # Return response with external tool calls for the client
                    # Convert response to dict if needed
                    if hasattr(response, 'dict') and callable(response.dict):
                        client_response = response.dict()
                    elif hasattr(response, 'model_dump') and callable(response.model_dump):
                        client_response = response.model_dump()
                    else:
                        import copy
                        client_response = copy.deepcopy(response)

                    # Keep only external tool calls in the response
                    client_response["choices"][0]["message"]["tool_calls"] = external_tools

                    # Mark response appropriately
                    if internal_tools:
                        client_response["_mixed_execution"] = True
                    else:
                        client_response["_external_tools_only"] = True

                    # Clean up flags before returning
                    if "_mixed_execution" in client_response:
                        del client_response["_mixed_execution"]
                    if "_external_tools_only" in client_response:
                        del client_response["_external_tools_only"]

                    response = client_response
                    break

                # All tools are internal - execute them and continue the loop
                self.logger.debug(f"⚙️ Executing {len(internal_tools)} internal tool(s)")

                # Add assistant message with tool calls to the conversation
                # IMPORTANT: Preserve the entire assistant message structure to avoid confusing the LLM
                # Only modify tool_calls if needed, but keep all original fields
                # CRITICAL: Convert message object to dict format for LLM compatibility
                original_type = type(assistant_message).__name__
                if hasattr(assistant_message, 'dict') and callable(assistant_message.dict):
                    assistant_msg_copy = assistant_message.dict()
                    self.logger.debug(f"🔄 ITERATION {tool_iterations} - Converted assistant message from {original_type} to dict via .dict()")
                elif hasattr(assistant_message, 'model_dump') and callable(assistant_message.model_dump):
                    assistant_msg_copy = assistant_message.model_dump()
                    self.logger.debug(f"🔄 ITERATION {tool_iterations} - Converted assistant message from {original_type} to dict via .model_dump()")
                else:
                    assistant_msg_copy = dict(assistant_message) if hasattr(assistant_message, 'items') else assistant_message.copy()
                    self.logger.debug(f"🔄 ITERATION {tool_iterations} - Converted assistant message from {original_type} to dict via dict() or copy()")

                # If we filtered tools, update tool_calls (though for internal-only, they should be the same)
                if 'tool_calls' in assistant_msg_copy:
                    # Convert tool_calls to dict format as well
                    converted_tools = []
                    for tool_call in internal_tools:
                        if hasattr(tool_call, 'dict') and callable(tool_call.dict):
                            converted_tools.append(tool_call.dict())
                        elif hasattr(tool_call, 'model_dump') and callable(tool_call.model_dump):
                            converted_tools.append(tool_call.model_dump())
                        else:
                            converted_tools.append(dict(tool_call) if hasattr(tool_call, 'items') else tool_call)
                    assistant_msg_copy['tool_calls'] = converted_tools

                # Enhanced debugging: Log assistant message being added to conversation
                self.logger.debug(f"📝 ITERATION {tool_iterations} - Adding assistant message to conversation:")
                self.logger.debug(f"  Original tool_calls count: {len(tool_calls)}")
                self.logger.debug(f"  Internal tool_calls count: {len(internal_tools)}")
                self.logger.debug(f"  External tool_calls count: {len(external_tools) if external_tools else 0}")
                for i, tc in enumerate(internal_tools):
                    tc_id = tc.get('id', 'unknown')
                    tc_name = tc.get('function', {}).get('name', 'unknown')
                    self.logger.debug(f"  Internal tool[{i}]: {tc_name}[{tc_id}]")

                conversation_messages.append(assistant_msg_copy)

                # Execute each internal tool and add results
                for tool_call in internal_tools:
                    # Execute hooks
                    context.set("tool_call", tool_call)
                    context = await self._execute_hooks("before_toolcall", context)
                    tool_call = context.get("tool_call", tool_call)

                    # Enhanced debugging: Log tool execution details
                    tc_name = tool_call.get('function', {}).get('name', 'unknown')
                    tc_id = tool_call.get('id', 'unknown')
                    tc_args = tool_call.get('function', {}).get('arguments', '{}')
                    self.logger.debug(f"🔧 ITERATION {tool_iterations} - Executing tool: {tc_name}[{tc_id}] with args: {tc_args}")

                    # Execute tool
                    result = await self._execute_single_tool(tool_call)

                    # Enhanced debugging: Log tool result
                    result_content = result.get('content', '')
                    result_preview = result_content[:200] + ('...' if len(result_content) > 200 else '')
                    self.logger.debug(f"🔧 ITERATION {tool_iterations} - Tool result for {tc_name}[{tc_id}]: {result_preview}")

                    # Enhanced debugging: Verify tool call ID consistency
                    result_tool_call_id = result.get('tool_call_id', 'unknown')
                    if result_tool_call_id != tc_id:
                        self.logger.warning(f"⚠️ ITERATION {tool_iterations} - Tool call ID mismatch! Expected: {tc_id}, Got: {result_tool_call_id}")
                    else:
                        self.logger.debug(f"✅ ITERATION {tool_iterations} - Tool call ID matches: {tc_id}")

                    # Add tool result to conversation
                    conversation_messages.append(result)

                    # Execute hooks
                    context.set("tool_result", result)
                    context = await self._execute_hooks("after_toolcall", context)

                # Continue loop - the LLM will be called again with tool results
                self.logger.debug(f"🔄 Continuing agentic loop with tool results")

|
1108
|
+
if tool_iterations >= max_tool_iterations:
|
1109
|
+
self.logger.warning(f"⚠️ Reached max tool iterations ({max_tool_iterations})")
|
1110
|
+
|
1111
|
+
# Generate a helpful explanation for the user about hitting iteration limit
|
1112
|
+
explanation_response = self._generate_iteration_limit_explanation(
|
1113
|
+
max_iterations=max_tool_iterations,
|
1114
|
+
conversation_messages=conversation_messages,
|
1115
|
+
original_request=messages[0] if messages else None
|
1116
|
+
)
|
1117
|
+
|
1118
|
+
# Set the response to the explanation
|
1119
|
+
response = explanation_response
|
1120
|
+
|
1121
|
+
# Execute on_message hooks (payment skill will track LLM costs here)
|
1122
|
+
context = await self._execute_hooks("on_message", context)
|
1123
|
+
|
1124
|
+
# Execute finalize_connection hooks
|
1125
|
+
context = await self._execute_hooks("finalize_connection", context)
|
1126
|
+
|
1127
|
+
return response
|
1128
|
+
|
1129
|
+
except Exception as e:
|
1130
|
+
# Handle errors and cleanup
|
1131
|
+
self.logger.exception(f"💥 Agent execution error agent='{self.name}' error='{e}'")
|
1132
|
+
await self._execute_hooks("finalize_connection", context)
|
1133
|
+
raise
|
1134
|
+
|
1135
|
+
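The branch above closes the non-streaming agentic loop: internal tools are executed in-process, and any call the agent cannot resolve is handed back to the caller on an OpenAI-style completion with finish_reason "tool_calls". A minimal client-side sketch of that contract (not part of the package; `execute_locally` is a hypothetical stand-in for the caller's own tool runner, and `run()` is assumed to mirror the chat-completions shape shown above):

import json

async def client_loop(agent, messages, client_tools):
    while True:
        response = await agent.run(messages, tools=client_tools)
        choice = response["choices"][0]
        message = choice["message"]
        if choice.get("finish_reason") != "tool_calls":
            return message.get("content")  # no external tools left - final answer
        # Echo the assistant turn, then answer each surfaced call ourselves
        messages.append(message)
        for call in message.get("tool_calls", []):
            args = json.loads(call["function"].get("arguments") or "{}")
            result = execute_locally(call["function"]["name"], args)  # hypothetical
            messages.append({"role": "tool", "tool_call_id": call["id"], "content": str(result)})
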
    def _generate_iteration_limit_explanation(
        self,
        max_iterations: int,
        conversation_messages: List[Dict[str, Any]],
        original_request: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Generate a helpful explanation when hitting the iteration limit"""

        # Analyze the recent tool calls to understand what went wrong
        recent_tool_calls = []
        failed_operations = []

        # Look at the last few messages to understand the pattern
        for msg in conversation_messages[-10:]:  # Last 10 messages
            if msg.get("role") == "assistant" and msg.get("tool_calls"):
                for tool_call in msg["tool_calls"]:
                    tool_name = tool_call.get("function", {}).get("name", "unknown")
                    recent_tool_calls.append(tool_name)
            elif msg.get("role") == "tool":
                content = msg.get("content", "")
                # Check for common failure patterns
                if any(fail_indicator in content.lower() for fail_indicator in
                       ["failed", "error", "upload failed", "not found", "timeout"]):
                    failed_operations.append((content[:100] + "...") if len(content) > 100 else content)

        # Determine the original task from the first user message
        original_task = "complete your request"
        if original_request and original_request.get("role") == "user":
            user_content = original_request.get("content", "")
            if user_content:
                original_task = f'"{user_content[:100]}{"..." if len(user_content) > 100 else ""}"'

        # Count repeated tool calls to identify loops
        tool_call_counts = {}
        for tool in recent_tool_calls:
            tool_call_counts[tool] = tool_call_counts.get(tool, 0) + 1

        # Generate explanation based on analysis
        explanation_parts = [
            f"I apologize, but I encountered technical difficulties while trying to {original_task}."
        ]

        if failed_operations:
            explanation_parts.append(
                f"I attempted several operations but encountered repeated failures: {'; '.join(failed_operations[:3])}"
            )

        # Identify the most common repeated tool
        if tool_call_counts:
            most_repeated_tool = max(tool_call_counts.items(), key=lambda x: x[1])
            if most_repeated_tool[1] > 3:  # If a tool was called more than 3 times
                explanation_parts.append(
                    f"I repeatedly tried using the '{most_repeated_tool[0]}' tool ({most_repeated_tool[1]} times) but it kept failing."
                )

        explanation_parts.extend([
            f"After {max_iterations} attempts, I've reached my maximum number of tool iterations and need to stop here to prevent an infinite loop.",
            "",
            "This could be due to:",
            "• A temporary service issue with one of my tools",
            "• A configuration problem that's causing repeated failures",
            "• The task requiring a different approach than I attempted",
            "",
            "Would you like to:",
            "• Try the request again (the issue might be temporary)",
            "• Rephrase your request in a different way",
            "• Break down your request into smaller, more specific tasks"
        ])

        explanation_text = "\n".join(explanation_parts)

        # Create a properly formatted OpenAI-style response
        return {
            "id": f"chatcmpl-iteration-limit-{int(datetime.now().timestamp())}",
            "object": "chat.completion",
            "created": int(datetime.now().timestamp()),
            "model": "iteration-limit-handler",
            "choices": [{
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": explanation_text
                },
                "finish_reason": "stop"
            }],
            "usage": {
                "prompt_tokens": 0,
                "completion_tokens": len(explanation_text.split()),
                "total_tokens": len(explanation_text.split())
            }
        }

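Because the fallback above is shaped like an ordinary chat completion, callers can consume it without special-casing. An illustrative call (`agent` and `conversation` are assumed to be in scope; field values are made up):

limit_response = agent._generate_iteration_limit_explanation(
    max_iterations=10,
    conversation_messages=conversation,
    original_request={"role": "user", "content": "Upload my report"},
)
assert limit_response["object"] == "chat.completion"
assert limit_response["choices"][0]["finish_reason"] == "stop"
print(limit_response["choices"][0]["message"]["content"])  # user-facing explanation
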
    def _log_llm_usage(self, response: Any, streaming: bool = False) -> None:
        """Helper to log LLM usage from response"""
        try:
            model_name = None
            usage_obj = None
            if hasattr(response, 'model'):
                model_name = getattr(response, 'model')
            elif isinstance(response, dict):
                model_name = response.get('model')
            if hasattr(response, 'usage'):
                usage_obj = getattr(response, 'usage')
            elif isinstance(response, dict):
                usage_obj = response.get('usage')
            if usage_obj:
                # usage may be a dict or an object (e.g. a pydantic model);
                # normalize either, treating missing/None fields as 0
                def _field(obj: Any, key: str) -> int:
                    value = obj.get(key) if isinstance(obj, dict) else getattr(obj, key, None)
                    return int(value or 0)

                prompt_tokens = _field(usage_obj, 'prompt_tokens')
                completion_tokens = _field(usage_obj, 'completion_tokens')
                total_tokens = _field(usage_obj, 'total_tokens') or (prompt_tokens + completion_tokens)
                self._append_usage_record(
                    record_type='llm',
                    payload={
                        'model': model_name or 'unknown',
                        'prompt_tokens': prompt_tokens,
                        'completion_tokens': completion_tokens,
                        'total_tokens': total_tokens,
                        'streaming': streaming,
                    }
                )
        except Exception:
            pass

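Each successful extraction above lands in context.usage as one flat record via _append_usage_record, which keeps later aggregation trivial. Illustrative record shape and a sample aggregation (field values are made up; `context` is assumed to be the current request context):

record = {
    'timestamp': 1718000000.0,   # added by _append_usage_record
    'type': 'llm',
    'model': 'gpt-4o-mini',      # illustrative
    'prompt_tokens': 512,
    'completion_tokens': 64,
    'total_tokens': 576,
    'streaming': False,
}

# e.g. total LLM tokens consumed during this connection:
llm_total = sum(r.get('total_tokens', 0) for r in context.usage if r.get('type') == 'llm')
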
    async def run_streaming(
        self,
        messages: List[Dict[str, Any]],
        tools: Optional[List[Dict[str, Any]]] = None,
        **kwargs
    ) -> AsyncGenerator[Dict[str, Any], None]:
        """Run agent with streaming response - implements agentic loop for tool calling"""

        # Get existing context or create new one
        context = get_context()
        if context:
            # Update existing context with new data
            context.messages = messages
            context.stream = True
            context.agent = self
        else:
            # Create new context if none exists
            context = create_context(
                messages=messages,
                stream=True,
                agent=self
            )
            set_context(context)

        try:
            # Ensure all skills are initialized with agent reference
            await self._ensure_skills_initialized()

            # Execute on_connection hooks
            context = await self._execute_hooks("on_connection", context)

            # Merge external tools
            all_tools = self._merge_tools(tools or [])

            # Find primary LLM skill
            llm_skill = self.skills.get("primary_llm")
            if not llm_skill:
                raise ValueError("No LLM skill configured")

            # Enhance messages with dynamic prompts before first LLM call
            enhanced_messages = await self._enhance_messages_with_prompts(messages, context)

            # Maintain conversation history for agentic loop
            conversation_messages = enhanced_messages.copy()

            # Agentic loop for streaming
            max_tool_iterations = 10
            tool_iterations = 0

            while tool_iterations < max_tool_iterations:
                tool_iterations += 1

                # Debug logging
                self.logger.debug(f"🚀 Streaming LLM for agent '{self.name}' (iteration {tool_iterations}) with {len(all_tools)} tools")

                # Enhanced debugging: log conversation history before streaming LLM call
                self.logger.debug(f"📝 STREAMING ITERATION {tool_iterations} - Conversation history ({len(conversation_messages)} messages):")
                for i, msg in enumerate(conversation_messages):
                    role = msg.get('role', 'unknown')
                    content_preview = str(msg.get('content', ''))[:100] + ('...' if len(str(msg.get('content', ''))) > 100 else '')
                    tool_calls = msg.get('tool_calls', [])
                    tool_call_id = msg.get('tool_call_id', '')

                    if role == 'system':
                        self.logger.debug(f"  [{i}] SYSTEM: {content_preview}")
                    elif role == 'user':
                        self.logger.debug(f"  [{i}] USER: {content_preview}")
                    elif role == 'assistant':
                        if tool_calls:
                            tool_names = [tc.get('function', {}).get('name', 'unknown') for tc in tool_calls]
                            self.logger.debug(f"  [{i}] ASSISTANT: {content_preview} | TOOL_CALLS: {tool_names}")
                        else:
                            self.logger.debug(f"  [{i}] ASSISTANT: {content_preview}")
                    elif role == 'tool':
                        self.logger.debug(f"  [{i}] TOOL[{tool_call_id}]: {content_preview}")
                    else:
                        self.logger.debug(f"  [{i}] {role.upper()}: {content_preview}")

                # Stream from LLM and collect chunks
                full_response_chunks = []
                held_chunks = []  # Chunks with tool fragments
                tool_calls_detected = False
                chunk_count = 0

                async for chunk in llm_skill.chat_completion_stream(conversation_messages, tools=all_tools):
                    chunk_count += 1

                    # Execute on_chunk hooks
                    context.set("chunk", chunk)
                    context = await self._execute_hooks("on_chunk", context)
                    modified_chunk = context.get("chunk", chunk)

                    # Store chunk for potential tool processing
                    full_response_chunks.append(modified_chunk)

                    # Check for tool call indicators
                    choice = modified_chunk.get("choices", [{}])[0] if isinstance(modified_chunk, dict) else {}
                    delta = choice.get("delta", {}) if isinstance(choice, dict) else {}
                    delta_tool_calls = delta.get("tool_calls")
                    finish_reason = choice.get("finish_reason")

                    # Check if we have tool call fragments
                    if delta_tool_calls is not None:
                        held_chunks.append(modified_chunk)
                        self.logger.debug(f"🔧 STREAMING: Tool call fragment in chunk #{chunk_count}")
                        continue  # Don't yield tool fragments

                    # Check if tool calls are complete
                    if finish_reason == "tool_calls":
                        tool_calls_detected = True
                        self.logger.debug(f"🔧 STREAMING: Tool calls complete at chunk #{chunk_count}")
                        break  # Exit streaming loop to process tools

                    # Yield content chunks:
                    # - in the first iteration, all non-tool chunks for real-time display
                    # - in subsequent iterations, the final response after tools
                    if not delta_tool_calls:
                        yield modified_chunk

                    # Log usage if final chunk
                    if finish_reason and modified_chunk.get('usage'):
                        self._log_llm_usage(modified_chunk, streaming=True)

                # If no tool calls detected, we're done
                if not tool_calls_detected:
                    self.logger.debug(f"✅ Streaming finished (no tool calls) after {tool_iterations} iteration(s)")
                    break

                # Reconstruct response from chunks to process tool calls
                full_response = self._reconstruct_response_from_chunks(full_response_chunks)

                if not self._has_tool_calls(full_response):
                    # No tool calls after all - shouldn't happen but handle gracefully
                    self.logger.debug("🔧 STREAMING: No tool calls found in reconstructed response")
                    break

                # Extract tool calls
                assistant_message = full_response["choices"][0]["message"]
                tool_calls = assistant_message.get("tool_calls", [])

                # More detailed logging to help diagnose tool calling loops
                tool_details = []
                for tc in tool_calls:
                    func_name = tc['function']['name']
                    func_args = tc['function'].get('arguments', '{}')
                    try:
                        args_preview = func_args[:100] if len(func_args) > 100 else func_args
                    except Exception:
                        args_preview = str(func_args)[:100]
                    tool_details.append(f"{func_name}(args={args_preview})")
                self.logger.debug(f"🔧 Iteration {tool_iterations}: Processing {len(tool_calls)} tool call(s): {tool_details}")

                # Separate internal and external tools
                internal_tools = []
                external_tools = []

                for tool_call in tool_calls:
                    function_name = tool_call["function"]["name"]
                    if self._get_tool_function_by_name(function_name):
                        internal_tools.append(tool_call)
                    else:
                        external_tools.append(tool_call)

                # If there are ANY external tools, return to client
                if external_tools:
                    self.logger.debug(f"🔄 Found {len(external_tools)} external tool(s), returning to client")

                    # First execute any internal tools
                    if internal_tools:
                        self.logger.debug(f"⚡ Executing {len(internal_tools)} internal tool(s) first")
                        for tool_call in internal_tools:
                            # Execute hooks
                            context.set("tool_call", tool_call)
                            context = await self._execute_hooks("before_toolcall", context)
                            tool_call = context.get("tool_call", tool_call)

                            # Execute tool
                            result = await self._execute_single_tool(tool_call)

                            # Execute hooks
                            context.set("tool_result", result)
                            context = await self._execute_hooks("after_toolcall", context)

                    # Yield held chunks to let client reconstruct tool calls
                    for held_chunk in held_chunks:
                        yield held_chunk

                    # Yield final chunk with external tool calls
                    if hasattr(full_response, 'dict'):
                        final_response = full_response.dict()
                    elif hasattr(full_response, 'model_dump'):
                        final_response = full_response.model_dump()
                    else:
                        import copy
                        final_response = copy.deepcopy(full_response)

                    # Keep only external tool calls
                    final_response["choices"][0]["message"]["tool_calls"] = external_tools

                    # Convert to streaming chunk format
                    final_chunk = self._convert_response_to_chunk(final_response)
                    yield final_chunk

                    # Execute cleanup hooks
                    context = await self._execute_hooks("on_message", context)
                    context = await self._execute_hooks("finalize_connection", context)
                    return

                # All tools are internal - execute and continue loop
                self.logger.debug(f"⚙️ Executing {len(internal_tools)} internal tool(s)")

                # Add assistant message with tool calls to conversation.
                # IMPORTANT: preserve the entire assistant message structure to avoid
                # confusing the LLM - only modify tool_calls if needed, but keep all
                # original fields. The message object must be converted to dict format
                # for LLM compatibility.
                original_type = type(assistant_message).__name__
                if hasattr(assistant_message, 'dict') and callable(assistant_message.dict):
                    assistant_msg_copy = assistant_message.dict()
                    self.logger.debug(f"🔄 ITERATION {tool_iterations} - Converted assistant message from {original_type} to dict via .dict()")
                elif hasattr(assistant_message, 'model_dump') and callable(assistant_message.model_dump):
                    assistant_msg_copy = assistant_message.model_dump()
                    self.logger.debug(f"🔄 ITERATION {tool_iterations} - Converted assistant message from {original_type} to dict via .model_dump()")
                else:
                    assistant_msg_copy = dict(assistant_message) if hasattr(assistant_message, 'items') else assistant_message.copy()
                    self.logger.debug(f"🔄 ITERATION {tool_iterations} - Converted assistant message from {original_type} to dict via dict() or copy()")

                # If we filtered tools, update the tool_calls (though for internal-only,
                # they should be the same)
                if 'tool_calls' in assistant_msg_copy:
                    # Convert tool_calls to dict format as well
                    converted_tools = []
                    for tool_call in internal_tools:
                        if hasattr(tool_call, 'dict') and callable(tool_call.dict):
                            converted_tools.append(tool_call.dict())
                        elif hasattr(tool_call, 'model_dump') and callable(tool_call.model_dump):
                            converted_tools.append(tool_call.model_dump())
                        else:
                            converted_tools.append(dict(tool_call) if hasattr(tool_call, 'items') else tool_call)
                    assistant_msg_copy['tool_calls'] = converted_tools
                conversation_messages.append(assistant_msg_copy)

                # Execute each internal tool
                for tool_call in internal_tools:
                    # Execute hooks
                    context.set("tool_call", tool_call)
                    context = await self._execute_hooks("before_toolcall", context)
                    tool_call = context.get("tool_call", tool_call)

                    # Enhanced debugging: log streaming tool execution details
                    tc_name = tool_call.get('function', {}).get('name', 'unknown')
                    tc_id = tool_call.get('id', 'unknown')
                    tc_args = tool_call.get('function', {}).get('arguments', '{}')
                    self.logger.debug(f"🔧 STREAMING ITERATION {tool_iterations} - Executing tool: {tc_name}[{tc_id}] with args: {tc_args}")

                    # Execute tool
                    result = await self._execute_single_tool(tool_call)

                    # Enhanced debugging: log streaming tool result
                    result_content = result.get('content', '')
                    result_preview = result_content[:200] + ('...' if len(result_content) > 200 else '')
                    self.logger.debug(f"🔧 STREAMING ITERATION {tool_iterations} - Tool result for {tc_name}[{tc_id}]: {result_preview}")

                    # Enhanced debugging: verify streaming tool call ID consistency
                    result_tool_call_id = result.get('tool_call_id', 'unknown')
                    if result_tool_call_id != tc_id:
                        self.logger.warning(f"⚠️ STREAMING ITERATION {tool_iterations} - Tool call ID mismatch! Expected: {tc_id}, Got: {result_tool_call_id}")
                    else:
                        self.logger.debug(f"✅ STREAMING ITERATION {tool_iterations} - Tool call ID matches: {tc_id}")

                    # Add result to conversation
                    conversation_messages.append(result)

                    # Execute hooks
                    context.set("tool_result", result)
                    context = await self._execute_hooks("after_toolcall", context)

                # Continue loop - will stream next LLM response
                self.logger.debug(f"🔄 Continuing agentic loop with tool results")

            if tool_iterations >= max_tool_iterations:
                self.logger.warning(f"⚠️ Reached max tool iterations ({max_tool_iterations})")

            # Execute final hooks
            context = await self._execute_hooks("on_message", context)
            context = await self._execute_hooks("finalize_connection", context)

        except Exception as e:
            self.logger.exception(f"💥 Streaming execution error agent='{self.name}' error='{e}'")
            await self._execute_hooks("finalize_connection", context)
            raise

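A minimal consumer sketch for the generator above (not from the package): content arrives as OpenAI-style chat.completion.chunk deltas, and finish_reason "tool_calls" signals that unresolved external calls have been handed back to the client.

async def print_stream(agent, messages):
    async for chunk in agent.run_streaming(messages):
        choice = (chunk.get("choices") or [{}])[0]
        delta = choice.get("delta") or {}
        if delta.get("content"):
            print(delta["content"], end="", flush=True)
        if choice.get("finish_reason") == "tool_calls":
            break  # reconstruct and execute the surfaced tool calls client-side
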
    def _reconstruct_response_from_chunks(self, chunks: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Reconstruct a full LLM response from streaming chunks for tool processing"""
        if not chunks:
            return {}

        logger = self.logger

        # Check if any chunk has complete tool calls in message format
        for chunk in chunks:
            message_tool_calls = chunk.get("choices", [{}])[0].get("message", {}).get("tool_calls")
            if message_tool_calls is not None:
                logger.debug(f"🔧 RECONSTRUCTION: Found complete tool calls")
                return chunk

        # Reconstruct from streaming delta chunks
        logger.debug(f"🔧 RECONSTRUCTION: Reconstructing from {len(chunks)} delta chunks")

        # Accumulate streaming tool call data
        accumulated_tool_calls = {}
        final_chunk = chunks[-1] if chunks else {}

        for chunk in chunks:
            choice = chunk.get("choices", [{}])[0]
            delta = choice.get("delta", {}) if isinstance(choice, dict) else {}
            delta_tool_calls = delta.get("tool_calls") if isinstance(delta, dict) else None

            if delta_tool_calls:
                for tool_call in delta_tool_calls:
                    tool_index = tool_call.get("index", 0)

                    # Initialize tool call if not exists
                    if tool_index not in accumulated_tool_calls:
                        accumulated_tool_calls[tool_index] = {
                            "id": None,
                            "type": "function",
                            "function": {
                                "name": None,
                                "arguments": ""
                            }
                        }

                    # Accumulate data
                    if tool_call.get("id"):
                        accumulated_tool_calls[tool_index]["id"] = tool_call["id"]

                    func = tool_call.get("function", {})
                    if func.get("name"):
                        accumulated_tool_calls[tool_index]["function"]["name"] = func["name"]
                    if func.get("arguments"):
                        accumulated_tool_calls[tool_index]["function"]["arguments"] += func["arguments"]

        # If we have accumulated tool calls, create a response
        if accumulated_tool_calls:
            tool_calls_list = list(accumulated_tool_calls.values())

            # Try to infer missing tool names based on arguments
            for tool_call in tool_calls_list:
                if not tool_call["function"]["name"]:
                    # Try to guess the tool name from the arguments
                    args = tool_call["function"]["arguments"]
                    # Look for scope_filter pattern -> likely list_files
                    if "scope_filter" in args or "_filter" in args:
                        tool_call["function"]["name"] = "list_files"
                        logger.debug(f"🔧 RECONSTRUCTION: Inferred tool name: list_files")

            # Create reconstructed response with proper streaming format
            reconstructed = {
                "id": final_chunk.get("id", "chatcmpl-reconstructed"),
                "created": final_chunk.get("created", 0),
                "model": final_chunk.get("model", "azure/gpt-4o-mini"),
                "object": "chat.completion.chunk",
                "choices": [{
                    "index": 0,
                    "finish_reason": "tool_calls",
                    "message": {
                        "role": "assistant",
                        "content": None,
                        "tool_calls": tool_calls_list
                    },
                    "delta": {},
                    "logprobs": None
                }],
                "system_fingerprint": final_chunk.get("system_fingerprint"),
                "provider_specific_fields": None,
                "stream_options": None
            }
            logger.debug(f"🔧 RECONSTRUCTION: Reconstructed {len(tool_calls_list)} tool calls")
            return reconstructed

        # No tool calls found, return the last chunk
        logger.debug(f"🔧 RECONSTRUCTION: No tool calls found, returning last chunk")
        return final_chunk

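A worked example of the accumulation above, assuming OpenAI-style delta chunks: two fragments that share index 0 belong to the same call, so their argument strings are concatenated into one complete tool call (`agent` is an assumed instance).

chunks = [
    {"choices": [{"delta": {"tool_calls": [{"index": 0, "id": "call_1",
        "function": {"name": "list_files", "arguments": '{"scope'}}]}}]},
    {"choices": [{"delta": {"tool_calls": [{"index": 0,
        "function": {"arguments": '_filter": "all"}'}}]}}]},
]
full = agent._reconstruct_response_from_chunks(chunks)
call = full["choices"][0]["message"]["tool_calls"][0]
assert call["function"]["arguments"] == '{"scope_filter": "all"}'
assert full["choices"][0]["finish_reason"] == "tool_calls"
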
    def _convert_response_to_chunk(self, response: Dict[str, Any]) -> Dict[str, Any]:
        """Convert a processed response back to streaming chunk format"""
        # For streaming we return the response as-is; the frontend handles it
        # as a final chunk
        return response

    def _convert_response_to_streaming_chunk(self, response: Dict[str, Any]) -> Dict[str, Any]:
        """Convert a complete LLM response to streaming chunk format"""
        if not response or not response.get("choices"):
            return response

        choice = response["choices"][0]
        message = choice.get("message", {})

        # Convert to streaming chunk format
        streaming_chunk = {
            "id": response.get("id", "chatcmpl-converted"),
            "created": response.get("created", 0),
            "model": response.get("model", "azure/gpt-4o-mini"),
            "object": "chat.completion.chunk",
            "choices": [{
                "index": 0,
                "finish_reason": choice.get("finish_reason", "stop"),
                "delta": {
                    "role": message.get("role", "assistant"),
                    "content": message.get("content"),
                    "tool_calls": None
                },
                "logprobs": None
            }],
            "system_fingerprint": response.get("system_fingerprint"),
            "provider_specific_fields": None,
            "stream_options": None
        }

        return streaming_chunk

    def _append_usage_record(self, record_type: str, payload: Dict[str, Any]) -> None:
        """Append a normalized usage record to context.usage"""
        try:
            context = get_context()
            if not context or not hasattr(context, 'usage'):
                return
            import time as _time
            # Every record carries a timestamp and its type ('llm', 'tool', ...)
            # alongside the caller-supplied payload
            record = {
                'timestamp': _time.time(),
                'type': record_type,
                **payload,
            }
            context.usage.append(record)
        except Exception:
            return

    def _merge_tools(self, external_tools: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Merge external tools with agent tools - external tools have priority"""
        # Clear previous overrides (fresh for each request)
        self._overridden_tools.clear()

        # Get agent tools based on the current context's user scope
        context = get_context()
        auth_scope = context.auth_scope if context else "all"

        agent_tools = self.get_tools_for_scope(auth_scope)
        agent_tool_defs = [tool['definition'] for tool in agent_tools if tool.get('definition')]

        # Debug logging
        logger = self.logger

        external_tool_names = [tool.get('function', {}).get('name', 'unknown') for tool in external_tools] if external_tools else []
        agent_tool_names = [tool.get('function', {}).get('name', 'unknown') for tool in agent_tool_defs] if agent_tool_defs else []

        logger.debug(f"🔧 Tool merge for scope '{auth_scope}': External tools: {external_tool_names}, Agent tools: {agent_tool_names}")

        # Track tools by name, with external tools taking priority
        tools_by_name = {}

        # First add agent tools
        for tool_def in agent_tool_defs:
            tool_name = tool_def.get('function', {}).get('name', 'unknown')
            tools_by_name[tool_name] = tool_def
            logger.debug(f"  📄 Added agent tool: {tool_name}")

        # Then add external tools; these override agent tools with the same name
        for tool_def in external_tools:
            tool_name = tool_def.get('function', {}).get('name', 'unknown')
            if tool_name in tools_by_name:
                logger.debug(f"  🔄 External tool '{tool_name}' overrides agent tool")
                # Track this tool as overridden so execution logic respects the override
                self._overridden_tools.add(tool_name)
            else:
                logger.debug(f"  📄 Added external tool: {tool_name}")
            tools_by_name[tool_name] = tool_def

        # Convert back to a list; an external override replaces the agent's
        # definition in place rather than being appended as a duplicate
        all_tools = list(tools_by_name.values())

        final_tool_names = [tool.get('function', {}).get('name', 'unknown') for tool in all_tools]
        logger.debug(f"🔧 Final merged tools ({len(all_tools)}): {final_tool_names} | Overridden: {list(self._overridden_tools)}")

        return all_tools

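A sketch of the override behaviour above, assuming the agent itself registers a tool named search (definitions abbreviated to the fields _merge_tools actually reads):

external = [{"type": "function", "function": {"name": "search", "parameters": {}}}]
merged = agent._merge_tools(external)

assert [t["function"]["name"] for t in merged].count("search") == 1  # external wins, no duplicate
assert "search" in agent._overridden_tools  # execution defers this call to the client
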
    # ===== DIRECT REGISTRATION METHODS =====
    # FastAPI-style decorator methods for direct registration on agent instances

    def tool(self, func: Optional[Callable] = None, *, name: Optional[str] = None,
             description: Optional[str] = None, scope: Union[str, List[str]] = "all"):
        """Register a tool function directly on the agent instance

        Usage:
            @agent.tool
            def my_tool(param: str) -> str:
                return f"Result: {param}"

            @agent.tool(name="custom", scope="owner")
            def another_tool(value: int) -> int:
                return value * 2
        """
        def decorator(f: Callable) -> Callable:
            from ..tools.decorators import tool as tool_decorator
            decorated_func = tool_decorator(func=f, name=name, description=description, scope=scope)
            # Pass the scope from the decorator to register_tool
            effective_scope = getattr(decorated_func, '_tool_scope', scope)
            self.register_tool(decorated_func, source="agent", scope=effective_scope)
            return decorated_func

        if func is None:
            return decorator
        else:
            return decorator(func)

    def http(self, subpath: str, method: str = "get", scope: Union[str, List[str]] = "all"):
        """Register an HTTP handler directly on the agent instance

        Usage:
            @agent.http("/weather")
            def get_weather(location: str) -> dict:
                return {"location": location, "temp": 25}

            @agent.http("/data", method="post", scope="owner")
            async def post_data(data: dict) -> dict:
                return {"received": data}
        """
        def decorator(func: Callable) -> Callable:
            from ..tools.decorators import http as http_decorator
            decorated_func = http_decorator(subpath=subpath, method=method, scope=scope)(func)
            self.register_http_handler(decorated_func, source="agent")
            return decorated_func

        return decorator

    def hook(self, event: str, priority: int = 50, scope: Union[str, List[str]] = "all"):
        """Register a hook directly on the agent instance

        Usage:
            @agent.hook("on_request", priority=10)
            async def my_hook(context):
                # Process context
                return context
        """
        def decorator(func: Callable) -> Callable:
            from ..tools.decorators import hook as hook_decorator
            decorated_func = hook_decorator(event=event, priority=priority, scope=scope)(func)
            self.register_hook(event, decorated_func, priority, source="agent", scope=scope)
            return decorated_func

        return decorator

    def handoff(self, name: Optional[str] = None, handoff_type: str = "agent",
                description: Optional[str] = None, scope: Union[str, List[str]] = "all"):
        """Register a handoff directly on the agent instance

        Usage:
            @agent.handoff(handoff_type="agent")
            async def escalate_to_supervisor(issue: str):
                return HandoffResult(result=f"Escalated: {issue}", handoff_type="agent")
        """
        def decorator(func: Callable) -> Callable:
            from ..tools.decorators import handoff as handoff_decorator
            decorated_func = handoff_decorator(name=name, handoff_type=handoff_type,
                                               description=description, scope=scope)(func)
            handoff_config = Handoff(
                target=getattr(decorated_func, '_handoff_name', decorated_func.__name__),
                handoff_type=getattr(decorated_func, '_handoff_type', 'agent'),
                description=getattr(decorated_func, '_handoff_description', ''),
                scope=getattr(decorated_func, '_handoff_scope', scope)
            )
            handoff_config.metadata = {'function': decorated_func}
            self.register_handoff(handoff_config, source="agent")
            return decorated_func

        return decorator

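Taken together, these decorators give an agent instance a FastAPI-style registration surface. A combined sketch, assuming an already-constructed agent (constructor arguments omitted):

@agent.tool(scope="owner")
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b

@agent.hook("on_message", priority=20)
async def audit(context):
    # Inspect or annotate the request context, then pass it along
    return context

@agent.http("/health")
def health() -> dict:
    return {"status": "ok"}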