uer-mcp 3.0.0 → 4.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/python/src/uer/models/__init__.py +12 -0
- package/python/src/uer/models/message.py +71 -0
- package/python/src/uer/orchestration/__init__.py +13 -0
- package/python/src/uer/orchestration/context.py +327 -0
- package/python/src/uer/orchestration/history.py +170 -0
- package/python/src/uer/orchestration/orchestrator.py +380 -0
- package/python/src/uer/server.py +9 -0
- package/python/src/uer/tools/delegate.py +230 -0
package/package.json
CHANGED
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
"""Message models for chat history and multi-agent communication."""
|
|
2
|
+
|
|
3
|
+
from typing import Any, Literal
|
|
4
|
+
|
|
5
|
+
from pydantic import BaseModel, Field
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class ToolCall(BaseModel):
    """Represents a tool call made by an assistant.

    Shape matches the payload emitted by ``Message.to_dict`` for LLM APIs:
    an ``id``, a call ``type``, and the raw ``function`` payload.
    """

    # Correlates tool results (Message.tool_call_id) with this call.
    id: str = Field(..., description="Unique identifier for the tool call")
    # "function" is the only type produced by the code visible here.
    type: str = Field(default="function", description="Type of tool call")
    # Raw function payload; presumably contains name + arguments — TODO confirm schema.
    function: dict[str, Any] = Field(
        ..., description="Function details including name and arguments"
    )
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class Message(BaseModel):
    """Represents a message in a chat conversation.

    Supports system, user, assistant, and tool roles for multi-agent orchestration.
    Optional fields apply only to particular roles (see field descriptions).
    """

    role: Literal["system", "user", "assistant", "tool"] = Field(
        ..., description="Role of the message sender"
    )
    content: str | None = Field(default=None, description="Text content of the message")
    tool_calls: list[ToolCall] | None = Field(
        default=None, description="Tool calls made by assistant (assistant role only)"
    )
    tool_call_id: str | None = Field(
        default=None, description="ID of the tool call this responds to (tool role only)"
    )
    name: str | None = Field(
        default=None, description="Name of the tool or function (tool role only)"
    )

    def to_dict(self) -> dict[str, Any]:
        """Convert message to dictionary format for LLM APIs.

        Only fields that are actually set are included, so role-specific
        keys never leak into messages of other roles.
        """
        payload: dict[str, Any] = {"role": self.role}

        if self.content is not None:
            payload["content"] = self.content

        if self.tool_calls is not None:
            payload["tool_calls"] = [
                {"id": call.id, "type": call.type, "function": call.function}
                for call in self.tool_calls
            ]

        if self.tool_call_id is not None:
            payload["tool_call_id"] = self.tool_call_id

        if self.name is not None:
            payload["name"] = self.name

        return payload
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
class ContextReference(BaseModel):
    """Reference to stored context in S3 storage.

    Collected by ChatHistoryBuilder.add_context_ref; resolution of the URI
    into message content happens elsewhere (not in this module).
    """

    uri: str = Field(..., description="S3 URI or registry URI to context")
    description: str | None = Field(default=None, description="Optional description of the context")
    # Controls whether the resolved content becomes a system or user message.
    inject_as: Literal["system", "user"] = Field(
        default="system", description="How to inject the context into messages"
    )
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
"""Orchestration components for subagent delegation and multi-agent coordination."""
|
|
2
|
+
|
|
3
|
+
from .context import ContextManager
|
|
4
|
+
from .history import ChatHistoryBuilder
|
|
5
|
+
from .orchestrator import BehaviorLog, DelegationResult, SubagentOrchestrator
|
|
6
|
+
|
|
7
|
+
__all__ = [
|
|
8
|
+
"ChatHistoryBuilder",
|
|
9
|
+
"ContextManager",
|
|
10
|
+
"SubagentOrchestrator",
|
|
11
|
+
"DelegationResult",
|
|
12
|
+
"BehaviorLog",
|
|
13
|
+
]
|
|
@@ -0,0 +1,327 @@
|
|
|
1
|
+
"""Enhanced context manager with Jinja2 templates and registry data fetching."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
from datetime import datetime
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
from jinja2 import BaseLoader, Environment, TemplateNotFound
|
|
8
|
+
|
|
9
|
+
from ..storage.manager import StorageManager
|
|
10
|
+
|
|
11
|
+
logger = logging.getLogger(__name__)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class RegistryLoader(BaseLoader):
    """Jinja2 loader that fetches templates from S3 storage.

    Loaded templates are cached in-process and treated as always up to date
    (the uptodate callback returns True), so a new loader is required to
    pick up template changes.
    """

    def __init__(self, storage: StorageManager):
        """Initialize registry loader.

        Args:
            storage: Storage manager for fetching templates
        """
        self.storage = storage
        # template URI -> (source text, etag or None). The etag is stored for
        # potential freshness checks but is not consulted on cache hits today.
        self._cache: dict[str, tuple[str, str | None]] = {}

    def get_source(self, environment: Environment, template: str):
        """Load template from storage.

        Args:
            environment: Jinja2 environment
            template: Template URI (e.g., 'registry://templates/prompt.md' or 's3://bucket/key')

        Returns:
            Tuple of (source, filename, uptodate_func)

        Raises:
            TemplateNotFound: If storage is unavailable or the fetch fails.
        """
        # Check cache first; the stored etag is intentionally unused here.
        if template in self._cache:
            source, _etag = self._cache[template]
            return source, template, lambda: True

        try:
            if not self.storage.is_available():
                logger.warning(f"Storage not available, cannot load template: {template}")
                raise TemplateNotFound(template)

            # Fetch from storage
            content, metadata = self.storage.get_sync(template)
            source = content.decode("utf-8")
            etag = metadata.etag if hasattr(metadata, "etag") else None

            # Cache it
            self._cache[template] = (source, etag)

            logger.debug(f"Loaded template from {template} ({len(source)} chars)")
            return source, template, lambda: True

        except TemplateNotFound:
            # Already the correct "not found" signal (e.g. storage offline):
            # re-raise as-is instead of double-logging and re-wrapping it.
            raise
        except Exception as e:
            logger.error(f"Failed to load template {template}: {e}")
            raise TemplateNotFound(template) from None
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
class ContextManager:
    """Manages context assembly with Jinja2 templates and registry data fetching.

    Features:
    - Template-based context assembly
    - Registry data expansion ({{ uri | expand }})
    - Token optimization through caching
    - Dynamic variable injection
    - Nested template support
    """

    def __init__(self, storage: StorageManager | None = None):
        """Initialize context manager.

        Args:
            storage: Storage manager for registry access (a new one is created
                when omitted)
        """
        self.storage = storage or StorageManager()

        # Jinja2 environment backed by the registry loader. Autoescape stays
        # off because rendered output feeds LLM prompts, not HTML.
        self.env = Environment(
            loader=RegistryLoader(self.storage),
            autoescape=False,  # Don't escape for LLM prompts
            trim_blocks=True,
            lstrip_blocks=True,
        )

        # Custom filters available inside templates.
        self.env.filters["expand"] = self._expand_filter
        self.env.filters["fetch"] = self._fetch_filter
        self.env.filters["truncate_tokens"] = self._truncate_tokens_filter
        self.env.filters["summarize"] = self._summarize_filter

        # uri -> (content, fetched_at); entries older than the TTL are refetched.
        self._context_cache: dict[str, tuple[str, datetime]] = {}
        self._cache_ttl_seconds = 300  # 5 minutes

        logger.info("ContextManager initialized with Jinja2 support")

    def _fetch_content(self, uri: str) -> str:
        """Fetch URI content with TTL caching, raising on failure.

        Shared backend for the ``expand`` and ``fetch`` filters. Unlike the
        filters themselves, this helper propagates errors so each filter can
        apply its own fallback policy.

        Args:
            uri: URI to fetch

        Returns:
            Decoded UTF-8 content from the URI

        Raises:
            RuntimeError: If storage is not available.
            Exception: Whatever the underlying storage fetch raises.
        """
        # Serve fresh-enough cached content first (works even when storage
        # is currently unavailable).
        if uri in self._context_cache:
            content, cached_at = self._context_cache[uri]
            age = (datetime.now() - cached_at).total_seconds()
            if age < self._cache_ttl_seconds:
                logger.debug(f"Cache hit for {uri} (age: {age:.1f}s)")
                return content

        if not self.storage.is_available():
            raise RuntimeError(f"Storage not available: {uri}")

        # Fetch from storage and cache the decoded text.
        content_bytes, metadata = self.storage.get_sync(uri)
        content = content_bytes.decode("utf-8")
        self._context_cache[uri] = (content, datetime.now())

        logger.info(f"Expanded {uri} ({len(content)} chars)")
        return content

    def _expand_filter(self, uri: str) -> str:
        """Jinja2 filter to expand a URI by fetching its content.

        Usage: {{ 'registry://context/analysis.txt' | expand }}

        Never raises: any failure is rendered as bracketed placeholder text
        so template rendering can continue.

        Args:
            uri: URI to expand

        Returns:
            Content from the URI, or a placeholder describing the failure
        """
        try:
            return self._fetch_content(uri)
        except RuntimeError:
            # Raised by _fetch_content when storage is offline. NOTE(review):
            # a RuntimeError from storage.get_sync would land here too.
            logger.warning(f"Storage not available, cannot expand: {uri}")
            return f"[Storage unavailable: {uri}]"
        except Exception as e:
            logger.error(f"Failed to expand {uri}: {e}")
            return f"[Error expanding {uri}: {e}]"

    def _fetch_filter(self, uri: str, default: str = "") -> str:
        """Jinja2 filter to fetch content with a default fallback.

        Usage: {{ 'registry://context/optional.txt' | fetch('default value') }}

        Args:
            uri: URI to fetch
            default: Default value if fetch fails

        Returns:
            Content or default value
        """
        # Calls the raising helper directly (not _expand_filter, which never
        # raises) so the default fallback actually triggers on failure.
        try:
            return self._fetch_content(uri)
        except Exception:
            return default

    def _truncate_tokens_filter(self, text: str, max_tokens: int = 1000) -> str:
        """Jinja2 filter to truncate text to approximate token count.

        Usage: {{ long_text | truncate_tokens(500) }}

        Args:
            text: Text to truncate
            max_tokens: Maximum token count (approximate)

        Returns:
            Truncated text
        """
        # Rough approximation: 4 chars per token
        max_chars = max_tokens * 4
        if len(text) <= max_chars:
            return text

        truncated = text[:max_chars]
        logger.debug(
            f"Truncated text from {len(text)} to {len(truncated)} chars (~{max_tokens} tokens)"
        )
        return truncated + "..."

    def _summarize_filter(self, text: str, max_lines: int = 10) -> str:
        """Jinja2 filter to summarize text by taking first N lines.

        Usage: {{ long_text | summarize(5) }}

        Args:
            text: Text to summarize
            max_lines: Maximum number of lines

        Returns:
            Summarized text
        """
        lines = text.split("\n")
        if len(lines) <= max_lines:
            return text

        summary = "\n".join(lines[:max_lines])
        logger.debug(f"Summarized text from {len(lines)} to {max_lines} lines")
        return summary + f"\n... ({len(lines) - max_lines} more lines)"

    async def render_template(
        self, template_uri: str, variables: dict[str, Any] | None = None
    ) -> str:
        """Render a Jinja2 template with variables.

        NOTE: declared async for interface consistency; the body currently
        performs no awaits (template loading is synchronous).

        Args:
            template_uri: URI to template (e.g., 'registry://templates/prompt.md')
            variables: Variables to inject into template

        Returns:
            Rendered template content

        Raises:
            Exception: Propagated from template loading or rendering.
        """
        variables = variables or {}

        try:
            template = self.env.get_template(template_uri)
            rendered = template.render(**variables)
            logger.info(f"Rendered template {template_uri} ({len(rendered)} chars)")
            return rendered

        except Exception as e:
            logger.error(f"Failed to render template {template_uri}: {e}")
            raise

    def render_string(self, template_string: str, variables: dict[str, Any] | None = None) -> str:
        """Render a template string with variables.

        Args:
            template_string: Template content as string
            variables: Variables to inject into template

        Returns:
            Rendered content

        Raises:
            Exception: Propagated from template parsing or rendering.
        """
        variables = variables or {}

        try:
            template = self.env.from_string(template_string)
            rendered = template.render(**variables)
            logger.debug(f"Rendered string template ({len(rendered)} chars)")
            return rendered

        except Exception as e:
            logger.error(f"Failed to render string template: {e}")
            raise

    async def assemble_context(
        self,
        template: str | None = None,
        context_refs: list[str] | None = None,
        variables: dict[str, Any] | None = None,
        max_tokens: int | None = None,
    ) -> str:
        """Assemble context from template and/or URIs.

        Args:
            template: Template string or URI to render
            context_refs: List of URIs to expand and include
            variables: Variables for template rendering
            max_tokens: Optional token limit for truncation

        Returns:
            Assembled context string
        """
        # Copy so that context_N additions below never mutate the caller's dict.
        variables = dict(variables) if variables else {}
        parts: list[str] = []

        # Expose each context ref to the template as context_N / context_N_uri.
        if context_refs:
            for i, uri in enumerate(context_refs):
                try:
                    content = self._expand_filter(uri)
                    variables[f"context_{i}"] = content
                    variables[f"context_{i}_uri"] = uri
                except Exception as e:
                    logger.warning(f"Failed to load context ref {uri}: {e}")

        # Render template if provided
        if template:
            if template.startswith("registry://") or template.startswith("s3://"):
                # Template URI
                rendered = await self.render_template(template, variables)
            else:
                # Template string
                rendered = self.render_string(template, variables)
            parts.append(rendered)

        # Add raw context refs if no template
        elif context_refs:
            for uri in context_refs:
                try:
                    content = self._expand_filter(uri)
                    parts.append(f"# Context from {uri}\n\n{content}")
                except Exception as e:
                    logger.warning(f"Failed to expand {uri}: {e}")

        # Combine parts
        assembled = "\n\n---\n\n".join(parts)

        # Truncate if needed
        if max_tokens:
            assembled = self._truncate_tokens_filter(assembled, max_tokens)

        logger.info(f"Assembled context ({len(assembled)} chars, ~{len(assembled) // 4} tokens)")
        return assembled

    def clear_cache(self) -> None:
        """Clear the context cache."""
        self._context_cache.clear()
        logger.info("Cleared context cache")

    def get_cache_stats(self) -> dict[str, Any]:
        """Get cache statistics.

        Returns:
            Dictionary with cache stats
        """
        now = datetime.now()
        valid_entries = sum(
            1
            for _, cached_at in self._context_cache.values()
            if (now - cached_at).total_seconds() < self._cache_ttl_seconds
        )

        return {
            "total_entries": len(self._context_cache),
            "valid_entries": valid_entries,
            "ttl_seconds": self._cache_ttl_seconds,
        }
|
|
@@ -0,0 +1,170 @@
|
|
|
1
|
+
"""Chat history builder for multi-agent conversations."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
from typing import Any
|
|
5
|
+
|
|
6
|
+
from ..models.message import ContextReference, Message, ToolCall
|
|
7
|
+
|
|
8
|
+
logger = logging.getLogger(__name__)
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class ChatHistoryBuilder:
    """Fluent builder for LLM chat histories.

    Accumulates system/user/assistant/tool messages for multi-agent
    orchestration and collects context references (S3/registry URIs) to be
    resolved later. All mutators return self for chaining.
    """

    def __init__(self):
        """Initialize empty chat history."""
        self.messages: list[Message] = []
        self.context_refs: list[ContextReference] = []

    def add_system(self, content: str) -> "ChatHistoryBuilder":
        """Append a system message (instructions, context, etc.).

        Args:
            content: System message content

        Returns:
            Self for method chaining
        """
        self.messages.append(Message(role="system", content=content))
        logger.debug(f"Added system message ({len(content)} chars)")
        return self

    def add_user(self, content: str) -> "ChatHistoryBuilder":
        """Append a user message.

        Args:
            content: User message content

        Returns:
            Self for method chaining
        """
        self.messages.append(Message(role="user", content=content))
        logger.debug(f"Added user message ({len(content)} chars)")
        return self

    def add_assistant(
        self, content: str | None = None, tool_calls: list[dict[str, Any]] | None = None
    ) -> "ChatHistoryBuilder":
        """Append an assistant message, optionally carrying tool calls.

        Args:
            content: Assistant response content (optional if tool_calls provided)
            tool_calls: List of tool calls made by assistant

        Returns:
            Self for method chaining
        """
        calls: list[ToolCall] | None = None
        if tool_calls:
            # Normalize raw dicts into ToolCall models; missing keys get
            # safe defaults.
            calls = [
                ToolCall(
                    id=raw.get("id", ""),
                    type=raw.get("type", "function"),
                    function=raw.get("function", {}),
                )
                for raw in tool_calls
            ]

        self.messages.append(Message(role="assistant", content=content, tool_calls=calls))
        logger.debug(
            f"Added assistant message (content: {len(content or '')} chars, "
            f"tool_calls: {len(tool_calls or [])})"
        )
        return self

    def add_tool_result(
        self, tool_call_id: str, result: str, name: str | None = None
    ) -> "ChatHistoryBuilder":
        """Append a tool-result message answering a prior tool call.

        Args:
            tool_call_id: ID of the tool call this responds to
            result: Result content from tool execution
            name: Optional name of the tool

        Returns:
            Self for method chaining
        """
        message = Message(role="tool", content=result, tool_call_id=tool_call_id, name=name)
        self.messages.append(message)
        logger.debug(f"Added tool result for call {tool_call_id} ({len(result)} chars)")
        return self

    def add_context_ref(
        self,
        uri: str,
        description: str | None = None,
        inject_as: str = "system",
    ) -> "ChatHistoryBuilder":
        """Record a context reference to be resolved later.

        Args:
            uri: S3 URI or registry URI to context
            description: Optional description of the context
            inject_as: How to inject context ('system' or 'user')

        Returns:
            Self for method chaining
        """
        ref = ContextReference(uri=uri, description=description, inject_as=inject_as)
        self.context_refs.append(ref)
        logger.debug(f"Added context reference: {uri} (inject as {inject_as})")
        return self

    def build(self) -> list[dict[str, Any]]:
        """Build the final message list for LLM API.

        Returns:
            List of message dictionaries ready for LLM API
        """
        payload = [message.to_dict() for message in self.messages]
        logger.info(f"Built chat history with {len(payload)} messages")
        return payload

    def get_messages(self) -> list[Message]:
        """Return the underlying Message objects (not copies).

        Returns:
            List of Message objects
        """
        return self.messages

    def clear(self) -> "ChatHistoryBuilder":
        """Remove all messages and context references.

        Returns:
            Self for method chaining
        """
        self.messages.clear()
        self.context_refs.clear()
        logger.debug("Cleared chat history")
        return self

    def message_count(self) -> int:
        """Return the number of messages currently held.

        Returns:
            Number of messages
        """
        return len(self.messages)

    def estimate_tokens(self) -> int:
        """Estimate token count (rough approximation).

        Uses simple heuristic: ~4 characters per token. Only message
        content is counted; tool-call payloads are ignored.

        Returns:
            Estimated token count
        """
        char_total = sum(len(message.content or "") for message in self.messages)
        approx = char_total // 4
        logger.debug(f"Estimated tokens: {approx} ({char_total} chars)")
        return approx
|
|
@@ -0,0 +1,380 @@
|
|
|
1
|
+
"""Subagent orchestrator for multi-agent delegation and coordination."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
from datetime import datetime
|
|
5
|
+
from typing import Any
|
|
6
|
+
|
|
7
|
+
from pydantic import BaseModel, Field
|
|
8
|
+
|
|
9
|
+
from ..llm.gateway import LLMGateway
|
|
10
|
+
from ..storage.manager import StorageManager
|
|
11
|
+
from .context import ContextManager
|
|
12
|
+
|
|
13
|
+
logger = logging.getLogger(__name__)
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class DelegationResult(BaseModel):
    """Result from a subagent delegation.

    Returned by SubagentOrchestrator.delegate for the success case, the
    max-iterations case, and the error case; optional fields are left None
    when a delegation fails early.
    """

    success: bool = Field(..., description="Whether delegation succeeded")
    response: str | None = Field(default=None, description="Final response from agent")
    tool_calls: list[dict[str, Any]] | None = Field(
        default=None, description="Tool calls made by agent"
    )
    error: str | None = Field(default=None, description="Error message if failed")
    tokens_used: int | None = Field(default=None, description="Total tokens used in delegation")
    model_used: str | None = Field(default=None, description="Model used for delegation")
    iterations: int = Field(default=0, description="Number of agentic loop iterations")
    stored_at: str | None = Field(default=None, description="URI where result was stored")
    metadata: dict[str, Any] = Field(default_factory=dict, description="Additional metadata")
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class BehaviorLog(BaseModel):
    """Log entry for multi-agent behavior monitoring.

    Produced during delegation to record observed agent behaviors
    (volunteer / conformity / destructive patterns) for later inspection.
    """

    # Wall-clock time of the observation (naive local time).
    timestamp: datetime = Field(default_factory=datetime.now)
    agent_id: str = Field(..., description="Identifier for the agent")
    behavior_type: str = Field(
        ..., description="Type of behavior (volunteer, conformity, destructive, etc.)"
    )
    description: str = Field(..., description="Description of the behavior")
    context: dict[str, Any] = Field(default_factory=dict, description="Context information")
    severity: str = Field(default="info", description="Severity level (info, warning, critical)")
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
class SubagentOrchestrator:
|
|
46
|
+
"""Orchestrates subagent delegation with multi-agent behavior monitoring.
|
|
47
|
+
|
|
48
|
+
Inspired by Chen 2024 AgentVerse research on emergent behaviors:
|
|
49
|
+
- Volunteer behaviors: Agents offering unsolicited assistance
|
|
50
|
+
- Conformity behaviors: Agents aligning with group goals
|
|
51
|
+
- Destructive behaviors: Actions leading to undesired outcomes
|
|
52
|
+
"""
|
|
53
|
+
|
|
54
|
+
def __init__(
    self,
    gateway: LLMGateway | None = None,
    storage: StorageManager | None = None,
):
    """Initialize orchestrator.

    Args:
        gateway: LLM gateway for model calls (creates new if None)
        storage: Storage manager for context resolution (creates new if None)
    """
    self.gateway = gateway or LLMGateway()
    self.storage = storage or StorageManager()
    # Context assembly shares the same storage backend as the orchestrator.
    self.context_manager = ContextManager(storage=self.storage)
    # Behavior observations accumulated across delegations by _monitor_behavior.
    self.behavior_logs: list[BehaviorLog] = []
    logger.info("SubagentOrchestrator initialized with ContextManager")
|
|
70
|
+
|
|
71
|
+
async def delegate(
    self,
    model: str,
    messages: list[dict[str, Any]],
    tools: list[dict[str, Any]] | None = None,
    context_refs: list[str] | None = None,
    context_template: str | None = None,
    context_variables: dict[str, Any] | None = None,
    store_result: str | None = None,
    max_iterations: int = 10,
    max_context_tokens: int | None = None,
    agent_id: str | None = None,
) -> DelegationResult:
    """Delegate a task to a subagent with enhanced context assembly.

    Runs an agentic loop: call the model, monitor behavior, and feed
    placeholder tool results back until the model answers without tool
    calls or max_iterations is hit. Never raises — all failures are
    reported via DelegationResult.success/error.

    Args:
        model: Model identifier (e.g., 'gpt-4', 'claude-3-5-sonnet')
        messages: List of message dictionaries
        tools: Optional list of tools available to agent
        context_refs: Optional list of S3/registry URIs to inject as context
        context_template: Optional Jinja2 template string or URI for context assembly
        context_variables: Variables to inject into context template
        store_result: Optional URI to store the final result
        max_iterations: Maximum agentic loop iterations
        max_context_tokens: Optional token limit for context truncation
        agent_id: Optional identifier for behavior tracking

    Returns:
        DelegationResult with response and metadata
    """
    agent_id = agent_id or f"agent_{datetime.now().timestamp()}"
    logger.info(
        f"Delegating to {model} (agent_id: {agent_id}, " f"max_iterations: {max_iterations})"
    )

    try:
        # Assemble and inject context using ContextManager
        if context_refs or context_template:
            messages = await self._inject_context(
                messages,
                context_refs,
                context_template,
                context_variables,
                max_context_tokens,
            )

        # Agentic loop
        iterations = 0
        total_tokens = 0
        # Shallow copy: message dicts are shared with the caller, but
        # appends below do not mutate the caller's list.
        current_messages = messages.copy()

        while iterations < max_iterations:
            iterations += 1
            logger.debug(f"Iteration {iterations}/{max_iterations}")

            # Call LLM
            response = await self.gateway.call(
                model=model, messages=current_messages, tools=tools
            )

            # Track token usage
            if response.get("usage"):
                total_tokens += response["usage"].get("total_tokens", 0)

            # Get response content (OpenAI-style response shape assumed —
            # TODO confirm against LLMGateway.call)
            message = response.get("choices", [{}])[0].get("message", {})
            content = message.get("content")
            tool_calls = message.get("tool_calls")

            # Check for destructive behavior patterns
            if content:
                self._monitor_behavior(agent_id, content, "response", iterations)

            # If no tool calls, we're done
            if not tool_calls:
                logger.info(
                    f"Delegation complete after {iterations} iterations "
                    f"({total_tokens} tokens)"
                )

                # Store result if requested
                stored_at = None
                if store_result and content:
                    stored_at = await self._store_result(store_result, content)

                return DelegationResult(
                    success=True,
                    response=content,
                    tokens_used=total_tokens,
                    model_used=model,
                    iterations=iterations,
                    stored_at=stored_at,
                    metadata={
                        "agent_id": agent_id,
                        "behavior_logs": len(self.behavior_logs),
                    },
                )

            # Add assistant message with tool calls
            current_messages.append(message)

            # Execute tool calls (simulated for now)
            for tool_call in tool_calls:
                tool_name = tool_call.get("function", {}).get("name")
                logger.debug(f"Tool call: {tool_name}")

                # Monitor for volunteer behavior (unsolicited tool use)
                self._monitor_behavior(
                    agent_id,
                    f"Tool call: {tool_name}",
                    "tool_use",
                    iterations,
                )

                # Add tool result (placeholder) — NOTE: tools are NOT
                # actually executed; the model only sees a canned success.
                current_messages.append(
                    {
                        "role": "tool",
                        "tool_call_id": tool_call.get("id"),
                        "content": f"Tool {tool_name} executed successfully",
                    }
                )

        # Max iterations reached
        logger.warning(f"Max iterations ({max_iterations}) reached")
        return DelegationResult(
            success=False,
            error=f"Maximum iterations ({max_iterations}) reached",
            tokens_used=total_tokens,
            model_used=model,
            iterations=iterations,
            metadata={
                "agent_id": agent_id,
                "behavior_logs": len(self.behavior_logs),
            },
        )

    except Exception as e:
        logger.error(f"Delegation failed: {e}", exc_info=True)
        return DelegationResult(success=False, error=str(e), metadata={"agent_id": agent_id})
|
|
211
|
+
|
|
212
|
+
async def _inject_context(
    self,
    messages: list[dict[str, Any]],
    context_refs: list[str] | None = None,
    context_template: str | None = None,
    context_variables: dict[str, Any] | None = None,
    max_context_tokens: int | None = None,
) -> list[dict[str, Any]]:
    """Return a copy of the messages with assembled context prepended.

    Delegates assembly to the ContextManager (Jinja2 templates with the
    {{ uri | expand }} filter, caching, variable injection, truncation)
    and prepends the result as a system message. On assembly failure the
    error is logged and the messages are returned without injection.

    Args:
        messages: Original message list
        context_refs: Optional list of URIs to inject
        context_template: Optional Jinja2 template string or URI
        context_variables: Variables for template rendering
        max_context_tokens: Optional token limit for truncation

    Returns:
        Messages with injected context
    """
    augmented = messages.copy()

    try:
        assembled_context = await self.context_manager.assemble_context(
            template=context_template,
            context_refs=context_refs,
            variables=context_variables,
            max_tokens=max_context_tokens,
        )

        if assembled_context:
            # Context leads the conversation as a system message.
            system_message = {"role": "system", "content": assembled_context}
            augmented.insert(0, system_message)
            logger.info(
                f"Injected assembled context ({len(assembled_context)} chars, "
                f"~{len(assembled_context) // 4} tokens)"
            )

    except Exception as e:
        logger.error(f"Failed to assemble/inject context: {e}", exc_info=True)

    return augmented
|
|
267
|
+
|
|
268
|
+
async def _store_result(self, uri: str, content: str) -> str:
|
|
269
|
+
"""Store delegation result in storage.
|
|
270
|
+
|
|
271
|
+
Args:
|
|
272
|
+
uri: URI to store at
|
|
273
|
+
content: Content to store
|
|
274
|
+
|
|
275
|
+
Returns:
|
|
276
|
+
URI where content was stored
|
|
277
|
+
"""
|
|
278
|
+
try:
|
|
279
|
+
if not self.storage.is_available():
|
|
280
|
+
logger.warning("Storage not available, cannot store result")
|
|
281
|
+
return uri
|
|
282
|
+
|
|
283
|
+
await self.storage.put(
|
|
284
|
+
uri,
|
|
285
|
+
content.encode("utf-8"),
|
|
286
|
+
content_type="text/plain",
|
|
287
|
+
metadata={"type": "delegation_result", "timestamp": str(datetime.now())},
|
|
288
|
+
)
|
|
289
|
+
logger.info(f"Stored result at {uri}")
|
|
290
|
+
return uri
|
|
291
|
+
|
|
292
|
+
except Exception as e:
|
|
293
|
+
logger.error(f"Failed to store result at {uri}: {e}")
|
|
294
|
+
return uri
|
|
295
|
+
|
|
296
|
+
def _monitor_behavior(
|
|
297
|
+
self, agent_id: str, content: str, behavior_context: str, iteration: int
|
|
298
|
+
) -> None:
|
|
299
|
+
"""Monitor agent behavior for emergent patterns.
|
|
300
|
+
|
|
301
|
+
Based on Chen 2024 AgentVerse research:
|
|
302
|
+
- Volunteer: Unsolicited assistance or tool use
|
|
303
|
+
- Conformity: Alignment with instructions
|
|
304
|
+
- Destructive: Potentially harmful actions
|
|
305
|
+
|
|
306
|
+
Args:
|
|
307
|
+
agent_id: Agent identifier
|
|
308
|
+
content: Content to analyze
|
|
309
|
+
behavior_context: Context of behavior (response, tool_use, etc.)
|
|
310
|
+
iteration: Current iteration number
|
|
311
|
+
"""
|
|
312
|
+
content_lower = content.lower()
|
|
313
|
+
|
|
314
|
+
# Check for destructive patterns
|
|
315
|
+
destructive_keywords = [
|
|
316
|
+
"delete",
|
|
317
|
+
"remove",
|
|
318
|
+
"destroy",
|
|
319
|
+
"override",
|
|
320
|
+
"bypass",
|
|
321
|
+
"ignore",
|
|
322
|
+
"hack",
|
|
323
|
+
]
|
|
324
|
+
if any(keyword in content_lower for keyword in destructive_keywords):
|
|
325
|
+
self.behavior_logs.append(
|
|
326
|
+
BehaviorLog(
|
|
327
|
+
agent_id=agent_id,
|
|
328
|
+
behavior_type="destructive",
|
|
329
|
+
description=f"Potentially destructive action detected: {content[:100]}",
|
|
330
|
+
context={
|
|
331
|
+
"iteration": iteration,
|
|
332
|
+
"context": behavior_context,
|
|
333
|
+
},
|
|
334
|
+
severity="warning",
|
|
335
|
+
)
|
|
336
|
+
)
|
|
337
|
+
logger.warning(f"Destructive behavior detected in {agent_id} at iteration {iteration}")
|
|
338
|
+
|
|
339
|
+
# Check for volunteer patterns (unsolicited tool use)
|
|
340
|
+
if behavior_context == "tool_use" and iteration == 1:
|
|
341
|
+
self.behavior_logs.append(
|
|
342
|
+
BehaviorLog(
|
|
343
|
+
agent_id=agent_id,
|
|
344
|
+
behavior_type="volunteer",
|
|
345
|
+
description=f"Proactive tool use: {content[:100]}",
|
|
346
|
+
context={
|
|
347
|
+
"iteration": iteration,
|
|
348
|
+
"context": behavior_context,
|
|
349
|
+
},
|
|
350
|
+
severity="info",
|
|
351
|
+
)
|
|
352
|
+
)
|
|
353
|
+
logger.debug(f"Volunteer behavior detected in {agent_id} at iteration {iteration}")
|
|
354
|
+
|
|
355
|
+
def get_behavior_logs(
    self, agent_id: str | None = None, behavior_type: str | None = None
) -> list[BehaviorLog]:
    """Retrieve recorded behavior logs, optionally narrowed by filters.

    Args:
        agent_id: If given, keep only logs from this agent.
        behavior_type: If given, keep only logs of this behavior type.

    Returns:
        Matching behavior logs. With no filters, the internal list itself
        is returned (not a copy), matching existing caller expectations.
    """
    # Fast path: nothing to filter — hand back the live list unchanged.
    if not agent_id and not behavior_type:
        return self.behavior_logs

    # Single pass applying both optional predicates.
    return [
        entry
        for entry in self.behavior_logs
        if (not agent_id or entry.agent_id == agent_id)
        and (not behavior_type or entry.behavior_type == behavior_type)
    ]
|
|
376
|
+
|
|
377
|
+
def clear_behavior_logs(self) -> None:
    """Discard every recorded behavior log."""
    # Clear in place so any callers holding a reference see the reset.
    del self.behavior_logs[:]
    logger.info("Cleared behavior logs")
|
package/python/src/uer/server.py
CHANGED
|
@@ -16,6 +16,7 @@ from uer.mcp.manager import MCPManager
|
|
|
16
16
|
from uer.models.llm import LLMCallRequest
|
|
17
17
|
from uer.storage import StorageManager
|
|
18
18
|
from uer.tools import skills_tools, storage_tools, template_tools
|
|
19
|
+
from uer.tools.delegate import DelegateToolHandler
|
|
19
20
|
|
|
20
21
|
# Configure logging
|
|
21
22
|
logging.basicConfig(
|
|
@@ -40,6 +41,9 @@ if storage_manager.is_available():
|
|
|
40
41
|
else:
|
|
41
42
|
logger.info("Storage backend disabled - storage/skills/template tools will not be available")
|
|
42
43
|
|
|
44
|
+
# Initialize delegate tool handler for multi-agent orchestration
|
|
45
|
+
delegate_handler = DelegateToolHandler(gateway=gateway, storage=storage_manager)
|
|
46
|
+
|
|
43
47
|
|
|
44
48
|
@app.list_tools()
|
|
45
49
|
async def list_tools() -> list[Tool]:
|
|
@@ -277,6 +281,8 @@ async def list_tools() -> list[Tool]:
|
|
|
277
281
|
"required": ["operation"],
|
|
278
282
|
},
|
|
279
283
|
),
|
|
284
|
+
# Delegate tool for multi-agent orchestration
|
|
285
|
+
delegate_handler.get_tool_definition(),
|
|
280
286
|
]
|
|
281
287
|
|
|
282
288
|
# Conditionally add storage-dependent tools
|
|
@@ -320,6 +326,9 @@ async def call_tool(name: str, arguments: Any) -> Sequence[TextContent]:
|
|
|
320
326
|
return await handle_mcp_registry(arguments)
|
|
321
327
|
elif name == "mcp_servers":
|
|
322
328
|
return await handle_mcp_servers(arguments)
|
|
329
|
+
# Delegate tool for multi-agent orchestration
|
|
330
|
+
elif name == "delegate":
|
|
331
|
+
return await delegate_handler.handle(arguments)
|
|
323
332
|
# Storage tools
|
|
324
333
|
elif name == "storage_put":
|
|
325
334
|
return await storage_tools.storage_put(arguments)
|
|
@@ -0,0 +1,230 @@
|
|
|
1
|
+
"""Delegation tool for multi-agent orchestration."""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
from typing import Any
|
|
5
|
+
|
|
6
|
+
from mcp.types import TextContent, Tool
|
|
7
|
+
|
|
8
|
+
from ..llm.gateway import LLMGateway
|
|
9
|
+
from ..orchestration.orchestrator import DelegationResult, SubagentOrchestrator
|
|
10
|
+
from ..storage.manager import StorageManager
|
|
11
|
+
|
|
12
|
+
logger = logging.getLogger(__name__)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
class DelegateToolHandler:
    """Handler for the delegate tool.

    Thin MCP-facing wrapper around SubagentOrchestrator: exposes the tool
    schema, translates tool arguments into a delegation call, and formats
    the result (plus any recorded behavior logs) as markdown text.
    """

    def __init__(self, gateway: LLMGateway, storage: StorageManager):
        """Initialize delegate tool handler.

        Args:
            gateway: LLM gateway for model calls
            storage: Storage manager for context resolution
        """
        # The orchestrator owns all delegation state, including behavior logs.
        self.orchestrator = SubagentOrchestrator(gateway=gateway, storage=storage)
        logger.info("DelegateToolHandler initialized")

    def get_tool_definition(self) -> Tool:
        """Get the delegate tool definition for MCP.

        Returns:
            Tool definition
        """
        # The input schema mirrors the keyword arguments of
        # SubagentOrchestrator.delegate; only "model" and "task" are required.
        return Tool(
            name="delegate",
            description=(
                "Delegate a task to a subagent with a different model. "
                "Enables multi-agent orchestration with behavior monitoring. "
                "Based on Chen 2024 AgentVerse research on emergent behaviors. "
                "Supports context injection from S3 storage and result persistence."
            ),
            inputSchema={
                "type": "object",
                "properties": {
                    "model": {
                        "type": "string",
                        "description": (
                            "Model to delegate to (e.g., 'gpt-4', 'claude-3-5-sonnet-20241022', "
                            "'gemini/gemini-2.0-flash-exp')"
                        ),
                    },
                    "task": {
                        "type": "string",
                        "description": "Task description for the subagent",
                    },
                    "messages": {
                        "type": "array",
                        "description": (
                            "Optional pre-built message list. If not provided, "
                            "will create from task description."
                        ),
                        "items": {"type": "object"},
                    },
                    "tools": {
                        "type": "array",
                        "description": "Optional list of tools available to the subagent",
                        "items": {"type": "object"},
                    },
                    "context_refs": {
                        "type": "array",
                        "description": (
                            "Optional list of S3/registry URIs to inject as context "
                            "(e.g., ['s3://uer-context/analysis.txt', 'registry://skills/financial'])"
                        ),
                        "items": {"type": "string"},
                    },
                    "context_template": {
                        "type": "string",
                        "description": (
                            "Optional Jinja2 template for dynamic context assembly. "
                            "Can be a template string or URI to template in storage. "
                            "Supports filters: {{ uri | expand }}, "
                            "{{ text | truncate_tokens(500) }}, {{ text | summarize(10) }}, "
                            "{{ uri | fetch('default') }}. "
                            "Example: 'Analysis of {{ context_0 | summarize(5) }}\\n\\n"
                            '{{ "s3://data.txt" | expand }}\''
                        ),
                    },
                    "context_variables": {
                        "type": "object",
                        "description": (
                            "Variables to inject into context template. "
                            "Context refs are automatically available as "
                            "context_0, context_1, etc. "
                            "Example: {'project': 'UER', 'date': '2026-01-11'}"
                        ),
                    },
                    "max_context_tokens": {
                        "type": "integer",
                        "description": (
                            "Optional token limit for context truncation (approximate). "
                            "Useful for staying within model context windows."
                        ),
                    },
                    "store_result": {
                        "type": "string",
                        "description": (
                            "Optional URI to store the delegation result "
                            "(e.g., 's3://uer-context/result.txt')"
                        ),
                    },
                    "max_iterations": {
                        "type": "integer",
                        "description": "Maximum agentic loop iterations (default: 10)",
                        "default": 10,
                    },
                    "agent_id": {
                        "type": "string",
                        "description": (
                            "Optional identifier for behavior tracking "
                            "(auto-generated if not provided)"
                        ),
                    },
                },
                "required": ["model", "task"],
            },
        )

    async def handle(self, arguments: dict[str, Any]) -> list[TextContent]:
        """Handle delegate tool call.

        Unpacks the tool arguments, runs the delegation through the
        orchestrator, and renders the outcome as a single markdown
        TextContent block (success or failure variant).

        Args:
            arguments: Tool arguments

        Returns:
            List of text content with delegation result
        """
        model = arguments["model"]
        task = arguments["task"]
        messages = arguments.get("messages")
        tools = arguments.get("tools")
        context_refs = arguments.get("context_refs")
        context_template = arguments.get("context_template")
        context_variables = arguments.get("context_variables")
        max_context_tokens = arguments.get("max_context_tokens")
        store_result = arguments.get("store_result")
        max_iterations = arguments.get("max_iterations", 10)
        agent_id = arguments.get("agent_id")

        # Truncate the task in the log line to keep log output bounded.
        logger.info(f"Handling delegate call to {model} for task: {task[:100]}")

        # Build messages if not provided: a single user message from the task.
        if not messages:
            messages = [{"role": "user", "content": task}]

        # Delegate to subagent with enhanced context assembly
        result: DelegationResult = await self.orchestrator.delegate(
            model=model,
            messages=messages,
            tools=tools,
            context_refs=context_refs,
            context_template=context_template,
            context_variables=context_variables,
            max_context_tokens=max_context_tokens,
            store_result=store_result,
            max_iterations=max_iterations,
            agent_id=agent_id,
        )

        # Format response
        if result.success:
            response_text = "✅ Delegation successful\n\n"
            response_text += f"**Model:** {result.model_used}\n"
            response_text += f"**Iterations:** {result.iterations}\n"
            response_text += f"**Tokens:** {result.tokens_used}\n"

            if result.stored_at:
                response_text += f"**Stored at:** {result.stored_at}\n"

            # Check for behavior logs recorded for this specific agent run.
            behavior_logs = self.orchestrator.get_behavior_logs(
                agent_id=result.metadata.get("agent_id")
            )
            if behavior_logs:
                response_text += f"\n**Behaviors detected:** {len(behavior_logs)}\n"
                for log in behavior_logs:
                    # Descriptions are truncated to keep the summary compact.
                    response_text += f"- {log.behavior_type}: {log.description[:100]}\n"

            response_text += f"\n**Response:**\n{result.response}"

        else:
            response_text = "❌ Delegation failed\n\n"
            response_text += f"**Error:** {result.error}\n"
            response_text += f"**Iterations:** {result.iterations}\n"
            if result.tokens_used:
                response_text += f"**Tokens used:** {result.tokens_used}\n"

        return [TextContent(type="text", text=response_text)]

    def get_behavior_summary(self) -> str:
        """Get summary of all behavior logs.

        Aggregates counts per behavior type and shows the five most
        recently recorded entries.

        Returns:
            Formatted summary of behavior logs
        """
        logs = self.orchestrator.get_behavior_logs()

        if not logs:
            return "No behaviors logged yet."

        summary = f"**Behavior Summary ({len(logs)} total)**\n\n"

        # Count by type
        by_type: dict[str, int] = {}
        for log in logs:
            by_type[log.behavior_type] = by_type.get(log.behavior_type, 0) + 1

        summary += "**By Type:**\n"
        for behavior_type, count in sorted(by_type.items()):
            summary += f"- {behavior_type}: {count}\n"

        # Show recent logs (last five, append order — presumably chronological).
        summary += "\n**Recent Behaviors:**\n"
        for log in logs[-5:]:
            summary += (
                f"- [{log.severity}] {log.agent_id}: "
                f"{log.behavior_type} - {log.description[:80]}\n"
            )

        return summary
|