agnt5-0.2.8a10-cp310-abi3-manylinux_2_34_x86_64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of agnt5 might be problematic.
- agnt5/__init__.py +91 -0
- agnt5/_compat.py +16 -0
- agnt5/_core.abi3.so +0 -0
- agnt5/_retry_utils.py +169 -0
- agnt5/_schema_utils.py +312 -0
- agnt5/_telemetry.py +182 -0
- agnt5/agent.py +1685 -0
- agnt5/client.py +741 -0
- agnt5/context.py +178 -0
- agnt5/entity.py +795 -0
- agnt5/exceptions.py +102 -0
- agnt5/function.py +321 -0
- agnt5/lm.py +813 -0
- agnt5/tool.py +648 -0
- agnt5/tracing.py +196 -0
- agnt5/types.py +110 -0
- agnt5/version.py +19 -0
- agnt5/worker.py +1619 -0
- agnt5/workflow.py +1048 -0
- agnt5-0.2.8a10.dist-info/METADATA +25 -0
- agnt5-0.2.8a10.dist-info/RECORD +22 -0
- agnt5-0.2.8a10.dist-info/WHEEL +4 -0
agnt5/agent.py
ADDED
@@ -0,0 +1,1685 @@
"""Agent component implementation for AGNT5 SDK.

Provides simple agent with external LLM integration and tool orchestration.
Future: Platform-backed agents with durable execution and multi-agent coordination.
"""

from __future__ import annotations

import functools
import json
import logging
import time
from typing import Any, Callable, Dict, List, Optional, Union

from .context import Context, get_current_context, set_current_context
from . import lm
from .lm import GenerateRequest, GenerateResponse, LanguageModel, Message, ModelConfig, ToolDefinition
from .tool import Tool, ToolRegistry
from ._telemetry import setup_module_logger
from .exceptions import WaitingForUserInputException

logger = setup_module_logger(__name__)

# Global agent registry
_AGENT_REGISTRY: Dict[str, "Agent"] = {}


class AgentContext(Context):
    """
    Context for agent execution with conversation state management.

    Extends base Context with:
    - State management via EntityStateManager
    - Conversation history persistence
    - Context inheritance (child agents share parent's state)

    Three initialization modes:
    1. Standalone: Creates own state manager (playground testing)
    2. Inherit WorkflowContext: Shares parent's state manager
    3. Inherit parent AgentContext: Shares parent's state manager

    Example:
        ```python
        # Standalone agent with conversation history
        ctx = AgentContext(run_id="session-1", agent_name="tutor")
        result = await agent.run("Hello", context=ctx)
        result = await agent.run("Continue", context=ctx)  # Remembers previous message

        # Agent in workflow - shares workflow state
        @workflow
        async def research_workflow(ctx: WorkflowContext):
            agent_result = await research_agent.run("Find AI trends", context=ctx)
            # Agent has access to workflow state via inherited context
        ```
    """

    def __init__(
        self,
        run_id: str,
        agent_name: str,
        session_id: Optional[str] = None,
        state_manager: Optional[Any] = None,
        parent_context: Optional[Context] = None,
        attempt: int = 0,
        runtime_context: Optional[Any] = None,
    ):
        """
        Initialize agent context.

        Args:
            run_id: Unique execution identifier
            agent_name: Name of the agent
            session_id: Session identifier for conversation history (default: run_id)
            state_manager: Optional state manager (for context inheritance)
            parent_context: Parent context to inherit state from
            attempt: Retry attempt number
            runtime_context: RuntimeContext for trace correlation
        """
        super().__init__(run_id, attempt, runtime_context)

        self._agent_name = agent_name
        self._session_id = session_id or run_id
        self.parent_context = parent_context  # Store for context chain traversal

        # Determine state adapter based on parent context
        from .entity import EntityStateAdapter, _get_state_adapter

        if state_manager:
            # Explicit state adapter provided (parameter name kept for backward compat)
            self._state_adapter = state_manager
        elif parent_context:
            # Try to inherit state adapter from parent
            try:
                # Check if parent is WorkflowContext or AgentContext
                if hasattr(parent_context, '_workflow_entity'):
                    # WorkflowContext - get state adapter from worker context
                    self._state_adapter = _get_state_adapter()
                elif hasattr(parent_context, '_state_adapter'):
                    # Parent AgentContext - share state adapter
                    self._state_adapter = parent_context._state_adapter
                elif hasattr(parent_context, '_state_manager'):
                    # Backward compatibility: parent has old _state_manager
                    self._state_adapter = parent_context._state_manager
                else:
                    # FunctionContext or base Context - create new state adapter
                    self._state_adapter = EntityStateAdapter()
            except RuntimeError as e:
                # _get_state_adapter() failed (not in worker context) - create standalone
                self._state_adapter = EntityStateAdapter()
        else:
            # Try to get from worker context first
            try:
                self._state_adapter = _get_state_adapter()
            except RuntimeError as e:
                # Standalone - create new state adapter
                self._state_adapter = EntityStateAdapter()

        # Conversation key for state storage (used for in-memory state)
        self._conversation_key = f"agent:{agent_name}:{self._session_id}:messages"
        # Entity key for database persistence (without :messages suffix to match API expectations)
        self._entity_key = f"agent:{agent_name}:{self._session_id}"

        # Determine storage mode: "workflow" if parent is WorkflowContext, else "standalone"
        self._storage_mode = "standalone"  # Default mode
        self._workflow_entity = None

        if parent_context and hasattr(parent_context, '_workflow_entity'):
            # Agent is running within a workflow - store conversation in workflow state
            self._storage_mode = "workflow"
            self._workflow_entity = parent_context._workflow_entity
            logger.debug(f"Agent '{agent_name}' using workflow storage mode (workflow entity: {self._workflow_entity.key})")

    @property
    def state(self):
        """
        Get state interface for agent state management.

        Note: This is a simplified in-memory state interface for agent-specific data.
        Conversation history is managed separately via get_conversation_history() and
        save_conversation_history() which use the Rust-backed persistence layer.

        Returns:
            Dict-like object for state operations

        Example:
            # Store agent-specific data (in-memory only)
            ctx.state["research_results"] = data
            ctx.state["iteration_count"] = 5
        """
        # Simple dict-based state for agent-specific data
        # This is in-memory only and not persisted to platform
        if not hasattr(self, '_agent_state'):
            self._agent_state = {}
        return self._agent_state

    @property
    def session_id(self) -> str:
        """Get session identifier for this agent context."""
        return self._session_id

    async def get_conversation_history(self) -> List[Message]:
        """
        Retrieve conversation history from state, loading from database if needed.

        Uses the EntityStateAdapter which delegates to Rust core for cache-first loading.
        If running within a workflow, loads from workflow entity state instead.

        Returns:
            List of Message objects from conversation history
        """
        if self._storage_mode == "workflow":
            return await self._load_from_workflow_state()
        else:
            return await self._load_from_entity_storage()

    async def _load_from_workflow_state(self) -> List[Message]:
        """Load conversation history from workflow entity state."""
        key = f"agent.{self._agent_name}"
        agent_data = self._workflow_entity.state.get(key, {})
        messages_data = agent_data.get("messages", [])

        # Convert dict representations back to Message objects
        return self._convert_dicts_to_messages(messages_data)

    async def _load_from_entity_storage(self) -> List[Message]:
        """Load conversation history from AgentSession entity (standalone mode)."""
        entity_type = "AgentSession"
        entity_key = self._entity_key

        # Load session data via adapter (Rust handles cache + platform load)
        session_data = await self._state_adapter.load_state(entity_type, entity_key)

        # Extract messages from session object
        if isinstance(session_data, dict) and "messages" in session_data:
            # New format with session metadata
            messages_data = session_data["messages"]
        elif isinstance(session_data, list):
            # Old format - just messages array
            messages_data = session_data
        else:
            # No messages found
            messages_data = []

        # Convert dict representations back to Message objects
        return self._convert_dicts_to_messages(messages_data)

    def _convert_dicts_to_messages(self, messages_data: list) -> List[Message]:
        """Convert list of message dicts to Message objects."""
        messages = []
        for msg_dict in messages_data:
            if isinstance(msg_dict, dict):
                role = msg_dict.get("role", "user")
                content = msg_dict.get("content", "")
                if role == "user":
                    messages.append(Message.user(content))
                elif role == "assistant":
                    messages.append(Message.assistant(content))
                else:
                    # Generic message - create with MessageRole enum
                    from .lm import MessageRole
                    msg_role = MessageRole(role) if role in ("user", "assistant", "system") else MessageRole.USER
                    msg = Message(role=msg_role, content=content)
                    messages.append(msg)
            else:
                # Already a Message object
                messages.append(msg_dict)

        return messages

    async def save_conversation_history(self, messages: List[Message]) -> None:
        """
        Save conversation history to state and persist to database.

        Uses the EntityStateAdapter which delegates to Rust core for version-checked saves.
        If running within a workflow, saves to workflow entity state instead.

        Args:
            messages: List of Message objects to persist
        """
        if self._storage_mode == "workflow":
            await self._save_to_workflow_state(messages)
        else:
            await self._save_to_entity_storage(messages)

    async def _save_to_workflow_state(self, messages: List[Message]) -> None:
        """Save conversation history to workflow entity state."""
        # Convert Message objects to dict for JSON serialization
        messages_data = []
        for msg in messages:
            messages_data.append({
                "role": msg.role.value if hasattr(msg.role, 'value') else str(msg.role),
                "content": msg.content,
                "timestamp": time.time()
            })

        # Build agent data structure
        key = f"agent.{self._agent_name}"
        current_data = self._workflow_entity.state.get(key, {})
        now = time.time()

        agent_data = {
            "session_id": self._session_id,
            "agent_name": self._agent_name,
            "created_at": current_data.get("created_at", now),
            "last_message_time": now,
            "message_count": len(messages_data),
            "messages": messages_data,
            "metadata": getattr(self, '_custom_metadata', {})
        }

        # Store in workflow state (WorkflowEntity handles persistence)
        self._workflow_entity.state.set(key, agent_data)
        logger.info(f"Saved conversation to workflow state: {key} ({len(messages_data)} messages)")

    async def _save_to_entity_storage(self, messages: List[Message]) -> None:
        """Save conversation history to AgentSession entity (standalone mode)."""
        # Convert Message objects to dict for JSON serialization
        messages_data = []
        for msg in messages:
            messages_data.append({
                "role": msg.role.value if hasattr(msg.role, 'value') else str(msg.role),
                "content": msg.content,
                "timestamp": time.time()  # Add timestamp for each message
            })

        entity_type = "AgentSession"
        entity_key = self._entity_key

        # Load current state with version for optimistic locking
        current_state, current_version = await self._state_adapter.load_with_version(
            entity_type, entity_key
        )

        # Build session object with metadata
        now = time.time()

        # Get custom metadata from instance variable or preserve from loaded state
        custom_metadata = getattr(self, '_custom_metadata', current_state.get("metadata", {}))

        session_data = {
            "session_id": self._session_id,
            "agent_name": self._agent_name,
            "created_at": current_state.get("created_at", now),  # Preserve existing or set new
            "last_message_time": now,
            "message_count": len(messages_data),
            "messages": messages_data,
            "metadata": custom_metadata  # Save custom metadata
        }

        # Save to platform via adapter (Rust handles optimistic locking)
        try:
            new_version = await self._state_adapter.save_state(
                entity_type,
                entity_key,
                session_data,
                current_version
            )
            logger.info(
                f"Persisted conversation history: {entity_key} (version {current_version} -> {new_version})"
            )
        except Exception as e:
            logger.error(f"Failed to persist conversation history to database: {e}")
            # Don't fail - conversation is still in memory for this execution

    async def get_metadata(self) -> Dict[str, Any]:
        """
        Get conversation session metadata.

        Returns session metadata including:
        - created_at: Timestamp of first message (float, Unix timestamp)
        - last_activity: Timestamp of last message (float, Unix timestamp)
        - message_count: Number of messages in conversation (int)
        - custom: Dict of user-provided custom metadata

        Returns:
            Dictionary with metadata. If no conversation exists yet, returns defaults.

        Example:
            ```python
            metadata = await context.get_metadata()
            print(f"Session created: {metadata['created_at']}")
            print(f"User ID: {metadata['custom'].get('user_id')}")
            ```
        """
        if self._storage_mode == "workflow":
            return await self._get_metadata_from_workflow()
        else:
            return await self._get_metadata_from_entity()

    async def _get_metadata_from_workflow(self) -> Dict[str, Any]:
        """Get metadata from workflow entity state."""
        key = f"agent.{self._agent_name}"
        agent_data = self._workflow_entity.state.get(key, {})

        if not agent_data:
            # No conversation exists yet - return defaults
            return {
                "created_at": None,
                "last_activity": None,
                "message_count": 0,
                "custom": getattr(self, '_custom_metadata', {})
            }

        messages = agent_data.get("messages", [])
        return {
            "created_at": agent_data.get("created_at"),
            "last_activity": agent_data.get("last_message_time"),
            "message_count": len(messages),
            "custom": agent_data.get("metadata", {})
        }

    async def _get_metadata_from_entity(self) -> Dict[str, Any]:
        """Get metadata from AgentSession entity (standalone mode)."""
        entity_type = "AgentSession"
        entity_key = self._entity_key

        # Load session data
        session_data = await self._state_adapter.load_state(entity_type, entity_key)

        if not session_data:
            # No conversation exists yet - return defaults
            return {
                "created_at": None,
                "last_activity": None,
                "message_count": 0,
                "custom": getattr(self, '_custom_metadata', {})
            }

        messages = session_data.get("messages", [])

        # Derive timestamps from messages if available
        created_at = session_data.get("created_at")
        last_activity = session_data.get("last_message_time")

        return {
            "created_at": created_at,
            "last_activity": last_activity,
            "message_count": len(messages),
            "custom": session_data.get("metadata", {})
        }

    def update_metadata(self, **kwargs) -> None:
        """
        Update custom session metadata.

        Metadata will be persisted alongside conversation history on next save.
        Use this to store application-specific data like user_id, preferences, etc.

        Args:
            **kwargs: Key-value pairs to store as metadata

        Example:
            ```python
            # Store user identification and preferences
            context.update_metadata(
                user_id="user-123",
                subscription_tier="premium",
                preferences={"theme": "dark", "language": "en"}
            )

            # Later retrieve it
            metadata = await context.get_metadata()
            user_id = metadata["custom"]["user_id"]
            ```

        Note:
            - Metadata is merged with existing metadata (doesn't replace)
            - Changes persist on next save_conversation_history() call
            - Use simple JSON-serializable types (str, int, float, dict, list)
        """
        if not hasattr(self, '_custom_metadata'):
            self._custom_metadata = {}
        self._custom_metadata.update(kwargs)

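# Illustrative sketch: the AgentSession payload that _save_to_entity_storage()
# persists has roughly this shape, reconstructed from the dict literal above
# (all concrete values below are hypothetical):
#
#     {
#         "session_id": "session-1",
#         "agent_name": "tutor",
#         "created_at": 1714000000.0,        # Unix timestamp, preserved across saves
#         "last_message_time": 1714000042.5,
#         "message_count": 2,
#         "messages": [
#             {"role": "user", "content": "Hello", "timestamp": 1714000000.0},
#             {"role": "assistant", "content": "Hi!", "timestamp": 1714000042.5},
#         ],
#         "metadata": {"user_id": "user-123"},  # set via update_metadata()
#     }
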
class Handoff:
    """Configuration for agent-to-agent handoff.

    Handoffs enable one agent to delegate control to another specialized agent,
    following the pattern popularized by LangGraph and OpenAI Agents SDK.

    The handoff is exposed to the LLM as a tool named 'transfer_to_{agent_name}'
    that allows explicit delegation with conversation history.

    Example:
        ```python
        specialist = Agent(name="specialist", ...)

        # Simple: Pass agent directly (auto-wrapped with defaults)
        coordinator = Agent(
            name="coordinator",
            handoffs=[specialist]  # Agent auto-converted to Handoff
        )

        # Advanced: Use Handoff for custom configuration
        coordinator = Agent(
            name="coordinator",
            handoffs=[
                Handoff(
                    agent=specialist,
                    description="Custom description for LLM",
                    tool_name="custom_transfer_name",
                    pass_full_history=False
                )
            ]
        )
        ```
    """

    def __init__(
        self,
        agent: "Agent",
        description: Optional[str] = None,
        tool_name: Optional[str] = None,
        pass_full_history: bool = True,
    ):
        """Initialize handoff configuration.

        Args:
            agent: Target agent to hand off to
            description: Description shown to LLM (defaults to agent instructions)
            tool_name: Custom tool name (defaults to 'transfer_to_{agent_name}')
            pass_full_history: Whether to pass full conversation history to target agent
        """
        self.agent = agent
        self.description = description or agent.instructions or f"Transfer to {agent.name}"
        self.tool_name = tool_name or f"transfer_to_{agent.name}"
        self.pass_full_history = pass_full_history


def handoff(
    agent: "Agent",
    description: Optional[str] = None,
    tool_name: Optional[str] = None,
    pass_full_history: bool = True,
) -> Handoff:
    """Create a handoff configuration for agent-to-agent delegation.

    This is a convenience function for creating Handoff instances with a clean API.

    Args:
        agent: Target agent to hand off to
        description: Description shown to LLM
        tool_name: Custom tool name
        pass_full_history: Whether to pass full conversation history

    Returns:
        Handoff configuration

    Example:
        ```python
        from agnt5 import Agent, handoff

        research_agent = Agent(name="researcher", ...)
        writer_agent = Agent(name="writer", ...)

        coordinator = Agent(
            name="coordinator",
            handoffs=[
                handoff(research_agent, "Transfer for research tasks"),
                handoff(writer_agent, "Transfer for writing tasks"),
            ]
        )
        ```
    """
    return Handoff(
        agent=agent,
        description=description,
        tool_name=tool_name,
        pass_full_history=pass_full_history,
    )

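# For example, a handoff wrapping an agent named "billing" surfaces to the LLM
# as a tool called "transfer_to_billing" (per Handoff.__init__ above) with a
# single required "message" string argument; a hypothetical sketch:
#
#     billing = Agent(name="billing", model="openai/gpt-4o-mini",
#                     instructions="Handle billing questions.")
#     support = Agent(name="support", model="openai/gpt-4o-mini",
#                     instructions="Front-line support.",
#                     handoffs=[handoff(billing, "Transfer billing questions")])
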
class AgentRegistry:
    """Registry for agents."""

    @staticmethod
    def register(agent: "Agent") -> None:
        """Register an agent."""
        if agent.name in _AGENT_REGISTRY:
            logger.warning(f"Overwriting existing agent '{agent.name}'")
        _AGENT_REGISTRY[agent.name] = agent

    @staticmethod
    def get(name: str) -> Optional["Agent"]:
        """Get agent by name."""
        return _AGENT_REGISTRY.get(name)

    @staticmethod
    def all() -> Dict[str, "Agent"]:
        """Get all registered agents."""
        return _AGENT_REGISTRY.copy()

    @staticmethod
    def clear() -> None:
        """Clear all registered agents."""
        _AGENT_REGISTRY.clear()

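# Agents self-register here from Agent.__init__, so construction alone makes
# them discoverable by name; a minimal usage sketch:
#
#     Agent(name="researcher", model="openai/gpt-4o-mini", instructions="...")
#     assert AgentRegistry.get("researcher") is not None
#     AgentRegistry.clear()  # e.g. between test cases
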
class AgentResult:
    """Result from agent execution."""

    def __init__(
        self,
        output: str,
        tool_calls: List[Dict[str, Any]],
        context: Context,
        handoff_to: Optional[str] = None,
        handoff_metadata: Optional[Dict[str, Any]] = None,
    ):
        self.output = output
        self.tool_calls = tool_calls
        self.context = context
        self.handoff_to = handoff_to  # Name of agent that was handed off to
        self.handoff_metadata = handoff_metadata or {}  # Additional handoff info


class Agent:
    """Autonomous LLM-driven agent with tool orchestration.

    Current features:
    - LLM integration (OpenAI, Anthropic, etc.)
    - Tool selection and execution
    - Multi-turn reasoning
    - Context and state management

    Future enhancements:
    - Durable execution with checkpointing
    - Multi-agent coordination
    - Platform-backed tool execution
    - Streaming responses

    Example:
        ```python
        from agnt5 import Agent, tool, Context

        @tool(auto_schema=True)
        async def search_web(ctx: Context, query: str) -> List[Dict]:
            # Search implementation
            return [{"title": "Result", "url": "..."}]

        # Simple usage with model string
        agent = Agent(
            name="researcher",
            model="openai/gpt-4o-mini",
            instructions="You are a research assistant.",
            tools=[search_web],
            temperature=0.7
        )

        result = await agent.run("What are the latest AI trends?")
        print(result.output)
        ```
    """

    def __init__(
        self,
        name: str,
        model: Any,  # Can be string like "openai/gpt-4o-mini" OR LanguageModel instance
        instructions: str,
        tools: Optional[List[Any]] = None,
        handoffs: Optional[List[Union["Agent", Handoff]]] = None,  # Accept Agent or Handoff instances
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
        top_p: Optional[float] = None,
        model_config: Optional[ModelConfig] = None,
        max_iterations: int = 10,
        model_name: Optional[str] = None,  # For backwards compatibility with tests
    ):
        """Initialize agent.

        Args:
            name: Agent name/identifier
            model: Model string with provider prefix (e.g., "openai/gpt-4o-mini") OR LanguageModel instance
            instructions: System instructions for the agent
            tools: List of tools available to the agent (functions, Tool instances, or Agent instances)
            handoffs: List of handoff configurations - can be Agent instances (auto-wrapped) or Handoff instances for custom config
            temperature: LLM temperature (0.0 to 1.0)
            max_tokens: Maximum tokens to generate
            top_p: Nucleus sampling parameter
            model_config: Optional advanced configuration (custom endpoints, headers, etc.)
            max_iterations: Maximum reasoning iterations
            model_name: Optional model name (for backwards compatibility, used when model is a LanguageModel instance)
        """
        self.name = name
        self.instructions = instructions
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.top_p = top_p
        self.model_config = model_config
        self.max_iterations = max_iterations

        # Support both string model names and LanguageModel instances
        if isinstance(model, str):
            # New API: model is a string like "openai/gpt-4o-mini"
            self.model = model
            self.model_name = model_name or model
            self._language_model = None  # Will create on demand
        elif isinstance(model, LanguageModel):
            # Old API (for tests): model is a LanguageModel instance
            self._language_model = model
            self.model = model  # Keep for backwards compatibility
            self.model_name = model_name or "mock-model"
        else:
            raise TypeError(f"model must be a string or LanguageModel instance, got {type(model)}")

        # Normalize handoffs: convert Agent instances to Handoff instances
        self.handoffs: List[Handoff] = []
        if handoffs:
            for handoff_item in handoffs:
                if isinstance(handoff_item, Agent):
                    # Auto-wrap Agent in Handoff with sensible defaults
                    self.handoffs.append(Handoff(agent=handoff_item))
                    logger.info(f"Auto-wrapped agent '{handoff_item.name}' in Handoff for '{self.name}'")
                elif isinstance(handoff_item, Handoff):
                    self.handoffs.append(handoff_item)
                else:
                    raise TypeError(f"handoffs must contain Agent or Handoff instances, got {type(handoff_item)}")

        # Build tool registry (includes regular tools, agent-as-tools, and handoff tools)
        self.tools: Dict[str, Tool] = {}
        if tools:
            for tool_item in tools:
                # Check if it's an Agent instance (agents-as-tools pattern)
                if isinstance(tool_item, Agent):
                    agent_tool = tool_item.to_tool()
                    self.tools[agent_tool.name] = agent_tool
                    logger.info(f"Added agent '{tool_item.name}' as tool to '{self.name}'")
                # Check if it's a Tool instance
                elif isinstance(tool_item, Tool):
                    self.tools[tool_item.name] = tool_item
                # Check if it's a decorated function with config
                elif hasattr(tool_item, "_agnt5_config"):
                    # Try to get from ToolRegistry first
                    tool_config = tool_item._agnt5_config
                    tool_instance = ToolRegistry.get(tool_config.name)
                    if tool_instance:
                        self.tools[tool_instance.name] = tool_instance
                # Otherwise try to look up by function name
                elif callable(tool_item):
                    # Try to find in registry by function name
                    tool_name = tool_item.__name__
                    tool_instance = ToolRegistry.get(tool_name)
                    if tool_instance:
                        self.tools[tool_instance.name] = tool_instance

        # Build handoff tools
        for handoff_config in self.handoffs:
            handoff_tool = self._create_handoff_tool(handoff_config)
            self.tools[handoff_tool.name] = handoff_tool
            logger.info(f"Added handoff tool '{handoff_tool.name}' to '{self.name}'")

        self.logger = logging.getLogger(f"agnt5.agent.{name}")

        # Define schemas based on the run method signature
        # Input: user_message (string)
        self.input_schema = {
            "type": "object",
            "properties": {
                "user_message": {"type": "string"}
            },
            "required": ["user_message"]
        }
        # Output: AgentResult with output and tool_calls
        self.output_schema = {
            "type": "object",
            "properties": {
                "output": {"type": "string"},
                "tool_calls": {
                    "type": "array",
                    "items": {"type": "object"}
                }
            }
        }

        # Auto-register agent for discovery by auto_register=True
        AgentRegistry.register(self)

        # Store metadata
        self.metadata = {
            "description": instructions,
            "model": model
        }

    def to_tool(self, description: Optional[str] = None) -> Tool:
        """Convert this agent to a Tool that can be used by other agents.

        This enables agents-as-tools pattern where one agent can invoke another
        agent as if it were a regular tool.

        Args:
            description: Optional custom description (defaults to agent instructions)

        Returns:
            Tool instance that wraps this agent

        Example:
            ```python
            research_agent = Agent(
                name="researcher",
                model="openai/gpt-4o-mini",
                instructions="You are a research specialist."
            )

            # Use research agent as a tool for another agent
            coordinator = Agent(
                name="coordinator",
                model="openai/gpt-4o-mini",
                instructions="Coordinate tasks using specialist agents.",
                tools=[research_agent.to_tool()]
            )
            ```
        """
        agent_name = self.name

        # Handler that runs the agent
        async def agent_tool_handler(ctx: Context, user_message: str) -> str:
            """Execute agent and return output."""
            ctx.logger.info(f"Invoking agent '{agent_name}' as tool")

            # Run the agent with the user message
            result = await self.run(user_message, context=ctx)

            return result.output

        # Create tool with agent's schema
        tool_description = description or self.instructions or f"Agent: {self.name}"

        agent_tool = Tool(
            name=self.name,
            description=tool_description,
            handler=agent_tool_handler,
            input_schema=self.input_schema,
            auto_schema=False,
        )

        return agent_tool

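    # Unlike a handoff, an agent-as-tool keeps control with the caller: the
    # wrapped agent is exposed under its own name with the
    # {"user_message": ...} input schema defined in __init__, and only its
    # final output string flows back into the calling agent's reasoning loop.
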
    def _create_handoff_tool(self, handoff_config: Handoff, current_messages_callback: Optional[Callable] = None) -> Tool:
        """Create a tool for handoff to another agent.

        Args:
            handoff_config: Handoff configuration
            current_messages_callback: Optional callback to get current conversation messages

        Returns:
            Tool instance that executes the handoff
        """
        target_agent = handoff_config.agent
        tool_name = handoff_config.tool_name

        # Handler that executes the handoff
        async def handoff_handler(ctx: Context, message: str) -> Dict[str, Any]:
            """Transfer control to target agent."""
            ctx.logger.info(
                f"Handoff from '{self.name}' to '{target_agent.name}': {message}"
            )

            # If we should pass conversation history, add it to context
            if handoff_config.pass_full_history:
                # Get current conversation from the agent's run loop
                # (This will be set when we detect the handoff in run())
                conversation_history = getattr(ctx, '_agent_data', {}).get("_current_conversation", [])

                if conversation_history:
                    ctx.logger.info(
                        f"Passing {len(conversation_history)} messages to target agent"
                    )
                    # Store in context for target agent to optionally use
                    if not hasattr(ctx, '_agent_data'):
                        ctx._agent_data = {}
                    ctx._agent_data["_handoff_conversation_history"] = conversation_history

            # Execute target agent with the message and shared context
            result = await target_agent.run(message, context=ctx)

            # Store handoff metadata - this signals that a handoff occurred
            handoff_data = {
                "_handoff": True,
                "from_agent": self.name,
                "to_agent": target_agent.name,
                "message": message,
                "output": result.output,
                "tool_calls": result.tool_calls,
            }

            if not hasattr(ctx, '_agent_data'):
                ctx._agent_data = {}
            ctx._agent_data["_handoff_result"] = handoff_data

            # Return the handoff data (will be detected in run() loop)
            return handoff_data

        # Create tool with handoff schema
        handoff_tool = Tool(
            name=tool_name,
            description=handoff_config.description,
            handler=handoff_handler,
            input_schema={
                "type": "object",
                "properties": {
                    "message": {
                        "type": "string",
                        "description": "Message or task to pass to the target agent"
                    }
                },
                "required": ["message"]
            },
            auto_schema=False,
        )

        return handoff_tool

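    # The "_handoff" sentinel in the dict returned above is the protocol
    # between the handoff tool and run(): when run() sees a tool result where
    # result.get("_handoff") is truthy, it stops its own loop and returns the
    # target agent's output as the final AgentResult, populating
    # handoff_to/handoff_metadata.
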
    def _detect_memory_scope(self, context: Optional[Context]) -> tuple[str, str]:
        """
        Auto-detect memory scope from context for agent conversation persistence.

        Implements priority logic:
        1. user_id → user-scoped memory (long-term)
        2. session_id → session-scoped memory (multi-turn)
        3. run_id → run-scoped memory (ephemeral)

        Args:
            context: WorkflowContext or other context with memory scoping fields

        Returns:
            Tuple of (entity_key, scope) where:
            - entity_key: e.g., "user:user-456", "session:abc-123", "run:xyz-789"
            - scope: "user", "session", or "run"

        Example:
            entity_key, scope = agent._detect_memory_scope(ctx)
            # If ctx.user_id="user-123": ("user:user-123", "user")
            # If ctx.session_id="sess-456": ("session:sess-456", "session")
            # Otherwise: ("run:run-789", "run")
        """
        # Extract identifiers from context
        user_id = getattr(context, 'user_id', None) if context else None
        session_id = getattr(context, 'session_id', None) if context else None
        run_id = getattr(context, 'run_id', None) if context else None

        # Priority: user_id > session_id > run_id
        if user_id:
            return (f"user:{user_id}", "user")
        elif session_id and session_id != run_id:  # Explicit session (not defaulting to run_id)
            return (f"session:{session_id}", "session")
        elif run_id:
            return (f"run:{run_id}", "run")
        else:
            # Fallback: create ephemeral key
            import uuid
            fallback_run_id = f"agent-{self.name}-{uuid.uuid4().hex[:8]}"
            return (f"run:{fallback_run_id}", "run")

    async def run(
        self,
        user_message: str,
        context: Optional[Context] = None,
    ) -> AgentResult:
        """Run agent to completion.

        Args:
            user_message: User's input message
            context: Optional context (auto-created if not provided, or read from contextvar)

        Returns:
            AgentResult with output and execution details

        Example:
            ```python
            result = await agent.run("Analyze recent tech news")
            print(result.output)
            ```
        """
        # Create or adapt context
        if context is None:
            # Try to get context from task-local storage (set by workflow/function decorator)
            context = get_current_context()

        # IMPORTANT: Capture workflow context NOW before we replace it with AgentContext
        # This allows LM calls inside the agent to emit workflow checkpoints
        from .workflow import WorkflowContext
        workflow_ctx = context if isinstance(context, WorkflowContext) else None

        if context is None:
            # Standalone execution - create AgentContext
            import uuid
            run_id = f"agent-{self.name}-{uuid.uuid4().hex[:8]}"
            context = AgentContext(
                run_id=run_id,
                agent_name=self.name,
            )
        elif isinstance(context, AgentContext):
            # Already AgentContext - use as-is
            pass
        elif hasattr(context, '_workflow_entity'):
            # WorkflowContext - create AgentContext that inherits state
            # Auto-detect memory scope based on user_id/session_id/run_id priority
            entity_key, scope = self._detect_memory_scope(context)

            import uuid
            run_id = f"{context.run_id}:agent:{self.name}"
            # Extract the ID from entity_key (e.g., "session:abc-123" → "abc-123")
            detected_session_id = entity_key.split(":", 1)[1] if ":" in entity_key else context.run_id

            context = AgentContext(
                run_id=run_id,
                agent_name=self.name,
                session_id=detected_session_id,  # Use auto-detected scope
                parent_context=context,
                runtime_context=getattr(context, '_runtime_context', None),  # Inherit trace context
            )
        else:
            # FunctionContext or other - create new AgentContext
            import uuid
            run_id = f"{context.run_id}:agent:{self.name}"
            context = AgentContext(
                run_id=run_id,
                agent_name=self.name,
                runtime_context=getattr(context, '_runtime_context', None),  # Inherit trace context
            )

        # Emit checkpoint if called within a workflow context
        if workflow_ctx is not None:
            workflow_ctx._send_checkpoint("workflow.agent.started", {
                "agent.name": self.name,
                "agent.model": self.model_name,
                "agent.tools": list(self.tools.keys()),
                "agent.max_iterations": self.max_iterations,
                "user_message": user_message,
            })

        # NEW: Check if this is a resume from HITL
        if workflow_ctx and hasattr(workflow_ctx, "_agent_resume_info"):
            resume_info = workflow_ctx._agent_resume_info
            if resume_info["agent_name"] == self.name:
                self.logger.info("Detected HITL resume, calling resume_from_hitl()")

                # Clear resume info to avoid re-entry
                delattr(workflow_ctx, "_agent_resume_info")

                # Resume from checkpoint (context setup happens inside resume_from_hitl)
                return await self.resume_from_hitl(
                    context=workflow_ctx,
                    agent_context=resume_info["agent_context"],
                    user_response=resume_info["user_response"],
                )

        # Set context in task-local storage for automatic propagation to tools and LM calls
        token = set_current_context(context)
        try:
            try:
                # Load conversation history from state (if AgentContext)
                if isinstance(context, AgentContext):
                    messages: List[Message] = await context.get_conversation_history()
                    # Add new user message
                    messages.append(Message.user(user_message))
                    # Save updated conversation
                    await context.save_conversation_history(messages)
                else:
                    # Fallback for non-AgentContext (shouldn't happen with code above)
                    messages = [Message.user(user_message)]

                # Create span for agent execution with trace linking
                from ._core import create_span

                with create_span(
                    self.name,
                    "agent",
                    context._runtime_context if hasattr(context, "_runtime_context") else None,
                    {
                        "agent.name": self.name,
                        "agent.model": self.model_name,  # Use model_name (always a string)
                        "agent.max_iterations": str(self.max_iterations),
                    },
                ) as span:
                    all_tool_calls: List[Dict[str, Any]] = []

                    # Reasoning loop
                    for iteration in range(self.max_iterations):
                        # Build tool definitions for LLM
                        tool_defs = [
                            ToolDefinition(
                                name=tool.name,
                                description=tool.description,
                                parameters=tool.input_schema,
                            )
                            for tool in self.tools.values()
                        ]

                        # Convert messages to dict format for lm.generate()
                        messages_dict = []
                        for msg in messages:
                            messages_dict.append({
                                "role": msg.role.value,
                                "content": msg.content
                            })

                        # Call LLM
                        # Check if we have a legacy LanguageModel instance or need to create one
                        if self._language_model is not None:
                            # Legacy API: use provided LanguageModel instance
                            request = GenerateRequest(
                                model="mock-model",  # Not used by MockLanguageModel
                                system_prompt=self.instructions,
                                messages=messages,
                                tools=tool_defs if tool_defs else [],
                            )
                            request.config.temperature = self.temperature
                            if self.max_tokens:
                                request.config.max_tokens = self.max_tokens
                            if self.top_p:
                                request.config.top_p = self.top_p
                            response = await self._language_model.generate(request)
                        else:
                            # New API: model is a string, create internal LM instance
                            request = GenerateRequest(
                                model=self.model,
                                system_prompt=self.instructions,
                                messages=messages,
                                tools=tool_defs if tool_defs else [],
                            )
                            request.config.temperature = self.temperature
                            if self.max_tokens:
                                request.config.max_tokens = self.max_tokens
                            if self.top_p:
                                request.config.top_p = self.top_p

                            # Create internal LM instance for generation
                            # TODO: Use model_config when provided
                            from .lm import _LanguageModel
                            provider, model_name = self.model.split('/', 1)
                            internal_lm = _LanguageModel(provider=provider.lower(), default_model=None)
                            response = await internal_lm.generate(request)

                        # Add assistant response to messages
                        messages.append(Message.assistant(response.text))

                        # Check if LLM wants to use tools
                        if response.tool_calls:
                            self.logger.debug(f"Agent calling {len(response.tool_calls)} tool(s)")

                            # Store current conversation in context for potential handoffs
                            # Use a simple dict attribute since we don't need full state persistence for this
                            if not hasattr(context, '_agent_data'):
                                context._agent_data = {}
                            context._agent_data["_current_conversation"] = messages

                            # Execute tool calls
                            tool_results = []
                            for tool_call in response.tool_calls:
                                tool_name = tool_call["name"]
                                tool_args_str = tool_call["arguments"]

                                # Track tool call
                                all_tool_calls.append(
                                    {
                                        "name": tool_name,
                                        "arguments": tool_args_str,
                                        "iteration": iteration + 1,
                                    }
                                )

                                # Execute tool
                                try:
                                    # Parse arguments
                                    tool_args = json.loads(tool_args_str)

                                    # Get tool
                                    tool = self.tools.get(tool_name)
                                    if not tool:
                                        result_text = f"Error: Tool '{tool_name}' not found"
                                    else:
                                        # Execute tool
                                        result = await tool.invoke(context, **tool_args)

                                        # Check if this was a handoff
                                        if isinstance(result, dict) and result.get("_handoff"):
                                            self.logger.info(
                                                f"Handoff detected to '{result['to_agent']}', "
                                                f"terminating current agent"
                                            )
                                            # Save conversation before returning
                                            if isinstance(context, AgentContext):
                                                await context.save_conversation_history(messages)
                                            # Return immediately with handoff result
                                            return AgentResult(
                                                output=result["output"],
                                                tool_calls=all_tool_calls + result.get("tool_calls", []),
                                                context=context,
                                                handoff_to=result["to_agent"],
                                                handoff_metadata=result,
                                            )

                                        result_text = json.dumps(result) if result else "null"

                                    tool_results.append(
                                        {"tool": tool_name, "result": result_text, "error": None}
                                    )

                                except WaitingForUserInputException as e:
                                    # HITL PAUSE: Capture agent state and propagate exception
                                    self.logger.info(f"Agent pausing for user input at iteration {iteration}")

                                    # Serialize messages to dict format
                                    messages_dict = [
                                        {"role": msg.role.value, "content": msg.content}
                                        for msg in messages
                                    ]

                                    # Enhance exception with agent execution context
                                    raise WaitingForUserInputException(
                                        question=e.question,
                                        input_type=e.input_type,
                                        options=e.options,
                                        checkpoint_state=e.checkpoint_state,
                                        agent_context={
                                            "agent_name": self.name,
                                            "iteration": iteration,
                                            "messages": messages_dict,
                                            "tool_results": tool_results,
                                            "pending_tool_call": {
                                                "name": tool_call["name"],
                                                "arguments": tool_call["arguments"],
                                                "tool_call_index": response.tool_calls.index(tool_call),
                                            },
                                            "all_tool_calls": all_tool_calls,
                                            "model_config": {
                                                "model": self.model,
                                                "temperature": self.temperature,
                                                "max_tokens": self.max_tokens,
                                                "top_p": self.top_p,
                                            },
                                        },
                                    ) from e

                                except Exception as e:
                                    # Regular tool errors - log and continue
                                    self.logger.error(f"Tool execution error: {e}")
                                    tool_results.append(
                                        {"tool": tool_name, "result": None, "error": str(e)}
                                    )

                            # Add tool results to conversation
                            results_text = "\n".join(
                                [
                                    f"Tool: {tr['tool']}\nResult: {tr['result']}"
                                    if tr["error"] is None
                                    else f"Tool: {tr['tool']}\nError: {tr['error']}"
                                    for tr in tool_results
                                ]
                            )
                            messages.append(Message.user(f"Tool results:\n{results_text}\n\nPlease provide your final answer based on these results."))

                            # Continue loop for agent to process results

                        else:
                            # No tool calls - agent is done
                            self.logger.debug(f"Agent completed after {iteration + 1} iterations")
                            # Save conversation before returning
                            if isinstance(context, AgentContext):
                                await context.save_conversation_history(messages)

                            # Emit completion checkpoint
                            if workflow_ctx:
                                workflow_ctx._send_checkpoint("workflow.agent.completed", {
                                    "agent.name": self.name,
                                    "agent.iterations": iteration + 1,
                                    "agent.tool_calls_count": len(all_tool_calls),
                                    "output_length": len(response.text),
                                })

                            return AgentResult(
                                output=response.text,
                                tool_calls=all_tool_calls,
                                context=context,
                            )

                    # Max iterations reached
                    self.logger.warning(f"Agent reached max iterations ({self.max_iterations})")
                    final_output = messages[-1].content if messages else "No output generated"
                    # Save conversation before returning
                    if isinstance(context, AgentContext):
                        await context.save_conversation_history(messages)

                    # Emit completion checkpoint with max iterations flag
                    if workflow_ctx:
                        workflow_ctx._send_checkpoint("workflow.agent.completed", {
                            "agent.name": self.name,
                            "agent.iterations": self.max_iterations,
                            "agent.tool_calls_count": len(all_tool_calls),
                            "agent.max_iterations_reached": True,
                            "output_length": len(final_output),
                        })

                    return AgentResult(
                        output=final_output,
                        tool_calls=all_tool_calls,
                        context=context,
                    )
            except Exception as e:
                # Emit error checkpoint for observability
                if workflow_ctx:
                    workflow_ctx._send_checkpoint("workflow.agent.error", {
                        "agent.name": self.name,
                        "error": str(e),
                        "error_type": type(e).__name__,
                    })
                raise
        finally:
            # Always reset context to prevent leakage between agent executions
            from .context import _current_context
            _current_context.reset(token)

    async def resume_from_hitl(
        self,
        context: Context,
        agent_context: Dict,
        user_response: str,
    ) -> AgentResult:
        """
        Resume agent execution after HITL pause.

        This method reconstructs agent state from the checkpoint and injects
        the user's response as the successful tool result, then continues
        the conversation loop.

        Args:
            context: Current execution context (workflow or agent)
            agent_context: Agent state from WaitingForUserInputException.agent_context
            user_response: User's answer to the HITL question

        Returns:
            AgentResult with final output and tool calls
        """
        self.logger.info(f"Resuming agent '{self.name}' from HITL pause")

        # 1. Restore conversation state
        messages = [
            Message(role=lm.MessageRole(msg["role"]), content=msg["content"])
            for msg in agent_context["messages"]
        ]
        iteration = agent_context["iteration"]
        all_tool_calls = agent_context["all_tool_calls"]

        # 2. Restore partial tool results for current iteration
        tool_results = agent_context["tool_results"]

        # 3. Inject user response as successful tool result
        pending_tool = agent_context["pending_tool_call"]
        tool_results.append({
            "tool": pending_tool["name"],
            "result": json.dumps(user_response),
            "error": None,
        })

        self.logger.debug(
            f"Injected user response for tool '{pending_tool['name']}': {user_response}"
        )

        # 4. Add tool results to conversation
        results_text = "\n".join([
            f"Tool: {tr['tool']}\nResult: {tr['result']}"
            if tr["error"] is None
            else f"Tool: {tr['tool']}\nError: {tr['error']}"
            for tr in tool_results
        ])
        messages.append(Message.user(
            f"Tool results:\n{results_text}\n\n"
            f"Please provide your final answer based on these results."
        ))

        # 5. Continue agent execution loop from next iteration
        return await self._continue_execution_from_iteration(
            context=context,
            messages=messages,
            iteration=iteration + 1,  # Next iteration
            all_tool_calls=all_tool_calls,
        )

|
|
1341
|
+
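    # Editor's sketch (illustrative, not part of agent.py): how a caller might
    # drive the pause/resume cycle. "collect_user_input" is a hypothetical
    # helper; the exception attributes and resume_from_hitl are as defined here.
    #
    #     try:
    #         result = await agent.run("Book my trip", context=ctx)
    #     except WaitingForUserInputException as pause:
    #         answer = await collect_user_input(pause.question, pause.options)
    #         result = await agent.resume_from_hitl(
    #             context=ctx,
    #             agent_context=pause.agent_context,
    #             user_response=answer,
    #         )
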
    async def _continue_execution_from_iteration(
        self,
        context: Context,
        messages: List[Message],
        iteration: int,
        all_tool_calls: List[Dict],
    ) -> AgentResult:
        """
        Continue agent execution from a specific iteration.

        This is the core execution loop extracted to support both:
        1. Normal execution (starting from iteration 0)
        2. Resume after HITL (starting from iteration N)

        Args:
            context: Execution context
            messages: Conversation history
            iteration: Starting iteration number
            all_tool_calls: Accumulated tool calls

        Returns:
            AgentResult with output and tool calls
        """
        # Extract workflow context for checkpointing
        workflow_ctx = None
        if hasattr(context, "_workflow_entity"):
            workflow_ctx = context
        elif hasattr(context, "_agent_data") and "_workflow_ctx" in context._agent_data:
            workflow_ctx = context._agent_data["_workflow_ctx"]

        # Prepare tool definitions
        tool_defs = [
            ToolDefinition(
                name=name,
                description=tool.description or f"Tool: {name}",
                parameters=tool.input_schema if hasattr(tool, "input_schema") else {},
            )
            for name, tool in self.tools.items()
        ]

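        # Editor's note (illustrative): for a registered tool named "search_web"
        # carrying a JSON-schema input, the comprehension above yields roughly
        #
        #     ToolDefinition(
        #         name="search_web",
        #         description="Search the web for a query",
        #         parameters={"type": "object",
        #                     "properties": {"query": {"type": "string"}}},
        #     )
        #
        # Tools without an input_schema attribute fall back to empty parameters.
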
        # Main iteration loop (continue from specified iteration)
        while iteration < self.max_iterations:
            self.logger.debug(f"Agent iteration {iteration + 1}/{self.max_iterations}")

            # Call LLM for next response
            if self._language_model:
                # Legacy API: model is a LanguageModel instance
                request = GenerateRequest(
                    system_prompt=self.instructions,
                    messages=messages,
                    tools=tool_defs if tool_defs else [],
                )
                request.config.temperature = self.temperature
                if self.max_tokens:
                    request.config.max_tokens = self.max_tokens
                if self.top_p:
                    request.config.top_p = self.top_p
                response = await self._language_model.generate(request)
            else:
                # New API: model is a string, create internal LM instance
                request = GenerateRequest(
                    model=self.model,
                    system_prompt=self.instructions,
                    messages=messages,
                    tools=tool_defs if tool_defs else [],
                )
                request.config.temperature = self.temperature
                if self.max_tokens:
                    request.config.max_tokens = self.max_tokens
                if self.top_p:
                    request.config.top_p = self.top_p

                # Create internal LM instance for generation
                from .lm import _LanguageModel
                provider, model_name = self.model.split('/', 1)
                internal_lm = _LanguageModel(provider=provider.lower(), default_model=None)
                response = await internal_lm.generate(request)

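            # Editor's note (illustrative): the new API expects the model as a
            # "provider/model" string, e.g. "openai/gpt-4o-mini", so that
            #
            #     provider, model_name = "openai/gpt-4o-mini".split('/', 1)
            #
            # yields provider="openai" and model_name="gpt-4o-mini". A model
            # string with no '/' would raise ValueError at the split above.
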
            # Add assistant response to messages
            messages.append(Message.assistant(response.text))

            # Check if LLM wants to use tools
            if response.tool_calls:
                self.logger.debug(f"Agent calling {len(response.tool_calls)} tool(s)")

                # Store current conversation in context for potential handoffs
                if not hasattr(context, '_agent_data'):
                    context._agent_data = {}
                context._agent_data["_current_conversation"] = messages

                # Execute tool calls
                tool_results = []
                for tool_call in response.tool_calls:
                    tool_name = tool_call["name"]
                    tool_args_str = tool_call["arguments"]

                    # Track tool call
                    all_tool_calls.append({
                        "name": tool_name,
                        "arguments": tool_args_str,
                        "iteration": iteration + 1,
                    })

                    # Execute tool
                    try:
                        # Parse arguments
                        tool_args = json.loads(tool_args_str)

                        # Get tool
                        tool = self.tools.get(tool_name)
                        if not tool:
                            result_text = f"Error: Tool '{tool_name}' not found"
                        else:
                            # Execute tool
                            result = await tool.invoke(context, **tool_args)

                            # Check if this was a handoff
                            if isinstance(result, dict) and result.get("_handoff"):
                                self.logger.info(
                                    f"Handoff detected to '{result['to_agent']}', "
                                    f"terminating current agent"
                                )
                                # Save conversation before returning
                                if isinstance(context, AgentContext):
                                    await context.save_conversation_history(messages)
                                # Return immediately with handoff result
                                return AgentResult(
                                    output=result["output"],
                                    tool_calls=all_tool_calls + result.get("tool_calls", []),
                                    context=context,
                                    handoff_to=result["to_agent"],
                                    handoff_metadata=result,
                                )

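                            # Editor's note (illustrative): a handoff tool is
                            # expected to return a dict shaped roughly like
                            #
                            #     {"_handoff": True,
                            #      "to_agent": "billing_agent",   # made-up name
                            #      "output": "...final text...",
                            #      "tool_calls": [...]}
                            #
                            # matching the keys read in the branch above.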
                            result_text = json.dumps(result) if result else "null"

                        tool_results.append(
                            {"tool": tool_name, "result": result_text, "error": None}
                        )

                    except WaitingForUserInputException as e:
                        # HITL PAUSE: Capture agent state and propagate exception
                        self.logger.info(f"Agent pausing for user input at iteration {iteration}")

                        # Serialize messages to dict format
                        messages_dict = [
                            {"role": msg.role.value, "content": msg.content}
                            for msg in messages
                        ]

                        # Enhance exception with agent execution context
                        from .exceptions import WaitingForUserInputException
                        raise WaitingForUserInputException(
                            question=e.question,
                            input_type=e.input_type,
                            options=e.options,
                            checkpoint_state=e.checkpoint_state,
                            agent_context={
                                "agent_name": self.name,
                                "iteration": iteration,
                                "messages": messages_dict,
                                "tool_results": tool_results,
                                "pending_tool_call": {
                                    "name": tool_call["name"],
                                    "arguments": tool_call["arguments"],
                                    "tool_call_index": response.tool_calls.index(tool_call),
                                },
                                "all_tool_calls": all_tool_calls,
                                "model_config": {
                                    "model": self.model,
                                    "temperature": self.temperature,
                                    "max_tokens": self.max_tokens,
                                    "top_p": self.top_p,
                                },
                            },
                        ) from e

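                    # Editor's note (illustrative): the payload above is exactly
                    # what resume_from_hitl() consumes; messages round-trip as
                    # plain dicts, e.g.
                    #
                    #     {"role": "user", "content": "..."}
                    #         -> Message(role=lm.MessageRole("user"), content="...")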
                    except Exception as e:
                        # Regular tool errors - log and continue
                        self.logger.error(f"Tool execution error: {e}")
                        tool_results.append(
                            {"tool": tool_name, "result": None, "error": str(e)}
                        )

                # Add tool results to conversation
                results_text = "\n".join([
                    f"Tool: {tr['tool']}\nResult: {tr['result']}"
                    if tr["error"] is None
                    else f"Tool: {tr['tool']}\nError: {tr['error']}"
                    for tr in tool_results
                ])
                messages.append(Message.user(
                    f"Tool results:\n{results_text}\n\n"
                    "Please provide your final answer based on these results."
                ))

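                # Editor's note (illustrative): with one success and one failure,
                # the message appended above renders as
                #
                #     Tool results:
                #     Tool: search_web
                #     Result: {"hits": 3}
                #     Tool: analyze_data
                #     Error: timeout
                #
                #     Please provide your final answer based on these results.
                #
                # (tool names and values are made up for the example)
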
                # Continue loop for agent to process results

            else:
                # No tool calls - agent is done
                self.logger.debug(f"Agent completed after {iteration + 1} iterations")
                # Save conversation before returning
                if isinstance(context, AgentContext):
                    await context.save_conversation_history(messages)

                # Emit completion checkpoint
                if workflow_ctx:
                    workflow_ctx._send_checkpoint("workflow.agent.completed", {
                        "agent.name": self.name,
                        "agent.iterations": iteration + 1,
                        "agent.tool_calls_count": len(all_tool_calls),
                        "output_length": len(response.text),
                    })

                return AgentResult(
                    output=response.text,
                    tool_calls=all_tool_calls,
                    context=context,
                )

            iteration += 1

        # Max iterations reached
        self.logger.warning(f"Agent reached max iterations ({self.max_iterations})")
        final_output = messages[-1].content if messages else "No output generated"
        # Save conversation before returning
        if isinstance(context, AgentContext):
            await context.save_conversation_history(messages)

        # Emit completion checkpoint with max iterations flag
        if workflow_ctx:
            workflow_ctx._send_checkpoint("workflow.agent.completed", {
                "agent.name": self.name,
                "agent.iterations": self.max_iterations,
                "agent.tool_calls_count": len(all_tool_calls),
                "agent.max_iterations_reached": True,
                "output_length": len(final_output),
            })

        return AgentResult(
            output=final_output,
            tool_calls=all_tool_calls,
            context=context,
        )

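# Editor's sketch (illustrative, not part of agent.py): fields a caller can read
# off the AgentResult constructed above (handoff_to is assumed to default to
# None outside the handoff path):
#
#     result = await agent.run("Summarize Q3", context=ctx)
#     print(result.output)         # final model text
#     print(result.tool_calls)     # [{"name", "arguments", "iteration"}, ...]
#     if result.handoff_to:        # set only when a handoff tool fired
#         next_agent = result.handoff_to
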
def agent(
    _func: Optional[Callable] = None,
    *,
    name: Optional[str] = None,
    model: Optional[LanguageModel] = None,
    instructions: Optional[str] = None,
    tools: Optional[List[Any]] = None,
    model_name: str = "gpt-4o-mini",
    temperature: float = 0.7,
    max_iterations: int = 10,
) -> Callable:
    """
    Decorator that creates an Agent from a function and registers it automatically.

    Agents can be defined as functions that create and return Agent instances.
    The agent is registered in the AgentRegistry so the worker can discover it.

    Args:
        name: Agent name (defaults to the function name)
        model: Language model instance (required if the function does not return an Agent)
        instructions: System instructions (required if the function does not return an Agent)
        tools: List of tools available to the agent
        model_name: Model name to use
        temperature: LLM temperature
        max_iterations: Maximum reasoning iterations

    Returns:
        The created Agent instance (usable directly)

    Example:
        ```python
        from agnt5 import agent, tool
        from agnt5.lm import OpenAILanguageModel

        @agent(
            name="research_agent",
            model=OpenAILanguageModel(),
            instructions="You are a research assistant.",
            tools=[search_web, analyze_data]
        )
        def create_researcher():
            # Agent is created and registered automatically
            pass

        # Or create the agent directly
        @agent
        def my_agent():
            from agnt5.lm import OpenAILanguageModel
            return Agent(
                name="my_agent",
                model=OpenAILanguageModel(),
                instructions="You are a helpful assistant."
            )
        ```
    """

    def decorator(func: Callable) -> Callable:
        # Determine agent name
        agent_name = name or func.__name__

        # Create the agent
        @functools.wraps(func)
        def wrapper(*args, **kwargs) -> Agent:
            # Check if function returns an Agent
            result = func(*args, **kwargs)
            if isinstance(result, Agent):
                # Function creates its own agent
                agent_instance = result
            elif model is not None and instructions is not None:
                # Create agent from decorator parameters
                agent_instance = Agent(
                    name=agent_name,
                    model=model,
                    instructions=instructions,
                    tools=tools,
                    model_name=model_name,
                    temperature=temperature,
                    max_iterations=max_iterations,
                )
            else:
                raise ValueError(
                    f"Agent decorator for '{agent_name}' requires either "
                    "the decorated function to return an Agent instance, "
                    "or 'model' and 'instructions' parameters to be provided"
                )

            # Register agent
            AgentRegistry.register(agent_instance)
            return agent_instance

        # Create agent immediately and store reference
        agent_instance = wrapper()

        # Return the agent instance itself (so it can be used directly)
        return agent_instance

    if _func is None:
        return decorator
    return decorator(_func)