agnt5-0.2.8a6-cp310-abi3-macosx_11_0_arm64.whl → agnt5-0.2.8a8-cp310-abi3-macosx_11_0_arm64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agnt5/_core.abi3.so +0 -0
- agnt5/_telemetry.py +7 -2
- agnt5/agent.py +744 -171
- agnt5/client.py +18 -1
- agnt5/context.py +94 -0
- agnt5/exceptions.py +13 -0
- agnt5/function.py +18 -11
- agnt5/lm.py +124 -16
- agnt5/tool.py +110 -29
- agnt5/worker.py +421 -28
- agnt5/workflow.py +367 -72
- {agnt5-0.2.8a6.dist-info → agnt5-0.2.8a8.dist-info}/METADATA +1 -1
- agnt5-0.2.8a8.dist-info/RECORD +22 -0
- agnt5-0.2.8a6.dist-info/RECORD +0 -22
- {agnt5-0.2.8a6.dist-info → agnt5-0.2.8a8.dist-info}/WHEEL +0 -0
agnt5/agent.py
CHANGED
@@ -12,11 +12,12 @@ import logging
 import time
 from typing import Any, Callable, Dict, List, Optional, Union
 
-from .context import Context
+from .context import Context, get_current_context, set_current_context
 from . import lm
 from .lm import GenerateRequest, GenerateResponse, LanguageModel, Message, ModelConfig, ToolDefinition
 from .tool import Tool, ToolRegistry
 from ._telemetry import setup_module_logger
+from .exceptions import WaitingForUserInputException
 
 logger = setup_module_logger(__name__)
 
@@ -79,6 +80,7 @@ class AgentContext(Context):
 
         self._agent_name = agent_name
         self._session_id = session_id or run_id
+        self.parent_context = parent_context  # Store for context chain traversal
 
         # Determine state adapter based on parent context
         from .entity import EntityStateAdapter, _get_state_adapter
@@ -118,6 +120,16 @@ class AgentContext(Context):
         # Entity key for database persistence (without :messages suffix to match API expectations)
         self._entity_key = f"agent:{agent_name}:{self._session_id}"
 
+        # Determine storage mode: "workflow" if parent is WorkflowContext, else "standalone"
+        self._storage_mode = "standalone"  # Default mode
+        self._workflow_entity = None
+
+        if parent_context and hasattr(parent_context, '_workflow_entity'):
+            # Agent is running within a workflow - store conversation in workflow state
+            self._storage_mode = "workflow"
+            self._workflow_entity = parent_context._workflow_entity
+            logger.debug(f"Agent '{agent_name}' using workflow storage mode (workflow entity: {self._workflow_entity.key})")
+
     @property
     def state(self):
         """
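The constructor addition above decides where conversation history lives with nothing more than a `hasattr` probe on the parent context. A minimal sketch of that dispatch, using hypothetical stub classes in place of the real `WorkflowContext` and `FunctionContext` (the stub names are invented for illustration; only the `_workflow_entity` marker attribute comes from the diff):

```python
# Sketch: how AgentContext picks its storage mode (stub classes are hypothetical).
class FakeWorkflowEntity:
    key = "workflow:order-processing:run-1"

class FakeWorkflowContext:
    _workflow_entity = FakeWorkflowEntity()  # the marker attribute the probe looks for

class FakeFunctionContext:
    pass  # no _workflow_entity, so agents fall back to standalone storage

def detect_storage_mode(parent_context):
    if parent_context is not None and hasattr(parent_context, "_workflow_entity"):
        return "workflow", parent_context._workflow_entity
    return "standalone", None

assert detect_storage_mode(FakeWorkflowContext()) == ("workflow", FakeWorkflowContext._workflow_entity)
assert detect_storage_mode(FakeFunctionContext()) == ("standalone", None)
assert detect_storage_mode(None) == ("standalone", None)
```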
@@ -151,10 +163,27 @@ class AgentContext(Context):
         Retrieve conversation history from state, loading from database if needed.
 
         Uses the EntityStateAdapter which delegates to Rust core for cache-first loading.
+        If running within a workflow, loads from workflow entity state instead.
 
         Returns:
             List of Message objects from conversation history
         """
+        if self._storage_mode == "workflow":
+            return await self._load_from_workflow_state()
+        else:
+            return await self._load_from_entity_storage()
+
+    async def _load_from_workflow_state(self) -> List[Message]:
+        """Load conversation history from workflow entity state."""
+        key = f"agent.{self._agent_name}"
+        agent_data = self._workflow_entity.state.get(key, {})
+        messages_data = agent_data.get("messages", [])
+
+        # Convert dict representations back to Message objects
+        return self._convert_dicts_to_messages(messages_data)
+
+    async def _load_from_entity_storage(self) -> List[Message]:
+        """Load conversation history from AgentSession entity (standalone mode)."""
         entity_type = "AgentSession"
         entity_key = self._entity_key
 
@@ -173,6 +202,10 @@ class AgentContext(Context):
             messages_data = []
 
         # Convert dict representations back to Message objects
+        return self._convert_dicts_to_messages(messages_data)
+
+    def _convert_dicts_to_messages(self, messages_data: list) -> List[Message]:
+        """Convert list of message dicts to Message objects."""
         messages = []
         for msg_dict in messages_data:
             if isinstance(msg_dict, dict):
@@ -199,10 +232,48 @@ class AgentContext(Context):
         Save conversation history to state and persist to database.
 
         Uses the EntityStateAdapter which delegates to Rust core for version-checked saves.
+        If running within a workflow, saves to workflow entity state instead.
 
         Args:
             messages: List of Message objects to persist
         """
+        if self._storage_mode == "workflow":
+            await self._save_to_workflow_state(messages)
+        else:
+            await self._save_to_entity_storage(messages)
+
+    async def _save_to_workflow_state(self, messages: List[Message]) -> None:
+        """Save conversation history to workflow entity state."""
+        # Convert Message objects to dict for JSON serialization
+        messages_data = []
+        for msg in messages:
+            messages_data.append({
+                "role": msg.role.value if hasattr(msg.role, 'value') else str(msg.role),
+                "content": msg.content,
+                "timestamp": time.time()
+            })
+
+        # Build agent data structure
+        key = f"agent.{self._agent_name}"
+        current_data = self._workflow_entity.state.get(key, {})
+        now = time.time()
+
+        agent_data = {
+            "session_id": self._session_id,
+            "agent_name": self._agent_name,
+            "created_at": current_data.get("created_at", now),
+            "last_message_time": now,
+            "message_count": len(messages_data),
+            "messages": messages_data,
+            "metadata": getattr(self, '_custom_metadata', {})
+        }
+
+        # Store in workflow state (WorkflowEntity handles persistence)
+        self._workflow_entity.state.set(key, agent_data)
+        logger.info(f"Saved conversation to workflow state: {key} ({len(messages_data)} messages)")
+
+    async def _save_to_entity_storage(self, messages: List[Message]) -> None:
+        """Save conversation history to AgentSession entity (standalone mode)."""
         # Convert Message objects to dict for JSON serialization
         messages_data = []
         for msg in messages:
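For reference, the record that `_save_to_workflow_state` writes under the `agent.<name>` key in workflow state has the following shape. The values below are illustrative, not taken from a real run:

```python
# Illustrative contents of workflow state under key "agent.support" after two turns.
agent_data = {
    "session_id": "sess-456",
    "agent_name": "support",
    "created_at": 1733000000.0,         # preserved from the first save
    "last_message_time": 1733000042.5,  # refreshed on every save
    "message_count": 2,
    "messages": [
        {"role": "user", "content": "Where is my order?", "timestamp": 1733000000.0},
        {"role": "assistant", "content": "Let me check that for you.", "timestamp": 1733000042.5},
    ],
    "metadata": {},  # _custom_metadata, when the caller has set any
}
```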
@@ -271,6 +342,35 @@ class AgentContext(Context):
             print(f"User ID: {metadata['custom'].get('user_id')}")
         ```
         """
+        if self._storage_mode == "workflow":
+            return await self._get_metadata_from_workflow()
+        else:
+            return await self._get_metadata_from_entity()
+
+    async def _get_metadata_from_workflow(self) -> Dict[str, Any]:
+        """Get metadata from workflow entity state."""
+        key = f"agent.{self._agent_name}"
+        agent_data = self._workflow_entity.state.get(key, {})
+
+        if not agent_data:
+            # No conversation exists yet - return defaults
+            return {
+                "created_at": None,
+                "last_activity": None,
+                "message_count": 0,
+                "custom": getattr(self, '_custom_metadata', {})
+            }
+
+        messages = agent_data.get("messages", [])
+        return {
+            "created_at": agent_data.get("created_at"),
+            "last_activity": agent_data.get("last_message_time"),
+            "message_count": len(messages),
+            "custom": agent_data.get("metadata", {})
+        }
+
+    async def _get_metadata_from_entity(self) -> Dict[str, Any]:
+        """Get metadata from AgentSession entity (standalone mode)."""
         entity_type = "AgentSession"
         entity_key = self._entity_key
 
@@ -633,6 +733,9 @@ class Agent:
             }
         }
 
+        # Auto-register agent for discovery by auto_register=True
+        AgentRegistry.register(self)
+
         # Store metadata
         self.metadata = {
             "description": instructions,
@@ -768,6 +871,47 @@ class Agent:
 
         return handoff_tool
 
+    def _detect_memory_scope(self, context: Optional[Context]) -> tuple[str, str]:
+        """
+        Auto-detect memory scope from context for agent conversation persistence.
+
+        Implements priority logic:
+        1. user_id → user-scoped memory (long-term)
+        2. session_id → session-scoped memory (multi-turn)
+        3. run_id → run-scoped memory (ephemeral)
+
+        Args:
+            context: WorkflowContext or other context with memory scoping fields
+
+        Returns:
+            Tuple of (entity_key, scope) where:
+            - entity_key: e.g., "user:user-456", "session:abc-123", "run:xyz-789"
+            - scope: "user", "session", or "run"
+
+        Example:
+            entity_key, scope = agent._detect_memory_scope(ctx)
+            # If ctx.user_id="user-123": ("user:user-123", "user")
+            # If ctx.session_id="sess-456": ("session:sess-456", "session")
+            # Otherwise: ("run:run-789", "run")
+        """
+        # Extract identifiers from context
+        user_id = getattr(context, 'user_id', None) if context else None
+        session_id = getattr(context, 'session_id', None) if context else None
+        run_id = getattr(context, 'run_id', None) if context else None
+
+        # Priority: user_id > session_id > run_id
+        if user_id:
+            return (f"user:{user_id}", "user")
+        elif session_id and session_id != run_id:  # Explicit session (not defaulting to run_id)
+            return (f"session:{session_id}", "session")
+        elif run_id:
+            return (f"run:{run_id}", "run")
+        else:
+            # Fallback: create ephemeral key
+            import uuid
+            fallback_run_id = f"agent-{self.name}-{uuid.uuid4().hex[:8]}"
+            return (f"run:{fallback_run_id}", "run")
+
     async def run(
         self,
         user_message: str,
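Because `_detect_memory_scope` reads its fields with `getattr`, any object carrying the right attributes exercises the priority logic. A quick check with stand-in contexts (a sketch: the `SimpleNamespace` stubs replace real contexts, and the `Agent(...)` constructor arguments shown are assumptions inferred from attributes used elsewhere in this diff):

```python
from types import SimpleNamespace

agent = Agent(name="support", model="openai/gpt-4o", instructions="...")  # kwargs assumed

# user_id wins over everything: long-term, user-scoped memory.
ctx = SimpleNamespace(user_id="user-123", session_id="sess-456", run_id="run-789")
assert agent._detect_memory_scope(ctx) == ("user:user-123", "user")

# No user_id: an explicit session_id (one that is not just the run_id) wins.
ctx = SimpleNamespace(user_id=None, session_id="sess-456", run_id="run-789")
assert agent._detect_memory_scope(ctx) == ("session:sess-456", "session")

# session_id defaulted to run_id: treated as run-scoped, ephemeral memory.
ctx = SimpleNamespace(user_id=None, session_id="run-789", run_id="run-789")
assert agent._detect_memory_scope(ctx) == ("run:run-789", "run")
```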
@@ -777,7 +921,7 @@ class Agent:
 
         Args:
             user_message: User's input message
-            context: Optional context (auto-created if not provided)
+            context: Optional context (auto-created if not provided, or read from contextvar)
 
         Returns:
             AgentResult with output and execution details
@@ -789,6 +933,15 @@ class Agent:
         ```
         """
         # Create or adapt context
+        if context is None:
+            # Try to get context from task-local storage (set by workflow/function decorator)
+            context = get_current_context()
+
+        # IMPORTANT: Capture workflow context NOW before we replace it with AgentContext
+        # This allows LM calls inside the agent to emit workflow checkpoints
+        from .workflow import WorkflowContext
+        workflow_ctx = context if isinstance(context, WorkflowContext) else None
+
         if context is None:
             # Standalone execution - create AgentContext
             import uuid
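`get_current_context` / `set_current_context` behave like a task-local slot: decorators set it, `run()` reads it when no context is passed, and the `finally` block later in this file resets it via the returned token (`_current_context.reset(token)`). A plausible shape for that helper module, reconstructed from how the diff uses it; the real `agnt5.context` implementation may differ:

```python
import contextvars
from typing import Optional

# Task-local slot holding the active Context (assumption: mirrors agnt5.context).
_current_context: contextvars.ContextVar[Optional[object]] = contextvars.ContextVar(
    "agnt5_current_context", default=None
)

def get_current_context() -> Optional[object]:
    """Return the context installed by the enclosing workflow/function, if any."""
    return _current_context.get()

def set_current_context(ctx: object) -> contextvars.Token:
    """Install ctx for the current task; the caller must reset() with the token."""
    return _current_context.set(ctx)
```

Using `contextvars` rather than a module-level global keeps concurrent agent runs in the same event loop from seeing each other's contexts.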
@@ -802,13 +955,20 @@ class Agent:
             pass
         elif hasattr(context, '_workflow_entity'):
             # WorkflowContext - create AgentContext that inherits state
+            # Auto-detect memory scope based on user_id/session_id/run_id priority
+            entity_key, scope = self._detect_memory_scope(context)
+
             import uuid
             run_id = f"{context.run_id}:agent:{self.name}"
+            # Extract the ID from entity_key (e.g., "session:abc-123" → "abc-123")
+            detected_session_id = entity_key.split(":", 1)[1] if ":" in entity_key else context.run_id
+
             context = AgentContext(
                 run_id=run_id,
                 agent_name=self.name,
-                session_id=
+                session_id=detected_session_id,  # Use auto-detected scope
                 parent_context=context,
+                runtime_context=getattr(context, '_runtime_context', None),  # Inherit trace context
             )
         else:
             # FunctionContext or other - create new AgentContext
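Putting this hunk together with the `AgentContext` constructor from earlier in the file: a workflow context carrying `user_id="user-123"` yields `("user:user-123", "user")` from `_detect_memory_scope`, the extracted `detected_session_id` is `"user-123"`, and the persistence key follows from the `_entity_key` f-string seen above. A worked trace (values illustrative):

```python
entity_key, scope = ("user:user-123", "user")      # from _detect_memory_scope(ctx)
detected_session_id = entity_key.split(":", 1)[1]  # -> "user-123"

# Inside AgentContext.__init__ (see the earlier hunk):
agent_name = "support"
_entity_key = f"agent:{agent_name}:{detected_session_id}"
assert _entity_key == "agent:support:user-123"     # standalone-mode persistence key
```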
@@ -817,198 +977,611 @@ class Agent:
             context = AgentContext(
                 run_id=run_id,
                 agent_name=self.name,
+                runtime_context=getattr(context, '_runtime_context', None),  # Inherit trace context
             )
 
-        #
-        if
-
-
-
-
-
-
-
-        messages = [Message.user(user_message)]
+        # Emit checkpoint if called within a workflow context
+        if workflow_ctx is not None:
+            workflow_ctx._send_checkpoint("workflow.agent.started", {
+                "agent.name": self.name,
+                "agent.model": self.model_name,
+                "agent.tools": list(self.tools.keys()),
+                "agent.max_iterations": self.max_iterations,
+                "user_message": user_message,
+            })
 
-        #
-
+        # NEW: Check if this is a resume from HITL
+        if workflow_ctx and hasattr(workflow_ctx, "_agent_resume_info"):
+            resume_info = workflow_ctx._agent_resume_info
+            if resume_info["agent_name"] == self.name:
+                self.logger.info("Detected HITL resume, calling resume_from_hitl()")
 
-
-
-            "agent",
-            context._runtime_context if hasattr(context, "_runtime_context") else None,
-            {
-                "agent.name": self.name,
-                "agent.model": self.model_name,  # Use model_name (always a string)
-                "agent.max_iterations": str(self.max_iterations),
-            },
-        ) as span:
-            all_tool_calls: List[Dict[str, Any]] = []
-
-            # Reasoning loop
-            for iteration in range(self.max_iterations):
-                # Build tool definitions for LLM
-                tool_defs = [
-                    ToolDefinition(
-                        name=tool.name,
-                        description=tool.description,
-                        parameters=tool.input_schema,
-                    )
-                    for tool in self.tools.values()
-                ]
-
-                # Convert messages to dict format for lm.generate()
-                messages_dict = []
-                for msg in messages:
-                    messages_dict.append({
-                        "role": msg.role.value,
-                        "content": msg.content
-                    })
+                # Clear resume info to avoid re-entry
+                delattr(workflow_ctx, "_agent_resume_info")
 
-        #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                # Resume from checkpoint (context setup happens inside resume_from_hitl)
+                return await self.resume_from_hitl(
+                    context=workflow_ctx,
+                    agent_context=resume_info["agent_context"],
+                    user_response=resume_info["user_response"],
+                )
+
+        # Set context in task-local storage for automatic propagation to tools and LM calls
+        token = set_current_context(context)
+        try:
+            try:
+                # Load conversation history from state (if AgentContext)
+                if isinstance(context, AgentContext):
+                    messages: List[Message] = await context.get_conversation_history()
+                    # Add new user message
+                    messages.append(Message.user(user_message))
+                    # Save updated conversation
+                    await context.save_conversation_history(messages)
                 else:
-            #
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            if not hasattr(context, '_agent_data'):
-                context._agent_data = {}
-            context._agent_data["_current_conversation"] = messages
-
-            # Execute tool calls
-            tool_results = []
-            for tool_call in response.tool_calls:
-                tool_name = tool_call["name"]
-                tool_args_str = tool_call["arguments"]
-
-                # Track tool call
-                all_tool_calls.append(
-                    {
-                        "name": tool_name,
-                        "arguments": tool_args_str,
-                        "iteration": iteration + 1,
-                    }
-                )
+                    # Fallback for non-AgentContext (shouldn't happen with code above)
+                    messages = [Message.user(user_message)]
+
+                # Create span for agent execution with trace linking
+                from ._core import create_span
+
+                with create_span(
+                    self.name,
+                    "agent",
+                    context._runtime_context if hasattr(context, "_runtime_context") else None,
+                    {
+                        "agent.name": self.name,
+                        "agent.model": self.model_name,  # Use model_name (always a string)
+                        "agent.max_iterations": str(self.max_iterations),
+                    },
+                ) as span:
+                    all_tool_calls: List[Dict[str, Any]] = []
+
+                    # Reasoning loop
+                    for iteration in range(self.max_iterations):
+                        # Build tool definitions for LLM
+                        tool_defs = [
+                            ToolDefinition(
+                                name=tool.name,
+                                description=tool.description,
+                                parameters=tool.input_schema,
+                            )
+                            for tool in self.tools.values()
+                        ]
 
-            #
-
-
-
+                        # Convert messages to dict format for lm.generate()
+                        messages_dict = []
+                        for msg in messages:
+                            messages_dict.append({
+                                "role": msg.role.value,
+                                "content": msg.content
+                            })
+
+                        # Call LLM
+                        # Check if we have a legacy LanguageModel instance or need to create one
+                        if self._language_model is not None:
+                            # Legacy API: use provided LanguageModel instance
+                            request = GenerateRequest(
+                                model="mock-model",  # Not used by MockLanguageModel
+                                system_prompt=self.instructions,
+                                messages=messages,
+                                tools=tool_defs if tool_defs else [],
+                            )
+                            request.config.temperature = self.temperature
+                            if self.max_tokens:
+                                request.config.max_tokens = self.max_tokens
+                            if self.top_p:
+                                request.config.top_p = self.top_p
+                            response = await self._language_model.generate(request)
+                        else:
+                            # New API: model is a string, create internal LM instance
+                            request = GenerateRequest(
+                                model=self.model,
+                                system_prompt=self.instructions,
+                                messages=messages,
+                                tools=tool_defs if tool_defs else [],
+                            )
+                            request.config.temperature = self.temperature
+                            if self.max_tokens:
+                                request.config.max_tokens = self.max_tokens
+                            if self.top_p:
+                                request.config.top_p = self.top_p
+
+                            # Create internal LM instance for generation
+                            # TODO: Use model_config when provided
+                            from .lm import _LanguageModel
+                            provider, model_name = self.model.split('/', 1)
+                            internal_lm = _LanguageModel(provider=provider.lower(), default_model=None)
+                            response = await internal_lm.generate(request)
+
+                        # Add assistant response to messages
+                        messages.append(Message.assistant(response.text))
+
+                        # Check if LLM wants to use tools
+                        if response.tool_calls:
+                            self.logger.debug(f"Agent calling {len(response.tool_calls)} tool(s)")
+
+                            # Store current conversation in context for potential handoffs
+                            # Use a simple dict attribute since we don't need full state persistence for this
+                            if not hasattr(context, '_agent_data'):
+                                context._agent_data = {}
+                            context._agent_data["_current_conversation"] = messages
+
+                            # Execute tool calls
+                            tool_results = []
+                            for tool_call in response.tool_calls:
+                                tool_name = tool_call["name"]
+                                tool_args_str = tool_call["arguments"]
+
+                                # Track tool call
+                                all_tool_calls.append(
+                                    {
+                                        "name": tool_name,
+                                        "arguments": tool_args_str,
+                                        "iteration": iteration + 1,
+                                    }
+                                )
 
-                # Get tool
-                tool = self.tools.get(tool_name)
-                if not tool:
-                    result_text = f"Error: Tool '{tool_name}' not found"
-                else:
                                 # Execute tool
-
-
-
-
-
-
-
-
-
-
-                    await
-
-
-
-
-
-
-
+                                try:
+                                    # Parse arguments
+                                    tool_args = json.loads(tool_args_str)
+
+                                    # Get tool
+                                    tool = self.tools.get(tool_name)
+                                    if not tool:
+                                        result_text = f"Error: Tool '{tool_name}' not found"
+                                    else:
+                                        # Execute tool
+                                        result = await tool.invoke(context, **tool_args)
+
+                                        # Check if this was a handoff
+                                        if isinstance(result, dict) and result.get("_handoff"):
+                                            self.logger.info(
+                                                f"Handoff detected to '{result['to_agent']}', "
+                                                f"terminating current agent"
+                                            )
+                                            # Save conversation before returning
+                                            if isinstance(context, AgentContext):
+                                                await context.save_conversation_history(messages)
+                                            # Return immediately with handoff result
+                                            return AgentResult(
+                                                output=result["output"],
+                                                tool_calls=all_tool_calls + result.get("tool_calls", []),
+                                                context=context,
+                                                handoff_to=result["to_agent"],
+                                                handoff_metadata=result,
+                                            )
+
+                                        result_text = json.dumps(result) if result else "null"
+
+                                    tool_results.append(
+                                        {"tool": tool_name, "result": result_text, "error": None}
                                     )
 
-
+                                except WaitingForUserInputException as e:
+                                    # HITL PAUSE: Capture agent state and propagate exception
+                                    self.logger.info(f"Agent pausing for user input at iteration {iteration}")
+
+                                    # Serialize messages to dict format
+                                    messages_dict = [
+                                        {"role": msg.role.value, "content": msg.content}
+                                        for msg in messages
+                                    ]
+
+                                    # Enhance exception with agent execution context
+                                    raise WaitingForUserInputException(
+                                        question=e.question,
+                                        input_type=e.input_type,
+                                        options=e.options,
+                                        checkpoint_state=e.checkpoint_state,
+                                        agent_context={
+                                            "agent_name": self.name,
+                                            "iteration": iteration,
+                                            "messages": messages_dict,
+                                            "tool_results": tool_results,
+                                            "pending_tool_call": {
+                                                "name": tool_call["name"],
+                                                "arguments": tool_call["arguments"],
+                                                "tool_call_index": response.tool_calls.index(tool_call),
+                                            },
+                                            "all_tool_calls": all_tool_calls,
+                                            "model_config": {
+                                                "model": self.model,
+                                                "temperature": self.temperature,
+                                                "max_tokens": self.max_tokens,
+                                                "top_p": self.top_p,
+                                            },
+                                        },
+                                    ) from e
+
+                                except Exception as e:
+                                    # Regular tool errors - log and continue
+                                    self.logger.error(f"Tool execution error: {e}")
+                                    tool_results.append(
+                                        {"tool": tool_name, "result": None, "error": str(e)}
+                                    )
 
-
-
+                            # Add tool results to conversation
+                            results_text = "\n".join(
+                                [
+                                    f"Tool: {tr['tool']}\nResult: {tr['result']}"
+                                    if tr["error"] is None
+                                    else f"Tool: {tr['tool']}\nError: {tr['error']}"
+                                    for tr in tool_results
+                                ]
                             )
-
-
-
-
-
+                            messages.append(Message.user(f"Tool results:\n{results_text}\n\nPlease provide your final answer based on these results."))
+
+                            # Continue loop for agent to process results
+
+                        else:
+                            # No tool calls - agent is done
+                            self.logger.debug(f"Agent completed after {iteration + 1} iterations")
+                            # Save conversation before returning
+                            if isinstance(context, AgentContext):
+                                await context.save_conversation_history(messages)
+
+                            # Emit completion checkpoint
+                            if workflow_ctx:
+                                workflow_ctx._send_checkpoint("workflow.agent.completed", {
+                                    "agent.name": self.name,
+                                    "agent.iterations": iteration + 1,
+                                    "agent.tool_calls_count": len(all_tool_calls),
+                                    "output_length": len(response.text),
+                                })
+
+                            return AgentResult(
+                                output=response.text,
+                                tool_calls=all_tool_calls,
+                                context=context,
                             )
 
-                #
-
-
-                        f"Tool: {tr['tool']}\nResult: {tr['result']}"
-                        if tr["error"] is None
-                        else f"Tool: {tr['tool']}\nError: {tr['error']}"
-                        for tr in tool_results
-                    ]
-                )
-                messages.append(Message.user(f"Tool results:\n{results_text}\n\nPlease provide your final answer based on these results."))
-
-                # Continue loop for agent to process results
-
-            else:
-                # No tool calls - agent is done
-                self.logger.debug(f"Agent completed after {iteration + 1} iterations")
+                    # Max iterations reached
+                    self.logger.warning(f"Agent reached max iterations ({self.max_iterations})")
+                    final_output = messages[-1].content if messages else "No output generated"
                     # Save conversation before returning
                     if isinstance(context, AgentContext):
                         await context.save_conversation_history(messages)
+
+                    # Emit completion checkpoint with max iterations flag
+                    if workflow_ctx:
+                        workflow_ctx._send_checkpoint("workflow.agent.completed", {
+                            "agent.name": self.name,
+                            "agent.iterations": self.max_iterations,
+                            "agent.tool_calls_count": len(all_tool_calls),
+                            "agent.max_iterations_reached": True,
+                            "output_length": len(final_output),
+                        })
+
                     return AgentResult(
-                output=
+                        output=final_output,
                         tool_calls=all_tool_calls,
                         context=context,
                     )
+            except Exception as e:
+                # Emit error checkpoint for observability
+                if workflow_ctx:
+                    workflow_ctx._send_checkpoint("workflow.agent.error", {
+                        "agent.name": self.name,
+                        "error": str(e),
+                        "error_type": type(e).__name__,
+                    })
+                raise
+        finally:
+            # Always reset context to prevent leakage between agent executions
+            from .context import _current_context
+            _current_context.reset(token)
+
+    async def resume_from_hitl(
+        self,
+        context: Context,
+        agent_context: Dict,
+        user_response: str,
+    ) -> AgentResult:
+        """
+        Resume agent execution after HITL pause.
+
+        This method reconstructs agent state from the checkpoint and injects
+        the user's response as the successful tool result, then continues
+        the conversation loop.
+
+        Args:
+            context: Current execution context (workflow or agent)
+            agent_context: Agent state from WaitingForUserInputException.agent_context
+            user_response: User's answer to the HITL question
+
+        Returns:
+            AgentResult with final output and tool calls
+        """
+        self.logger.info(f"Resuming agent '{self.name}' from HITL pause")
+
+        # 1. Restore conversation state
+        messages = [
+            Message(role=lm.MessageRole(msg["role"]), content=msg["content"])
+            for msg in agent_context["messages"]
+        ]
+        iteration = agent_context["iteration"]
+        all_tool_calls = agent_context["all_tool_calls"]
+
+        # 2. Restore partial tool results for current iteration
+        tool_results = agent_context["tool_results"]
+
+        # 3. Inject user response as successful tool result
+        pending_tool = agent_context["pending_tool_call"]
+        tool_results.append({
+            "tool": pending_tool["name"],
+            "result": json.dumps(user_response),
+            "error": None,
+        })
+
+        self.logger.debug(
+            f"Injected user response for tool '{pending_tool['name']}': {user_response}"
+        )
+
+        # 4. Add tool results to conversation
+        results_text = "\n".join([
+            f"Tool: {tr['tool']}\nResult: {tr['result']}"
+            if tr["error"] is None
+            else f"Tool: {tr['tool']}\nError: {tr['error']}"
+            for tr in tool_results
+        ])
+        messages.append(Message.user(
+            f"Tool results:\n{results_text}\n\n"
+            f"Please provide your final answer based on these results."
+        ))
+
+        # 5. Continue agent execution loop from next iteration
+        return await self._continue_execution_from_iteration(
+            context=context,
+            messages=messages,
+            iteration=iteration + 1,  # Next iteration
+            all_tool_calls=all_tool_calls,
+        )
+
+    async def _continue_execution_from_iteration(
+        self,
+        context: Context,
+        messages: List[Message],
+        iteration: int,
+        all_tool_calls: List[Dict],
+    ) -> AgentResult:
+        """
+        Continue agent execution from a specific iteration.
+
+        This is the core execution loop extracted to support both:
+        1. Normal execution (starting from iteration 0)
+        2. Resume after HITL (starting from iteration N)
 
-
-
-
-
-
-
-
-
-
-
+        Args:
+            context: Execution context
+            messages: Conversation history
+            iteration: Starting iteration number
+            all_tool_calls: Accumulated tool calls
+
+        Returns:
+            AgentResult with output and tool calls
+        """
+        # Extract workflow context for checkpointing
+        workflow_ctx = None
+        if hasattr(context, "_workflow_entity"):
+            workflow_ctx = context
+        elif hasattr(context, "_agent_data") and "_workflow_ctx" in context._agent_data:
+            workflow_ctx = context._agent_data["_workflow_ctx"]
+
+        # Prepare tool definitions
+        tool_defs = [
+            ToolDefinition(
+                name=name,
+                description=tool.description or f"Tool: {name}",
+                parameters=tool.input_schema if hasattr(tool, "input_schema") else {},
             )
+            for name, tool in self.tools.items()
+        ]
+
+        # Main iteration loop (continue from specified iteration)
+        while iteration < self.max_iterations:
+            self.logger.debug(f"Agent iteration {iteration + 1}/{self.max_iterations}")
+
+            # Call LLM for next response
+            if self._language_model:
+                # Legacy API: model is a LanguageModel instance
+                request = GenerateRequest(
+                    system_prompt=self.instructions,
+                    messages=messages,
+                    tools=tool_defs if tool_defs else [],
+                )
+                request.config.temperature = self.temperature
+                if self.max_tokens:
+                    request.config.max_tokens = self.max_tokens
+                if self.top_p:
+                    request.config.top_p = self.top_p
+                response = await self._language_model.generate(request)
+            else:
+                # New API: model is a string, create internal LM instance
+                request = GenerateRequest(
+                    model=self.model,
+                    system_prompt=self.instructions,
+                    messages=messages,
+                    tools=tool_defs if tool_defs else [],
+                )
+                request.config.temperature = self.temperature
+                if self.max_tokens:
+                    request.config.max_tokens = self.max_tokens
+                if self.top_p:
+                    request.config.top_p = self.top_p
+
+                # Create internal LM instance for generation
+                from .lm import _LanguageModel
+                provider, model_name = self.model.split('/', 1)
+                internal_lm = _LanguageModel(provider=provider.lower(), default_model=None)
+                response = await internal_lm.generate(request)
+
+            # Add assistant response to messages
+            messages.append(Message.assistant(response.text))
+
+            # Check if LLM wants to use tools
+            if response.tool_calls:
+                self.logger.debug(f"Agent calling {len(response.tool_calls)} tool(s)")
+
+                # Store current conversation in context for potential handoffs
+                if not hasattr(context, '_agent_data'):
+                    context._agent_data = {}
+                context._agent_data["_current_conversation"] = messages
+
+                # Execute tool calls
+                tool_results = []
+                for tool_call in response.tool_calls:
+                    tool_name = tool_call["name"]
+                    tool_args_str = tool_call["arguments"]
+
+                    # Track tool call
+                    all_tool_calls.append({
+                        "name": tool_name,
+                        "arguments": tool_args_str,
+                        "iteration": iteration + 1,
+                    })
+
+                    # Execute tool
+                    try:
+                        # Parse arguments
+                        tool_args = json.loads(tool_args_str)
+
+                        # Get tool
+                        tool = self.tools.get(tool_name)
+                        if not tool:
+                            result_text = f"Error: Tool '{tool_name}' not found"
+                        else:
+                            # Execute tool
+                            result = await tool.invoke(context, **tool_args)
+
+                            # Check if this was a handoff
+                            if isinstance(result, dict) and result.get("_handoff"):
+                                self.logger.info(
+                                    f"Handoff detected to '{result['to_agent']}', "
+                                    f"terminating current agent"
+                                )
+                                # Save conversation before returning
+                                if isinstance(context, AgentContext):
+                                    await context.save_conversation_history(messages)
+                                # Return immediately with handoff result
+                                return AgentResult(
+                                    output=result["output"],
+                                    tool_calls=all_tool_calls + result.get("tool_calls", []),
+                                    context=context,
+                                    handoff_to=result["to_agent"],
+                                    handoff_metadata=result,
+                                )
+
+                            result_text = json.dumps(result) if result else "null"
+
+                        tool_results.append(
+                            {"tool": tool_name, "result": result_text, "error": None}
+                        )
+
+                    except WaitingForUserInputException as e:
+                        # HITL PAUSE: Capture agent state and propagate exception
+                        self.logger.info(f"Agent pausing for user input at iteration {iteration}")
+
+                        # Serialize messages to dict format
+                        messages_dict = [
+                            {"role": msg.role.value, "content": msg.content}
+                            for msg in messages
+                        ]
+
+                        # Enhance exception with agent execution context
+                        from .exceptions import WaitingForUserInputException
+                        raise WaitingForUserInputException(
+                            question=e.question,
+                            input_type=e.input_type,
+                            options=e.options,
+                            checkpoint_state=e.checkpoint_state,
+                            agent_context={
+                                "agent_name": self.name,
+                                "iteration": iteration,
+                                "messages": messages_dict,
+                                "tool_results": tool_results,
+                                "pending_tool_call": {
+                                    "name": tool_call["name"],
+                                    "arguments": tool_call["arguments"],
+                                    "tool_call_index": response.tool_calls.index(tool_call),
+                                },
+                                "all_tool_calls": all_tool_calls,
+                                "model_config": {
+                                    "model": self.model,
+                                    "temperature": self.temperature,
+                                    "max_tokens": self.max_tokens,
+                                    "top_p": self.top_p,
+                                },
+                            },
+                        ) from e
+
+                    except Exception as e:
+                        # Regular tool errors - log and continue
+                        self.logger.error(f"Tool execution error: {e}")
+                        tool_results.append(
+                            {"tool": tool_name, "result": None, "error": str(e)}
+                        )
+
+                # Add tool results to conversation
+                results_text = "\n".join([
+                    f"Tool: {tr['tool']}\nResult: {tr['result']}"
+                    if tr["error"] is None
+                    else f"Tool: {tr['tool']}\nError: {tr['error']}"
+                    for tr in tool_results
+                ])
+                messages.append(Message.user(
+                    f"Tool results:\n{results_text}\n\n"
+                    f"Please provide your final answer based on these results."
+                ))
+
+                # Continue loop for agent to process results
+
+            else:
+                # No tool calls - agent is done
+                self.logger.debug(f"Agent completed after {iteration + 1} iterations")
+                # Save conversation before returning
+                if isinstance(context, AgentContext):
+                    await context.save_conversation_history(messages)
+
+                # Emit completion checkpoint
+                if workflow_ctx:
+                    workflow_ctx._send_checkpoint("workflow.agent.completed", {
+                        "agent.name": self.name,
+                        "agent.iterations": iteration + 1,
+                        "agent.tool_calls_count": len(all_tool_calls),
+                        "output_length": len(response.text),
+                    })
+
+                return AgentResult(
+                    output=response.text,
+                    tool_calls=all_tool_calls,
+                    context=context,
+                )
+
+            iteration += 1
+
+        # Max iterations reached
+        self.logger.warning(f"Agent reached max iterations ({self.max_iterations})")
+        final_output = messages[-1].content if messages else "No output generated"
+        # Save conversation before returning
+        if isinstance(context, AgentContext):
+            await context.save_conversation_history(messages)
+
+        # Emit completion checkpoint with max iterations flag
+        if workflow_ctx:
+            workflow_ctx._send_checkpoint("workflow.agent.completed", {
+                "agent.name": self.name,
+                "agent.iterations": self.max_iterations,
+                "agent.tool_calls_count": len(all_tool_calls),
+                "agent.max_iterations_reached": True,
+                "output_length": len(final_output),
+            })
+
+        return AgentResult(
+            output=final_output,
+            tool_calls=all_tool_calls,
+            context=context,
+        )
 
 
 def agent(