agnt5 0.2.8a2-cp310-abi3-manylinux_2_34_aarch64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of agnt5 might be problematic.

agnt5/agent.py ADDED
@@ -0,0 +1,956 @@
+ """Agent component implementation for AGNT5 SDK.
+
+ Provides a simple agent with external LLM integration and tool orchestration.
+ Future: platform-backed agents with durable execution and multi-agent coordination.
+ """
+
+ from __future__ import annotations
+
+ import functools
+ import json
+ import logging
+ from typing import Any, Callable, Dict, List, Optional
+
+ from .context import Context
+ from . import lm
+ from .lm import GenerateRequest, GenerateResponse, LanguageModel, Message, ModelConfig, ToolDefinition
+ from .tool import Tool, ToolRegistry
+ from ._telemetry import setup_module_logger
+
+ logger = setup_module_logger(__name__)
+
+ # Global agent registry
+ _AGENT_REGISTRY: Dict[str, "Agent"] = {}
+
+
+ class AgentContext(Context):
+     """
+     Context for agent execution with conversation state management.
+
+     Extends base Context with:
+     - State management via EntityStateManager
+     - Conversation history persistence
+     - Context inheritance (child agents share parent's state)
+
+     Three initialization modes:
+     1. Standalone: Creates own state manager (playground testing)
+     2. Inherit WorkflowContext: Shares parent's state manager
+     3. Inherit parent AgentContext: Shares parent's state manager
+
+     Example:
+         ```python
+         # Standalone agent with conversation history
+         ctx = AgentContext(run_id="session-1", agent_name="tutor")
+         result = await agent.run("Hello", context=ctx)
+         result = await agent.run("Continue", context=ctx)  # Remembers previous message
+
+         # Agent in workflow - shares workflow state
+         @workflow
+         async def research_workflow(ctx: WorkflowContext):
+             agent_result = await research_agent.run("Find AI trends", context=ctx)
+             # Agent has access to workflow state via inherited context
+         ```
+     """
+
+     def __init__(
+         self,
+         run_id: str,
+         agent_name: str,
+         session_id: Optional[str] = None,
+         state_manager: Optional[Any] = None,
+         parent_context: Optional[Context] = None,
+         attempt: int = 0,
+         runtime_context: Optional[Any] = None,
+     ):
+         """
+         Initialize agent context.
+
+         Args:
+             run_id: Unique execution identifier
+             agent_name: Name of the agent
+             session_id: Session identifier for conversation history (default: run_id)
+             state_manager: Optional state manager (for context inheritance)
+             parent_context: Parent context to inherit state from
+             attempt: Retry attempt number
+             runtime_context: RuntimeContext for trace correlation
+         """
+         super().__init__(run_id, attempt, runtime_context)
+
+         self._agent_name = agent_name
+         self._session_id = session_id or run_id
+
+         # Determine state manager based on parent context
+         from .entity import EntityStateManager, _get_state_manager
+
+         if state_manager:
+             # Explicit state manager provided
+             self._state_manager = state_manager
+             logger.debug("AgentContext using provided state manager")
+         elif parent_context:
+             # Try to inherit state manager from parent
+             try:
+                 # Check if parent is WorkflowContext or AgentContext
+                 if hasattr(parent_context, '_workflow_entity'):
+                     # WorkflowContext - get state manager from worker context
+                     self._state_manager = _get_state_manager()
+                     logger.debug("AgentContext inheriting state from WorkflowContext")
+                 elif hasattr(parent_context, '_state_manager'):
+                     # Parent AgentContext - share state manager
+                     self._state_manager = parent_context._state_manager
+                     logger.debug("AgentContext inheriting state from parent AgentContext")
+                 else:
+                     # FunctionContext or base Context - create new state manager
+                     self._state_manager = EntityStateManager()
+                     logger.debug("AgentContext created new state manager (parent has no state)")
+             except RuntimeError:
+                 # _get_state_manager() failed (not in worker context) - create standalone
+                 self._state_manager = EntityStateManager()
+                 logger.debug("AgentContext created standalone state manager")
+         else:
+             # Standalone - create new state manager
+             self._state_manager = EntityStateManager()
+             logger.debug("AgentContext created standalone state manager")
+
+         # Conversation key for state storage
+         self._conversation_key = f"agent:{agent_name}:{self._session_id}:messages"
+
+     @property
+     def state(self):
+         """
+         Get state interface for agent state management.
+
+         Returns:
+             EntityState instance for state operations
+
+         Example:
+             # Store conversation history
+             messages = ctx.state.get(f"agent:{agent_name}:{session_id}:messages", [])
+             messages.append({"role": "user", "content": "Hello"})
+             ctx.state.set(f"agent:{agent_name}:{session_id}:messages", messages)
+
+             # Store agent-specific data
+             ctx.state.set("research_results", data)
+         """
+         from .entity import EntityState
+
+         # Use agent's conversation key as the state key
+         state_key = ("agent", self._conversation_key)
+         state_dict = self._state_manager.get_or_create_state(state_key)
+         return EntityState(state_dict)
+
+     @property
+     def session_id(self) -> str:
+         """Get session identifier for this agent context."""
+         return self._session_id
+
+     def get_conversation_history(self) -> List[Message]:
+         """
+         Retrieve conversation history from state.
+
+         Returns:
+             List of Message objects from conversation history
+         """
+         messages_data = self.state.get(self._conversation_key, [])
+
+         # Convert dict representations back to Message objects
+         messages = []
+         for msg_dict in messages_data:
+             if isinstance(msg_dict, dict):
+                 role = msg_dict.get("role", "user")
+                 content = msg_dict.get("content", "")
+                 if role == "user":
+                     messages.append(Message.user(content))
+                 elif role == "assistant":
+                     messages.append(Message.assistant(content))
+                 else:
+                     # Generic message - create with MessageRole enum
+                     from .lm import MessageRole
+                     msg_role = MessageRole(role) if role in ("user", "assistant", "system") else MessageRole.USER
+                     msg = Message(role=msg_role, content=content)
+                     messages.append(msg)
+             else:
+                 # Already a Message object
+                 messages.append(msg_dict)
+
+         return messages
+
+     def save_conversation_history(self, messages: List[Message]) -> None:
+         """
+         Save conversation history to state.
+
+         Args:
+             messages: List of Message objects to persist
+         """
+         # Convert Message objects to dict for JSON serialization
+         messages_data = []
+         for msg in messages:
+             messages_data.append({
+                 "role": msg.role.value if hasattr(msg.role, 'value') else str(msg.role),
+                 "content": msg.content
+             })
+
+         self.state.set(self._conversation_key, messages_data)
+
+
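+ # Illustrative sketch (not part of the published module): round-tripping
+ # conversation history through AgentContext state. Messages are persisted as
+ # plain dicts, so reading the same conversation key back rebuilds the same
+ # Message list.
+ #
+ #     ctx = AgentContext(run_id="run-1", agent_name="tutor", session_id="session-1")
+ #     history = ctx.get_conversation_history()      # [] on first use
+ #     history.append(Message.user("Hello"))
+ #     ctx.save_conversation_history(history)
+ #
+ #     # Reading back from the same context returns the saved history
+ #     restored = ctx.get_conversation_history()
+ #     assert restored[-1].content == "Hello"
+
+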
+ class Handoff:
+     """Configuration for agent-to-agent handoff.
+
+     Handoffs enable one agent to delegate control to another specialized agent,
+     following the pattern popularized by LangGraph and OpenAI Agents SDK.
+
+     The handoff is exposed to the LLM as a tool named 'transfer_to_{agent_name}'
+     that allows explicit delegation with conversation history.
+
+     Example:
+         ```python
+         specialist = Agent(name="specialist", ...)
+
+         # Create handoff configuration
+         handoff_to_specialist = Handoff(
+             agent=specialist,
+             description="Transfer to specialist for detailed analysis"
+         )
+
+         # Use in coordinator agent
+         coordinator = Agent(
+             name="coordinator",
+             handoffs=[handoff_to_specialist]
+         )
+         ```
+     """
+
+     def __init__(
+         self,
+         agent: "Agent",
+         description: Optional[str] = None,
+         tool_name: Optional[str] = None,
+         pass_full_history: bool = True,
+     ):
+         """Initialize handoff configuration.
+
+         Args:
+             agent: Target agent to hand off to
+             description: Description shown to LLM (defaults to agent instructions)
+             tool_name: Custom tool name (defaults to 'transfer_to_{agent_name}')
+             pass_full_history: Whether to pass full conversation history to target agent
+         """
+         self.agent = agent
+         self.description = description or agent.instructions or f"Transfer to {agent.name}"
+         self.tool_name = tool_name or f"transfer_to_{agent.name}"
+         self.pass_full_history = pass_full_history
+
+
+ def handoff(
+     agent: "Agent",
+     description: Optional[str] = None,
+     tool_name: Optional[str] = None,
+     pass_full_history: bool = True,
+ ) -> Handoff:
+     """Create a handoff configuration for agent-to-agent delegation.
+
+     This is a convenience function for creating Handoff instances with a clean API.
+
+     Args:
+         agent: Target agent to hand off to
+         description: Description shown to LLM
+         tool_name: Custom tool name
+         pass_full_history: Whether to pass full conversation history
+
+     Returns:
+         Handoff configuration
+
+     Example:
+         ```python
+         from agnt5 import Agent, handoff
+
+         research_agent = Agent(name="researcher", ...)
+         writer_agent = Agent(name="writer", ...)
+
+         coordinator = Agent(
+             name="coordinator",
+             handoffs=[
+                 handoff(research_agent, "Transfer for research tasks"),
+                 handoff(writer_agent, "Transfer for writing tasks"),
+             ]
+         )
+         ```
+     """
+     return Handoff(
+         agent=agent,
+         description=description,
+         tool_name=tool_name,
+         pass_full_history=pass_full_history,
+     )
+
+
+ class AgentRegistry:
+     """Registry for agents."""
+
+     @staticmethod
+     def register(agent: "Agent") -> None:
+         """Register an agent."""
+         if agent.name in _AGENT_REGISTRY:
+             logger.warning(f"Overwriting existing agent '{agent.name}'")
+         _AGENT_REGISTRY[agent.name] = agent
+         logger.debug(f"Registered agent '{agent.name}'")
+
+     @staticmethod
+     def get(name: str) -> Optional["Agent"]:
+         """Get agent by name."""
+         return _AGENT_REGISTRY.get(name)
+
+     @staticmethod
+     def all() -> Dict[str, "Agent"]:
+         """Get all registered agents."""
+         return _AGENT_REGISTRY.copy()
+
+     @staticmethod
+     def clear() -> None:
+         """Clear all registered agents."""
+         _AGENT_REGISTRY.clear()
+         logger.debug("Cleared agent registry")
+
+
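+ # Illustrative sketch (not part of the published module): typical registry use.
+ # Registration normally happens via the @agent decorator; workers can then look
+ # agents up by name.
+ #
+ #     AgentRegistry.register(my_agent)            # usually done by @agent
+ #     same = AgentRegistry.get("my_agent")        # -> Agent or None
+ #     for name, registered in AgentRegistry.all().items():
+ #         print(name, registered.model_name)
+ #     AgentRegistry.clear()                       # e.g. between test runs
+
+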
+ class AgentResult:
+     """Result from agent execution."""
+
+     def __init__(
+         self,
+         output: str,
+         tool_calls: List[Dict[str, Any]],
+         context: Context,
+         handoff_to: Optional[str] = None,
+         handoff_metadata: Optional[Dict[str, Any]] = None,
+     ):
+         self.output = output
+         self.tool_calls = tool_calls
+         self.context = context
+         self.handoff_to = handoff_to  # Name of agent that was handed off to
+         self.handoff_metadata = handoff_metadata or {}  # Additional handoff info
+
+
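+ # Illustrative sketch (not part of the published module): inspecting a result.
+ # Each entry in tool_calls records the tool name, its raw JSON arguments, and
+ # the reasoning iteration that produced it.
+ #
+ #     result = await agent.run("Summarize recent AI news")
+ #     print(result.output)
+ #     for call in result.tool_calls:
+ #         print(call["name"], call["arguments"], call["iteration"])
+
+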
+ class Agent:
+     """Autonomous LLM-driven agent with tool orchestration.
+
+     Current features:
+     - LLM integration (OpenAI, Anthropic, etc.)
+     - Tool selection and execution
+     - Multi-turn reasoning
+     - Context and state management
+
+     Future enhancements:
+     - Durable execution with checkpointing
+     - Multi-agent coordination
+     - Platform-backed tool execution
+     - Streaming responses
+
+     Example:
+         ```python
+         from agnt5 import Agent, tool, Context
+
+         @tool(auto_schema=True)
+         async def search_web(ctx: Context, query: str) -> List[Dict]:
+             # Search implementation
+             return [{"title": "Result", "url": "..."}]
+
+         # Simple usage with model string
+         agent = Agent(
+             name="researcher",
+             model="openai/gpt-4o-mini",
+             instructions="You are a research assistant.",
+             tools=[search_web],
+             temperature=0.7
+         )
+
+         result = await agent.run("What are the latest AI trends?")
+         print(result.output)
+         ```
+     """
+
+     def __init__(
+         self,
+         name: str,
+         model: Any,  # Can be string like "openai/gpt-4o-mini" OR LanguageModel instance
+         instructions: str,
+         tools: Optional[List[Any]] = None,
+         handoffs: Optional[List[Handoff]] = None,
+         temperature: float = 0.7,
+         max_tokens: Optional[int] = None,
+         top_p: Optional[float] = None,
+         model_config: Optional[ModelConfig] = None,
+         max_iterations: int = 10,
+         model_name: Optional[str] = None,  # For backwards compatibility with tests
+     ):
+         """Initialize agent.
+
+         Args:
+             name: Agent name/identifier
+             model: Model string with provider prefix (e.g., "openai/gpt-4o-mini") OR LanguageModel instance
+             instructions: System instructions for the agent
+             tools: List of tools available to the agent (functions, Tool instances, or Agent instances)
+             handoffs: List of Handoff configurations for agent-to-agent delegation
+             temperature: LLM temperature (0.0 to 1.0)
+             max_tokens: Maximum tokens to generate
+             top_p: Nucleus sampling parameter
+             model_config: Optional advanced configuration (custom endpoints, headers, etc.)
+             max_iterations: Maximum reasoning iterations
+             model_name: Optional model name (for backwards compatibility, used when model is a LanguageModel instance)
+         """
+         self.name = name
+         self.instructions = instructions
+         self.temperature = temperature
+         self.max_tokens = max_tokens
+         self.top_p = top_p
+         self.model_config = model_config
+         self.max_iterations = max_iterations
+
+         # Support both string model names and LanguageModel instances
+         if isinstance(model, str):
+             # New API: model is a string like "openai/gpt-4o-mini"
+             self.model = model
+             self.model_name = model_name or model
+             self._language_model = None  # Will create on demand
+         elif isinstance(model, LanguageModel):
+             # Old API (for tests): model is a LanguageModel instance
+             self._language_model = model
+             self.model = model  # Keep for backwards compatibility
+             self.model_name = model_name or "mock-model"
+         else:
+             raise TypeError(f"model must be a string or LanguageModel instance, got {type(model)}")
+
+         # Store handoffs for building handoff tools
+         self.handoffs = handoffs or []
+
+         # Build tool registry (includes regular tools, agent-as-tools, and handoff tools)
+         self.tools: Dict[str, Tool] = {}
+         if tools:
+             for tool_item in tools:
+                 # Check if it's an Agent instance (agents-as-tools pattern)
+                 if isinstance(tool_item, Agent):
+                     agent_tool = tool_item.to_tool()
+                     self.tools[agent_tool.name] = agent_tool
+                     logger.info(f"Added agent '{tool_item.name}' as tool to '{self.name}'")
+                 # Check if it's a Tool instance
+                 elif isinstance(tool_item, Tool):
+                     self.tools[tool_item.name] = tool_item
+                 # Check if it's a decorated function with config
+                 elif hasattr(tool_item, "_agnt5_config"):
+                     # Try to get from ToolRegistry first
+                     tool_config = tool_item._agnt5_config
+                     tool_instance = ToolRegistry.get(tool_config.name)
+                     if tool_instance:
+                         self.tools[tool_instance.name] = tool_instance
+                 # Otherwise try to look up by function name
+                 elif callable(tool_item):
+                     # Try to find in registry by function name
+                     tool_name = tool_item.__name__
+                     tool_instance = ToolRegistry.get(tool_name)
+                     if tool_instance:
+                         self.tools[tool_instance.name] = tool_instance
+
+         # Build handoff tools
+         for handoff_config in self.handoffs:
+             handoff_tool = self._create_handoff_tool(handoff_config)
+             self.tools[handoff_tool.name] = handoff_tool
+             logger.info(f"Added handoff tool '{handoff_tool.name}' to '{self.name}'")
+
+         self.logger = logging.getLogger(f"agnt5.agent.{name}")
+
+         # Define schemas based on the run method signature
+         # Input: user_message (string)
+         self.input_schema = {
+             "type": "object",
+             "properties": {
+                 "user_message": {"type": "string"}
+             },
+             "required": ["user_message"]
+         }
+         # Output: AgentResult with output and tool_calls
+         self.output_schema = {
+             "type": "object",
+             "properties": {
+                 "output": {"type": "string"},
+                 "tool_calls": {
+                     "type": "array",
+                     "items": {"type": "object"}
+                 }
+             }
+         }
+
+         # Store metadata
+         self.metadata = {
+             "description": instructions,
+             "model": model
+         }
+
+     def to_tool(self, description: Optional[str] = None) -> Tool:
+         """Convert this agent to a Tool that can be used by other agents.
+
+         This enables the agents-as-tools pattern, where one agent can invoke another
+         agent as if it were a regular tool.
+
+         Args:
+             description: Optional custom description (defaults to agent instructions)
+
+         Returns:
+             Tool instance that wraps this agent
+
+         Example:
+             ```python
+             research_agent = Agent(
+                 name="researcher",
+                 model="openai/gpt-4o-mini",
+                 instructions="You are a research specialist."
+             )
+
+             # Use research agent as a tool for another agent
+             coordinator = Agent(
+                 name="coordinator",
+                 model="openai/gpt-4o-mini",
+                 instructions="Coordinate tasks using specialist agents.",
+                 tools=[research_agent.to_tool()]
+             )
+             ```
+         """
+         agent_name = self.name
+
+         # Handler that runs the agent
+         async def agent_tool_handler(ctx: Context, user_message: str) -> str:
+             """Execute agent and return output."""
+             ctx.logger.info(f"Invoking agent '{agent_name}' as tool")
+
+             # Run the agent with the user message
+             result = await self.run(user_message, context=ctx)
+
+             return result.output
+
+         # Create tool with agent's schema
+         tool_description = description or self.instructions or f"Agent: {self.name}"
+
+         agent_tool = Tool(
+             name=self.name,
+             description=tool_description,
+             handler=agent_tool_handler,
+             input_schema=self.input_schema,
+             auto_schema=False,
+         )
+
+         return agent_tool
+
+     def _create_handoff_tool(self, handoff_config: Handoff, current_messages_callback: Optional[Callable] = None) -> Tool:
+         """Create a tool for handoff to another agent.
+
+         Args:
+             handoff_config: Handoff configuration
+             current_messages_callback: Optional callback to get current conversation messages
+
+         Returns:
+             Tool instance that executes the handoff
+         """
+         target_agent = handoff_config.agent
+         tool_name = handoff_config.tool_name
+
+         # Handler that executes the handoff
+         async def handoff_handler(ctx: Context, message: str) -> Dict[str, Any]:
+             """Transfer control to target agent."""
+             ctx.logger.info(
+                 f"Handoff from '{self.name}' to '{target_agent.name}': {message}"
+             )
+
+             # If we should pass conversation history, add it to context
+             if handoff_config.pass_full_history:
+                 # Get current conversation from the agent's run loop
+                 # (run() stores this on the context before executing tool calls)
+                 conversation_history = getattr(ctx, '_agent_data', {}).get("_current_conversation", [])
+
+                 if conversation_history:
+                     ctx.logger.info(
+                         f"Passing {len(conversation_history)} messages to target agent"
+                     )
+                     # Store in context for target agent to optionally use
+                     if not hasattr(ctx, '_agent_data'):
+                         ctx._agent_data = {}
+                     ctx._agent_data["_handoff_conversation_history"] = conversation_history
+
+             # Execute target agent with the message and shared context
+             result = await target_agent.run(message, context=ctx)
+
+             # Store handoff metadata - this signals that a handoff occurred
+             handoff_data = {
+                 "_handoff": True,
+                 "from_agent": self.name,
+                 "to_agent": target_agent.name,
+                 "message": message,
+                 "output": result.output,
+                 "tool_calls": result.tool_calls,
+             }
+
+             if not hasattr(ctx, '_agent_data'):
+                 ctx._agent_data = {}
+             ctx._agent_data["_handoff_result"] = handoff_data
+
+             # Return the handoff data (will be detected in run() loop)
+             return handoff_data
+
+         # Create tool with handoff schema
+         handoff_tool = Tool(
+             name=tool_name,
+             description=handoff_config.description,
+             handler=handoff_handler,
+             input_schema={
+                 "type": "object",
+                 "properties": {
+                     "message": {
+                         "type": "string",
+                         "description": "Message or task to pass to the target agent"
+                     }
+                 },
+                 "required": ["message"]
+             },
+             auto_schema=False,
+         )
+
+         return handoff_tool
+
+     async def run(
+         self,
+         user_message: str,
+         context: Optional[Context] = None,
+     ) -> AgentResult:
+         """Run agent to completion.
+
+         Args:
+             user_message: User's input message
+             context: Optional context (auto-created if not provided)
+
+         Returns:
+             AgentResult with output and execution details
+
+         Example:
+             ```python
+             result = await agent.run("Analyze recent tech news")
+             print(result.output)
+             ```
+         """
+         # Create or adapt context
+         if context is None:
+             # Standalone execution - create AgentContext
+             import uuid
+             run_id = f"agent-{self.name}-{uuid.uuid4().hex[:8]}"
+             context = AgentContext(
+                 run_id=run_id,
+                 agent_name=self.name,
+             )
+         elif isinstance(context, AgentContext):
+             # Already AgentContext - use as-is
+             pass
+         elif hasattr(context, '_workflow_entity'):
+             # WorkflowContext - create AgentContext that inherits state
+             run_id = f"{context.run_id}:agent:{self.name}"
+             context = AgentContext(
+                 run_id=run_id,
+                 agent_name=self.name,
+                 session_id=context.run_id,  # Share workflow's session
+                 parent_context=context,
+             )
+         else:
+             # FunctionContext or other - create new AgentContext
+             run_id = f"{context.run_id}:agent:{self.name}"
+             context = AgentContext(
+                 run_id=run_id,
+                 agent_name=self.name,
+             )
+
+         # Load conversation history from state (if AgentContext)
+         if isinstance(context, AgentContext):
+             messages: List[Message] = context.get_conversation_history()
+             # Add new user message
+             messages.append(Message.user(user_message))
+             # Save updated conversation
+             context.save_conversation_history(messages)
+         else:
+             # Fallback for non-AgentContext (shouldn't happen with code above)
+             messages = [Message.user(user_message)]
+
+         # Create span for agent execution with trace linking
+         from ._core import create_span
+
+         with create_span(
+             self.name,
+             "agent",
+             context._runtime_context if hasattr(context, "_runtime_context") else None,
+             {
+                 "agent.name": self.name,
+                 "agent.model": self.model_name,  # Use model_name (always a string)
+                 "agent.max_iterations": str(self.max_iterations),
+             },
+         ) as span:
+             all_tool_calls: List[Dict[str, Any]] = []
+
+             # Reasoning loop
+             for iteration in range(self.max_iterations):
+                 # Build tool definitions for LLM
+                 tool_defs = [
+                     ToolDefinition(
+                         name=tool.name,
+                         description=tool.description,
+                         parameters=tool.input_schema,
+                     )
+                     for tool in self.tools.values()
+                 ]
+
+                 # Convert messages to dict format for lm.generate()
+                 messages_dict = []
+                 for msg in messages:
+                     messages_dict.append({
+                         "role": msg.role.value,
+                         "content": msg.content
+                     })
+
+                 # Call LLM
+                 # Check if we have a legacy LanguageModel instance or need to create one
+                 if self._language_model is not None:
+                     # Legacy API: use provided LanguageModel instance
+                     request = GenerateRequest(
+                         model="mock-model",  # Not used by MockLanguageModel
+                         system_prompt=self.instructions,
+                         messages=messages,
+                         tools=tool_defs if tool_defs else [],
+                     )
+                     request.config.temperature = self.temperature
+                     if self.max_tokens:
+                         request.config.max_tokens = self.max_tokens
+                     if self.top_p:
+                         request.config.top_p = self.top_p
+                     response = await self._language_model.generate(request)
+                 else:
+                     # New API: model is a string, create internal LM instance
+                     request = GenerateRequest(
+                         model=self.model,
+                         system_prompt=self.instructions,
+                         messages=messages,
+                         tools=tool_defs if tool_defs else [],
+                     )
+                     request.config.temperature = self.temperature
+                     if self.max_tokens:
+                         request.config.max_tokens = self.max_tokens
+                     if self.top_p:
+                         request.config.top_p = self.top_p
+
+                     # Create internal LM instance for generation
+                     # TODO: Use model_config when provided
+                     from .lm import _LanguageModel
+                     provider, _ = self.model.split('/', 1)
+                     internal_lm = _LanguageModel(provider=provider.lower(), default_model=None)
+                     response = await internal_lm.generate(request)
+
+                 # Add assistant response to messages
+                 messages.append(Message.assistant(response.text))
+
+                 # Check if LLM wants to use tools
+                 if response.tool_calls:
+                     self.logger.debug(f"Agent calling {len(response.tool_calls)} tool(s)")
+
+                     # Store current conversation in context for potential handoffs
+                     # Use a simple dict attribute since we don't need full state persistence for this
+                     if not hasattr(context, '_agent_data'):
+                         context._agent_data = {}
+                     context._agent_data["_current_conversation"] = messages
+
+                     # Execute tool calls
+                     tool_results = []
+                     for tool_call in response.tool_calls:
+                         tool_name = tool_call["name"]
+                         tool_args_str = tool_call["arguments"]
+
+                         # Track tool call
+                         all_tool_calls.append(
+                             {
+                                 "name": tool_name,
+                                 "arguments": tool_args_str,
+                                 "iteration": iteration + 1,
+                             }
+                         )
+
+                         # Execute tool
+                         try:
+                             # Parse arguments
+                             tool_args = json.loads(tool_args_str)
+
+                             # Get tool
+                             tool = self.tools.get(tool_name)
+                             if not tool:
+                                 result_text = f"Error: Tool '{tool_name}' not found"
+                             else:
+                                 # Execute tool
+                                 result = await tool.invoke(context, **tool_args)
+
+                                 # Check if this was a handoff
+                                 if isinstance(result, dict) and result.get("_handoff"):
+                                     self.logger.info(
+                                         f"Handoff detected to '{result['to_agent']}', "
+                                         f"terminating current agent"
+                                     )
+                                     # Save conversation before returning
+                                     if isinstance(context, AgentContext):
+                                         context.save_conversation_history(messages)
+                                     # Return immediately with handoff result
+                                     return AgentResult(
+                                         output=result["output"],
+                                         tool_calls=all_tool_calls + result.get("tool_calls", []),
+                                         context=context,
+                                         handoff_to=result["to_agent"],
+                                         handoff_metadata=result,
+                                     )
+
+                                 result_text = json.dumps(result) if result is not None else "null"
+
+                             tool_results.append(
+                                 {"tool": tool_name, "result": result_text, "error": None}
+                             )
+
+                         except Exception as e:
+                             self.logger.error(f"Tool execution error: {e}")
+                             tool_results.append(
+                                 {"tool": tool_name, "result": None, "error": str(e)}
+                             )
+
+                     # Add tool results to conversation
+                     results_text = "\n".join(
+                         [
+                             f"Tool: {tr['tool']}\nResult: {tr['result']}"
+                             if tr["error"] is None
+                             else f"Tool: {tr['tool']}\nError: {tr['error']}"
+                             for tr in tool_results
+                         ]
+                     )
+                     messages.append(Message.user(f"Tool results:\n{results_text}\n\nPlease provide your final answer based on these results."))
+
+                     # Continue loop for agent to process results
+
+                 else:
+                     # No tool calls - agent is done
+                     self.logger.debug(f"Agent completed after {iteration + 1} iterations")
+                     # Save conversation before returning
+                     if isinstance(context, AgentContext):
+                         context.save_conversation_history(messages)
+                     return AgentResult(
+                         output=response.text,
+                         tool_calls=all_tool_calls,
+                         context=context,
+                     )
+
+             # Max iterations reached
+             self.logger.warning(f"Agent reached max iterations ({self.max_iterations})")
+             final_output = messages[-1].content if messages else "No output generated"
+             # Save conversation before returning
+             if isinstance(context, AgentContext):
+                 context.save_conversation_history(messages)
+             return AgentResult(
+                 output=final_output,
+                 tool_calls=all_tool_calls,
+                 context=context,
+             )
+
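+ # Illustrative sketch (not part of the published module): running a coordinator
+ # and detecting that it delegated via a handoff tool. When a handoff fires,
+ # run() returns early with the target agent's output and sets handoff_to and
+ # handoff_metadata on the result.
+ #
+ #     result = await coordinator.run("Write a report on AI trends")
+ #     if result.handoff_to:
+ #         meta = result.handoff_metadata
+ #         print(f"Delegated: {meta['from_agent']} -> {meta['to_agent']}")
+ #     print(result.output)
+
+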
+
+ def agent(
+     _func: Optional[Callable] = None,
+     *,
+     name: Optional[str] = None,
+     model: Optional[LanguageModel] = None,
+     instructions: Optional[str] = None,
+     tools: Optional[List[Any]] = None,
+     model_name: str = "gpt-4o-mini",
+     temperature: float = 0.7,
+     max_iterations: int = 10,
+ ) -> Callable:
+     """
+     Decorator that defines an agent from a function and registers it automatically.
+
+     This decorator allows you to define agents as functions that create and return Agent instances.
+     The agent is registered in the AgentRegistry for discovery by the worker.
+
+     Args:
+         name: Agent name (defaults to function name)
+         model: Language model instance (required if the function does not return an Agent)
+         instructions: System instructions (required if the function does not return an Agent)
+         tools: List of tools available to the agent
+         model_name: Model name to use
+         temperature: LLM temperature
+         max_iterations: Maximum reasoning iterations
+
+     Returns:
+         The created Agent instance, so it can be used directly
+
+     Example:
+         ```python
+         from agnt5 import agent, tool
+         from agnt5.lm import OpenAILanguageModel
+
+         @agent(
+             name="research_agent",
+             model=OpenAILanguageModel(),
+             instructions="You are a research assistant.",
+             tools=[search_web, analyze_data]
+         )
+         def create_researcher():
+             # Agent is created and registered automatically
+             pass
+
+         # Or create the agent directly
+         @agent
+         def my_agent():
+             from agnt5.lm import OpenAILanguageModel
+             return Agent(
+                 name="my_agent",
+                 model=OpenAILanguageModel(),
+                 instructions="You are a helpful assistant."
+             )
+         ```
+     """
+
+     def decorator(func: Callable) -> Callable:
+         # Determine agent name
+         agent_name = name or func.__name__
+
+         # Create the agent
+         @functools.wraps(func)
+         def wrapper(*args, **kwargs) -> Agent:
+             # Check if function returns an Agent
+             result = func(*args, **kwargs)
+             if isinstance(result, Agent):
+                 # Function creates its own agent
+                 agent_instance = result
+             elif model is not None and instructions is not None:
+                 # Create agent from decorator parameters
+                 agent_instance = Agent(
+                     name=agent_name,
+                     model=model,
+                     instructions=instructions,
+                     tools=tools,
+                     model_name=model_name,
+                     temperature=temperature,
+                     max_iterations=max_iterations,
+                 )
+             else:
+                 raise ValueError(
+                     f"Agent decorator for '{agent_name}' requires either "
+                     "the decorated function to return an Agent instance, "
+                     "or 'model' and 'instructions' parameters to be provided"
+                 )
+
+             # Register agent
+             AgentRegistry.register(agent_instance)
+             return agent_instance
+
+         # Create agent immediately and store reference
+         agent_instance = wrapper()
+
+         # Return the agent instance itself (so it can be used directly)
+         return agent_instance
+
+     if _func is None:
+         return decorator
+     return decorator(_func)