agnt5-0.2.1-cp39-abi3-manylinux_2_34_aarch64.whl → agnt5-0.2.6-cp39-abi3-manylinux_2_34_aarch64.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.

Potentially problematic release.

agnt5/agent.py CHANGED
@@ -1,7 +1,7 @@
 """Agent component implementation for AGNT5 SDK.
 
-Phase 1: Simple agent with external LLM integration and tool orchestration.
-Phase 2: Platform-backed agents with durable execution and multi-agent coordination.
+Provides simple agent with external LLM integration and tool orchestration.
+Future: Platform-backed agents with durable execution and multi-agent coordination.
 """
 
 from __future__ import annotations
@@ -13,7 +13,7 @@ from typing import Any, Callable, Dict, List, Optional
 
 from .context import Context
 from . import lm
-from .lm import GenerateRequest, GenerateResponse, Message, ModelConfig, ToolDefinition
+from .lm import GenerateRequest, GenerateResponse, LanguageModel, Message, ModelConfig, ToolDefinition
 from .tool import Tool, ToolRegistry
 from ._telemetry import setup_module_logger
 
@@ -23,6 +23,97 @@ logger = setup_module_logger(__name__)
 
 _AGENT_REGISTRY: Dict[str, "Agent"] = {}
 
 
+class Handoff:
+    """Configuration for agent-to-agent handoff.
+
+    Handoffs enable one agent to delegate control to another specialized agent,
+    following the pattern popularized by LangGraph and OpenAI Agents SDK.
+
+    The handoff is exposed to the LLM as a tool named 'transfer_to_{agent_name}'
+    that allows explicit delegation with conversation history.
+
+    Example:
+        ```python
+        specialist = Agent(name="specialist", ...)
+
+        # Create handoff configuration
+        handoff_to_specialist = Handoff(
+            agent=specialist,
+            description="Transfer to specialist for detailed analysis"
+        )
+
+        # Use in coordinator agent
+        coordinator = Agent(
+            name="coordinator",
+            handoffs=[handoff_to_specialist]
+        )
+        ```
+    """
+
+    def __init__(
+        self,
+        agent: "Agent",
+        description: Optional[str] = None,
+        tool_name: Optional[str] = None,
+        pass_full_history: bool = True,
+    ):
+        """Initialize handoff configuration.
+
+        Args:
+            agent: Target agent to hand off to
+            description: Description shown to LLM (defaults to agent instructions)
+            tool_name: Custom tool name (defaults to 'transfer_to_{agent_name}')
+            pass_full_history: Whether to pass full conversation history to target agent
+        """
+        self.agent = agent
+        self.description = description or agent.instructions or f"Transfer to {agent.name}"
+        self.tool_name = tool_name or f"transfer_to_{agent.name}"
+        self.pass_full_history = pass_full_history
+
+
+def handoff(
+    agent: "Agent",
+    description: Optional[str] = None,
+    tool_name: Optional[str] = None,
+    pass_full_history: bool = True,
+) -> Handoff:
+    """Create a handoff configuration for agent-to-agent delegation.
+
+    This is a convenience function for creating Handoff instances with a clean API.
+
+    Args:
+        agent: Target agent to hand off to
+        description: Description shown to LLM
+        tool_name: Custom tool name
+        pass_full_history: Whether to pass full conversation history
+
+    Returns:
+        Handoff configuration
+
+    Example:
+        ```python
+        from agnt5 import Agent, handoff
+
+        research_agent = Agent(name="researcher", ...)
+        writer_agent = Agent(name="writer", ...)
+
+        coordinator = Agent(
+            name="coordinator",
+            handoffs=[
+                handoff(research_agent, "Transfer for research tasks"),
+                handoff(writer_agent, "Transfer for writing tasks"),
+            ]
+        )
+        ```
+    """
+    return Handoff(
+        agent=agent,
+        description=description,
+        tool_name=tool_name,
+        pass_full_history=pass_full_history,
+    )
+
+
 class AgentRegistry:
     """Registry for agents."""
 
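The defaulting in `Handoff.__init__` above means most handoffs only need a target agent. A minimal sketch of what the defaults resolve to; the `billing` agent is illustrative, and the `agnt5.agent` import path is an assumption based on this file's location:

```python
from agnt5 import Agent
from agnt5.agent import Handoff  # assumed path; Handoff is defined in agnt5/agent.py

billing = Agent(
    name="billing",
    model="openai/gpt-4o-mini",
    instructions="Handle billing questions.",
)

h = Handoff(agent=billing)

# Defaults, per Handoff.__init__ above:
assert h.tool_name == "transfer_to_billing"           # f"transfer_to_{agent.name}"
assert h.description == "Handle billing questions."   # falls back to agent.instructions
assert h.pass_full_history is True
```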
@@ -54,22 +145,31 @@ class AgentRegistry:
 class AgentResult:
     """Result from agent execution."""
 
-    def __init__(self, output: str, tool_calls: List[Dict[str, Any]], context: Context):
+    def __init__(
+        self,
+        output: str,
+        tool_calls: List[Dict[str, Any]],
+        context: Context,
+        handoff_to: Optional[str] = None,
+        handoff_metadata: Optional[Dict[str, Any]] = None,
+    ):
         self.output = output
         self.tool_calls = tool_calls
         self.context = context
+        self.handoff_to = handoff_to  # Name of agent that was handed off to
+        self.handoff_metadata = handoff_metadata or {}  # Additional handoff info
 
 
 class Agent:
     """Autonomous LLM-driven agent with tool orchestration.
 
-    Phase 1: Simple agent with:
+    Current features:
     - LLM integration (OpenAI, Anthropic, etc.)
     - Tool selection and execution
     - Multi-turn reasoning
     - Context and state management
 
-    Phase 2 will add:
+    Future enhancements:
     - Durable execution with checkpointing
     - Multi-agent coordination
     - Platform-backed tool execution
@@ -101,30 +201,33 @@ class Agent:
     def __init__(
         self,
         name: str,
-        model: str,
+        model: Any,  # Can be string like "openai/gpt-4o-mini" OR LanguageModel instance
         instructions: str,
         tools: Optional[List[Any]] = None,
+        handoffs: Optional[List[Handoff]] = None,
         temperature: float = 0.7,
         max_tokens: Optional[int] = None,
         top_p: Optional[float] = None,
         model_config: Optional[ModelConfig] = None,
         max_iterations: int = 10,
+        model_name: Optional[str] = None,  # For backwards compatibility with tests
     ):
         """Initialize agent.
 
         Args:
             name: Agent name/identifier
-            model: Model string with provider prefix (e.g., "openai/gpt-4o-mini")
+            model: Model string with provider prefix (e.g., "openai/gpt-4o-mini") OR LanguageModel instance
             instructions: System instructions for the agent
-            tools: List of tools available to the agent (functions with @tool decorator)
+            tools: List of tools available to the agent (functions, Tool instances, or Agent instances)
+            handoffs: List of Handoff configurations for agent-to-agent delegation
             temperature: LLM temperature (0.0 to 1.0)
             max_tokens: Maximum tokens to generate
             top_p: Nucleus sampling parameter
             model_config: Optional advanced configuration (custom endpoints, headers, etc.)
             max_iterations: Maximum reasoning iterations
+            model_name: Optional model name (for backwards compatibility, used when model is a LanguageModel instance)
         """
         self.name = name
-        self.model = model
         self.instructions = instructions
         self.temperature = temperature
         self.max_tokens = max_tokens
@@ -132,12 +235,34 @@ class Agent:
         self.model_config = model_config
         self.max_iterations = max_iterations
 
-        # Build tool registry
+        # Support both string model names and LanguageModel instances
+        if isinstance(model, str):
+            # New API: model is a string like "openai/gpt-4o-mini"
+            self.model = model
+            self.model_name = model_name or model
+            self._language_model = None  # Will create on demand
+        elif isinstance(model, LanguageModel):
+            # Old API (for tests): model is a LanguageModel instance
+            self._language_model = model
+            self.model = model  # Keep for backwards compatibility
+            self.model_name = model_name or "mock-model"
+        else:
+            raise TypeError(f"model must be a string or LanguageModel instance, got {type(model)}")
+
+        # Store handoffs for building handoff tools
+        self.handoffs = handoffs or []
+
+        # Build tool registry (includes regular tools, agent-as-tools, and handoff tools)
         self.tools: Dict[str, Tool] = {}
         if tools:
             for tool_item in tools:
+                # Check if it's an Agent instance (agents-as-tools pattern)
+                if isinstance(tool_item, Agent):
+                    agent_tool = tool_item.to_tool()
+                    self.tools[agent_tool.name] = agent_tool
+                    logger.info(f"Added agent '{tool_item.name}' as tool to '{self.name}'")
                 # Check if it's a Tool instance
-                if isinstance(tool_item, Tool):
+                elif isinstance(tool_item, Tool):
                     self.tools[tool_item.name] = tool_item
                 # Check if it's a decorated function with config
                 elif hasattr(tool_item, "_agnt5_config"):
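The `isinstance` branches above land both construction styles on the same attributes. A hedged sketch of the two forms; the model id is an example, and the string form defers creating the underlying language model until generation time:

```python
from agnt5 import Agent

# String form: provider-prefixed model id, resolved lazily at generation time
a1 = Agent(name="assistant", model="openai/gpt-4o-mini", instructions="Be helpful.")
assert a1.model == "openai/gpt-4o-mini"
assert a1.model_name == "openai/gpt-4o-mini"  # model_name defaults to the string

# Instance form (kept for tests): any LanguageModel instance is accepted directly
# a2 = Agent(name="assistant", model=some_language_model, instructions="Be helpful.")
# assert a2.model_name == "mock-model"        # default when model_name is not given

# Anything else is rejected up front:
# Agent(name="assistant", model=42, instructions="...")  # raises TypeError
```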
@@ -154,6 +279,12 @@ class Agent:
                 if tool_instance:
                     self.tools[tool_instance.name] = tool_instance
 
+        # Build handoff tools
+        for handoff_config in self.handoffs:
+            handoff_tool = self._create_handoff_tool(handoff_config)
+            self.tools[handoff_tool.name] = handoff_tool
+            logger.info(f"Added handoff tool '{handoff_tool.name}' to '{self.name}'")
+
         self.logger = logging.getLogger(f"agnt5.agent.{name}")
 
         # Define schemas based on the run method signature
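Taken together, the constructor loop and the handoff pass above key `self.tools` by tool name regardless of how each entry was supplied. A sketch of the resulting registry; the `@tool` decorator's import path and the `search` stub are assumptions for illustration:

```python
from agnt5 import Agent, handoff
from agnt5.tool import tool  # assumed location of the @tool decorator

@tool
def search(query: str) -> str:
    """Search a knowledge base (stub)."""
    return f"results for {query}"

research_agent = Agent(name="researcher", model="openai/gpt-4o-mini",
                       instructions="You are a research specialist.")
billing_agent = Agent(name="billing", model="openai/gpt-4o-mini",
                      instructions="Handle billing questions.")

coordinator = Agent(
    name="coordinator",
    model="openai/gpt-4o-mini",
    instructions="Route work to the right specialist.",
    tools=[search, research_agent],     # decorated function + agent-as-tool
    handoffs=[handoff(billing_agent)],
)

# Expected keys, following the registration passes above:
#   "search"              - Tool built from the decorated function
#   "researcher"          - research_agent.to_tool(), named after the agent
#   "transfer_to_billing" - handoff tool from _create_handoff_tool()
print(sorted(coordinator.tools))
```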
@@ -180,9 +311,138 @@ class Agent:
         # Store metadata
         self.metadata = {
             "description": instructions,
-            "model": model_name
+            "model": model
         }
 
+    def to_tool(self, description: Optional[str] = None) -> Tool:
+        """Convert this agent to a Tool that can be used by other agents.
+
+        This enables agents-as-tools pattern where one agent can invoke another
+        agent as if it were a regular tool.
+
+        Args:
+            description: Optional custom description (defaults to agent instructions)
+
+        Returns:
+            Tool instance that wraps this agent
+
+        Example:
+            ```python
+            research_agent = Agent(
+                name="researcher",
+                model="openai/gpt-4o-mini",
+                instructions="You are a research specialist."
+            )
+
+            # Use research agent as a tool for another agent
+            coordinator = Agent(
+                name="coordinator",
+                model="openai/gpt-4o-mini",
+                instructions="Coordinate tasks using specialist agents.",
+                tools=[research_agent.to_tool()]
+            )
+            ```
+        """
+        agent_name = self.name
+
+        # Handler that runs the agent
+        async def agent_tool_handler(ctx: Context, user_message: str) -> str:
+            """Execute agent and return output."""
+            ctx.logger.info(f"Invoking agent '{agent_name}' as tool")
+
+            # Run the agent with the user message
+            result = await self.run(user_message, context=ctx)
+
+            return result.output
+
+        # Create tool with agent's schema
+        tool_description = description or self.instructions or f"Agent: {self.name}"
+
+        agent_tool = Tool(
+            name=self.name,
+            description=tool_description,
+            handler=agent_tool_handler,
+            input_schema=self.input_schema,
+            auto_schema=False,
+        )
+
+        return agent_tool
+
+    def _create_handoff_tool(self, handoff_config: Handoff, current_messages_callback: Optional[Callable] = None) -> Tool:
+        """Create a tool for handoff to another agent.
+
+        Args:
+            handoff_config: Handoff configuration
+            current_messages_callback: Optional callback to get current conversation messages
+
+        Returns:
+            Tool instance that executes the handoff
+        """
+        target_agent = handoff_config.agent
+        tool_name = handoff_config.tool_name
+
+        # Handler that executes the handoff
+        async def handoff_handler(ctx: Context, message: str) -> Dict[str, Any]:
+            """Transfer control to target agent."""
+            ctx.logger.info(
+                f"Handoff from '{self.name}' to '{target_agent.name}': {message}"
+            )
+
+            # If we should pass conversation history, add it to context
+            if handoff_config.pass_full_history:
+                # Get current conversation from the agent's run loop
+                # (This will be set when we detect the handoff in run())
+                conversation_history = getattr(ctx, '_agent_data', {}).get("_current_conversation", [])
+
+                if conversation_history:
+                    ctx.logger.info(
+                        f"Passing {len(conversation_history)} messages to target agent"
+                    )
+                    # Store in context for target agent to optionally use
+                    if not hasattr(ctx, '_agent_data'):
+                        ctx._agent_data = {}
+                    ctx._agent_data["_handoff_conversation_history"] = conversation_history
+
+            # Execute target agent with the message and shared context
+            result = await target_agent.run(message, context=ctx)
+
+            # Store handoff metadata - this signals that a handoff occurred
+            handoff_data = {
+                "_handoff": True,
+                "from_agent": self.name,
+                "to_agent": target_agent.name,
+                "message": message,
+                "output": result.output,
+                "tool_calls": result.tool_calls,
+            }
+
+            if not hasattr(ctx, '_agent_data'):
+                ctx._agent_data = {}
+            ctx._agent_data["_handoff_result"] = handoff_data
+
+            # Return the handoff data (will be detected in run() loop)
+            return handoff_data
+
+        # Create tool with handoff schema
+        handoff_tool = Tool(
+            name=tool_name,
+            description=handoff_config.description,
+            handler=handoff_handler,
+            input_schema={
+                "type": "object",
+                "properties": {
+                    "message": {
+                        "type": "string",
+                        "description": "Message or task to pass to the target agent"
+                    }
+                },
+                "required": ["message"]
+            },
+            auto_schema=False,
+        )
+
+        return handoff_tool
+
     async def run(
         self,
         user_message: str,
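Because `_create_handoff_tool` pins the input schema, every handoff surfaces to the model as a one-argument tool. Roughly the definition the run loop would build for the `billing` handoff from the earlier sketches; the exact wire format depends on the provider, so treat this as illustrative:

```python
# Approximate tool definition the LLM receives for a handoff to "billing"
handoff_tool_definition = {
    "name": "transfer_to_billing",               # Handoff.tool_name
    "description": "Handle billing questions.",  # Handoff.description
    "parameters": {                              # fixed schema from _create_handoff_tool
        "type": "object",
        "properties": {
            "message": {
                "type": "string",
                "description": "Message or task to pass to the target agent",
            }
        },
        "required": ["message"],
    },
}
```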
@@ -209,132 +469,164 @@ class Agent:
 
         context = Context(
             run_id=f"agent-{self.name}-{uuid.uuid4().hex[:8]}",
-            component_type="agent",
         )
 
-        # Initialize conversation
-        messages: List[Message] = [Message.user(user_message)]
-        all_tool_calls: List[Dict[str, Any]] = []
-
-        # Reasoning loop
-        for iteration in range(self.max_iterations):
-            self.logger.info(f"Agent iteration {iteration + 1}/{self.max_iterations}")
-
-            # Build tool definitions for LLM
-            tool_defs = [
-                ToolDefinition(
-                    name=tool.name,
-                    description=tool.description,
-                    parameters=tool.input_schema,
-                )
-                for tool in self.tools.values()
-            ]
-
-            # Convert messages to dict format for lm.generate()
-            messages_dict = []
-            for msg in messages:
-                messages_dict.append({
-                    "role": msg.role.value,
-                    "content": msg.content
-                })
-
-            # Call LLM using simplified API
-            # TODO: Support tools in lm.generate() - for now using GenerateRequest internally
-            request = GenerateRequest(
-                model=self.model,
-                system_prompt=self.instructions,
-                messages=messages,
-                tools=tool_defs if tool_defs else [],
-            )
-            request.config.temperature = self.temperature
-            if self.max_tokens:
-                request.config.max_tokens = self.max_tokens
-            if self.top_p:
-                request.config.top_p = self.top_p
-
-            # Create internal LM instance for generation
-            # TODO: Use model_config when provided
-            from .lm import _LanguageModel
-            provider, model_name = self.model.split('/', 1)
-            internal_lm = _LanguageModel(provider=provider.lower(), default_model=None)
-            response = await internal_lm.generate(request)
-
-            # Add assistant response to messages
-            messages.append(Message.assistant(response.text))
-
-            # Check if LLM wants to use tools
-            if response.tool_calls:
-                self.logger.info(f"Agent calling {len(response.tool_calls)} tool(s)")
-
-                # Execute tool calls
-                tool_results = []
-                for tool_call in response.tool_calls:
-                    tool_name = tool_call["name"]
-                    tool_args_str = tool_call["arguments"]
-
-                    # Track tool call
-                    all_tool_calls.append(
-                        {
-                            "name": tool_name,
-                            "arguments": tool_args_str,
-                            "iteration": iteration + 1,
-                        }
+        # Create span for agent execution with trace linking
+        from ._core import create_span
+
+        with create_span(
+            self.name,
+            "agent",
+            context._runtime_context if hasattr(context, "_runtime_context") else None,
+            {
+                "agent.name": self.name,
+                "agent.model": self.model,
+                "agent.max_iterations": str(self.max_iterations),
+            },
+        ) as span:
+            # Initialize conversation
+            messages: List[Message] = [Message.user(user_message)]
+            all_tool_calls: List[Dict[str, Any]] = []
+
+            # Reasoning loop
+            for iteration in range(self.max_iterations):
+                # Build tool definitions for LLM
+                tool_defs = [
+                    ToolDefinition(
+                        name=tool.name,
+                        description=tool.description,
+                        parameters=tool.input_schema,
                     )
-
-                    # Execute tool
-                    try:
-                        # Parse arguments
-                        tool_args = json.loads(tool_args_str)
-
-                        # Get tool
-                        tool = self.tools.get(tool_name)
-                        if not tool:
-                            result_text = f"Error: Tool '{tool_name}' not found"
-                        else:
-                            # Execute tool
-                            result = await tool.invoke(context, **tool_args)
-                            result_text = json.dumps(result) if result else "null"
-
-                        tool_results.append(
-                            {"tool": tool_name, "result": result_text, "error": None}
-                        )
-
-                    except Exception as e:
-                        self.logger.error(f"Tool execution error: {e}")
-                        tool_results.append(
-                            {"tool": tool_name, "result": None, "error": str(e)}
+                    for tool in self.tools.values()
+                ]
+
+                # Convert messages to dict format for lm.generate()
+                messages_dict = []
+                for msg in messages:
+                    messages_dict.append({
+                        "role": msg.role.value,
+                        "content": msg.content
+                    })
+
+                # Call LLM using simplified API
+                # TODO: Support tools in lm.generate() - for now using GenerateRequest internally
+                request = GenerateRequest(
+                    model=self.model,
+                    system_prompt=self.instructions,
+                    messages=messages,
+                    tools=tool_defs if tool_defs else [],
+                )
+                request.config.temperature = self.temperature
+                if self.max_tokens:
+                    request.config.max_tokens = self.max_tokens
+                if self.top_p:
+                    request.config.top_p = self.top_p
+
+                # Create internal LM instance for generation
+                # TODO: Use model_config when provided
+                from .lm import _LanguageModel
+                provider, model_name = self.model.split('/', 1)
+                internal_lm = _LanguageModel(provider=provider.lower(), default_model=None)
+                response = await internal_lm.generate(request)
+
+                # Add assistant response to messages
+                messages.append(Message.assistant(response.text))
+
+                # Check if LLM wants to use tools
+                if response.tool_calls:
+                    self.logger.debug(f"Agent calling {len(response.tool_calls)} tool(s)")
+
+                    # Store current conversation in context for potential handoffs
+                    # Use a simple dict attribute since we don't need full state persistence for this
+                    if not hasattr(context, '_agent_data'):
+                        context._agent_data = {}
+                    context._agent_data["_current_conversation"] = messages
+
+                    # Execute tool calls
+                    tool_results = []
+                    for tool_call in response.tool_calls:
+                        tool_name = tool_call["name"]
+                        tool_args_str = tool_call["arguments"]
+
+                        # Track tool call
+                        all_tool_calls.append(
+                            {
+                                "name": tool_name,
+                                "arguments": tool_args_str,
+                                "iteration": iteration + 1,
+                            }
                         )
 
-                # Add tool results to conversation
-                results_text = "\n".join(
-                    [
-                        f"Tool: {tr['tool']}\nResult: {tr['result']}"
-                        if tr["error"] is None
-                        else f"Tool: {tr['tool']}\nError: {tr['error']}"
-                        for tr in tool_results
-                    ]
-                )
-                messages.append(Message.user(f"Tool results:\n{results_text}"))
+                        # Execute tool
+                        try:
+                            # Parse arguments
+                            tool_args = json.loads(tool_args_str)
+
+                            # Get tool
+                            tool = self.tools.get(tool_name)
+                            if not tool:
+                                result_text = f"Error: Tool '{tool_name}' not found"
+                            else:
+                                # Execute tool
+                                result = await tool.invoke(context, **tool_args)
+
+                                # Check if this was a handoff
+                                if isinstance(result, dict) and result.get("_handoff"):
+                                    self.logger.info(
+                                        f"Handoff detected to '{result['to_agent']}', "
+                                        f"terminating current agent"
+                                    )
+                                    # Return immediately with handoff result
+                                    return AgentResult(
+                                        output=result["output"],
+                                        tool_calls=all_tool_calls + result.get("tool_calls", []),
+                                        context=context,
+                                        handoff_to=result["to_agent"],
+                                        handoff_metadata=result,
+                                    )
+
+                                result_text = json.dumps(result) if result else "null"
+
+                            tool_results.append(
+                                {"tool": tool_name, "result": result_text, "error": None}
+                            )
+
+                        except Exception as e:
+                            self.logger.error(f"Tool execution error: {e}")
+                            tool_results.append(
+                                {"tool": tool_name, "result": None, "error": str(e)}
+                            )
+
+                    # Add tool results to conversation
+                    results_text = "\n".join(
+                        [
+                            f"Tool: {tr['tool']}\nResult: {tr['result']}"
+                            if tr["error"] is None
+                            else f"Tool: {tr['tool']}\nError: {tr['error']}"
+                            for tr in tool_results
+                        ]
+                    )
+                    messages.append(Message.user(f"Tool results:\n{results_text}\n\nPlease provide your final answer based on these results."))
 
-            # Continue loop for agent to process results
+                # Continue loop for agent to process results
 
-            else:
-                # No tool calls - agent is done
-                self.logger.info(f"Agent completed after {iteration + 1} iterations")
-                return AgentResult(
-                    output=response.text,
-                    tool_calls=all_tool_calls,
-                    context=context,
-                )
+                else:
+                    # No tool calls - agent is done
+                    self.logger.debug(f"Agent completed after {iteration + 1} iterations")
+                    return AgentResult(
+                        output=response.text,
+                        tool_calls=all_tool_calls,
+                        context=context,
+                    )
 
-        # Max iterations reached
-        self.logger.warning(f"Agent reached max iterations ({self.max_iterations})")
-        final_output = messages[-1].content if messages else "No output generated"
-        return AgentResult(
-            output=final_output,
-            tool_calls=all_tool_calls,
-            context=context,
-        )
+            # Max iterations reached
+            self.logger.warning(f"Agent reached max iterations ({self.max_iterations})")
+            final_output = messages[-1].content if messages else "No output generated"
+            return AgentResult(
+                output=final_output,
+                tool_calls=all_tool_calls,
+                context=context,
+            )
 
     async def chat(
         self,
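Because the loop above returns as soon as a tool result carries the `_handoff` marker, callers can distinguish delegated answers from direct ones through `AgentResult.handoff_to`. A usage sketch, reusing the illustrative `coordinator` from the earlier examples:

```python
import asyncio

async def main():
    result = await coordinator.run("I was double-charged last month.")
    if result.handoff_to:
        # Delegated: the output came from the target agent's own run
        meta = result.handoff_metadata
        print(f"Handled by {result.handoff_to} (handed off from {meta['from_agent']})")
    print(result.output)

asyncio.run(main())
```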
@@ -364,7 +656,6 @@ class Agent:
 
         context = Context(
             run_id=f"agent-chat-{self.name}-{uuid.uuid4().hex[:8]}",
-            component_type="agent",
         )
 
         # Add user message