camel-ai 0.2.71a12__py3-none-any.whl → 0.2.72__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only.

Files changed (42)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +260 -488
  3. camel/memories/agent_memories.py +39 -0
  4. camel/memories/base.py +8 -0
  5. camel/models/gemini_model.py +30 -2
  6. camel/models/moonshot_model.py +36 -4
  7. camel/models/openai_model.py +29 -15
  8. camel/societies/workforce/prompts.py +24 -14
  9. camel/societies/workforce/single_agent_worker.py +9 -7
  10. camel/societies/workforce/workforce.py +44 -16
  11. camel/storages/vectordb_storages/__init__.py +1 -0
  12. camel/storages/vectordb_storages/surreal.py +415 -0
  13. camel/toolkits/__init__.py +10 -1
  14. camel/toolkits/base.py +57 -1
  15. camel/toolkits/human_toolkit.py +5 -1
  16. camel/toolkits/hybrid_browser_toolkit/config_loader.py +127 -414
  17. camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +783 -1626
  18. camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +489 -0
  19. camel/toolkits/markitdown_toolkit.py +2 -2
  20. camel/toolkits/message_integration.py +592 -0
  21. camel/toolkits/note_taking_toolkit.py +195 -26
  22. camel/toolkits/openai_image_toolkit.py +5 -5
  23. camel/toolkits/origene_mcp_toolkit.py +97 -0
  24. camel/toolkits/screenshot_toolkit.py +213 -0
  25. camel/toolkits/search_toolkit.py +115 -36
  26. camel/toolkits/terminal_toolkit.py +379 -165
  27. camel/toolkits/video_analysis_toolkit.py +13 -13
  28. camel/toolkits/video_download_toolkit.py +11 -11
  29. camel/toolkits/web_deploy_toolkit.py +1024 -0
  30. camel/types/enums.py +6 -3
  31. camel/types/unified_model_type.py +16 -4
  32. camel/utils/mcp_client.py +8 -0
  33. {camel_ai-0.2.71a12.dist-info → camel_ai-0.2.72.dist-info}/METADATA +6 -3
  34. {camel_ai-0.2.71a12.dist-info → camel_ai-0.2.72.dist-info}/RECORD +36 -36
  35. camel/toolkits/hybrid_browser_toolkit/actions.py +0 -417
  36. camel/toolkits/hybrid_browser_toolkit/agent.py +0 -311
  37. camel/toolkits/hybrid_browser_toolkit/browser_session.py +0 -739
  38. camel/toolkits/hybrid_browser_toolkit/snapshot.py +0 -227
  39. camel/toolkits/hybrid_browser_toolkit/stealth_script.js +0 -0
  40. camel/toolkits/hybrid_browser_toolkit/unified_analyzer.js +0 -1002
  41. {camel_ai-0.2.71a12.dist-info → camel_ai-0.2.72.dist-info}/WHEEL +0 -0
  42. {camel_ai-0.2.71a12.dist-info → camel_ai-0.2.72.dist-info}/licenses/LICENSE +0 -0
@@ -74,7 +74,7 @@ from camel.models import (
 from camel.prompts import TextPrompt
 from camel.responses import ChatAgentResponse
 from camel.storages import JsonStorage
-from camel.toolkits import FunctionTool
+from camel.toolkits import FunctionTool, RegisteredAgentToolkit
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
@@ -89,7 +89,6 @@ from camel.utils import (
     model_from_json_schema,
 )
 from camel.utils.commons import dependencies_required
-from camel.utils.tool_result import ToolResult

 if TYPE_CHECKING:
     from camel.terminators import ResponseTerminator
@@ -348,6 +347,13 @@ class ChatAgent(BaseAgent):
         tools (Optional[List[Union[FunctionTool, Callable]]], optional): List
             of available :obj:`FunctionTool` or :obj:`Callable`. (default:
             :obj:`None`)
+        toolkits_to_register_agent (Optional[List[RegisteredAgentToolkit]],
+            optional): List of toolkit instances that inherit from
+            :obj:`RegisteredAgentToolkit`. The agent will register itself with
+            these toolkits, allowing them to access the agent instance. Note:
+            This does NOT add the toolkit's tools to the agent. To use tools
+            from these toolkits, pass them explicitly via the `tools`
+            parameter. (default: :obj:`None`)
         external_tools (Optional[List[Union[FunctionTool, Callable,
             Dict[str, Any]]]], optional): List of external tools
             (:obj:`FunctionTool` or :obj:`Callable` or :obj:`Dict[str, Any]`)
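
Note: the registration/tool split documented above is easy to misread, so here is a minimal wiring sketch. MyToolkit is a hypothetical example, not part of this release; only ChatAgent, FunctionTool, BaseToolkit, and RegisteredAgentToolkit come from the diff:

    from camel.agents import ChatAgent
    from camel.toolkits import BaseToolkit, FunctionTool, RegisteredAgentToolkit

    class MyToolkit(BaseToolkit, RegisteredAgentToolkit):
        def my_tool(self, text: str) -> str:
            """Echo the input text back unchanged."""
            return text

    toolkit = MyToolkit()
    agent = ChatAgent(
        "You are a helpful assistant.",
        # Tools must still be passed explicitly; registration alone
        # does not expose them to the model.
        tools=[FunctionTool(toolkit.my_tool)],
        # This only gives the toolkit a back-reference to the agent.
        toolkits_to_register_agent=[toolkit],
    )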
@@ -375,6 +381,11 @@ class ChatAgent(BaseAgent):
         pause_event (Optional[asyncio.Event]): Event to signal pause of the
             agent's operation. When clear, the agent will pause its execution.
             (default: :obj:`None`)
+        prune_tool_calls_from_memory (bool): Whether to clean tool
+            call messages from memory after response generation to save token
+            usage. When enabled, removes FUNCTION/TOOL role messages and
+            ASSISTANT messages with tool_calls after each step.
+            (default: :obj:`False`)
     """

     def __init__(
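
The new flag is opt-in; a short usage sketch (search_fn is a placeholder tool function, not from this diff):

    agent = ChatAgent(
        "You are a research assistant.",
        tools=[FunctionTool(search_fn)],
        prune_tool_calls_from_memory=True,
    )
    response = agent.step("Find recent work on RLHF.")
    # Per the docstring above, FUNCTION/TOOL messages and the ASSISTANT
    # message carrying tool_calls are removed from memory after the step;
    # the user input and final answer remain for the next turn.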
@@ -400,6 +411,9 @@
         token_limit: Optional[int] = None,
         output_language: Optional[str] = None,
         tools: Optional[List[Union[FunctionTool, Callable]]] = None,
+        toolkits_to_register_agent: Optional[
+            List[RegisteredAgentToolkit]
+        ] = None,
         external_tools: Optional[
             List[Union[FunctionTool, Callable, Dict[str, Any]]]
         ] = None,
@@ -411,6 +425,7 @@
         tool_execution_timeout: Optional[float] = None,
         mask_tool_output: bool = False,
         pause_event: Optional[asyncio.Event] = None,
+        prune_tool_calls_from_memory: bool = False,
     ) -> None:
         if isinstance(model, ModelManager):
             self.model_backend = model
@@ -432,7 +447,7 @@
             token_limit or self.model_backend.token_limit,
         )

-        self.memory: AgentMemory = memory or ChatHistoryMemory(
+        self._memory: AgentMemory = memory or ChatHistoryMemory(
             context_creator,
             window_size=message_window_size,
             agent_id=self.agent_id,
@@ -440,7 +455,7 @@

         # So we don't have to pass agent_id when we define memory
         if memory is not None:
-            memory.agent_id = self.agent_id
+            self._memory.agent_id = self.agent_id

         # Set up system message and initialize messages
         self._original_system_message = (
@@ -473,6 +488,12 @@
             ]
         }

+        # Register agent with toolkits that have RegisteredAgentToolkit mixin
+        if toolkits_to_register_agent:
+            for toolkit in toolkits_to_register_agent:
+                if isinstance(toolkit, RegisteredAgentToolkit):
+                    toolkit.register_agent(self)
+
         self._external_tool_schemas = {
             tool_schema["function"]["name"]: tool_schema
             for tool_schema in [
@@ -488,17 +509,13 @@
         self.tool_execution_timeout = tool_execution_timeout
         self.mask_tool_output = mask_tool_output
         self._secure_result_store: Dict[str, Any] = {}
-        self._pending_images: List[str] = []
-        self._image_retry_count: Dict[str, int] = {}
-        # Store images to attach to next user message
         self.pause_event = pause_event
+        self.prune_tool_calls_from_memory = prune_tool_calls_from_memory

     def reset(self):
         r"""Resets the :obj:`ChatAgent` to its initial state."""
         self.terminated = False
         self.init_messages()
-        self._pending_images = []
-        self._image_retry_count = {}
         for terminator in self.response_terminators:
             terminator.reset()

@@ -663,6 +680,25 @@
         )
         self.init_messages()

+    @property
+    def memory(self) -> AgentMemory:
+        r"""Returns the agent memory."""
+        return self._memory
+
+    @memory.setter
+    def memory(self, value: AgentMemory) -> None:
+        r"""Set the agent memory.
+
+        When setting a new memory, the system message is automatically
+        re-added to ensure it's not lost.
+
+        Args:
+            value (AgentMemory): The new agent memory to use.
+        """
+        self._memory = value
+        # Ensure the new memory has the system message
+        self.init_messages()
+
     def _get_full_tool_schemas(self) -> List[Dict[str, Any]]:
         r"""Returns a list of tool schemas of all tools, including internal
         and external tools.
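
The setter removes a subtle pitfall: assigning a fresh memory used to drop the system message silently. A sketch of the behavior it now guarantees (ChatHistoryMemory construction abbreviated):

    from camel.memories import ChatHistoryMemory

    agent = ChatAgent("You are a helpful assistant.")
    agent.memory = ChatHistoryMemory(
        agent.memory.get_context_creator(),  # reuse the existing context creator
        agent_id=agent.agent_id,
    )
    # The setter calls init_messages(), so the system message is re-added
    # to the new memory automatically.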
@@ -1264,7 +1300,11 @@
         openai_message: OpenAIMessage = {"role": "user", "content": prompt}
         # Explicitly set the tools to empty list to avoid calling tools
         response = self._get_model_response(
-            [openai_message], 0, response_format, []
+            openai_messages=[openai_message],
+            num_tokens=0,
+            response_format=response_format,
+            tool_schemas=[],
+            prev_num_openai_messages=0,
         )
         message.content = response.output_messages[0].content
         if not self._try_format_message(message, response_format):
@@ -1292,7 +1332,11 @@
         prompt = SIMPLE_FORMAT_PROMPT.format(content=message.content)
         openai_message: OpenAIMessage = {"role": "user", "content": prompt}
         response = await self._aget_model_response(
-            [openai_message], 0, response_format, []
+            openai_messages=[openai_message],
+            num_tokens=0,
+            response_format=response_format,
+            tool_schemas=[],
+            prev_num_openai_messages=0,
         )
         message.content = response.output_messages[0].content
         self._try_format_message(message, response_format)
@@ -1352,16 +1396,6 @@
             role_name="User", content=input_message
         )

-        # Attach any pending images from previous tool calls
-        image_list = self._process_pending_images()
-        if image_list:
-            # Create new message with images attached
-            input_message = BaseMessage.make_user_message(
-                role_name="User",
-                content=input_message.content,
-                image_list=image_list,
-            )
-
         # Add user input to memory
         self.update_memory(input_message, OpenAIBackendRole.USER)

@@ -1374,7 +1408,8 @@

         # Initialize token usage tracker
         step_token_usage = self._create_token_usage_tracker()
-        iteration_count = 0
+        iteration_count: int = 0
+        prev_num_openai_messages: int = 0

         while True:
             if self.pause_event is not None and not self.pause_event.is_set():
@@ -1391,10 +1426,13 @@
             # Get response from model backend
             response = self._get_model_response(
                 openai_messages,
-                accumulated_context_tokens,  # Cumulative context tokens
-                response_format,
-                self._get_full_tool_schemas(),
+                num_tokens=num_tokens,
+                current_iteration=iteration_count,
+                response_format=response_format,
+                tool_schemas=self._get_full_tool_schemas(),
+                prev_num_openai_messages=prev_num_openai_messages,
             )
+            prev_num_openai_messages = len(openai_messages)
             iteration_count += 1

             # Accumulate API token usage
@@ -1405,6 +1443,9 @@
             # Terminate Agent if stop_event is set
             if self.stop_event and self.stop_event.is_set():
                 # Use the _step_terminate to terminate the agent with reason
+                logger.info(
+                    f"Termination triggered at iteration " f"{iteration_count}"
+                )
                 return self._step_terminate(
                     accumulated_context_tokens,
                     tool_call_records,
@@ -1439,6 +1480,7 @@
                     self.max_iteration is not None
                     and iteration_count >= self.max_iteration
                 ):
+                    logger.info(f"Max iteration reached: {iteration_count}")
                     break

             # If we're still here, continue the loop
@@ -1456,6 +1498,10 @@

         self._record_final_output(response.output_messages)

+        # Clean tool call messages from memory after response generation
+        if self.prune_tool_calls_from_memory and tool_call_records:
+            self.memory.clean_tool_calls()
+
         return self._convert_to_chatagent_response(
             response,
             tool_call_records,
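
clean_tool_calls() is the new AgentMemory hook this relies on (declared in camel/memories/base.py and implemented in camel/memories/agent_memories.py in this release). Since it is a public memory method, it can presumably also be invoked by hand when the flag is off:

    response = agent.step("Summarize the repo's open issues.")  # may call tools
    agent.memory.clean_tool_calls()  # prune the tool traffic manually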
@@ -1544,16 +1590,6 @@
             role_name="User", content=input_message
         )

-        # Attach any pending images from previous tool calls
-        image_list = self._process_pending_images()
-        if image_list:
-            # Create new message with images attached
-            input_message = BaseMessage.make_user_message(
-                role_name="User",
-                content=input_message.content,
-                image_list=image_list,
-            )
-
         self.update_memory(input_message, OpenAIBackendRole.USER)

         tool_call_records: List[ToolCallingRecord] = []
@@ -1564,7 +1600,8 @@

         # Initialize token usage tracker
         step_token_usage = self._create_token_usage_tracker()
-        iteration_count = 0
+        iteration_count: int = 0
+        prev_num_openai_messages: int = 0
         while True:
             if self.pause_event is not None and not self.pause_event.is_set():
                 await self.pause_event.wait()
@@ -1578,10 +1615,13 @@

             response = await self._aget_model_response(
                 openai_messages,
-                accumulated_context_tokens,
-                response_format,
-                self._get_full_tool_schemas(),
+                num_tokens=num_tokens,
+                current_iteration=iteration_count,
+                response_format=response_format,
+                tool_schemas=self._get_full_tool_schemas(),
+                prev_num_openai_messages=prev_num_openai_messages,
             )
+            prev_num_openai_messages = len(openai_messages)
             iteration_count += 1

             # Accumulate API token usage
@@ -1592,6 +1632,9 @@
             # Terminate Agent if stop_event is set
             if self.stop_event and self.stop_event.is_set():
                 # Use the _step_terminate to terminate the agent with reason
+                logger.info(
+                    f"Termination triggered at iteration " f"{iteration_count}"
+                )
                 return self._step_terminate(
                     accumulated_context_tokens,
                     tool_call_records,
@@ -1600,7 +1643,6 @@

             if tool_call_requests := response.tool_call_requests:
                 # Process all tool calls
-                new_images_from_tools = []
                 for tool_call_request in tool_call_requests:
                     if (
                         tool_call_request.tool_name
@@ -1620,72 +1662,10 @@
                     )
                     tool_call_records.append(tool_call_record)

-                    # Check if this tool call produced images
-                    if (
-                        hasattr(tool_call_record, 'images')
-                        and tool_call_record.images
-                    ):
-                        new_images_from_tools.extend(
-                            tool_call_record.images
-                        )
-
                 # If we found an external tool call, break the loop
                 if external_tool_call_requests:
                     break

-                # If tools produced images
-                # send them to the model as a user message
-                if new_images_from_tools:
-                    # Convert base64 images to PIL Images
-                    image_list = []
-                    for img_data in new_images_from_tools:
-                        try:
-                            import base64
-                            import io
-
-                            from PIL import Image
-
-                            # Extract base64 data from data URL format
-                            if img_data.startswith("data:image"):
-                                # Format:
-                                # "data:image/png;base64,iVBORw0KGgo..."
-                                base64_data = img_data.split(',', 1)[1]
-                            else:
-                                # Raw base64 data
-                                base64_data = img_data
-
-                            # Decode and create PIL Image
-                            image_bytes = base64.b64decode(base64_data)
-                            pil_image = Image.open(io.BytesIO(image_bytes))
-                            # Convert to ensure proper
-                            # Image.Image type for compatibility
-                            pil_image_tool_result: Image.Image = (
-                                pil_image.convert('RGB')
-                            )
-                            image_list.append(pil_image_tool_result)
-
-                        except Exception as e:
-                            logger.warning(
-                                f"Failed to convert "
-                                f"base64 image to PIL for immediate use: {e}"
-                            )
-                            continue
-
-                    # If we have valid images
-                    # create a user message with images
-                    if image_list:
-                        # Create a user message with images
-                        # to provide visual context immediately
-                        image_message = BaseMessage.make_user_message(
-                            role_name="User",
-                            content="[Visual content from tool execution - please analyze and continue]",  # noqa: E501
-                            image_list=image_list,
-                        )
-
-                        self.update_memory(
-                            image_message, OpenAIBackendRole.USER
-                        )
-
                 if (
                     self.max_iteration is not None
                     and iteration_count >= self.max_iteration
@@ -1707,6 +1687,10 @@

         self._record_final_output(response.output_messages)

+        # Clean tool call messages from memory after response generation
+        if self.prune_tool_calls_from_memory and tool_call_records:
+            self.memory.clean_tool_calls()
+
         return self._convert_to_chatagent_response(
             response,
             tool_call_records,
@@ -1772,69 +1756,6 @@
             info=info,
         )

-    def _process_pending_images(self) -> List:
-        r"""Process pending images with retry logic and return PIL Image list.
-
-        Returns:
-            List: List of successfully converted PIL Images.
-        """
-        if not self._pending_images:
-            return []
-
-        image_list = []
-        successfully_processed = []
-        failed_images = []
-
-        for img_data in self._pending_images:
-            # Track retry count
-            retry_count = self._image_retry_count.get(img_data, 0)
-
-            # Remove images that have failed too many times (max 3 attempts)
-            if retry_count >= 3:
-                failed_images.append(img_data)
-                logger.warning(
-                    f"Removing image after {retry_count} failed attempts"
-                )
-                continue
-
-            try:
-                import base64
-                import io
-
-                from PIL import Image
-
-                # Extract base64 data from data URL format
-                if img_data.startswith("data:image"):
-                    # Format: "data:image/png;base64,iVBORw0KGgo..."
-                    base64_data = img_data.split(',', 1)[1]
-                else:
-                    # Raw base64 data
-                    base64_data = img_data
-
-                # Decode and create PIL Image
-                image_bytes = base64.b64decode(base64_data)
-                pil_image = Image.open(io.BytesIO(image_bytes))
-                pil_image_converted: Image.Image = pil_image.convert('RGB')
-                image_list.append(pil_image_converted)
-                successfully_processed.append(img_data)
-
-            except Exception as e:
-                # Increment retry count for failed conversion
-                self._image_retry_count[img_data] = retry_count + 1
-                logger.warning(
-                    f"Failed to convert base64 image to PIL "
-                    f"(attempt {retry_count + 1}/3): {e}"
-                )
-                continue
-
-        # Clean up processed and failed images
-        for img in successfully_processed + failed_images:
-            self._pending_images.remove(img)
-            # Clean up retry count for processed/removed images
-            self._image_retry_count.pop(img, None)
-
-        return image_list
-
     def _record_final_output(self, output_messages: List[BaseMessage]) -> None:
         r"""Log final messages or warnings about multiple responses."""
         if len(output_messages) == 1:
@@ -1845,69 +1766,32 @@
             "selected message manually using `record_message()`."
         )

-    def _is_vision_error(self, exc: Exception) -> bool:
-        r"""Check if the exception is likely related to vision/image is not
-        supported by the model."""
-        # TODO: more robust vision error detection
-        error_msg = str(exc).lower()
-        vision_keywords = [
-            'vision',
-            'image',
-            'multimodal',
-            'unsupported',
-            'invalid content type',
-            'image_url',
-            'visual',
-        ]
-        return any(keyword in error_msg for keyword in vision_keywords)
-
-    def _has_images(self, messages: List[OpenAIMessage]) -> bool:
-        r"""Check if any message contains images."""
-        for msg in messages:
-            content = msg.get('content')
-            if isinstance(content, list):
-                for item in content:
-                    if (
-                        isinstance(item, dict)
-                        and item.get('type') == 'image_url'
-                    ):
-                        return True
-        return False
-
-    def _strip_images_from_messages(
-        self, messages: List[OpenAIMessage]
-    ) -> List[OpenAIMessage]:
-        r"""Remove images from messages, keeping only text content."""
-        stripped_messages = []
-        for msg in messages:
-            content = msg.get('content')
-            if isinstance(content, list):
-                # Extract only text content from multimodal messages
-                text_content = ""
-                for item in content:
-                    if isinstance(item, dict) and item.get('type') == 'text':
-                        text_content += item.get('text', '')
-
-                # Create new message with only text content
-                new_msg = msg.copy()
-                new_msg['content'] = (
-                    text_content
-                    or "[Image content removed - model doesn't support vision]"
-                )
-                stripped_messages.append(new_msg)
-            else:
-                # Regular text message, keep as is
-                stripped_messages.append(msg)
-        return stripped_messages
-
+    @observe()
     def _get_model_response(
         self,
         openai_messages: List[OpenAIMessage],
         num_tokens: int,
+        current_iteration: int = 0,
         response_format: Optional[Type[BaseModel]] = None,
         tool_schemas: Optional[List[Dict[str, Any]]] = None,
+        prev_num_openai_messages: int = 0,
     ) -> ModelResponse:
-        r"""Internal function for agent step model response."""
+        r"""Internal function for agent step model response.
+        Args:
+            openai_messages (List[OpenAIMessage]): The OpenAI
+                messages to process.
+            num_tokens (int): The number of tokens in the context.
+            current_iteration (int): The current iteration of the step.
+            response_format (Optional[Type[BaseModel]]): The response
+                format to use.
+            tool_schemas (Optional[List[Dict[str, Any]]]): The tool
+                schemas to use.
+            prev_num_openai_messages (int): The number of openai messages
+                logged in the previous iteration.
+
+        Returns:
+            ModelResponse: The model response.
+        """

         response = None
         try:
@@ -1915,33 +1799,13 @@
                 openai_messages, response_format, tool_schemas or None
             )
         except Exception as exc:
-            # Try again without images if the error might be vision-related
-            if self._is_vision_error(exc) and self._has_images(
-                openai_messages
-            ):
-                logger.warning(
-                    "Model appears to not support vision. Retrying without images."  # noqa: E501
-                )
-                try:
-                    stripped_messages = self._strip_images_from_messages(
-                        openai_messages
-                    )
-                    response = self.model_backend.run(
-                        stripped_messages,
-                        response_format,
-                        tool_schemas or None,
-                    )
-                except Exception:
-                    pass  # Fall through to original error handling
-
-            if not response:
-                logger.error(
-                    f"An error occurred while running model "
-                    f"{self.model_backend.model_type}, "
-                    f"index: {self.model_backend.current_model_index}",
-                    exc_info=exc,
-                )
-                error_info = str(exc)
+            logger.error(
+                f"An error occurred while running model "
+                f"{self.model_backend.model_type}, "
+                f"index: {self.model_backend.current_model_index}",
+                exc_info=exc,
+            )
+            error_info = str(exc)

         if not response and self.model_backend.num_models > 1:
             raise ModelProcessingError(
@@ -1955,11 +1819,12 @@
         )

         sanitized_messages = self._sanitize_messages_for_logging(
-            openai_messages
+            openai_messages, prev_num_openai_messages
         )
         logger.info(
             f"Model {self.model_backend.model_type}, "
             f"index {self.model_backend.current_model_index}, "
+            f"iteration {current_iteration}, "
             f"processed these messages: {sanitized_messages}"
         )
         if not isinstance(response, ChatCompletion):
@@ -1973,10 +1838,27 @@
         self,
         openai_messages: List[OpenAIMessage],
         num_tokens: int,
+        current_iteration: int = 0,
         response_format: Optional[Type[BaseModel]] = None,
        tool_schemas: Optional[List[Dict[str, Any]]] = None,
+        prev_num_openai_messages: int = 0,
     ) -> ModelResponse:
-        r"""Internal function for agent step model response."""
+        r"""Internal function for agent async step model response.
+        Args:
+            openai_messages (List[OpenAIMessage]): The OpenAI messages
+                to process.
+            num_tokens (int): The number of tokens in the context.
+            current_iteration (int): The current iteration of the step.
+            response_format (Optional[Type[BaseModel]]): The response
+                format to use.
+            tool_schemas (Optional[List[Dict[str, Any]]]): The tool schemas
+                to use.
+            prev_num_openai_messages (int): The number of openai messages
+                logged in the previous iteration.
+
+        Returns:
+            ModelResponse: The model response.
+        """

         response = None
         try:
@@ -1984,33 +1866,13 @@
                 openai_messages, response_format, tool_schemas or None
             )
         except Exception as exc:
-            # Try again without images if the error might be vision-related
-            if self._is_vision_error(exc) and self._has_images(
-                openai_messages
-            ):
-                logger.warning(
-                    "Model appears to not support vision. Retrying without images."  # noqa: E501
-                )
-                try:
-                    stripped_messages = self._strip_images_from_messages(
-                        openai_messages
-                    )
-                    response = await self.model_backend.arun(
-                        stripped_messages,
-                        response_format,
-                        tool_schemas or None,
-                    )
-                except Exception:
-                    pass  # Fall through to original error handling
-
-            if not response:
-                logger.error(
-                    f"An error occurred while running model "
-                    f"{self.model_backend.model_type}, "
-                    f"index: {self.model_backend.current_model_index}",
-                    exc_info=exc,
-                )
-                error_info = str(exc)
+            logger.error(
+                f"An error occurred while running model "
+                f"{self.model_backend.model_type}, "
+                f"index: {self.model_backend.current_model_index}",
+                exc_info=exc,
+            )
+            error_info = str(exc)

         if not response and self.model_backend.num_models > 1:
             raise ModelProcessingError(
@@ -2024,11 +1886,12 @@
         )

         sanitized_messages = self._sanitize_messages_for_logging(
-            openai_messages
+            openai_messages, prev_num_openai_messages
        )
         logger.info(
             f"Model {self.model_backend.model_type}, "
             f"index {self.model_backend.current_model_index}, "
+            f"iteration {current_iteration}, "
             f"processed these messages: {sanitized_messages}"
         )
         if not isinstance(response, ChatCompletion):
@@ -2038,12 +1901,16 @@
         )
         return self._handle_batch_response(response)

-    def _sanitize_messages_for_logging(self, messages):
+    def _sanitize_messages_for_logging(
+        self, messages, prev_num_openai_messages: int
+    ):
         r"""Sanitize OpenAI messages for logging by replacing base64 image
         data with a simple message and a link to view the image.

         Args:
             messages (List[OpenAIMessage]): The OpenAI messages to sanitize.
+            prev_num_openai_messages (int): The number of openai messages
+                logged in the previous iteration.

         Returns:
             List[OpenAIMessage]: The sanitized OpenAI messages.
@@ -2056,7 +1923,7 @@
         # Create a copy of messages for logging to avoid modifying the
         # original messages
         sanitized_messages = []
-        for msg in messages:
+        for msg in messages[prev_num_openai_messages:]:
             if isinstance(msg, dict):
                 sanitized_msg = msg.copy()
                 # Check if content is a list (multimodal content with images)
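
Together with prev_num_openai_messages = len(openai_messages) in the step loops, this slice means each iteration logs only the messages added since the previous model call, rather than the whole conversation every time. The pattern in miniature:

    messages: list = []
    prev_logged = 0
    for turn in ["user input", "assistant tool_call", "tool result"]:
        messages.append(turn)
        # Only the tail that is new since the last iteration gets logged.
        print("new this iteration:", messages[prev_logged:])
        prev_logged = len(messages)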
@@ -2339,6 +2206,7 @@
             info=info,
         )

+    @observe()
     def _execute_tool(
         self,
         tool_call_request: ToolCallRequest,
@@ -2373,28 +2241,12 @@
             error_msg = f"Error executing tool '{func_name}': {e!s}"
             result = f"Tool execution failed: {error_msg}"
             mask_flag = False
-            logging.warning(error_msg)
+            logger.warning(f"{error_msg} with result: {result}")

-        # Check if result is a ToolResult with images
-        images_to_attach = None
-        if isinstance(result, ToolResult):
-            images_to_attach = result.images
-            result = str(result)  # Use string representation for storage
-
-        tool_record = self._record_tool_calling(
+        return self._record_tool_calling(
             func_name, args, result, tool_call_id, mask_output=mask_flag
         )

-        # Store images for later attachment to next user message
-        if images_to_attach:
-            tool_record.images = images_to_attach
-            # Add images with duplicate prevention
-            for img in images_to_attach:
-                if img not in self._pending_images:
-                    self._pending_images.append(img)
-
-        return tool_record
-
     async def _aexecute_tool(
         self,
         tool_call_request: ToolCallRequest,
@@ -2434,26 +2286,7 @@
             error_msg = f"Error executing async tool '{func_name}': {e!s}"
             result = f"Tool execution failed: {error_msg}"
             logging.warning(error_msg)
-
-        # Check if result is a ToolResult with images
-        images_to_attach = None
-        if isinstance(result, ToolResult):
-            images_to_attach = result.images
-            result = str(result)  # Use string representation for storage
-
-        tool_record = self._record_tool_calling(
-            func_name, args, result, tool_call_id
-        )
-
-        # Store images for later attachment to next user message
-        if images_to_attach:
-            tool_record.images = images_to_attach
-            # Add images with duplicate prevention
-            for img in images_to_attach:
-                if img not in self._pending_images:
-                    self._pending_images.append(img)
-
-        return tool_record
+        return self._record_tool_calling(func_name, args, result, tool_call_id)

     def _record_tool_calling(
         self,
@@ -2594,6 +2427,9 @@
         while True:
             # Check termination condition
             if self.stop_event and self.stop_event.is_set():
+                logger.info(
+                    f"Termination triggered at iteration " f"{iteration_count}"
+                )
                 yield self._step_terminate(
                     num_tokens, tool_call_records, "termination_triggered"
                 )
@@ -2825,21 +2661,13 @@
                     status_response
                 ) in self._execute_tools_sync_with_status_accumulator(
                     accumulated_tool_calls,
-                    content_accumulator,
-                    step_token_usage,
                     tool_call_records,
                ):
                     yield status_response

-                # Yield "Sending back result to model" status
+                # Log sending status instead of adding to content
                 if tool_call_records:
-                    sending_status = self._create_tool_status_response_with_accumulator(  # noqa: E501
-                        content_accumulator,
-                        "\n------\n\nSending back result to model\n\n",
-                        "tool_sending",
-                        step_token_usage,
-                    )
-                    yield sending_status
+                    logger.info("Sending back result to model")

                 # Record final message only if we have content AND no tool
                 # calls. If there are tool calls, _record_tool_calling
@@ -2937,15 +2765,13 @@
     def _execute_tools_sync_with_status_accumulator(
         self,
         accumulated_tool_calls: Dict[str, Any],
-        content_accumulator: StreamContentAccumulator,
-        step_token_usage: Dict[str, int],
         tool_call_records: List[ToolCallingRecord],
     ) -> Generator[ChatAgentResponse, None, None]:
         r"""Execute multiple tools synchronously with
         proper content accumulation, using threads+queue for
         non-blocking status streaming."""

-        def tool_worker(tool_func, args, result_queue, tool_call_data):
+        def tool_worker(result_queue, tool_call_data):
             try:
                 tool_call_record = self._execute_tool_from_stream_data(
                     tool_call_data
@@ -2981,36 +2807,22 @@
             )
             thread.start()

-            status_message = (
-                f"\nCalling function: {function_name} "
-                f"with arguments:\n{args}\n"
-            )
-            status_status = self._create_tool_status_response_with_accumulator(
-                content_accumulator,
-                status_message,
-                "tool_calling",
-                step_token_usage,
+            # Log debug info instead of adding to content
+            logger.info(
+                f"Calling function: {function_name} with arguments: {args}"
             )
-            yield status_status
+
             # wait for tool thread to finish with optional timeout
             thread.join(self.tool_execution_timeout)

             # If timeout occurred, mark as error and continue
             if thread.is_alive():
-                timeout_msg = (
-                    f"\nFunction '{function_name}' timed out after "
-                    f"{self.tool_execution_timeout} seconds.\n---------\n"
-                )
-                timeout_status = (
-                    self._create_tool_status_response_with_accumulator(
-                        content_accumulator,
-                        timeout_msg,
-                        "tool_timeout",
-                        step_token_usage,
-                    )
+                # Log timeout info instead of adding to content
+                logger.warning(
+                    f"Function '{function_name}' timed out after "
+                    f"{self.tool_execution_timeout} seconds"
                 )
-                yield timeout_status
-                logger.error(timeout_msg.strip())
+
                 # Detach thread (it may still finish later). Skip recording.
                 continue

@@ -3020,23 +2832,17 @@
                 tool_call_records.append(tool_call_record)
                 raw_result = tool_call_record.result
                 result_str = str(raw_result)
-                status_message = (
-                    f"\nFunction output: {result_str}\n---------\n"
-                )
-                output_status = (
-                    self._create_tool_status_response_with_accumulator(
-                        content_accumulator,
-                        status_message,
-                        "tool_output",
-                        step_token_usage,
-                        [tool_call_record],
-                    )
-                )
-                yield output_status
+
+                # Log debug info instead of adding to content
+                logger.info(f"Function output: {result_str}")
             else:
                 # Error already logged
                 continue

+        # Ensure this function remains a generator (required by type signature)
+        return
+        yield  # This line is never reached but makes this a generator function
+
     def _execute_tool_from_stream_data(
         self, tool_call_data: Dict[str, Any]
     ) -> Optional[ToolCallingRecord]:
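
The trailing return/yield pair is the standard idiom for a function that must stay a generator after all of its real yield statements were removed: a yield anywhere in the body, even unreachable, makes Python compile the function as a generator. In isolation:

    from typing import Generator

    def no_op_stream() -> Generator[str, None, None]:
        return  # exits immediately
        yield   # unreachable, but makes this a generator function

    assert list(no_op_stream()) == []  # still iterable; produces nothing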
@@ -3229,11 +3035,20 @@
             return

         # Start async streaming response
+        last_response = None
         async for response in self._astream_response(
             openai_messages, num_tokens, response_format
         ):
+            last_response = response
             yield response

+        # Clean tool call messages from memory after response generation
+        if self.prune_tool_calls_from_memory and last_response:
+            # Extract tool_calls from the last response info
+            tool_calls = last_response.info.get("tool_calls", [])
+            if tool_calls:
+                self.memory.clean_tool_calls()
+
     async def _astream_response(
         self,
         openai_messages: List[OpenAIMessage],
3252
3067
  while True:
3253
3068
  # Check termination condition
3254
3069
  if self.stop_event and self.stop_event.is_set():
3070
+ logger.info(
3071
+ f"Termination triggered at iteration " f"{iteration_count}"
3072
+ )
3255
3073
  yield self._step_terminate(
3256
3074
  num_tokens, tool_call_records, "termination_triggered"
3257
3075
  )
@@ -3540,15 +3358,9 @@ class ChatAgent(BaseAgent):
3540
3358
  ):
3541
3359
  yield status_response
3542
3360
 
3543
- # Yield "Sending back result to model" status
3361
+ # Log sending status instead of adding to content
3544
3362
  if tool_call_records:
3545
- sending_status = self._create_tool_status_response_with_accumulator( # noqa: E501
3546
- content_accumulator,
3547
- "\n------\n\nSending back result to model\n\n",
3548
- "tool_sending",
3549
- step_token_usage,
3550
- )
3551
- yield sending_status
3363
+ logger.info("Sending back result to model")
3552
3364
 
3553
3365
  # Record final message only if we have content AND no tool
3554
3366
  # calls. If there are tool calls, _record_tool_calling
@@ -3595,21 +3407,10 @@ class ChatAgent(BaseAgent):
3595
3407
  except json.JSONDecodeError:
3596
3408
  args = tool_call_data['function']['arguments']
3597
3409
 
3598
- status_message = (
3599
- f"\nCalling function: {function_name} "
3600
- f"with arguments:\n{args}\n"
3601
- )
3602
-
3603
- # Immediately yield "Calling function" status
3604
- calling_status = (
3605
- self._create_tool_status_response_with_accumulator(
3606
- content_accumulator,
3607
- status_message,
3608
- "tool_calling",
3609
- step_token_usage,
3610
- )
3410
+ # Log debug info instead of adding to content
3411
+ logger.info(
3412
+ f"Calling function: {function_name} with arguments: {args}"
3611
3413
  )
3612
- yield calling_status
3613
3414
 
3614
3415
  # Start tool execution asynchronously (non-blocking)
3615
3416
  if self.tool_execution_timeout is not None:
@@ -3642,80 +3443,25 @@ class ChatAgent(BaseAgent):
3642
3443
  # Create output status message
3643
3444
  raw_result = tool_call_record.result
3644
3445
  result_str = str(raw_result)
3645
- status_message = (
3646
- f"\nFunction output: {result_str}\n---------\n"
3647
- )
3648
3446
 
3649
- # Yield "Function output" status as soon as this
3650
- # tool completes
3651
- output_status = (
3652
- self._create_tool_status_response_with_accumulator(
3653
- content_accumulator,
3654
- status_message,
3655
- "tool_output",
3656
- step_token_usage,
3657
- [tool_call_record],
3658
- )
3659
- )
3660
- yield output_status
3447
+ # Log debug info instead of adding to content
3448
+ logger.info(f"Function output: {result_str}")
3661
3449
 
3662
3450
  except Exception as e:
3663
3451
  if isinstance(e, asyncio.TimeoutError):
3664
- timeout_msg = (
3665
- f"\nFunction timed out after "
3666
- f"{self.tool_execution_timeout} seconds.\n"
3667
- f"---------\n"
3668
- )
3669
- timeout_status = (
3670
- self._create_tool_status_response_with_accumulator(
3671
- content_accumulator,
3672
- timeout_msg,
3673
- "tool_timeout",
3674
- step_token_usage,
3675
- )
3452
+ # Log timeout info instead of adding to content
3453
+ logger.warning(
3454
+ f"Function timed out after "
3455
+ f"{self.tool_execution_timeout} seconds"
3676
3456
  )
3677
- yield timeout_status
3678
- logger.error("Async tool execution timeout")
3679
3457
  else:
3680
3458
  logger.error(f"Error in async tool execution: {e}")
3681
3459
  continue
3682
3460
 
3683
- def _create_tool_status_response_with_accumulator(
3684
- self,
3685
- accumulator: StreamContentAccumulator,
3686
- status_message: str,
3687
- status_type: str,
3688
- step_token_usage: Dict[str, int],
3689
- tool_calls: Optional[List[ToolCallingRecord]] = None,
3690
- ) -> ChatAgentResponse:
3691
- r"""Create a tool status response using content accumulator."""
3692
-
3693
- # Add this status message to accumulator and get full content
3694
- accumulator.add_tool_status(status_message)
3695
- full_content = accumulator.get_full_content()
3696
-
3697
- message = BaseMessage(
3698
- role_name=self.role_name,
3699
- role_type=self.role_type,
3700
- meta_dict={},
3701
- content=full_content,
3702
- )
3703
-
3704
- return ChatAgentResponse(
3705
- msgs=[message],
3706
- terminated=False,
3707
- info={
3708
- "id": "",
3709
- "usage": step_token_usage.copy(),
3710
- "finish_reasons": [status_type],
3711
- "num_tokens": self._get_token_count(full_content),
3712
- "tool_calls": tool_calls or [],
3713
- "external_tool_requests": None,
3714
- "streaming": True,
3715
- "tool_status": status_type,
3716
- "partial": True,
3717
- },
3718
- )
3461
+ # Ensure this function remains an async generator
3462
+ return
3463
+ # This line is never reached but makes this an async generator function
3464
+ yield
3719
3465
 
3720
3466
  def _create_streaming_response_with_accumulator(
3721
3467
  self,
@@ -3806,6 +3552,9 @@ class ChatAgent(BaseAgent):
3806
3552
  # To avoid duplicated system memory.
3807
3553
  system_message = None if with_memory else self._original_system_message
3808
3554
 
3555
+ # Clone tools and collect toolkits that need registration
3556
+ cloned_tools, toolkits_to_register = self._clone_tools()
3557
+
3809
3558
  new_agent = ChatAgent(
3810
3559
  system_message=system_message,
3811
3560
  model=self.model_backend.models, # Pass the existing model_backend
@@ -3815,7 +3564,8 @@ class ChatAgent(BaseAgent):
3815
3564
  self.memory.get_context_creator(), "token_limit", None
3816
3565
  ),
3817
3566
  output_language=self._output_language,
3818
- tools=self._clone_tools(),
3567
+ tools=cloned_tools,
3568
+ toolkits_to_register_agent=toolkits_to_register,
3819
3569
  external_tools=[
3820
3570
  schema for schema in self._external_tool_schemas.values()
3821
3571
  ],
@@ -3827,6 +3577,7 @@ class ChatAgent(BaseAgent):
3827
3577
  stop_event=self.stop_event,
3828
3578
  tool_execution_timeout=self.tool_execution_timeout,
3829
3579
  pause_event=self.pause_event,
3580
+ prune_tool_calls_from_memory=self.prune_tool_calls_from_memory,
3830
3581
  )
3831
3582
 
3832
3583
  # Copy memory if requested
@@ -3839,55 +3590,76 @@ class ChatAgent(BaseAgent):
3839
3590
 
3840
3591
  return new_agent
3841
3592
 
3842
- def _clone_tools(self) -> List[Union[FunctionTool, Callable]]:
3843
- r"""Clone tools for new agent instance,
3844
- handling stateful toolkits properly."""
3593
+ def _clone_tools(
3594
+ self,
3595
+ ) -> Tuple[
3596
+ List[Union[FunctionTool, Callable]], List[RegisteredAgentToolkit]
3597
+ ]:
3598
+ r"""Clone tools and return toolkits that need agent registration.
3599
+
3600
+ This method handles stateful toolkits by cloning them if they have
3601
+ a clone_for_new_session method, and collecting RegisteredAgentToolkit
3602
+ instances for later registration.
3603
+
3604
+ Returns:
3605
+ Tuple containing:
3606
+ - List of cloned tools/functions
3607
+ - List of RegisteredAgentToolkit instances need registration
3608
+ """
3845
3609
  cloned_tools = []
3846
- hybrid_browser_toolkits = {} # Cache for created toolkits
3610
+ toolkits_to_register = []
3611
+ cloned_toolkits = {}
3612
+ # Cache for cloned toolkits by original toolkit id
3847
3613
 
3848
3614
  for tool in self._internal_tools.values():
3849
- # Check if this is a HybridBrowserToolkit method
3850
- if (
3851
- hasattr(tool.func, '__self__')
3852
- and tool.func.__self__.__class__.__name__
3853
- == 'HybridBrowserToolkit'
3854
- ):
3615
+ # Check if this tool is a method bound to a toolkit instance
3616
+ if hasattr(tool.func, '__self__'):
3855
3617
  toolkit_instance = tool.func.__self__
3856
3618
  toolkit_id = id(toolkit_instance)
3857
3619
 
3858
- # Check if we already created a clone for this toolkit
3859
- if toolkit_id not in hybrid_browser_toolkits:
3860
- try:
3861
- import uuid
3620
+ if toolkit_id not in cloned_toolkits:
3621
+ # Check if the toolkit has a clone method
3622
+ if hasattr(toolkit_instance, 'clone_for_new_session'):
3623
+ try:
3624
+ import uuid
3862
3625
 
3863
- new_session_id = str(uuid.uuid4())[:8]
3864
- new_toolkit = toolkit_instance.clone_for_new_session(
3865
- new_session_id
3866
- )
3867
- hybrid_browser_toolkits[toolkit_id] = new_toolkit
3868
- except Exception as e:
3869
- logger.warning(
3870
- f"Failed to clone HybridBrowserToolkit: {e}"
3871
- )
3872
- # Fallback to original function
3873
- cloned_tools.append(tool.func)
3874
- continue
3626
+ new_session_id = str(uuid.uuid4())[:8]
3627
+ new_toolkit = (
3628
+ toolkit_instance.clone_for_new_session(
3629
+ new_session_id
3630
+ )
3631
+ )
3632
+
3633
+ # If this is a RegisteredAgentToolkit,
3634
+ # add it to registration list
3635
+ if isinstance(new_toolkit, RegisteredAgentToolkit):
3636
+ toolkits_to_register.append(new_toolkit)
3637
+
3638
+ cloned_toolkits[toolkit_id] = new_toolkit
3639
+ except Exception as e:
3640
+ logger.warning(
3641
+ f"Failed to clone toolkit {toolkit_instance.__class__.__name__}: {e}" # noqa:E501
3642
+ )
3643
+ # Use original toolkit if cloning fails
3644
+ cloned_toolkits[toolkit_id] = toolkit_instance
3645
+ else:
3646
+ # Toolkit doesn't support cloning, use original
3647
+ cloned_toolkits[toolkit_id] = toolkit_instance
3875
3648
 
3876
- # Get the corresponding method from the cloned toolkit
3877
- new_toolkit = hybrid_browser_toolkits[toolkit_id]
3649
+ # Get the method from the cloned (or original) toolkit
3650
+ toolkit = cloned_toolkits[toolkit_id]
3878
3651
  method_name = tool.func.__name__
3879
- if hasattr(new_toolkit, method_name):
3880
- new_method = getattr(new_toolkit, method_name)
3652
+ if hasattr(toolkit, method_name):
3653
+ new_method = getattr(toolkit, method_name)
3881
3654
  cloned_tools.append(new_method)
3882
3655
  else:
3883
3656
  # Fallback to original function
3884
3657
  cloned_tools.append(tool.func)
3885
3658
  else:
3886
- # Regular tool or other stateless toolkit
3887
- # just use the original function
3659
+ # Not a toolkit method, just use the original function
3888
3660
  cloned_tools.append(tool.func)
3889
3661
 
3890
- return cloned_tools
3662
+ return cloned_tools, toolkits_to_register
3891
3663
 
3892
3664
  def __repr__(self) -> str:
3893
3665
  r"""Returns a string representation of the :obj:`ChatAgent`.