camel-ai 0.2.72a10__py3-none-any.whl → 0.2.73a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of camel-ai might be problematic.

Files changed (36)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +113 -338
  3. camel/memories/agent_memories.py +18 -17
  4. camel/societies/workforce/prompts.py +10 -4
  5. camel/societies/workforce/single_agent_worker.py +7 -5
  6. camel/toolkits/__init__.py +4 -1
  7. camel/toolkits/base.py +57 -1
  8. camel/toolkits/hybrid_browser_toolkit/config_loader.py +136 -413
  9. camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +796 -1631
  10. camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +4356 -0
  11. camel/toolkits/hybrid_browser_toolkit/ts/package.json +33 -0
  12. camel/toolkits/hybrid_browser_toolkit/ts/src/browser-scripts.js +125 -0
  13. camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +916 -0
  14. camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +226 -0
  15. camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +522 -0
  16. camel/toolkits/hybrid_browser_toolkit/ts/src/index.ts +7 -0
  17. camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +110 -0
  18. camel/toolkits/hybrid_browser_toolkit/ts/tsconfig.json +26 -0
  19. camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +210 -0
  20. camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +533 -0
  21. camel/toolkits/message_integration.py +592 -0
  22. camel/toolkits/screenshot_toolkit.py +116 -31
  23. camel/toolkits/search_toolkit.py +20 -2
  24. camel/toolkits/terminal_toolkit.py +16 -2
  25. camel/toolkits/video_analysis_toolkit.py +13 -13
  26. camel/toolkits/video_download_toolkit.py +11 -11
  27. {camel_ai-0.2.72a10.dist-info → camel_ai-0.2.73a0.dist-info}/METADATA +10 -4
  28. {camel_ai-0.2.72a10.dist-info → camel_ai-0.2.73a0.dist-info}/RECORD +30 -24
  29. camel/toolkits/hybrid_browser_toolkit/actions.py +0 -417
  30. camel/toolkits/hybrid_browser_toolkit/agent.py +0 -311
  31. camel/toolkits/hybrid_browser_toolkit/browser_session.py +0 -740
  32. camel/toolkits/hybrid_browser_toolkit/snapshot.py +0 -227
  33. camel/toolkits/hybrid_browser_toolkit/stealth_script.js +0 -0
  34. camel/toolkits/hybrid_browser_toolkit/unified_analyzer.js +0 -1002
  35. {camel_ai-0.2.72a10.dist-info → camel_ai-0.2.73a0.dist-info}/WHEEL +0 -0
  36. {camel_ai-0.2.72a10.dist-info → camel_ai-0.2.73a0.dist-info}/licenses/LICENSE +0 -0
camel/__init__.py CHANGED
@@ -14,7 +14,7 @@

  from camel.logger import disable_logging, enable_logging, set_log_level

- __version__ = '0.2.72a10'
+ __version__ = '0.2.73a0'

  __all__ = [
  '__version__',
camel/agents/chat_agent.py CHANGED
@@ -74,7 +74,7 @@ from camel.models import (
  from camel.prompts import TextPrompt
  from camel.responses import ChatAgentResponse
  from camel.storages import JsonStorage
- from camel.toolkits import FunctionTool
+ from camel.toolkits import FunctionTool, RegisteredAgentToolkit
  from camel.types import (
  ChatCompletion,
  ChatCompletionChunk,
@@ -89,7 +89,6 @@ from camel.utils import (
  model_from_json_schema,
  )
  from camel.utils.commons import dependencies_required
- from camel.utils.tool_result import ToolResult

  if TYPE_CHECKING:
  from camel.terminators import ResponseTerminator
@@ -348,6 +347,13 @@ class ChatAgent(BaseAgent):
  tools (Optional[List[Union[FunctionTool, Callable]]], optional): List
  of available :obj:`FunctionTool` or :obj:`Callable`. (default:
  :obj:`None`)
+ toolkits_to_register_agent (Optional[List[RegisteredAgentToolkit]],
+ optional): List of toolkit instances that inherit from
+ :obj:`RegisteredAgentToolkit`. The agent will register itself with
+ these toolkits, allowing them to access the agent instance. Note:
+ This does NOT add the toolkit's tools to the agent. To use tools
+ from these toolkits, pass them explicitly via the `tools`
+ parameter. (default: :obj:`None`)
  external_tools (Optional[List[Union[FunctionTool, Callable,
  Dict[str, Any]]]], optional): List of external tools
  (:obj:`FunctionTool` or :obj:`Callable` or :obj:`Dict[str, Any]`)
@@ -405,6 +411,9 @@
  token_limit: Optional[int] = None,
  output_language: Optional[str] = None,
  tools: Optional[List[Union[FunctionTool, Callable]]] = None,
+ toolkits_to_register_agent: Optional[
+ List[RegisteredAgentToolkit]
+ ] = None,
  external_tools: Optional[
  List[Union[FunctionTool, Callable, Dict[str, Any]]]
  ] = None,
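Note on the two hunks above: the new toolkits_to_register_agent parameter only registers the agent back into each toolkit; it does not expose the toolkit's tools automatically. A minimal usage sketch under that reading of the docstring (OwnerAwareToolkit and its ping tool are illustrative assumptions, not part of this release):

    from camel.agents import ChatAgent
    from camel.toolkits import RegisteredAgentToolkit

    class OwnerAwareToolkit(RegisteredAgentToolkit):
        # Hypothetical toolkit; a real one would typically also derive from
        # BaseToolkit and expose its tools via get_tools().
        def ping(self) -> str:
            r"""Trivial tool so the agent has something to call."""
            return "pong"

    toolkit = OwnerAwareToolkit()
    agent = ChatAgent(
        "You are a helpful assistant.",
        tools=[toolkit.ping],  # tools must still be passed explicitly
        toolkits_to_register_agent=[toolkit],  # ChatAgent calls toolkit.register_agent(self)
    )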
@@ -438,7 +447,7 @@
  token_limit or self.model_backend.token_limit,
  )

- self.memory: AgentMemory = memory or ChatHistoryMemory(
+ self._memory: AgentMemory = memory or ChatHistoryMemory(
  context_creator,
  window_size=message_window_size,
  agent_id=self.agent_id,
@@ -446,7 +455,7 @@

  # So we don't have to pass agent_id when we define memory
  if memory is not None:
- memory.agent_id = self.agent_id
+ self._memory.agent_id = self.agent_id

  # Set up system message and initialize messages
  self._original_system_message = (
@@ -479,6 +488,12 @@
  ]
  }

+ # Register agent with toolkits that have RegisteredAgentToolkit mixin
+ if toolkits_to_register_agent:
+ for toolkit in toolkits_to_register_agent:
+ if isinstance(toolkit, RegisteredAgentToolkit):
+ toolkit.register_agent(self)
+
  self._external_tool_schemas = {
  tool_schema["function"]["name"]: tool_schema
  for tool_schema in [
@@ -494,9 +509,6 @@
  self.tool_execution_timeout = tool_execution_timeout
  self.mask_tool_output = mask_tool_output
  self._secure_result_store: Dict[str, Any] = {}
- self._pending_images: List[str] = []
- self._image_retry_count: Dict[str, int] = {}
- # Store images to attach to next user message
  self.pause_event = pause_event
  self.prune_tool_calls_from_memory = prune_tool_calls_from_memory

@@ -504,8 +516,6 @@
  r"""Resets the :obj:`ChatAgent` to its initial state."""
  self.terminated = False
  self.init_messages()
- self._pending_images = []
- self._image_retry_count = {}
  for terminator in self.response_terminators:
  terminator.reset()

@@ -670,6 +680,25 @@
  )
  self.init_messages()

+ @property
+ def memory(self) -> AgentMemory:
+ r"""Returns the agent memory."""
+ return self._memory
+
+ @memory.setter
+ def memory(self, value: AgentMemory) -> None:
+ r"""Set the agent memory.
+
+ When setting a new memory, the system message is automatically
+ re-added to ensure it's not lost.
+
+ Args:
+ value (AgentMemory): The new agent memory to use.
+ """
+ self._memory = value
+ # Ensure the new memory has the system message
+ self.init_messages()
+
  def _get_full_tool_schemas(self) -> List[Dict[str, Any]]:
  r"""Returns a list of tool schemas of all tools, including internal
  and external tools.
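Because memory is now a property, replacing an agent's memory goes through the setter above, which calls init_messages() so the system message is re-seeded rather than silently lost. A brief sketch (the window size is an arbitrary choice for illustration):

    from camel.agents import ChatAgent
    from camel.memories import ChatHistoryMemory

    agent = ChatAgent("You are a helpful assistant.")

    # Reuse the agent's existing context creator for the replacement memory.
    new_memory = ChatHistoryMemory(
        agent.memory.get_context_creator(),
        window_size=10,
        agent_id=agent.agent_id,
    )

    # Assignment goes through the new setter, which re-adds the system message.
    agent.memory = new_memory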
@@ -1367,16 +1396,6 @@
  role_name="User", content=input_message
  )

- # Attach any pending images from previous tool calls
- image_list = self._process_pending_images()
- if image_list:
- # Create new message with images attached
- input_message = BaseMessage.make_user_message(
- role_name="User",
- content=input_message.content,
- image_list=image_list,
- )
-
  # Add user input to memory
  self.update_memory(input_message, OpenAIBackendRole.USER)

@@ -1571,16 +1590,6 @@
  role_name="User", content=input_message
  )

- # Attach any pending images from previous tool calls
- image_list = self._process_pending_images()
- if image_list:
- # Create new message with images attached
- input_message = BaseMessage.make_user_message(
- role_name="User",
- content=input_message.content,
- image_list=image_list,
- )
-
  self.update_memory(input_message, OpenAIBackendRole.USER)

  tool_call_records: List[ToolCallingRecord] = []
@@ -1634,7 +1643,6 @@

  if tool_call_requests := response.tool_call_requests:
  # Process all tool calls
- new_images_from_tools = []
  for tool_call_request in tool_call_requests:
  if (
  tool_call_request.tool_name
@@ -1654,72 +1662,10 @@
  )
  tool_call_records.append(tool_call_record)

- # Check if this tool call produced images
- if (
- hasattr(tool_call_record, 'images')
- and tool_call_record.images
- ):
- new_images_from_tools.extend(
- tool_call_record.images
- )
-
  # If we found an external tool call, break the loop
  if external_tool_call_requests:
  break

- # If tools produced images
- # send them to the model as a user message
- if new_images_from_tools:
- # Convert base64 images to PIL Images
- image_list = []
- for img_data in new_images_from_tools:
- try:
- import base64
- import io
-
- from PIL import Image
-
- # Extract base64 data from data URL format
- if img_data.startswith("data:image"):
- # Format:
- # "data:image/png;base64,iVBORw0KGgo..."
- base64_data = img_data.split(',', 1)[1]
- else:
- # Raw base64 data
- base64_data = img_data
-
- # Decode and create PIL Image
- image_bytes = base64.b64decode(base64_data)
- pil_image = Image.open(io.BytesIO(image_bytes))
- # Convert to ensure proper
- # Image.Image type for compatibility
- pil_image_tool_result: Image.Image = (
- pil_image.convert('RGB')
- )
- image_list.append(pil_image_tool_result)
-
- except Exception as e:
- logger.warning(
- f"Failed to convert "
- f"base64 image to PIL for immediate use: {e}"
- )
- continue
-
- # If we have valid images
- # create a user message with images
- if image_list:
- # Create a user message with images
- # to provide visual context immediately
- image_message = BaseMessage.make_user_message(
- role_name="User",
- content="[Visual content from tool execution - please analyze and continue]", # noqa: E501
- image_list=image_list,
- )
-
- self.update_memory(
- image_message, OpenAIBackendRole.USER
- )
-
  if (
  self.max_iteration is not None
  and iteration_count >= self.max_iteration
@@ -1810,69 +1756,6 @@
  info=info,
  )

- def _process_pending_images(self) -> List:
- r"""Process pending images with retry logic and return PIL Image list.
-
- Returns:
- List: List of successfully converted PIL Images.
- """
- if not self._pending_images:
- return []
-
- image_list = []
- successfully_processed = []
- failed_images = []
-
- for img_data in self._pending_images:
- # Track retry count
- retry_count = self._image_retry_count.get(img_data, 0)
-
- # Remove images that have failed too many times (max 3 attempts)
- if retry_count >= 3:
- failed_images.append(img_data)
- logger.warning(
- f"Removing image after {retry_count} failed attempts"
- )
- continue
-
- try:
- import base64
- import io
-
- from PIL import Image
-
- # Extract base64 data from data URL format
- if img_data.startswith("data:image"):
- # Format: "data:image/png;base64,iVBORw0KGgo..."
- base64_data = img_data.split(',', 1)[1]
- else:
- # Raw base64 data
- base64_data = img_data
-
- # Decode and create PIL Image
- image_bytes = base64.b64decode(base64_data)
- pil_image = Image.open(io.BytesIO(image_bytes))
- pil_image_converted: Image.Image = pil_image.convert('RGB')
- image_list.append(pil_image_converted)
- successfully_processed.append(img_data)
-
- except Exception as e:
- # Increment retry count for failed conversion
- self._image_retry_count[img_data] = retry_count + 1
- logger.warning(
- f"Failed to convert base64 image to PIL "
- f"(attempt {retry_count + 1}/3): {e}"
- )
- continue
-
- # Clean up processed and failed images
- for img in successfully_processed + failed_images:
- self._pending_images.remove(img)
- # Clean up retry count for processed/removed images
- self._image_retry_count.pop(img, None)
-
- return image_list
-
  def _record_final_output(self, output_messages: List[BaseMessage]) -> None:
  r"""Log final messages or warnings about multiple responses."""
  if len(output_messages) == 1:
@@ -1883,61 +1766,6 @@
  "selected message manually using `record_message()`."
  )

- def _is_vision_error(self, exc: Exception) -> bool:
- r"""Check if the exception is likely related to vision/image is not
- supported by the model."""
- # TODO: more robust vision error detection
- error_msg = str(exc).lower()
- vision_keywords = [
- 'vision',
- 'image',
- 'multimodal',
- 'unsupported',
- 'invalid content type',
- 'image_url',
- 'visual',
- ]
- return any(keyword in error_msg for keyword in vision_keywords)
-
- def _has_images(self, messages: List[OpenAIMessage]) -> bool:
- r"""Check if any message contains images."""
- for msg in messages:
- content = msg.get('content')
- if isinstance(content, list):
- for item in content:
- if (
- isinstance(item, dict)
- and item.get('type') == 'image_url'
- ):
- return True
- return False
-
- def _strip_images_from_messages(
- self, messages: List[OpenAIMessage]
- ) -> List[OpenAIMessage]:
- r"""Remove images from messages, keeping only text content."""
- stripped_messages = []
- for msg in messages:
- content = msg.get('content')
- if isinstance(content, list):
- # Extract only text content from multimodal messages
- text_content = ""
- for item in content:
- if isinstance(item, dict) and item.get('type') == 'text':
- text_content += item.get('text', '')
-
- # Create new message with only text content
- new_msg = msg.copy()
- new_msg['content'] = (
- text_content
- or "[Image content removed - model doesn't support vision]"
- )
- stripped_messages.append(new_msg)
- else:
- # Regular text message, keep as is
- stripped_messages.append(msg)
- return stripped_messages
-
  @observe()
  def _get_model_response(
  self,
@@ -1971,35 +1799,13 @@
  openai_messages, response_format, tool_schemas or None
  )
  except Exception as exc:
- # Try again without images if the error might be vision-related
- if self._is_vision_error(exc) and self._has_images(
- openai_messages
- ):
- logger.warning(
- "Model appears to not support vision."
- "Retrying without images."
- )
- try:
- stripped_messages = self._strip_images_from_messages(
- openai_messages
- )
- response = self.model_backend.run(
- stripped_messages,
- response_format,
- tool_schemas or None,
- )
- except Exception:
- pass # Fall through to original error handling
-
- if not response:
- logger.error(
- f"An error occurred while running model "
- f"iteration {current_iteration}, "
- f"{self.model_backend.model_type}, "
- f"index: {self.model_backend.current_model_index}",
- exc_info=exc,
- )
- error_info = str(exc)
+ logger.error(
+ f"An error occurred while running model "
+ f"{self.model_backend.model_type}, "
+ f"index: {self.model_backend.current_model_index}",
+ exc_info=exc,
+ )
+ error_info = str(exc)

  if not response and self.model_backend.num_models > 1:
  raise ModelProcessingError(
@@ -2060,33 +1866,13 @@
  openai_messages, response_format, tool_schemas or None
  )
  except Exception as exc:
- # Try again without images if the error might be vision-related
- if self._is_vision_error(exc) and self._has_images(
- openai_messages
- ):
- logger.warning(
- "Model appears to not support vision. Retrying without images." # noqa: E501
- )
- try:
- stripped_messages = self._strip_images_from_messages(
- openai_messages
- )
- response = await self.model_backend.arun(
- stripped_messages,
- response_format,
- tool_schemas or None,
- )
- except Exception:
- pass # Fall through to original error handling
-
- if not response:
- logger.error(
- f"An error occurred while running model "
- f"{self.model_backend.model_type}, "
- f"index: {self.model_backend.current_model_index}",
- exc_info=exc,
- )
- error_info = str(exc)
+ logger.error(
+ f"An error occurred while running model "
+ f"{self.model_backend.model_type}, "
+ f"index: {self.model_backend.current_model_index}",
+ exc_info=exc,
+ )
+ error_info = str(exc)

  if not response and self.model_backend.num_models > 1:
  raise ModelProcessingError(
@@ -2457,26 +2243,9 @@ class ChatAgent(BaseAgent):
  mask_flag = False
  logger.warning(f"{error_msg} with result: {result}")

- # Check if result is a ToolResult with images
- images_to_attach = None
- if isinstance(result, ToolResult):
- images_to_attach = result.images
- result = str(result) # Use string representation for storage
-
- tool_record = self._record_tool_calling(
+ return self._record_tool_calling(
  func_name, args, result, tool_call_id, mask_output=mask_flag
  )
- logger.info(f"Tool calling record:\n{tool_record}")
-
- # Store images for later attachment to next user message
- if images_to_attach:
- tool_record.images = images_to_attach
- # Add images with duplicate prevention
- for img in images_to_attach:
- if img not in self._pending_images:
- self._pending_images.append(img)
-
- return tool_record

  async def _aexecute_tool(
  self,
@@ -2517,26 +2286,7 @@
  error_msg = f"Error executing async tool '{func_name}': {e!s}"
  result = f"Tool execution failed: {error_msg}"
  logging.warning(error_msg)
-
- # Check if result is a ToolResult with images
- images_to_attach = None
- if isinstance(result, ToolResult):
- images_to_attach = result.images
- result = str(result) # Use string representation for storage
-
- tool_record = self._record_tool_calling(
- func_name, args, result, tool_call_id
- )
-
- # Store images for later attachment to next user message
- if images_to_attach:
- tool_record.images = images_to_attach
- # Add images with duplicate prevention
- for img in images_to_attach:
- if img not in self._pending_images:
- self._pending_images.append(img)
-
- return tool_record
+ return self._record_tool_calling(func_name, args, result, tool_call_id)

  def _record_tool_calling(
  self,
@@ -3802,6 +3552,9 @@ class ChatAgent(BaseAgent):
  # To avoid duplicated system memory.
  system_message = None if with_memory else self._original_system_message

+ # Clone tools and collect toolkits that need registration
+ cloned_tools, toolkits_to_register = self._clone_tools()
+
  new_agent = ChatAgent(
  system_message=system_message,
  model=self.model_backend.models, # Pass the existing model_backend
@@ -3811,7 +3564,8 @@
  self.memory.get_context_creator(), "token_limit", None
  ),
  output_language=self._output_language,
- tools=self._clone_tools(),
+ tools=cloned_tools,
+ toolkits_to_register_agent=toolkits_to_register,
  external_tools=[
  schema for schema in self._external_tool_schemas.values()
  ],
@@ -3836,55 +3590,76 @@

  return new_agent

- def _clone_tools(self) -> List[Union[FunctionTool, Callable]]:
- r"""Clone tools for new agent instance,
- handling stateful toolkits properly."""
+ def _clone_tools(
+ self,
+ ) -> Tuple[
+ List[Union[FunctionTool, Callable]], List[RegisteredAgentToolkit]
+ ]:
+ r"""Clone tools and return toolkits that need agent registration.
+
+ This method handles stateful toolkits by cloning them if they have
+ a clone_for_new_session method, and collecting RegisteredAgentToolkit
+ instances for later registration.
+
+ Returns:
+ Tuple containing:
+ - List of cloned tools/functions
+ - List of RegisteredAgentToolkit instances need registration
+ """
  cloned_tools = []
- hybrid_browser_toolkits = {} # Cache for created toolkits
+ toolkits_to_register = []
+ cloned_toolkits = {}
+ # Cache for cloned toolkits by original toolkit id

  for tool in self._internal_tools.values():
- # Check if this is a HybridBrowserToolkit method
- if (
- hasattr(tool.func, '__self__')
- and tool.func.__self__.__class__.__name__
- == 'HybridBrowserToolkit'
- ):
+ # Check if this tool is a method bound to a toolkit instance
+ if hasattr(tool.func, '__self__'):
  toolkit_instance = tool.func.__self__
  toolkit_id = id(toolkit_instance)

- # Check if we already created a clone for this toolkit
- if toolkit_id not in hybrid_browser_toolkits:
- try:
- import uuid
+ if toolkit_id not in cloned_toolkits:
+ # Check if the toolkit has a clone method
+ if hasattr(toolkit_instance, 'clone_for_new_session'):
+ try:
+ import uuid

- new_session_id = str(uuid.uuid4())[:8]
- new_toolkit = toolkit_instance.clone_for_new_session(
- new_session_id
- )
- hybrid_browser_toolkits[toolkit_id] = new_toolkit
- except Exception as e:
- logger.warning(
- f"Failed to clone HybridBrowserToolkit: {e}"
- )
- # Fallback to original function
- cloned_tools.append(tool.func)
- continue
+ new_session_id = str(uuid.uuid4())[:8]
+ new_toolkit = (
+ toolkit_instance.clone_for_new_session(
+ new_session_id
+ )
+ )
+
+ # If this is a RegisteredAgentToolkit,
+ # add it to registration list
+ if isinstance(new_toolkit, RegisteredAgentToolkit):
+ toolkits_to_register.append(new_toolkit)
+
+ cloned_toolkits[toolkit_id] = new_toolkit
+ except Exception as e:
+ logger.warning(
+ f"Failed to clone toolkit {toolkit_instance.__class__.__name__}: {e}" # noqa:E501
+ )
+ # Use original toolkit if cloning fails
+ cloned_toolkits[toolkit_id] = toolkit_instance
+ else:
+ # Toolkit doesn't support cloning, use original
+ cloned_toolkits[toolkit_id] = toolkit_instance

- # Get the corresponding method from the cloned toolkit
- new_toolkit = hybrid_browser_toolkits[toolkit_id]
+ # Get the method from the cloned (or original) toolkit
+ toolkit = cloned_toolkits[toolkit_id]
  method_name = tool.func.__name__
- if hasattr(new_toolkit, method_name):
- new_method = getattr(new_toolkit, method_name)
+ if hasattr(toolkit, method_name):
+ new_method = getattr(toolkit, method_name)
  cloned_tools.append(new_method)
  else:
  # Fallback to original function
  cloned_tools.append(tool.func)
  else:
- # Regular tool or other stateless toolkit
- # just use the original function
+ # Not a toolkit method, just use the original function
  cloned_tools.append(tool.func)

- return cloned_tools
+ return cloned_tools, toolkits_to_register

  def __repr__(self) -> str:
  r"""Returns a string representation of the :obj:`ChatAgent`.
camel/memories/agent_memories.py CHANGED
@@ -101,30 +101,31 @@ class ChatHistoryMemory(AgentMemory):
  if not record_dicts:
  return

- # Filter out tool-related messages
- cleaned_records = []
- for record in record_dicts:
+ # Track indices to remove (reverse order for efficient deletion)
+ indices_to_remove = []
+
+ # Identify indices of tool-related messages
+ for i, record in enumerate(record_dicts):
  role = record.get('role_at_backend')

- # Skip FUNCTION messages
+ # Mark FUNCTION messages for removal
  if role == OpenAIBackendRole.FUNCTION.value:
- continue
-
- # Skip TOOL messages
- if role == OpenAIBackendRole.TOOL.value:
- continue
-
- # Skip ASSISTANT messages with tool_calls
- if role == OpenAIBackendRole.ASSISTANT.value:
+ indices_to_remove.append(i)
+ # Mark TOOL messages for removal
+ elif role == OpenAIBackendRole.TOOL.value:
+ indices_to_remove.append(i)
+ # Mark ASSISTANT messages with tool_calls for removal
+ elif role == OpenAIBackendRole.ASSISTANT.value:
  meta_dict = record.get('meta_dict', {})
  if meta_dict and 'tool_calls' in meta_dict:
- continue
+ indices_to_remove.append(i)

- # Keep all other messages
- cleaned_records.append(record)
+ # Remove records in-place
+ for i in reversed(indices_to_remove):
+ del record_dicts[i]

- # Save the cleaned records back to storage
- self._chat_history_block.storage.save(cleaned_records)
+ # Save the modified records back to storage
+ self._chat_history_block.storage.save(record_dicts)


  class VectorDBMemory(AgentMemory):
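The rewritten clean-up above removes tool-related records from record_dicts in place by collecting their indices and deleting them in reverse, which keeps the yet-to-be-deleted indices valid. A standalone illustration of the pattern (not camel-ai code):

    records = ["user", "assistant+tool_calls", "tool", "assistant", "user"]
    indices_to_remove = [1, 2]  # tool-related entries

    # Forward deletion would shift later indices; reverse order avoids that.
    for i in reversed(indices_to_remove):
        del records[i]

    assert records == ["user", "assistant", "user"]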