camel-ai 0.2.75a5__py3-none-any.whl → 0.2.76a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +298 -130
- camel/configs/__init__.py +6 -0
- camel/configs/amd_config.py +70 -0
- camel/configs/nebius_config.py +103 -0
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/models/__init__.py +4 -0
- camel/models/amd_model.py +101 -0
- camel/models/model_factory.py +4 -0
- camel/models/nebius_model.py +83 -0
- camel/models/ollama_model.py +3 -3
- camel/models/openai_model.py +0 -6
- camel/runtimes/daytona_runtime.py +11 -12
- camel/societies/workforce/task_channel.py +120 -27
- camel/societies/workforce/workforce.py +35 -3
- camel/toolkits/__init__.py +5 -3
- camel/toolkits/code_execution.py +28 -1
- camel/toolkits/function_tool.py +6 -1
- camel/toolkits/github_toolkit.py +104 -17
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +8 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +12 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +33 -14
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +135 -40
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +2 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +43 -207
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +231 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +39 -6
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +248 -58
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +5 -1
- camel/toolkits/{openai_image_toolkit.py → image_generation_toolkit.py} +98 -31
- camel/toolkits/math_toolkit.py +64 -10
- camel/toolkits/mcp_toolkit.py +39 -14
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/search_toolkit.py +13 -2
- camel/toolkits/terminal_toolkit.py +12 -2
- camel/toolkits/video_analysis_toolkit.py +16 -10
- camel/types/enums.py +42 -0
- camel/types/unified_model_type.py +5 -0
- camel/utils/commons.py +2 -0
- camel/utils/mcp.py +136 -2
- {camel_ai-0.2.75a5.dist-info → camel_ai-0.2.76a0.dist-info}/METADATA +5 -11
- {camel_ai-0.2.75a5.dist-info → camel_ai-0.2.76a0.dist-info}/RECORD +47 -38
- {camel_ai-0.2.75a5.dist-info → camel_ai-0.2.76a0.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.75a5.dist-info → camel_ai-0.2.76a0.dist-info}/licenses/LICENSE +0 -0
camel/__init__.py
CHANGED
camel/agents/chat_agent.py
CHANGED
@@ -14,9 +14,11 @@
 from __future__ import annotations
 
 import asyncio
+import concurrent.futures
 import json
 import logging
 import queue
+import random
 import textwrap
 import threading
 import time
@@ -40,6 +42,7 @@ from typing import (
 
 from openai import (
     AsyncStream,
+    RateLimitError,
     Stream,
 )
 from pydantic import BaseModel, ValidationError
@@ -230,7 +233,7 @@ class StreamingChatAgentResponse:
         r"""Make this object iterable."""
         if self._consumed:
             # If already consumed, iterate over stored responses
-
+            yield from self._responses
         else:
             # If not consumed, consume and yield
             try:
@@ -386,6 +389,13 @@ class ChatAgent(BaseAgent):
             usage. When enabled, removes FUNCTION/TOOL role messages and
             ASSISTANT messages with tool_calls after each step.
             (default: :obj:`False`)
+        retry_attempts (int, optional): Maximum number of retry attempts for
+            rate limit errors. (default: :obj:`3`)
+        retry_delay (float, optional): Initial delay in seconds between
+            retries. Uses exponential backoff. (default: :obj:`1.0`)
+        step_timeout (Optional[float], optional): Timeout in seconds for the
+            entire step operation. If None, no timeout is applied.
+            (default: :obj:`None`)
     """
 
     def __init__(
@@ -426,6 +436,9 @@ class ChatAgent(BaseAgent):
         mask_tool_output: bool = False,
         pause_event: Optional[asyncio.Event] = None,
         prune_tool_calls_from_memory: bool = False,
+        retry_attempts: int = 3,
+        retry_delay: float = 1.0,
+        step_timeout: Optional[float] = None,
     ) -> None:
         if isinstance(model, ModelManager):
             self.model_backend = model
@@ -511,6 +524,9 @@ class ChatAgent(BaseAgent):
         self._secure_result_store: Dict[str, Any] = {}
         self.pause_event = pause_event
         self.prune_tool_calls_from_memory = prune_tool_calls_from_memory
+        self.retry_attempts = max(1, retry_attempts)
+        self.retry_delay = max(0.0, retry_delay)
+        self.step_timeout = step_timeout
 
     def reset(self):
         r"""Resets the :obj:`ChatAgent` to its initial state."""
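The three new constructor arguments surface in the public API here. A minimal usage sketch, assuming the existing `ChatAgent(system_message, ...)` call pattern and a default model backend available in the environment (the prompt text is illustrative, not from the diff):

```python
from camel.agents import ChatAgent

agent = ChatAgent(
    "You are a helpful assistant.",
    retry_attempts=5,    # retry rate-limited model calls up to 5 times
    retry_delay=2.0,     # initial backoff delay in seconds (doubled per attempt)
    step_timeout=120.0,  # raise TimeoutError if one step() call exceeds 120 s
)
response = agent.step("Summarize the 0.2.76a0 changes in one sentence.")
print(response.msgs[0].content)
```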
@@ -1365,6 +1381,9 @@ class ChatAgent(BaseAgent):
             a StreamingChatAgentResponse that behaves like
             ChatAgentResponse but can also be iterated for
             streaming updates.
+
+        Raises:
+            TimeoutError: If the step operation exceeds the configured timeout.
         """
 
         stream = self.model_backend.model_config_dict.get("stream", False)
@@ -1374,6 +1393,30 @@ class ChatAgent(BaseAgent):
             generator = self._stream(input_message, response_format)
             return StreamingChatAgentResponse(generator)
 
+        # Execute with timeout if configured
+        if self.step_timeout is not None:
+            with concurrent.futures.ThreadPoolExecutor(
+                max_workers=1
+            ) as executor:
+                future = executor.submit(
+                    self._step_impl, input_message, response_format
+                )
+                try:
+                    return future.result(timeout=self.step_timeout)
+                except concurrent.futures.TimeoutError:
+                    future.cancel()
+                    raise TimeoutError(
+                        f"Step timed out after {self.step_timeout}s"
+                    )
+        else:
+            return self._step_impl(input_message, response_format)
+
+    def _step_impl(
+        self,
+        input_message: Union[BaseMessage, str],
+        response_format: Optional[Type[BaseModel]] = None,
+    ) -> ChatAgentResponse:
+        r"""Implementation of non-streaming step logic."""
         # Set Langfuse session_id using agent_id for trace grouping
         try:
             from camel.utils.langfuse import set_current_agent_session_id
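The synchronous path enforces `step_timeout` by running the real step in a single-worker thread pool and abandoning it after the deadline. A self-contained sketch of the same pattern, with `slow_call` standing in for the blocking model call (not CAMEL code):

```python
import concurrent.futures
import time


def slow_call() -> str:
    # stand-in for the blocking model call
    time.sleep(5)
    return "done"


def run_with_timeout(timeout: float) -> str:
    with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(slow_call)
        try:
            return future.result(timeout=timeout)
        except concurrent.futures.TimeoutError:
            future.cancel()  # only prevents a not-yet-started task from running
            raise TimeoutError(f"Step timed out after {timeout}s")
```

As in the diff, `future.cancel()` cannot interrupt a thread that is already executing; the timeout bounds the caller's wait rather than the underlying work.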
@@ -1544,6 +1587,10 @@ class ChatAgent(BaseAgent):
             True, returns an AsyncStreamingChatAgentResponse that can be
             awaited for the final result or async iterated for streaming
             updates.
+
+        Raises:
+            asyncio.TimeoutError: If the step operation exceeds the configured
+                timeout.
         """
 
         try:
@@ -1559,9 +1606,22 @@ class ChatAgent(BaseAgent):
             async_generator = self._astream(input_message, response_format)
             return AsyncStreamingChatAgentResponse(async_generator)
         else:
-
-
-
+            if self.step_timeout is not None:
+                try:
+                    return await asyncio.wait_for(
+                        self._astep_non_streaming_task(
+                            input_message, response_format
+                        ),
+                        timeout=self.step_timeout,
+                    )
+                except asyncio.TimeoutError:
+                    raise asyncio.TimeoutError(
+                        f"Async step timed out after {self.step_timeout}s"
+                    )
+            else:
+                return await self._astep_non_streaming_task(
+                    input_message, response_format
+                )
 
     async def _astep_non_streaming_task(
         self,
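The async path relies on `asyncio.wait_for`, which cancels the pending task on timeout instead of leaving it running. A self-contained sketch with an illustrative coroutine:

```python
import asyncio


async def slow_call() -> str:
    # stand-in for the awaited model call
    await asyncio.sleep(5)
    return "done"


async def run_with_timeout(timeout: float) -> str:
    try:
        return await asyncio.wait_for(slow_call(), timeout=timeout)
    except asyncio.TimeoutError:
        # unlike the thread-pool variant, the pending task is cancelled here
        raise asyncio.TimeoutError(f"Async step timed out after {timeout}s")


# asyncio.run(run_with_timeout(1.0))  # raises asyncio.TimeoutError after ~1 s
```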
@@ -1776,64 +1836,61 @@ class ChatAgent(BaseAgent):
         tool_schemas: Optional[List[Dict[str, Any]]] = None,
         prev_num_openai_messages: int = 0,
     ) -> ModelResponse:
-        r"""Internal function for agent step model response.
-
-            openai_messages (List[OpenAIMessage]): The OpenAI
-                messages to process.
-            num_tokens (int): The number of tokens in the context.
-            current_iteration (int): The current iteration of the step.
-            response_format (Optional[Type[BaseModel]]): The response
-                format to use.
-            tool_schemas (Optional[List[Dict[str, Any]]]): The tool
-                schemas to use.
-            prev_num_openai_messages (int): The number of openai messages
-                logged in the previous iteration.
-
-        Returns:
-            ModelResponse: The model response.
-        """
+        r"""Internal function for agent step model response."""
+        last_error = None
 
- [... 20 removed lines whose content is not rendered in the source diff view ...]
+        for attempt in range(self.retry_attempts):
+            try:
+                response = self.model_backend.run(
+                    openai_messages, response_format, tool_schemas or None
+                )
+                if response:
+                    break
+            except RateLimitError as e:
+                last_error = e
+                if attempt < self.retry_attempts - 1:
+                    delay = min(self.retry_delay * (2**attempt), 60.0)
+                    delay = random.uniform(0, delay)  # Add jitter
+                    logger.warning(
+                        f"Rate limit hit (attempt {attempt + 1}"
+                        f"/{self.retry_attempts}). Retrying in {delay:.1f}s"
+                    )
+                    time.sleep(delay)
+                else:
+                    logger.error(
+                        f"Rate limit exhausted after "
+                        f"{self.retry_attempts} attempts"
+                    )
+            except Exception:
+                logger.error(
+                    f"Model error: {self.model_backend.model_type}",
+                    exc_info=True,
+                )
+                raise
+        else:
+            # Loop completed without success
             raise ModelProcessingError(
-                f"Unable to process messages:
-                f"
+                f"Unable to process messages: "
+                f"{str(last_error) if last_error else 'Unknown error'}"
             )
 
-
+        # Log success
+        sanitized = self._sanitize_messages_for_logging(
             openai_messages, prev_num_openai_messages
         )
         logger.info(
-            f"Model {self.model_backend.model_type}
-            f"
-            f"iteration {current_iteration}, "
-            f"processed these messages: {sanitized_messages}"
+            f"Model {self.model_backend.model_type} "
+            f"[{current_iteration}]: {sanitized}"
         )
+
         if not isinstance(response, ChatCompletion):
             raise TypeError(
-                f"Expected
-                f"got {type(response).__name__} instead."
+                f"Expected ChatCompletion, got {type(response).__name__}"
            )
+
         return self._handle_batch_response(response)
 
+    @observe()
     async def _aget_model_response(
         self,
         openai_messages: List[OpenAIMessage],
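Both retry loops use capped exponential backoff with full jitter: the cap doubles per attempt up to 60 s and the actual sleep is drawn uniformly below it. A small sketch of just that delay schedule, reusing the formula from the diff:

```python
import random


def backoff_delays(retry_delay: float = 1.0, retry_attempts: int = 3) -> list[float]:
    """Return the jittered sleep durations the retry loop would use."""
    delays = []
    for attempt in range(retry_attempts - 1):  # no sleep after the final attempt
        cap = min(retry_delay * (2 ** attempt), 60.0)  # exponential, capped at 60 s
        delays.append(random.uniform(0, cap))          # full jitter below the cap
    return delays


print(backoff_delays(1.0, 5))  # e.g. [0.7, 1.3, 2.9, 6.4]; values vary per run
```

With `retry_delay=1.0` the caps are 1 s, 2 s, 4 s, and so on, so agents that hit the same rate limit tend to desynchronise instead of retrying in lockstep.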
@@ -1843,62 +1900,59 @@ class ChatAgent(BaseAgent):
         tool_schemas: Optional[List[Dict[str, Any]]] = None,
         prev_num_openai_messages: int = 0,
     ) -> ModelResponse:
-        r"""Internal function for agent async step model response.
-
-            openai_messages (List[OpenAIMessage]): The OpenAI messages
-                to process.
-            num_tokens (int): The number of tokens in the context.
-            current_iteration (int): The current iteration of the step.
-            response_format (Optional[Type[BaseModel]]): The response
-                format to use.
-            tool_schemas (Optional[List[Dict[str, Any]]]): The tool schemas
-                to use.
-            prev_num_openai_messages (int): The number of openai messages
-                logged in the previous iteration.
+        r"""Internal function for agent async step model response."""
+        last_error = None
 
- [... 24 removed lines whose content is not rendered in the source diff view ...]
+        for attempt in range(self.retry_attempts):
+            try:
+                response = await self.model_backend.arun(
+                    openai_messages, response_format, tool_schemas or None
+                )
+                if response:
+                    break
+            except RateLimitError as e:
+                last_error = e
+                if attempt < self.retry_attempts - 1:
+                    delay = min(self.retry_delay * (2**attempt), 60.0)
+                    delay = random.uniform(0, delay)  # Add jitter
+                    logger.warning(
+                        f"Rate limit hit (attempt {attempt + 1}"
+                        f"/{self.retry_attempts}). "
+                        f"Retrying in {delay:.1f}s"
+                    )
+                    await asyncio.sleep(delay)
+                else:
+                    logger.error(
+                        f"Rate limit exhausted after "
+                        f"{self.retry_attempts} attempts"
+                    )
+            except Exception:
+                logger.error(
+                    f"Model error: {self.model_backend.model_type}",
+                    exc_info=True,
+                )
+                raise
+        else:
+            # Loop completed without success
             raise ModelProcessingError(
-                f"Unable to process messages:
-                f"
+                f"Unable to process messages: "
+                f"{str(last_error) if last_error else 'Unknown error'}"
            )
 
-
+        # Log success
+        sanitized = self._sanitize_messages_for_logging(
             openai_messages, prev_num_openai_messages
         )
         logger.info(
-            f"Model {self.model_backend.model_type}
-            f"
-            f"iteration {current_iteration}, "
-            f"processed these messages: {sanitized_messages}"
+            f"Model {self.model_backend.model_type} "
+            f"[{current_iteration}]: {sanitized}"
         )
+
         if not isinstance(response, ChatCompletion):
             raise TypeError(
-                f"Expected
-                f"got {type(response).__name__} instead."
+                f"Expected ChatCompletion, got {type(response).__name__}"
            )
+
         return self._handle_batch_response(response)
 
     def _sanitize_messages_for_logging(
@@ -2611,12 +2665,6 @@ class ChatAgent(BaseAgent):
         stream_completed = False
 
         for chunk in stream:
-            # Update token usage if available
-            if chunk.usage:
-                self._update_token_usage_tracker(
-                    step_token_usage, safe_model_dump(chunk.usage)
-                )
-
             # Process chunk delta
             if chunk.choices and len(chunk.choices) > 0:
                 choice = chunk.choices[0]
@@ -2649,12 +2697,6 @@ class ChatAgent(BaseAgent):
                 # If we have complete tool calls, execute them with
                 # sync status updates
                 if accumulated_tool_calls:
-                    # Record assistant message with tool calls first
-                    self._record_assistant_tool_calls_message(
-                        accumulated_tool_calls,
-                        content_accumulator.get_full_content(),
-                    )
-
                     # Execute tools synchronously with
                     # optimized status updates
                     for (
@@ -2687,7 +2729,49 @@ class ChatAgent(BaseAgent):
                         )
 
                         self.record_message(final_message)
-
+            elif chunk.usage and not chunk.choices:
+                # Handle final chunk with usage but empty choices
+                # This happens when stream_options={"include_usage": True}
+                # Update the final usage from this chunk
+                self._update_token_usage_tracker(
+                    step_token_usage, safe_model_dump(chunk.usage)
+                )
+
+                # Create final response with final usage
+                final_content = content_accumulator.get_full_content()
+                if final_content.strip():
+                    final_message = BaseMessage(
+                        role_name=self.role_name,
+                        role_type=self.role_type,
+                        meta_dict={},
+                        content=final_content,
+                    )
+
+                    if response_format:
+                        self._try_format_message(
+                            final_message, response_format
+                        )
+
+                    # Create final response with final usage (not partial)
+                    final_response = ChatAgentResponse(
+                        msgs=[final_message],
+                        terminated=False,
+                        info={
+                            "id": getattr(chunk, 'id', ''),
+                            "usage": step_token_usage.copy(),
+                            "finish_reasons": ["stop"],
+                            "num_tokens": self._get_token_count(final_content),
+                            "tool_calls": tool_call_records or [],
+                            "external_tool_requests": None,
+                            "streaming": False,
+                            "partial": False,
+                        },
+                    )
+                    yield final_response
+                break
+            elif stream_completed:
+                # If we've already seen finish_reason but no usage chunk, exit
+                break
 
         return stream_completed, tool_calls_complete
 
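This new branch consumes the usage-only final chunk that OpenAI-compatible streaming APIs emit when usage reporting is requested. A minimal sketch of producing such a chunk directly with the `openai` client (model name and prompt are placeholders):

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

stream = client.chat.completions.create(
    model="gpt-4o-mini",                     # placeholder model name
    messages=[{"role": "user", "content": "Say hi"}],
    stream=True,
    stream_options={"include_usage": True},  # request a final usage-only chunk
)

for chunk in stream:
    if chunk.choices:
        print(chunk.choices[0].delta.content or "", end="")
    elif chunk.usage:
        # final chunk: empty choices, populated usage — the case the new branch handles
        print("\ntokens used:", chunk.usage.total_tokens)
```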
@@ -2852,10 +2936,19 @@ class ChatAgent(BaseAgent):
         tool = self._internal_tools[function_name]
         try:
             result = tool(**args)
+            # First, create and record the assistant message with tool
+            # call
+            assist_msg = FunctionCallingMessage(
+                role_name=self.role_name,
+                role_type=self.role_type,
+                meta_dict=None,
+                content="",
+                func_name=function_name,
+                args=args,
+                tool_call_id=tool_call_id,
+            )
 
-            #
-            # message assistant message with tool_calls was already
-            # recorded in _record_assistant_tool_calls_message
+            # Then create the tool response message
             func_msg = FunctionCallingMessage(
                 role_name=self.role_name,
                 role_type=self.role_type,
@@ -2866,7 +2959,25 @@ class ChatAgent(BaseAgent):
                 tool_call_id=tool_call_id,
             )
 
-
+            # Record both messages with precise timestamps to ensure
+            # correct ordering
+            import time
+
+            current_time_ns = time.time_ns()
+            base_timestamp = (
+                current_time_ns / 1_000_000_000
+            )  # Convert to seconds
+
+            self.update_memory(
+                assist_msg,
+                OpenAIBackendRole.ASSISTANT,
+                timestamp=base_timestamp,
+            )
+            self.update_memory(
+                func_msg,
+                OpenAIBackendRole.FUNCTION,
+                timestamp=base_timestamp + 1e-6,
+            )
 
             return ToolCallingRecord(
                 tool_name=function_name,
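Both tool-recording sites now write the assistant tool-call message and the tool result with explicit timestamps, offsetting the result by one microsecond so memory ordering is deterministic. A small standalone sketch of that scheme (the `record` list stands in for agent memory):

```python
import time

record = []  # stand-in for memory entries: (timestamp, role, payload)

base_timestamp = time.time_ns() / 1_000_000_000  # seconds with ns resolution

record.append((base_timestamp, "assistant", {"tool_call": "search", "args": {"q": "camel"}}))
record.append((base_timestamp + 1e-6, "function", {"result": "..."}))

# Sorting by timestamp keeps the tool call strictly before its result,
# even when both are written within the same wall-clock instant.
assert [entry[1] for entry in sorted(record, key=lambda e: e[0])] == ["assistant", "function"]
```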
@@ -2950,10 +3061,19 @@ class ChatAgent(BaseAgent):
         else:
             # Fallback: synchronous call
             result = tool(**args)
+            # First, create and record the assistant message with tool
+            # call
+            assist_msg = FunctionCallingMessage(
+                role_name=self.role_name,
+                role_type=self.role_type,
+                meta_dict=None,
+                content="",
+                func_name=function_name,
+                args=args,
+                tool_call_id=tool_call_id,
+            )
 
-            #
-            # message assistant message with tool_calls was already
-            # recorded in _record_assistant_tool_calls_message
+            # Then create the tool response message
             func_msg = FunctionCallingMessage(
                 role_name=self.role_name,
                 role_type=self.role_type,
@@ -2964,7 +3084,25 @@ class ChatAgent(BaseAgent):
                 tool_call_id=tool_call_id,
             )
 
-
+            # Record both messages with precise timestamps to ensure
+            # correct ordering
+            import time
+
+            current_time_ns = time.time_ns()
+            base_timestamp = (
+                current_time_ns / 1_000_000_000
+            )  # Convert to seconds
+
+            self.update_memory(
+                assist_msg,
+                OpenAIBackendRole.ASSISTANT,
+                timestamp=base_timestamp,
+            )
+            self.update_memory(
+                func_msg,
+                OpenAIBackendRole.FUNCTION,
+                timestamp=base_timestamp + 1e-6,
+            )
 
             return ToolCallingRecord(
                 tool_name=function_name,
@@ -3315,18 +3453,13 @@ class ChatAgent(BaseAgent):
         response_format: Optional[Type[BaseModel]] = None,
     ) -> AsyncGenerator[Union[ChatAgentResponse, Tuple[bool, bool]], None]:
         r"""Async version of process streaming chunks with
-        content accumulator.
+        content accumulator.
+        """
 
         tool_calls_complete = False
         stream_completed = False
 
         async for chunk in stream:
-            # Update token usage if available
-            if chunk.usage:
-                self._update_token_usage_tracker(
-                    step_token_usage, safe_model_dump(chunk.usage)
-                )
-
             # Process chunk delta
             if chunk.choices and len(chunk.choices) > 0:
                 choice = chunk.choices[0]
@@ -3359,13 +3492,6 @@ class ChatAgent(BaseAgent):
                 # If we have complete tool calls, execute them with
                 # async status updates
                 if accumulated_tool_calls:
-                    # Record assistant message with
-                    # tool calls first
-                    self._record_assistant_tool_calls_message(
-                        accumulated_tool_calls,
-                        content_accumulator.get_full_content(),
-                    )
-
                     # Execute tools asynchronously with real-time
                     # status updates
                     async for (
@@ -3400,7 +3526,49 @@ class ChatAgent(BaseAgent):
                         )
 
                         self.record_message(final_message)
-
+            elif chunk.usage and not chunk.choices:
+                # Handle final chunk with usage but empty choices
+                # This happens when stream_options={"include_usage": True}
+                # Update the final usage from this chunk
+                self._update_token_usage_tracker(
+                    step_token_usage, safe_model_dump(chunk.usage)
+                )
+
+                # Create final response with final usage
+                final_content = content_accumulator.get_full_content()
+                if final_content.strip():
+                    final_message = BaseMessage(
+                        role_name=self.role_name,
+                        role_type=self.role_type,
+                        meta_dict={},
+                        content=final_content,
+                    )
+
+                    if response_format:
+                        self._try_format_message(
+                            final_message, response_format
+                        )
+
+                    # Create final response with final usage (not partial)
+                    final_response = ChatAgentResponse(
+                        msgs=[final_message],
+                        terminated=False,
+                        info={
+                            "id": getattr(chunk, 'id', ''),
+                            "usage": step_token_usage.copy(),
+                            "finish_reasons": ["stop"],
+                            "num_tokens": self._get_token_count(final_content),
+                            "tool_calls": tool_call_records or [],
+                            "external_tool_requests": None,
+                            "streaming": False,
+                            "partial": False,
+                        },
+                    )
+                    yield final_response
+                break
+            elif stream_completed:
+                # If we've already seen finish_reason but no usage chunk, exit
+                break
 
         # Yield the final status as a tuple
         yield (stream_completed, tool_calls_complete)
camel/configs/__init__.py
CHANGED
@@ -12,6 +12,7 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from .aiml_config import AIML_API_PARAMS, AIMLConfig
+from .amd_config import AMD_API_PARAMS, AMDConfig
 from .anthropic_config import ANTHROPIC_API_PARAMS, AnthropicConfig
 from .base_config import BaseConfig
 from .bedrock_config import BEDROCK_API_PARAMS, BedrockConfig
@@ -26,6 +27,7 @@ from .lmstudio_config import LMSTUDIO_API_PARAMS, LMStudioConfig
 from .mistral_config import MISTRAL_API_PARAMS, MistralConfig
 from .modelscope_config import MODELSCOPE_API_PARAMS, ModelScopeConfig
 from .moonshot_config import MOONSHOT_API_PARAMS, MoonshotConfig
+from .nebius_config import NEBIUS_API_PARAMS, NebiusConfig
 from .netmind_config import NETMIND_API_PARAMS, NetmindConfig
 from .novita_config import NOVITA_API_PARAMS, NovitaConfig
 from .nvidia_config import NVIDIA_API_PARAMS, NvidiaConfig
@@ -58,6 +60,8 @@ __all__ = [
     'ANTHROPIC_API_PARAMS',
     'GROQ_API_PARAMS',
     'GroqConfig',
+    'NEBIUS_API_PARAMS',
+    'NebiusConfig',
     'LiteLLMConfig',
     'LITELLM_API_PARAMS',
     'NetmindConfig',
@@ -108,6 +112,8 @@ __all__ = [
     'SILICONFLOW_API_PARAMS',
     'AIMLConfig',
     'AIML_API_PARAMS',
+    'AMDConfig',
+    'AMD_API_PARAMS',
     'OpenRouterConfig',
     'OPENROUTER_API_PARAMS',
     'LMSTUDIO_API_PARAMS',
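With these re-exports the new AMD and Nebius configs are importable from the package root. A minimal sketch, assuming the new classes follow the existing `BaseConfig` pattern; default construction and `as_dict()` are assumptions, since their fields are not shown in this diff:

```python
from camel.configs import (
    AMD_API_PARAMS,
    AMDConfig,
    NEBIUS_API_PARAMS,
    NebiusConfig,
)

# Assumption: like other CAMEL configs, these construct with defaults and
# expose as_dict() for the keyword arguments passed to the backend client.
amd_cfg = AMDConfig()
nebius_cfg = NebiusConfig()
print(sorted(AMD_API_PARAMS), sorted(NEBIUS_API_PARAMS))
print(amd_cfg.as_dict(), nebius_cfg.as_dict())
```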