camel-ai 0.2.75a3__py3-none-any.whl → 0.2.75a6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Note: this version of camel-ai has been flagged as potentially problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +147 -93
- camel/configs/__init__.py +3 -0
- camel/configs/nebius_config.py +103 -0
- camel/models/__init__.py +2 -0
- camel/models/model_factory.py +2 -0
- camel/models/nebius_model.py +83 -0
- camel/models/ollama_model.py +3 -3
- camel/societies/workforce/task_channel.py +120 -27
- camel/societies/workforce/workforce.py +35 -3
- camel/toolkits/github_toolkit.py +104 -17
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +35 -5
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +124 -29
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +1 -1
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +103 -40
- camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +3 -2
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +8 -1
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +60 -0
- camel/toolkits/math_toolkit.py +64 -10
- camel/toolkits/openai_image_toolkit.py +55 -24
- camel/toolkits/search_toolkit.py +13 -2
- camel/types/enums.py +34 -9
- camel/types/unified_model_type.py +5 -0
- {camel_ai-0.2.75a3.dist-info → camel_ai-0.2.75a6.dist-info}/METADATA +4 -11
- {camel_ai-0.2.75a3.dist-info → camel_ai-0.2.75a6.dist-info}/RECORD +27 -25
- {camel_ai-0.2.75a3.dist-info → camel_ai-0.2.75a6.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.75a3.dist-info → camel_ai-0.2.75a6.dist-info}/licenses/LICENSE +0 -0
camel/__init__.py
CHANGED
camel/agents/chat_agent.py
CHANGED
@@ -14,9 +14,11 @@
 from __future__ import annotations

 import asyncio
+import concurrent.futures
 import json
 import logging
 import queue
+import random
 import textwrap
 import threading
 import time
@@ -40,6 +42,7 @@ from typing import (

 from openai import (
     AsyncStream,
+    RateLimitError,
     Stream,
 )
 from pydantic import BaseModel, ValidationError
@@ -386,6 +389,13 @@ class ChatAgent(BaseAgent):
             usage. When enabled, removes FUNCTION/TOOL role messages and
             ASSISTANT messages with tool_calls after each step.
             (default: :obj:`False`)
+        retry_attempts (int, optional): Maximum number of retry attempts for
+            rate limit errors. (default: :obj:`3`)
+        retry_delay (float, optional): Initial delay in seconds between
+            retries. Uses exponential backoff. (default: :obj:`1.0`)
+        step_timeout (Optional[float], optional): Timeout in seconds for the
+            entire step operation. If None, no timeout is applied.
+            (default: :obj:`None`)
     """

     def __init__(
@@ -426,6 +436,9 @@ class ChatAgent(BaseAgent):
         mask_tool_output: bool = False,
         pause_event: Optional[asyncio.Event] = None,
         prune_tool_calls_from_memory: bool = False,
+        retry_attempts: int = 3,
+        retry_delay: float = 1.0,
+        step_timeout: Optional[float] = None,
     ) -> None:
         if isinstance(model, ModelManager):
             self.model_backend = model
@@ -511,6 +524,9 @@ class ChatAgent(BaseAgent):
         self._secure_result_store: Dict[str, Any] = {}
         self.pause_event = pause_event
         self.prune_tool_calls_from_memory = prune_tool_calls_from_memory
+        self.retry_attempts = max(1, retry_attempts)
+        self.retry_delay = max(0.0, retry_delay)
+        self.step_timeout = step_timeout

     def reset(self):
         r"""Resets the :obj:`ChatAgent` to its initial state."""
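
The three new constructor arguments are plain keyword arguments, so adopting them is a one-line change at the call site. A minimal sketch (not part of the diff; the model setup and the values are illustrative):

from camel.agents import ChatAgent
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI,
    model_type=ModelType.GPT_4O_MINI,
)

agent = ChatAgent(
    system_message="You are a helpful assistant.",
    model=model,
    retry_attempts=5,     # retries apply only to openai.RateLimitError
    retry_delay=2.0,      # first backoff step; doubled per attempt, capped at 60s
    step_timeout=120.0,   # bound the whole step()/astep() call to 120 seconds
)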
@@ -1365,6 +1381,9 @@ class ChatAgent(BaseAgent):
                 a StreamingChatAgentResponse that behaves like
                 ChatAgentResponse but can also be iterated for
                 streaming updates.
+
+        Raises:
+            TimeoutError: If the step operation exceeds the configured timeout.
         """

         stream = self.model_backend.model_config_dict.get("stream", False)
@@ -1374,6 +1393,30 @@
             generator = self._stream(input_message, response_format)
             return StreamingChatAgentResponse(generator)

+        # Execute with timeout if configured
+        if self.step_timeout is not None:
+            with concurrent.futures.ThreadPoolExecutor(
+                max_workers=1
+            ) as executor:
+                future = executor.submit(
+                    self._step_impl, input_message, response_format
+                )
+                try:
+                    return future.result(timeout=self.step_timeout)
+                except concurrent.futures.TimeoutError:
+                    future.cancel()
+                    raise TimeoutError(
+                        f"Step timed out after {self.step_timeout}s"
+                    )
+        else:
+            return self._step_impl(input_message, response_format)
+
+    def _step_impl(
+        self,
+        input_message: Union[BaseMessage, str],
+        response_format: Optional[Type[BaseModel]] = None,
+    ) -> ChatAgentResponse:
+        r"""Implementation of non-streaming step logic."""
         # Set Langfuse session_id using agent_id for trace grouping
         try:
             from camel.utils.langfuse import set_current_agent_session_id
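
On the synchronous path the timeout is enforced through a single-worker thread pool, and a timed-out call surfaces as a built-in TimeoutError. One caveat: future.cancel() cannot interrupt a call that has already started, so the underlying model request may keep running in the worker thread after the timeout fires. A hedged sketch of guarding a call, reusing the illustrative agent from the previous sketch:

try:
    response = agent.step("Summarize the latest report.")
    print(response.msgs[0].content)
except TimeoutError as exc:
    # Raised by the hunk above once step_timeout elapses.
    print(f"Step aborted: {exc}")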
@@ -1544,6 +1587,10 @@
             True, returns an AsyncStreamingChatAgentResponse that can be
             awaited for the final result or async iterated for streaming
             updates.
+
+        Raises:
+            asyncio.TimeoutError: If the step operation exceeds the configured
+                timeout.
         """

         try:
@@ -1559,9 +1606,22 @@
             async_generator = self._astream(input_message, response_format)
             return AsyncStreamingChatAgentResponse(async_generator)
         else:
-            ... (3 removed lines not shown in this diff view)
+            if self.step_timeout is not None:
+                try:
+                    return await asyncio.wait_for(
+                        self._astep_non_streaming_task(
+                            input_message, response_format
+                        ),
+                        timeout=self.step_timeout,
+                    )
+                except asyncio.TimeoutError:
+                    raise asyncio.TimeoutError(
+                        f"Async step timed out after {self.step_timeout}s"
+                    )
+            else:
+                return await self._astep_non_streaming_task(
+                    input_message, response_format
+                )

     async def _astep_non_streaming_task(
         self,
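
The asynchronous path instead wraps the non-streaming task in asyncio.wait_for, which cancels the awaited coroutine when the deadline passes and re-raises asyncio.TimeoutError with a message. A minimal sketch of handling it, again reusing the illustrative agent from above:

import asyncio


async def run_with_deadline() -> None:
    try:
        response = await agent.astep("Summarize the latest report.")
        print(response.msgs[0].content)
    except asyncio.TimeoutError as exc:
        # Unlike the thread-based sync path, the awaited task is cancelled here.
        print(f"Async step aborted: {exc}")


asyncio.run(run_with_deadline())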
@@ -1776,64 +1836,61 @@
         tool_schemas: Optional[List[Dict[str, Any]]] = None,
         prev_num_openai_messages: int = 0,
     ) -> ModelResponse:
-        r"""Internal function for agent step model response.
-
-            openai_messages (List[OpenAIMessage]): The OpenAI
-                messages to process.
-            num_tokens (int): The number of tokens in the context.
-            current_iteration (int): The current iteration of the step.
-            response_format (Optional[Type[BaseModel]]): The response
-                format to use.
-            tool_schemas (Optional[List[Dict[str, Any]]]): The tool
-                schemas to use.
-            prev_num_openai_messages (int): The number of openai messages
-                logged in the previous iteration.
-
-        Returns:
-            ModelResponse: The model response.
-        """
+        r"""Internal function for agent step model response."""
+        last_error = None

-        ... (20 removed lines not shown in this diff view)
+        for attempt in range(self.retry_attempts):
+            try:
+                response = self.model_backend.run(
+                    openai_messages, response_format, tool_schemas or None
+                )
+                if response:
+                    break
+            except RateLimitError as e:
+                last_error = e
+                if attempt < self.retry_attempts - 1:
+                    delay = min(self.retry_delay * (2**attempt), 60.0)
+                    delay = random.uniform(0, delay)  # Add jitter
+                    logger.warning(
+                        f"Rate limit hit (attempt {attempt + 1}"
+                        f"/{self.retry_attempts}). Retrying in {delay:.1f}s"
+                    )
+                    time.sleep(delay)
+                else:
+                    logger.error(
+                        f"Rate limit exhausted after "
+                        f"{self.retry_attempts} attempts"
+                    )
+            except Exception:
+                logger.error(
+                    f"Model error: {self.model_backend.model_type}",
+                    exc_info=True,
+                )
+                raise
+        else:
+            # Loop completed without success
             raise ModelProcessingError(
-                f"Unable to process messages:
-                f"
+                f"Unable to process messages: "
+                f"{str(last_error) if last_error else 'Unknown error'}"
             )

-
+        # Log success
+        sanitized = self._sanitize_messages_for_logging(
             openai_messages, prev_num_openai_messages
         )
         logger.info(
-            f"Model {self.model_backend.model_type}
-            f"
-            f"iteration {current_iteration}, "
-            f"processed these messages: {sanitized_messages}"
+            f"Model {self.model_backend.model_type} "
+            f"[{current_iteration}]: {sanitized}"
         )
+
         if not isinstance(response, ChatCompletion):
             raise TypeError(
-                f"Expected
-                f"got {type(response).__name__} instead."
+                f"Expected ChatCompletion, got {type(response).__name__}"
             )
+
         return self._handle_batch_response(response)

+    @observe()
     async def _aget_model_response(
         self,
         openai_messages: List[OpenAIMessage],
@@ -1843,62 +1900,59 @@
         tool_schemas: Optional[List[Dict[str, Any]]] = None,
         prev_num_openai_messages: int = 0,
     ) -> ModelResponse:
-        r"""Internal function for agent async step model response.
-
-            openai_messages (List[OpenAIMessage]): The OpenAI messages
-                to process.
-            num_tokens (int): The number of tokens in the context.
-            current_iteration (int): The current iteration of the step.
-            response_format (Optional[Type[BaseModel]]): The response
-                format to use.
-            tool_schemas (Optional[List[Dict[str, Any]]]): The tool schemas
-                to use.
-            prev_num_openai_messages (int): The number of openai messages
-                logged in the previous iteration.
+        r"""Internal function for agent async step model response."""
+        last_error = None

-        ... (24 removed lines not shown in this diff view)
+        for attempt in range(self.retry_attempts):
+            try:
+                response = await self.model_backend.arun(
+                    openai_messages, response_format, tool_schemas or None
+                )
+                if response:
+                    break
+            except RateLimitError as e:
+                last_error = e
+                if attempt < self.retry_attempts - 1:
+                    delay = min(self.retry_delay * (2**attempt), 60.0)
+                    delay = random.uniform(0, delay)  # Add jitter
+                    logger.warning(
+                        f"Rate limit hit (attempt {attempt + 1}"
+                        f"/{self.retry_attempts}). "
+                        f"Retrying in {delay:.1f}s"
+                    )
+                    await asyncio.sleep(delay)
+                else:
+                    logger.error(
+                        f"Rate limit exhausted after "
+                        f"{self.retry_attempts} attempts"
+                    )
+            except Exception:
+                logger.error(
+                    f"Model error: {self.model_backend.model_type}",
+                    exc_info=True,
+                )
+                raise
+        else:
+            # Loop completed without success
             raise ModelProcessingError(
-                f"Unable to process messages:
-                f"
+                f"Unable to process messages: "
+                f"{str(last_error) if last_error else 'Unknown error'}"
             )

-
+        # Log success
+        sanitized = self._sanitize_messages_for_logging(
             openai_messages, prev_num_openai_messages
         )
         logger.info(
-            f"Model {self.model_backend.model_type}
-            f"
-            f"iteration {current_iteration}, "
-            f"processed these messages: {sanitized_messages}"
+            f"Model {self.model_backend.model_type} "
+            f"[{current_iteration}]: {sanitized}"
         )
+
         if not isinstance(response, ChatCompletion):
             raise TypeError(
-                f"Expected
-                f"got {type(response).__name__} instead."
+                f"Expected ChatCompletion, got {type(response).__name__}"
             )
+
         return self._handle_batch_response(response)

     def _sanitize_messages_for_logging(
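
Both retry hunks compute the same backoff schedule: exponential growth from retry_delay, capped at 60 seconds, with full jitter applied by sampling uniformly between 0 and the capped value. A standalone sketch of that schedule (an illustrative helper, not part of the package):

import random


def backoff_delays(retry_attempts: int = 3, retry_delay: float = 1.0):
    # Mirrors the delay computation in the retry loops above; no sleep
    # follows the final attempt, hence retry_attempts - 1 delays.
    for attempt in range(retry_attempts - 1):
        capped = min(retry_delay * (2**attempt), 60.0)
        yield random.uniform(0.0, capped)


# With the defaults, the first wait is drawn from [0, 1.0)s and the
# second from [0, 2.0)s.
print(list(backoff_delays()))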
camel/configs/__init__.py
CHANGED
@@ -26,6 +26,7 @@ from .lmstudio_config import LMSTUDIO_API_PARAMS, LMStudioConfig
 from .mistral_config import MISTRAL_API_PARAMS, MistralConfig
 from .modelscope_config import MODELSCOPE_API_PARAMS, ModelScopeConfig
 from .moonshot_config import MOONSHOT_API_PARAMS, MoonshotConfig
+from .nebius_config import NEBIUS_API_PARAMS, NebiusConfig
 from .netmind_config import NETMIND_API_PARAMS, NetmindConfig
 from .novita_config import NOVITA_API_PARAMS, NovitaConfig
 from .nvidia_config import NVIDIA_API_PARAMS, NvidiaConfig
@@ -58,6 +59,8 @@ __all__ = [
     'ANTHROPIC_API_PARAMS',
     'GROQ_API_PARAMS',
     'GroqConfig',
+    'NEBIUS_API_PARAMS',
+    'NebiusConfig',
     'LiteLLMConfig',
     'LITELLM_API_PARAMS',
     'NetmindConfig',
camel/configs/nebius_config.py
ADDED
@@ -0,0 +1,103 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from __future__ import annotations
+
+from typing import Optional, Sequence, Union
+
+from camel.configs.base_config import BaseConfig
+
+
+class NebiusConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using OpenAI
+    compatibility with Nebius AI Studio.
+
+    Reference: https://nebius.com/docs/ai-studio/api
+
+    Args:
+        temperature (float, optional): Sampling temperature to use, between
+            :obj:`0` and :obj:`2`. Higher values make the output more random,
+            while lower values make it more focused and deterministic.
+            (default: :obj:`None`)
+        top_p (float, optional): An alternative to sampling with temperature,
+            called nucleus sampling, where the model considers the results of
+            the tokens with top_p probability mass. So :obj:`0.1` means only
+            the tokens comprising the top 10% probability mass are considered.
+            (default: :obj:`None`)
+        n (int, optional): How many chat completion choices to generate for
+            each input message. (default: :obj:`None`)
+        response_format (object, optional): An object specifying the format
+            that the model must output. Compatible with GPT-4 Turbo and all
+            GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
+            {"type": "json_object"} enables JSON mode, which guarantees the
+            message the model generates is valid JSON. Important: when using
+            JSON mode, you must also instruct the model to produce JSON
+            yourself via a system or user message. Without this, the model
+            may generate an unending stream of whitespace until the generation
+            reaches the token limit, resulting in a long-running and seemingly
+            "stuck" request. Also note that the message content may be
+            partially cut off if finish_reason="length", which indicates the
+            generation exceeded max_tokens or the conversation exceeded the
+            max context length.
+        stream (bool, optional): If True, partial message deltas will be sent
+            as data-only server-sent events as they become available.
+            (default: :obj:`None`)
+        stop (str or list, optional): Up to :obj:`4` sequences where the API
+            will stop generating further tokens. (default: :obj:`None`)
+        max_tokens (int, optional): The maximum number of tokens to generate
+            in the chat completion. The total length of input tokens and
+            generated tokens is limited by the model's context length.
+            (default: :obj:`None`)
+        presence_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on whether
+            they appear in the text so far, increasing the model's likelihood
+            to talk about new topics. See more information about frequency and
+            presence penalties. (default: :obj:`None`)
+        frequency_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on their
+            existing frequency in the text so far, decreasing the model's
+            likelihood to repeat the same line verbatim. See more information
+            about frequency and presence penalties. (default: :obj:`None`)
+        user (str, optional): A unique identifier representing your end-user,
+            which can help OpenAI to monitor and detect abuse.
+            (default: :obj:`None`)
+        tools (list[FunctionTool], optional): A list of tools the model may
+            call. Currently, only functions are supported as a tool. Use this
+            to provide a list of functions the model may generate JSON inputs
+            for. A max of 128 functions are supported.
+        tool_choice (Union[dict[str, str], str], optional): Controls which (if
+            any) tool is called by the model. :obj:`"none"` means the model
+            will not call any tool and instead generates a message.
+            :obj:`"auto"` means the model can pick between generating a
+            message or calling one or more tools. :obj:`"required"` means the
+            model must call one or more tools. Specifying a particular tool
+            via {"type": "function", "function": {"name": "my_function"}}
+            forces the model to call that tool. :obj:`"none"` is the default
+            when no tools are present. :obj:`"auto"` is the default if tools
+            are present.
+    """
+
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    n: Optional[int] = None
+    stream: Optional[bool] = None
+    stop: Optional[Union[str, Sequence[str]]] = None
+    max_tokens: Optional[int] = None
+    presence_penalty: Optional[float] = None
+    response_format: Optional[dict] = None
+    frequency_penalty: Optional[float] = None
+    user: Optional[str] = None
+    tool_choice: Optional[Union[dict[str, str], str]] = None
+
+
+NEBIUS_API_PARAMS = {param for param in NebiusConfig.model_fields.keys()}
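
NebiusConfig behaves like the other BaseConfig subclasses: set only the fields you need and pass the result of as_dict() to the model backend. A short usage sketch with illustrative values:

from camel.configs import NEBIUS_API_PARAMS, NebiusConfig

config = NebiusConfig(temperature=0.3, max_tokens=512, stream=False)
print(config.as_dict())                    # kwargs forwarded to the chat endpoint
print("temperature" in NEBIUS_API_PARAMS)  # True: the set mirrors the model fields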
camel/models/__init__.py
CHANGED
@@ -31,6 +31,7 @@ from .model_factory import ModelFactory
 from .model_manager import ModelManager, ModelProcessingError
 from .modelscope_model import ModelScopeModel
 from .moonshot_model import MoonshotModel
+from .nebius_model import NebiusModel
 from .nemotron_model import NemotronModel
 from .netmind_model import NetmindModel
 from .novita_model import NovitaModel
@@ -87,6 +88,7 @@ __all__ = [
     'QwenModel',
     'AWSBedrockModel',
     'ModelProcessingError',
+    'NebiusModel',
     'DeepSeekModel',
     'FishAudioModel',
     'InternLMModel',
camel/models/model_factory.py
CHANGED
@@ -31,6 +31,7 @@ from camel.models.lmstudio_model import LMStudioModel
 from camel.models.mistral_model import MistralModel
 from camel.models.modelscope_model import ModelScopeModel
 from camel.models.moonshot_model import MoonshotModel
+from camel.models.nebius_model import NebiusModel
 from camel.models.netmind_model import NetmindModel
 from camel.models.novita_model import NovitaModel
 from camel.models.nvidia_model import NvidiaModel
@@ -83,6 +84,7 @@ class ModelFactory:
         ModelPlatformType.AZURE: AzureOpenAIModel,
         ModelPlatformType.ANTHROPIC: AnthropicModel,
         ModelPlatformType.GROQ: GroqModel,
+        ModelPlatformType.NEBIUS: NebiusModel,
         ModelPlatformType.LMSTUDIO: LMStudioModel,
         ModelPlatformType.OPENROUTER: OpenRouterModel,
         ModelPlatformType.ZHIPU: ZhipuAIModel,
camel/models/nebius_model.py
ADDED
@@ -0,0 +1,83 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
+from typing import Any, Dict, Optional, Union
+
+from camel.configs import NebiusConfig
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
+from camel.utils import (
+    BaseTokenCounter,
+    api_keys_required,
+)
+
+
+class NebiusModel(OpenAICompatibleModel):
+    r"""LLM API served by Nebius AI Studio in a unified OpenAICompatibleModel
+    interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into:obj:`openai.ChatCompletion.create()`.
+            If:obj:`None`, :obj:`NebiusConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating
+            with the Nebius AI Studio service. (default: :obj:`None`).
+        url (Optional[str], optional): The url to the Nebius AI Studio service.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
+        max_retries (int, optional): Maximum number of retries for API calls.
+            (default: :obj:`3`)
+        **kwargs (Any): Additional arguments to pass to the client
+            initialization.
+    """
+
+    @api_keys_required([("api_key", "NEBIUS_API_KEY")])
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
+        max_retries: int = 3,
+        **kwargs: Any,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = NebiusConfig().as_dict()
+        api_key = api_key or os.environ.get("NEBIUS_API_KEY")
+        url = url or os.environ.get(
+            "NEBIUS_API_BASE_URL", "https://api.studio.nebius.com/v1"
+        )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+        super().__init__(
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
+            max_retries=max_retries,
+            **kwargs,
+        )
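
With ModelPlatformType.NEBIUS registered in the factory above and NEBIUS_API_KEY resolved as shown, a Nebius backend is created like any other OpenAI-compatible model. A hedged sketch; the model identifier below is an assumption, not something this diff defines:

import os

from camel.configs import NebiusConfig
from camel.models import ModelFactory
from camel.types import ModelPlatformType

os.environ.setdefault("NEBIUS_API_KEY", "<your-key>")  # checked by @api_keys_required

model = ModelFactory.create(
    model_platform=ModelPlatformType.NEBIUS,
    # Hypothetical model id; substitute one offered by Nebius AI Studio.
    model_type="meta-llama/Meta-Llama-3.1-70B-Instruct",
    model_config_dict=NebiusConfig(temperature=0.2).as_dict(),
)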
camel/models/ollama_model.py
CHANGED
@@ -35,8 +35,8 @@ class OllamaModel(OpenAICompatibleModel):
             If:obj:`None`, :obj:`OllamaConfig().as_dict()` will be used.
             (default: :obj:`None`)
         api_key (Optional[str], optional): The API key for authenticating with
-            the model service.
-
+            the model service. Required for Ollama cloud services. If not
+            provided, defaults to "Not_Provided". (default: :obj:`None`)
         url (Optional[str], optional): The url to the model service.
             (default: :obj:`None`)
         token_counter (Optional[BaseTokenCounter], optional): Token counter to
@@ -79,7 +79,7 @@ class OllamaModel(OpenAICompatibleModel):
         super().__init__(
             model_type=self._model_type,
             model_config_dict=model_config_dict,
-            api_key="
+            api_key=api_key or "Not_Provided",
             url=self._url,
             token_counter=token_counter,
             timeout=timeout,