camel-ai 0.2.73a4__py3-none-any.whl → 0.2.80a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- camel/__init__.py +1 -1
- camel/agents/_utils.py +38 -0
- camel/agents/chat_agent.py +2217 -519
- camel/agents/mcp_agent.py +30 -27
- camel/configs/__init__.py +15 -0
- camel/configs/aihubmix_config.py +88 -0
- camel/configs/amd_config.py +70 -0
- camel/configs/cometapi_config.py +104 -0
- camel/configs/minimax_config.py +93 -0
- camel/configs/nebius_config.py +103 -0
- camel/data_collectors/alpaca_collector.py +15 -6
- camel/datasets/base_generator.py +39 -10
- camel/environments/single_step.py +28 -3
- camel/environments/tic_tac_toe.py +1 -1
- camel/interpreters/__init__.py +2 -0
- camel/interpreters/docker/Dockerfile +3 -12
- camel/interpreters/e2b_interpreter.py +34 -1
- camel/interpreters/microsandbox_interpreter.py +395 -0
- camel/loaders/__init__.py +11 -2
- camel/loaders/chunkr_reader.py +9 -0
- camel/memories/agent_memories.py +48 -4
- camel/memories/base.py +26 -0
- camel/memories/blocks/chat_history_block.py +122 -4
- camel/memories/context_creators/score_based.py +25 -384
- camel/memories/records.py +88 -8
- camel/messages/base.py +153 -34
- camel/models/__init__.py +10 -0
- camel/models/aihubmix_model.py +83 -0
- camel/models/aiml_model.py +1 -16
- camel/models/amd_model.py +101 -0
- camel/models/anthropic_model.py +6 -19
- camel/models/aws_bedrock_model.py +2 -33
- camel/models/azure_openai_model.py +114 -89
- camel/models/base_audio_model.py +3 -1
- camel/models/base_model.py +32 -14
- camel/models/cohere_model.py +1 -16
- camel/models/cometapi_model.py +83 -0
- camel/models/crynux_model.py +1 -16
- camel/models/deepseek_model.py +1 -16
- camel/models/fish_audio_model.py +6 -0
- camel/models/gemini_model.py +36 -18
- camel/models/groq_model.py +1 -17
- camel/models/internlm_model.py +1 -16
- camel/models/litellm_model.py +1 -16
- camel/models/lmstudio_model.py +1 -17
- camel/models/minimax_model.py +83 -0
- camel/models/mistral_model.py +1 -16
- camel/models/model_factory.py +27 -1
- camel/models/modelscope_model.py +1 -16
- camel/models/moonshot_model.py +105 -24
- camel/models/nebius_model.py +83 -0
- camel/models/nemotron_model.py +0 -5
- camel/models/netmind_model.py +1 -16
- camel/models/novita_model.py +1 -16
- camel/models/nvidia_model.py +1 -16
- camel/models/ollama_model.py +4 -19
- camel/models/openai_compatible_model.py +62 -41
- camel/models/openai_model.py +62 -57
- camel/models/openrouter_model.py +1 -17
- camel/models/ppio_model.py +1 -16
- camel/models/qianfan_model.py +1 -16
- camel/models/qwen_model.py +1 -16
- camel/models/reka_model.py +1 -16
- camel/models/samba_model.py +34 -47
- camel/models/sglang_model.py +64 -31
- camel/models/siliconflow_model.py +1 -16
- camel/models/stub_model.py +0 -4
- camel/models/togetherai_model.py +1 -16
- camel/models/vllm_model.py +1 -16
- camel/models/volcano_model.py +0 -17
- camel/models/watsonx_model.py +1 -16
- camel/models/yi_model.py +1 -16
- camel/models/zhipuai_model.py +60 -16
- camel/parsers/__init__.py +18 -0
- camel/parsers/mcp_tool_call_parser.py +176 -0
- camel/retrievers/auto_retriever.py +1 -0
- camel/runtimes/daytona_runtime.py +11 -12
- camel/societies/__init__.py +2 -0
- camel/societies/workforce/__init__.py +2 -0
- camel/societies/workforce/events.py +122 -0
- camel/societies/workforce/prompts.py +146 -66
- camel/societies/workforce/role_playing_worker.py +15 -11
- camel/societies/workforce/single_agent_worker.py +302 -65
- camel/societies/workforce/structured_output_handler.py +30 -18
- camel/societies/workforce/task_channel.py +163 -27
- camel/societies/workforce/utils.py +107 -13
- camel/societies/workforce/workflow_memory_manager.py +772 -0
- camel/societies/workforce/workforce.py +1949 -579
- camel/societies/workforce/workforce_callback.py +74 -0
- camel/societies/workforce/workforce_logger.py +168 -145
- camel/societies/workforce/workforce_metrics.py +33 -0
- camel/storages/key_value_storages/json.py +15 -2
- camel/storages/key_value_storages/mem0_cloud.py +48 -47
- camel/storages/object_storages/google_cloud.py +1 -1
- camel/storages/vectordb_storages/oceanbase.py +13 -13
- camel/storages/vectordb_storages/qdrant.py +3 -3
- camel/storages/vectordb_storages/tidb.py +8 -6
- camel/tasks/task.py +4 -3
- camel/toolkits/__init__.py +20 -7
- camel/toolkits/aci_toolkit.py +45 -0
- camel/toolkits/base.py +6 -4
- camel/toolkits/code_execution.py +28 -1
- camel/toolkits/context_summarizer_toolkit.py +684 -0
- camel/toolkits/dappier_toolkit.py +5 -1
- camel/toolkits/dingtalk.py +1135 -0
- camel/toolkits/edgeone_pages_mcp_toolkit.py +11 -31
- camel/toolkits/excel_toolkit.py +1 -1
- camel/toolkits/{file_write_toolkit.py → file_toolkit.py} +430 -36
- camel/toolkits/function_tool.py +13 -3
- camel/toolkits/github_toolkit.py +104 -17
- camel/toolkits/gmail_toolkit.py +1839 -0
- camel/toolkits/google_calendar_toolkit.py +38 -4
- camel/toolkits/google_drive_mcp_toolkit.py +12 -31
- camel/toolkits/hybrid_browser_toolkit/config_loader.py +15 -0
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit.py +77 -8
- camel/toolkits/hybrid_browser_toolkit/hybrid_browser_toolkit_ts.py +884 -88
- camel/toolkits/hybrid_browser_toolkit/installer.py +203 -0
- camel/toolkits/hybrid_browser_toolkit/ts/package-lock.json +5 -612
- camel/toolkits/hybrid_browser_toolkit/ts/package.json +0 -1
- camel/toolkits/hybrid_browser_toolkit/ts/src/browser-session.ts +959 -89
- camel/toolkits/hybrid_browser_toolkit/ts/src/config-loader.ts +9 -2
- camel/toolkits/hybrid_browser_toolkit/ts/src/hybrid-browser-toolkit.ts +281 -213
- camel/toolkits/hybrid_browser_toolkit/ts/src/parent-child-filter.ts +226 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/snapshot-parser.ts +219 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/som-screenshot-injected.ts +543 -0
- camel/toolkits/hybrid_browser_toolkit/ts/src/types.ts +23 -3
- camel/toolkits/hybrid_browser_toolkit/ts/websocket-server.js +72 -7
- camel/toolkits/hybrid_browser_toolkit/ws_wrapper.py +582 -132
- camel/toolkits/hybrid_browser_toolkit_py/actions.py +158 -0
- camel/toolkits/hybrid_browser_toolkit_py/browser_session.py +55 -8
- camel/toolkits/hybrid_browser_toolkit_py/config_loader.py +43 -0
- camel/toolkits/hybrid_browser_toolkit_py/hybrid_browser_toolkit.py +321 -8
- camel/toolkits/hybrid_browser_toolkit_py/snapshot.py +10 -4
- camel/toolkits/hybrid_browser_toolkit_py/unified_analyzer.js +45 -4
- camel/toolkits/{openai_image_toolkit.py → image_generation_toolkit.py} +151 -53
- camel/toolkits/klavis_toolkit.py +5 -1
- camel/toolkits/markitdown_toolkit.py +27 -1
- camel/toolkits/math_toolkit.py +64 -10
- camel/toolkits/mcp_toolkit.py +366 -71
- camel/toolkits/memory_toolkit.py +5 -1
- camel/toolkits/message_integration.py +18 -13
- camel/toolkits/minimax_mcp_toolkit.py +195 -0
- camel/toolkits/note_taking_toolkit.py +19 -10
- camel/toolkits/notion_mcp_toolkit.py +16 -26
- camel/toolkits/openbb_toolkit.py +5 -1
- camel/toolkits/origene_mcp_toolkit.py +8 -49
- camel/toolkits/playwright_mcp_toolkit.py +12 -31
- camel/toolkits/resend_toolkit.py +168 -0
- camel/toolkits/search_toolkit.py +264 -91
- camel/toolkits/slack_toolkit.py +64 -10
- camel/toolkits/terminal_toolkit/__init__.py +18 -0
- camel/toolkits/terminal_toolkit/terminal_toolkit.py +957 -0
- camel/toolkits/terminal_toolkit/utils.py +532 -0
- camel/toolkits/vertex_ai_veo_toolkit.py +590 -0
- camel/toolkits/video_analysis_toolkit.py +17 -11
- camel/toolkits/wechat_official_toolkit.py +483 -0
- camel/toolkits/zapier_toolkit.py +5 -1
- camel/types/__init__.py +2 -2
- camel/types/enums.py +274 -7
- camel/types/openai_types.py +2 -2
- camel/types/unified_model_type.py +15 -0
- camel/utils/commons.py +36 -5
- camel/utils/constants.py +3 -0
- camel/utils/context_utils.py +1003 -0
- camel/utils/mcp.py +138 -4
- camel/utils/token_counting.py +43 -20
- {camel_ai-0.2.73a4.dist-info → camel_ai-0.2.80a2.dist-info}/METADATA +223 -83
- {camel_ai-0.2.73a4.dist-info → camel_ai-0.2.80a2.dist-info}/RECORD +170 -141
- camel/loaders/pandas_reader.py +0 -368
- camel/toolkits/openai_agent_toolkit.py +0 -135
- camel/toolkits/terminal_toolkit.py +0 -1550
- {camel_ai-0.2.73a4.dist-info → camel_ai-0.2.80a2.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.73a4.dist-info → camel_ai-0.2.80a2.dist-info}/licenses/LICENSE +0 -0
camel/models/nebius_model.py
ADDED

```diff
@@ -0,0 +1,83 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
+from typing import Any, Dict, Optional, Union
+
+from camel.configs import NebiusConfig
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
+from camel.utils import (
+    BaseTokenCounter,
+    api_keys_required,
+)
+
+
+class NebiusModel(OpenAICompatibleModel):
+    r"""LLM API served by Nebius AI Studio in a unified OpenAICompatibleModel
+    interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into:obj:`openai.ChatCompletion.create()`.
+            If:obj:`None`, :obj:`NebiusConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating
+            with the Nebius AI Studio service. (default: :obj:`None`).
+        url (Optional[str], optional): The url to the Nebius AI Studio service.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
+        max_retries (int, optional): Maximum number of retries for API calls.
+            (default: :obj:`3`)
+        **kwargs (Any): Additional arguments to pass to the client
+            initialization.
+    """
+
+    @api_keys_required([("api_key", "NEBIUS_API_KEY")])
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
+        max_retries: int = 3,
+        **kwargs: Any,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = NebiusConfig().as_dict()
+        api_key = api_key or os.environ.get("NEBIUS_API_KEY")
+        url = url or os.environ.get(
+            "NEBIUS_API_BASE_URL", "https://api.studio.nebius.com/v1"
+        )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+        super().__init__(
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
+            max_retries=max_retries,
+            **kwargs,
+        )
```
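For orientation, a minimal usage sketch of the new Nebius backend. The environment variables and default base URL come from the diff above; the model name is illustrative, not a confirmed Nebius catalog entry:

```python
import os

from camel.models.nebius_model import NebiusModel

# NEBIUS_API_KEY is enforced by the @api_keys_required decorator;
# NEBIUS_API_BASE_URL is optional and falls back to the default above.
os.environ["NEBIUS_API_KEY"] = "<your-nebius-key>"

model = NebiusModel(
    model_type="meta-llama/Meta-Llama-3.1-8B-Instruct",  # illustrative name
)
# model_config_dict now holds NebiusConfig().as_dict() defaults, and
# requests target https://api.studio.nebius.com/v1 unless overridden.
```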
camel/models/nemotron_model.py
CHANGED
camel/models/netmind_model.py
CHANGED
```diff
@@ -15,7 +15,7 @@
 import os
 from typing import Any, Dict, Optional, Union
 
-from camel.configs import NETMIND_API_PARAMS, NetmindConfig
+from camel.configs import NetmindConfig
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import ModelType
 from camel.utils import (
@@ -87,18 +87,3 @@ class NetmindModel(OpenAICompatibleModel):
             max_retries=max_retries,
             **kwargs,
         )
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to NETMIND API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to NETMIND API.
-        """
-        for param in self.model_config_dict:
-            if param not in NETMIND_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into NETMIND model backend."
-                )
```
camel/models/novita_model.py
CHANGED
```diff
@@ -15,7 +15,7 @@
 import os
 from typing import Any, Dict, Optional, Union
 
-from camel.configs import NOVITA_API_PARAMS, NovitaConfig
+from camel.configs import NovitaConfig
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import ModelType
 from camel.utils import (
@@ -86,18 +86,3 @@ class NovitaModel(OpenAICompatibleModel):
             max_retries=max_retries,
             **kwargs,
         )
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to Novita API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to Novita API.
-        """
-        for param in self.model_config_dict:
-            if param not in NOVITA_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into Novita model backend."
-                )
```
camel/models/nvidia_model.py
CHANGED
```diff
@@ -15,7 +15,7 @@
 import os
 from typing import Any, Dict, Optional, Union
 
-from camel.configs import NVIDIA_API_PARAMS, NvidiaConfig
+from camel.configs import NvidiaConfig
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import ModelType
 from camel.utils import BaseTokenCounter, api_keys_required
@@ -82,18 +82,3 @@ class NvidiaModel(OpenAICompatibleModel):
             max_retries=max_retries,
             **kwargs,
         )
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to NVIDIA API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to NVIDIA API.
-        """
-        for param in self.model_config_dict:
-            if param not in NVIDIA_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into NVIDIA model backend."
-                )
```
camel/models/ollama_model.py
CHANGED
```diff
@@ -15,7 +15,7 @@ import os
 import subprocess
 from typing import Any, Dict, Optional, Union
 
-from camel.configs import OLLAMA_API_PARAMS, OllamaConfig
+from camel.configs import OllamaConfig
 from camel.logger import get_logger
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import ModelType
@@ -35,8 +35,8 @@ class OllamaModel(OpenAICompatibleModel):
             If:obj:`None`, :obj:`OllamaConfig().as_dict()` will be used.
             (default: :obj:`None`)
         api_key (Optional[str], optional): The API key for authenticating with
-            the model service.
-
+            the model service. Required for Ollama cloud services. If not
+            provided, defaults to "Not_Provided". (default: :obj:`None`)
         url (Optional[str], optional): The url to the model service.
             (default: :obj:`None`)
         token_counter (Optional[BaseTokenCounter], optional): Token counter to
@@ -79,7 +79,7 @@ class OllamaModel(OpenAICompatibleModel):
         super().__init__(
             model_type=self._model_type,
             model_config_dict=model_config_dict,
-            api_key="Not_Provided",
+            api_key=api_key or "Not_Provided",
             url=self._url,
             token_counter=token_counter,
             timeout=timeout,
@@ -102,18 +102,3 @@ class OllamaModel(OpenAICompatibleModel):
             )
         except Exception as e:
             logger.error(f"Failed to start Ollama server: {e}.")
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to Ollama API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to OpenAI API.
-        """
-        for param in self.model_config_dict:
-            if param not in OLLAMA_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into Ollama model backend."
-                )
```
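The `api_key` change above means a caller-supplied key is now forwarded instead of being silently replaced by the hard-coded placeholder. A short sketch (the model name and cloud endpoint URL are illustrative, not taken from the diff):

```python
from camel.models.ollama_model import OllamaModel

# Local Ollama: no key needed; the backend falls back to "Not_Provided".
local = OllamaModel(
    model_type="llama3.1",              # illustrative model name
    url="http://localhost:11434/v1",    # default local Ollama endpoint
)

# Ollama cloud: the key passed here is now actually used.
cloud = OllamaModel(
    model_type="llama3.1",
    api_key="<ollama-cloud-key>",
    url="https://ollama.com/v1",        # illustrative cloud endpoint
)
```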
camel/models/openai_compatible_model.py
CHANGED

```diff
@@ -78,9 +78,21 @@ class OpenAICompatibleModel(BaseModelBackend):
             (default: :obj:`None`)
         max_retries (int, optional): Maximum number of retries for API calls.
             (default: :obj:`3`)
+        client (Optional[Any], optional): A custom synchronous
+            OpenAI-compatible client instance. If provided, this client will
+            be used instead of creating a new one. Useful for RL frameworks
+            like AReaL or rLLM that provide OpenAI-compatible clients (e.g.,
+            ArealOpenAI). The client should implement the OpenAI client
+            interface with `.chat.completions.create()` and `.beta.chat.
+            completions.parse()` methods. (default: :obj:`None`)
+        async_client (Optional[Any], optional): A custom asynchronous
+            OpenAI-compatible client instance. If provided, this client will
+            be used instead of creating a new one. The client should implement
+            the AsyncOpenAI client interface. (default: :obj:`None`)
         **kwargs (Any): Additional arguments to pass to the
             OpenAI client initialization. These can include parameters like
             'organization', 'default_headers', 'http_client', etc.
+            Ignored if custom clients are provided.
     """
 
     def __init__(
@@ -92,6 +104,8 @@ class OpenAICompatibleModel(BaseModelBackend):
         token_counter: Optional[BaseTokenCounter] = None,
         timeout: Optional[float] = None,
         max_retries: int = 3,
+        client: Optional[Any] = None,
+        async_client: Optional[Any] = None,
         **kwargs: Any,
     ) -> None:
         api_key = api_key or os.environ.get("OPENAI_COMPATIBILITY_API_KEY")
@@ -107,39 +121,55 @@ class OpenAICompatibleModel(BaseModelBackend):
             timeout,
             max_retries,
         )
-
-        if is_langfuse_available():
-            from langfuse.openai import AsyncOpenAI as LangfuseAsyncOpenAI
-            from langfuse.openai import OpenAI as LangfuseOpenAI
-            self._client = LangfuseOpenAI(
-                timeout=self._timeout,
-                max_retries=max_retries,
-                base_url=self._url,
-                api_key=self._api_key,
-                **kwargs,
-            )
-            self._async_client = LangfuseAsyncOpenAI(
-                timeout=self._timeout,
-                max_retries=max_retries,
-                base_url=self._url,
-                api_key=self._api_key,
-                **kwargs,
-            )
+
+        # Use custom clients if provided, otherwise create new ones
+        if client is not None:
+            # Use the provided custom sync client
+            self._client = client
         else:
-            self._client = OpenAI(
-                timeout=self._timeout,
-                max_retries=max_retries,
-                base_url=self._url,
-                api_key=self._api_key,
-                **kwargs,
-            )
-            self._async_client = AsyncOpenAI(
-                timeout=self._timeout,
-                max_retries=max_retries,
-                base_url=self._url,
-                api_key=self._api_key,
-                **kwargs,
-            )
+            # Create default sync client
+            if is_langfuse_available():
+                from langfuse.openai import OpenAI as LangfuseOpenAI
+
+                self._client = LangfuseOpenAI(
+                    timeout=self._timeout,
+                    max_retries=max_retries,
+                    base_url=self._url,
+                    api_key=self._api_key,
+                    **kwargs,
+                )
+            else:
+                self._client = OpenAI(
+                    timeout=self._timeout,
+                    max_retries=max_retries,
+                    base_url=self._url,
+                    api_key=self._api_key,
+                    **kwargs,
+                )
+
+        if async_client is not None:
+            # Use the provided custom async client
+            self._async_client = async_client
+        else:
+            # Create default async client
+            if is_langfuse_available():
+                from langfuse.openai import AsyncOpenAI as LangfuseAsyncOpenAI
+
+                self._async_client = LangfuseAsyncOpenAI(
+                    timeout=self._timeout,
+                    max_retries=max_retries,
+                    base_url=self._url,
+                    api_key=self._api_key,
+                    **kwargs,
+                )
+            else:
+                self._async_client = AsyncOpenAI(
+                    timeout=self._timeout,
+                    max_retries=max_retries,
+                    base_url=self._url,
+                    api_key=self._api_key,
+                    **kwargs,
+                )
 
     @observe()
     def _run(
@@ -190,9 +220,6 @@
         is_streaming = self.model_config_dict.get("stream", False)
 
         if response_format:
-            result: Union[ChatCompletion, Stream[ChatCompletionChunk]] = (
-                self._request_parse(messages, response_format, tools)
-            )
             if is_streaming:
                 # Use streaming parse for structured output
                 return self._request_stream_parse(
@@ -256,9 +283,6 @@
         is_streaming = self.model_config_dict.get("stream", False)
 
         if response_format:
-            result: Union[
-                ChatCompletion, AsyncStream[ChatCompletionChunk]
-            ] = await self._arequest_parse(messages, response_format, tools)
             if is_streaming:
                 # Use streaming parse for structured output
                 return await self._arequest_stream_parse(
@@ -462,6 +486,3 @@
             bool: Whether the model is in stream mode.
         """
         return self.model_config_dict.get('stream', False)
-
-    def check_model_config(self):
-        pass
```
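The new `client`/`async_client` hooks make the backend reusable with externally managed clients. A minimal sketch, assuming a locally served OpenAI-compatible endpoint (the URL, model name, and placeholder key are illustrative):

```python
from openai import AsyncOpenAI, OpenAI

from camel.models.openai_compatible_model import OpenAICompatibleModel

# Any object exposing .chat.completions.create() and
# .beta.chat.completions.parse() works here, e.g. a client supplied by an
# RL framework such as AReaL or rLLM; plain OpenAI clients stand in below.
sync_client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy")
async_client = AsyncOpenAI(base_url="http://localhost:8000/v1", api_key="dummy")

model = OpenAICompatibleModel(
    model_type="my-served-model",     # illustrative model name
    api_key="dummy",                  # still stored on the backend, but the
    url="http://localhost:8000/v1",   # injected clients handle transport
    client=sync_client,
    async_client=async_client,
)
```

Per the docstring, `**kwargs` meant for client construction are ignored once custom clients are injected, since no client is built internally.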
camel/models/openai_model.py
CHANGED
```diff
@@ -22,7 +22,7 @@ from openai.lib.streaming.chat import (
 )
 from pydantic import BaseModel
 
-from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig
+from camel.configs import ChatGPTConfig
 from camel.logger import get_logger
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
@@ -90,9 +90,21 @@ class OpenAIModel(BaseModelBackend):
             (default: :obj:`None`)
         max_retries (int, optional): Maximum number of retries for API calls.
             (default: :obj:`3`)
+        client (Optional[Any], optional): A custom synchronous OpenAI client
+            instance. If provided, this client will be used instead of
+            creating a new one. Useful for RL frameworks like AReaL or rLLM
+            that provide OpenAI-compatible clients. The client should
+            implement the OpenAI client interface with
+            `.chat.completions.create()` and `.beta.chat.completions.parse()`
+            methods. (default: :obj:`None`)
+        async_client (Optional[Any], optional): A custom asynchronous OpenAI
+            client instance. If provided, this client will be used instead of
+            creating a new one. The client should implement the AsyncOpenAI
+            client interface. (default: :obj:`None`)
         **kwargs (Any): Additional arguments to pass to the
             OpenAI client initialization. These can include parameters like
             'organization', 'default_headers', 'http_client', etc.
+            Ignored if custom clients are provided.
     """
 
     @api_keys_required(
@@ -109,6 +121,8 @@ class OpenAIModel(BaseModelBackend):
         token_counter: Optional[BaseTokenCounter] = None,
         timeout: Optional[float] = None,
         max_retries: int = 3,
+        client: Optional[Any] = None,
+        async_client: Optional[Any] = None,
         **kwargs: Any,
     ) -> None:
         if model_config_dict is None:
@@ -124,42 +138,54 @@ class OpenAIModel(BaseModelBackend):
             model_type, model_config_dict, api_key, url, token_counter, timeout
         )
 
-        if is_langfuse_available():
-            from langfuse.openai import AsyncOpenAI as LangfuseAsyncOpenAI
-            from langfuse.openai import OpenAI as LangfuseOpenAI
-
-            # Create Langfuse client with base parameters and additional
-            # arguments
-            self._client = LangfuseOpenAI(
-                timeout=self._timeout,
-                max_retries=self._max_retries,
-                base_url=self._url,
-                api_key=self._api_key,
-                **kwargs,
-            )
-            self._async_client = LangfuseAsyncOpenAI(
-                timeout=self._timeout,
-                max_retries=self._max_retries,
-                base_url=self._url,
-                api_key=self._api_key,
-                **kwargs,
-            )
+        # Use custom clients if provided, otherwise create new ones
+        if client is not None:
+            # Use the provided custom sync client
+            self._client = client
         else:
-            # Create
-            self._client = OpenAI(
-                timeout=self._timeout,
-                max_retries=self._max_retries,
-                base_url=self._url,
-                api_key=self._api_key,
-                **kwargs,
-            )
-            self._async_client = AsyncOpenAI(
-                timeout=self._timeout,
-                max_retries=self._max_retries,
-                base_url=self._url,
-                api_key=self._api_key,
-                **kwargs,
-            )
+            # Create default sync client
+            if is_langfuse_available():
+                from langfuse.openai import OpenAI as LangfuseOpenAI
+
+                self._client = LangfuseOpenAI(
+                    timeout=self._timeout,
+                    max_retries=self._max_retries,
+                    base_url=self._url,
+                    api_key=self._api_key,
+                    **kwargs,
+                )
+            else:
+                self._client = OpenAI(
+                    timeout=self._timeout,
+                    max_retries=self._max_retries,
+                    base_url=self._url,
+                    api_key=self._api_key,
+                    **kwargs,
+                )
+
+        if async_client is not None:
+            # Use the provided custom async client
+            self._async_client = async_client
+        else:
+            # Create default async client
+            if is_langfuse_available():
+                from langfuse.openai import AsyncOpenAI as LangfuseAsyncOpenAI
+
+                self._async_client = LangfuseAsyncOpenAI(
+                    timeout=self._timeout,
+                    max_retries=self._max_retries,
+                    base_url=self._url,
+                    api_key=self._api_key,
+                    **kwargs,
+                )
+            else:
+                self._async_client = AsyncOpenAI(
+                    timeout=self._timeout,
+                    max_retries=self._max_retries,
+                    base_url=self._url,
+                    api_key=self._api_key,
+                    **kwargs,
+                )
 
     def _sanitize_config(self, config_dict: Dict[str, Any]) -> Dict[str, Any]:
         r"""Sanitize the model configuration for O1 models."""
@@ -303,9 +329,6 @@
         is_streaming = self.model_config_dict.get("stream", False)
 
         if response_format:
-            result: Union[ChatCompletion, Stream[ChatCompletionChunk]] = (
-                self._request_parse(messages, response_format, tools)
-            )
             if is_streaming:
                 # Use streaming parse for structured output
                 return self._request_stream_parse(
@@ -377,9 +400,6 @@
         is_streaming = self.model_config_dict.get("stream", False)
 
         if response_format:
-            result: Union[
-                ChatCompletion, AsyncStream[ChatCompletionChunk]
-            ] = await self._arequest_parse(messages, response_format, tools)
             if is_streaming:
                 # Use streaming parse for structured output
                 return await self._arequest_stream_parse(
@@ -545,21 +565,6 @@
             **request_config,
         )
 
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to OpenAI API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to OpenAI API.
-        """
-        for param in self.model_config_dict:
-            if param not in OPENAI_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into OpenAI model backend."
-                )
-
 
     @property
     def stream(self) -> bool:
         r"""Returns whether the model is in stream mode, which sends partial
```
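A recurring change in this release is the removal of the per-backend `check_model_config` validators, here and in the NETMIND, Novita, NVIDIA, and Ollama backends above as well as the OpenRouter, PPIO, Qianfan, and Qwen backends below. A sketch of the behavioral difference, assuming a config key the OpenAI API does not accept:

```python
from camel.models.openai_model import OpenAIModel
from camel.types import ModelType

model = OpenAIModel(
    model_type=ModelType.GPT_4O_MINI,
    model_config_dict={"temperature": 0.2, "bogus_option": True},
    api_key="<openai-key>",  # placeholder; required by @api_keys_required
)
# 0.2.73a4: check_model_config() raised at construction:
#   ValueError: Unexpected argument `bogus_option` is input into OpenAI
#   model backend.
# 0.2.80a2: construction succeeds; an unsupported argument is only
# rejected (if at all) by the API server at request time.
```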
camel/models/openrouter_model.py
CHANGED
```diff
@@ -14,7 +14,7 @@
 import os
 from typing import Any, Dict, Optional, Union
 
-from camel.configs import OPENROUTER_API_PARAMS, OpenRouterConfig
+from camel.configs import OpenRouterConfig
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import ModelType
 from camel.utils import (
@@ -81,19 +81,3 @@ class OpenRouterModel(OpenAICompatibleModel):
             max_retries=max_retries,
             **kwargs,
         )
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any unexpected
-        arguments to OpenRouter API. But OpenRouter API does not have any
-        additional arguments to check.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to OpenRouter API.
-        """
-        for param in self.model_config_dict:
-            if param not in OPENROUTER_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into OpenRouter model backend."
-                )
```
camel/models/ppio_model.py
CHANGED
```diff
@@ -15,7 +15,7 @@
 import os
 from typing import Any, Dict, Optional, Union
 
-from camel.configs import PPIO_API_PARAMS, PPIOConfig
+from camel.configs import PPIOConfig
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import ModelType
 from camel.utils import (
@@ -86,18 +86,3 @@ class PPIOModel(OpenAICompatibleModel):
             max_retries=max_retries,
             **kwargs,
         )
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to PPIO API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to PPIO API.
-        """
-        for param in self.model_config_dict:
-            if param not in PPIO_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into PPIO model backend."
-                )
```
camel/models/qianfan_model.py
CHANGED
```diff
@@ -15,7 +15,7 @@
 import os
 from typing import Any, Dict, Optional, Union
 
-from camel.configs import QIANFAN_API_PARAMS, QianfanConfig
+from camel.configs import QianfanConfig
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import ModelType
 from camel.utils import (
@@ -87,18 +87,3 @@ class QianfanModel(OpenAICompatibleModel):
             max_retries=max_retries,
             **kwargs,
         )
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to Qianfan API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to Qianfan API.
-        """
-        for param in self.model_config_dict:
-            if param not in QIANFAN_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into QIANFAN model backend."
-                )
```
camel/models/qwen_model.py
CHANGED
```diff
@@ -18,7 +18,7 @@ from typing import Any, Dict, List, Optional, Union
 
 from openai import AsyncStream, Stream
 
-from camel.configs import QWEN_API_PARAMS, QwenConfig
+from camel.configs import QwenConfig
 from camel.messages import OpenAIMessage
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import (
@@ -259,18 +259,3 @@ class QwenModel(OpenAICompatibleModel):
             **request_config,
         )
         return self._post_handle_response(response)
-
-    def check_model_config(self):
-        r"""Check whether the model configuration contains any
-        unexpected arguments to Qwen API.
-
-        Raises:
-            ValueError: If the model configuration dictionary contains any
-                unexpected arguments to Qwen API.
-        """
-        for param in self.model_config_dict:
-            if param not in QWEN_API_PARAMS:
-                raise ValueError(
-                    f"Unexpected argument `{param}` is "
-                    "input into Qwen model backend."
-                )
```
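With the validators gone, the typed config classes remain the documented way to build a parameter dict. A minimal sketch of the Qwen backend after this change (the model name is illustrative, and the QWEN_API_KEY requirement is assumed to be unchanged from earlier releases):

```python
from camel.configs import QwenConfig
from camel.models.qwen_model import QwenModel

model = QwenModel(
    model_type="qwen-plus",  # illustrative model name
    model_config_dict=QwenConfig(temperature=0.2).as_dict(),
    api_key="<qwen-api-key>",
)
# Unknown keys in model_config_dict are no longer rejected client-side;
# QwenConfig still documents which parameters the Qwen API accepts.
```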