camel-ai 0.2.37__py3-none-any.whl → 0.2.39__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +4 -0
- camel/agents/repo_agent.py +2 -2
- camel/benchmarks/apibank.py +1 -1
- camel/benchmarks/apibench.py +1 -1
- camel/configs/__init__.py +3 -0
- camel/configs/modelscope_config.py +59 -0
- camel/datagen/evol_instruct/__init__.py +20 -0
- camel/datagen/evol_instruct/evol_instruct.py +424 -0
- camel/datagen/evol_instruct/scorer.py +166 -0
- camel/datagen/evol_instruct/templates.py +268 -0
- camel/datagen/self_improving_cot.py +1 -1
- camel/datasets/__init__.py +2 -0
- camel/datasets/base_generator.py +22 -9
- camel/datasets/few_shot_generator.py +2 -3
- camel/datasets/self_instruct_generator.py +415 -0
- camel/embeddings/openai_compatible_embedding.py +13 -5
- camel/environments/models.py +10 -4
- camel/environments/single_step.py +181 -41
- camel/interpreters/docker_interpreter.py +2 -2
- camel/interpreters/e2b_interpreter.py +1 -1
- camel/interpreters/internal_python_interpreter.py +1 -1
- camel/interpreters/subprocess_interpreter.py +1 -1
- camel/loaders/__init__.py +2 -2
- camel/loaders/{panda_reader.py → pandas_reader.py} +61 -30
- camel/loaders/unstructured_io.py +2 -1
- camel/memories/blocks/chat_history_block.py +1 -1
- camel/memories/context_creators/score_based.py +198 -67
- camel/models/__init__.py +2 -0
- camel/models/aiml_model.py +9 -3
- camel/models/anthropic_model.py +11 -3
- camel/models/azure_openai_model.py +9 -3
- camel/models/base_audio_model.py +6 -0
- camel/models/base_model.py +4 -0
- camel/models/deepseek_model.py +9 -3
- camel/models/gemini_model.py +9 -3
- camel/models/groq_model.py +9 -3
- camel/models/internlm_model.py +8 -2
- camel/models/model_factory.py +123 -0
- camel/models/modelscope_model.py +208 -0
- camel/models/moonshot_model.py +8 -2
- camel/models/nemotron_model.py +9 -3
- camel/models/nvidia_model.py +9 -3
- camel/models/ollama_model.py +9 -3
- camel/models/openai_audio_models.py +7 -5
- camel/models/openai_compatible_model.py +9 -3
- camel/models/openai_model.py +58 -5
- camel/models/openrouter_model.py +9 -3
- camel/models/qwen_model.py +9 -3
- camel/models/samba_model.py +9 -3
- camel/models/sglang_model.py +11 -4
- camel/models/siliconflow_model.py +8 -2
- camel/models/stub_model.py +2 -1
- camel/models/togetherai_model.py +11 -5
- camel/models/vllm_model.py +10 -4
- camel/models/yi_model.py +9 -3
- camel/models/zhipuai_model.py +11 -5
- camel/retrievers/auto_retriever.py +14 -0
- camel/retrievers/vector_retriever.py +1 -1
- camel/storages/__init__.py +2 -0
- camel/storages/graph_storages/neo4j_graph.py +1 -1
- camel/storages/vectordb_storages/__init__.py +2 -0
- camel/storages/vectordb_storages/base.py +2 -2
- camel/storages/vectordb_storages/milvus.py +2 -2
- camel/storages/vectordb_storages/qdrant.py +2 -2
- camel/storages/vectordb_storages/tidb.py +332 -0
- camel/tasks/task.py +2 -2
- camel/toolkits/__init__.py +9 -1
- camel/toolkits/arxiv_toolkit.py +2 -1
- camel/toolkits/ask_news_toolkit.py +11 -3
- camel/toolkits/audio_analysis_toolkit.py +2 -0
- camel/toolkits/base.py +3 -0
- camel/toolkits/browser_toolkit.py +84 -61
- camel/toolkits/code_execution.py +3 -1
- camel/toolkits/dappier_toolkit.py +2 -1
- camel/toolkits/data_commons_toolkit.py +2 -0
- camel/toolkits/excel_toolkit.py +2 -0
- camel/toolkits/file_write_toolkit.py +2 -0
- camel/toolkits/github_toolkit.py +6 -4
- camel/toolkits/google_scholar_toolkit.py +2 -0
- camel/toolkits/human_toolkit.py +17 -1
- camel/toolkits/image_analysis_toolkit.py +2 -0
- camel/toolkits/linkedin_toolkit.py +2 -1
- camel/toolkits/math_toolkit.py +2 -0
- camel/toolkits/mcp_toolkit.py +42 -52
- camel/toolkits/meshy_toolkit.py +20 -2
- camel/toolkits/networkx_toolkit.py +2 -0
- camel/toolkits/notion_toolkit.py +7 -0
- camel/toolkits/openai_agent_toolkit.py +131 -0
- camel/toolkits/openbb_toolkit.py +2 -1
- camel/toolkits/pubmed_toolkit.py +2 -0
- camel/toolkits/reddit_toolkit.py +2 -1
- camel/toolkits/retrieval_toolkit.py +2 -1
- camel/toolkits/search_toolkit.py +2 -1
- camel/toolkits/searxng_toolkit.py +207 -0
- camel/toolkits/semantic_scholar_toolkit.py +2 -0
- camel/toolkits/slack_toolkit.py +2 -0
- camel/toolkits/stripe_toolkit.py +2 -1
- camel/toolkits/sympy_toolkit.py +2 -0
- camel/toolkits/terminal_toolkit.py +2 -0
- camel/toolkits/thinking_toolkit.py +168 -12
- camel/toolkits/twitter_toolkit.py +2 -1
- camel/toolkits/video_analysis_toolkit.py +2 -1
- camel/toolkits/video_download_toolkit.py +2 -1
- camel/toolkits/weather_toolkit.py +2 -0
- camel/toolkits/whatsapp_toolkit.py +2 -1
- camel/toolkits/zapier_toolkit.py +2 -1
- camel/types/enums.py +66 -0
- camel/types/unified_model_type.py +5 -0
- camel/utils/__init__.py +2 -0
- camel/utils/chunker/code_chunker.py +9 -9
- camel/utils/commons.py +50 -30
- camel/utils/constants.py +2 -2
- camel/utils/mcp.py +79 -0
- camel/verifiers/__init__.py +2 -0
- camel/verifiers/base.py +15 -15
- camel/verifiers/math_verifier.py +182 -0
- camel/verifiers/python_verifier.py +28 -28
- {camel_ai-0.2.37.dist-info → camel_ai-0.2.39.dist-info}/METADATA +54 -4
- {camel_ai-0.2.37.dist-info → camel_ai-0.2.39.dist-info}/RECORD +122 -110
- {camel_ai-0.2.37.dist-info → camel_ai-0.2.39.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.37.dist-info → camel_ai-0.2.39.dist-info}/licenses/LICENSE +0 -0
camel/models/model_factory.py
CHANGED
@@ -11,8 +11,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import json
 from typing import Dict, Optional, Type, Union
 
+import yaml
+
 from camel.models.aiml_model import AIMLModel
 from camel.models.anthropic_model import AnthropicModel
 from camel.models.azure_openai_model import AzureOpenAIModel
@@ -24,6 +27,7 @@ from camel.models.groq_model import GroqModel
 from camel.models.internlm_model import InternLMModel
 from camel.models.litellm_model import LiteLLMModel
 from camel.models.mistral_model import MistralModel
+from camel.models.modelscope_model import ModelScopeModel
 from camel.models.moonshot_model import MoonshotModel
 from camel.models.nvidia_model import NvidiaModel
 from camel.models.ollama_model import OllamaModel
@@ -60,6 +64,7 @@ class ModelFactory:
         token_counter: Optional[BaseTokenCounter] = None,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
+        timeout: Optional[int] = None,
     ) -> BaseModelBackend:
         r"""Creates an instance of `BaseModelBackend` of the specified type.
 
@@ -79,6 +84,8 @@ class ModelFactory:
                 with the model service. (default: :obj:`None`)
             url (Optional[str], optional): The url to the model service.
                 (default: :obj:`None`)
+            timeout (Optional[float], optional): The timeout value in seconds
+                for API calls. (default: :obj:`None`)
 
         Returns:
             BaseModelBackend: The initialized backend.
@@ -142,6 +149,8 @@ class ModelFactory:
             model_class = InternLMModel
         elif model_platform.is_moonshot and model_type.is_moonshot:
             model_class = MoonshotModel
+        elif model_platform.is_modelscope:
+            model_class = ModelScopeModel
         elif model_type == ModelType.STUB:
             model_class = StubModel
 
@@ -157,4 +166,118 @@ class ModelFactory:
             api_key=api_key,
             url=url,
             token_counter=token_counter,
+            timeout=timeout,
         )
+
+    @classmethod
+    def __parse_model_platform(
+        cls, model_platform_str: str
+    ) -> ModelPlatformType:
+        r"""Parses a string and returns the corresponding ModelPlatformType
+        enum.
+
+        Args:
+            model_platform_str (str): The platform name as a string. Can be in
+                the form "ModelPlatformType.<NAME>" or simply "<NAME>".
+
+        Returns:
+            ModelPlatformType: The matching enum value.
+
+        Raises:
+            ValueError: If the platform name is not a valid member of
+                ModelPlatformType.
+        """
+
+        try:
+            if model_platform_str.startswith("ModelPlatformType."):
+                platform_name = model_platform_str.split('.')[-1]
+            else:
+                platform_name = model_platform_str.upper()
+
+            if platform_name not in ModelPlatformType.__members__:
+                raise ValueError(
+                    f"Invalid model platform: {platform_name}. "
+                    f"Valid options: "
+                    f"{', '.join(ModelPlatformType.__members__.keys())}"
+                )
+
+            return ModelPlatformType[platform_name]
+
+        except KeyError:
+            raise KeyError(f"Invalid model platform: {model_platform_str}")
+
+    @classmethod
+    def __load_yaml(cls, filepath: str) -> Dict:
+        r"""Loads and parses a YAML file into a dictionary.
+
+        Args:
+            filepath (str): Path to the YAML configuration file.
+
+        Returns:
+            Dict: The parsed YAML content as a dictionary.
+        """
+        with open(filepath, 'r') as file:
+            config = yaml.safe_load(file)
+
+        return config
+
+    @classmethod
+    def __load_json(cls, filepath: str) -> Dict:
+        r"""Loads and parses a JSON file into a dictionary.
+
+        Args:
+            filepath (str): Path to the JSON configuration file.
+
+        Returns:
+            Dict: The parsed JSON content as a dictionary.
+        """
+        with open(filepath, 'r') as file:
+            config = json.load(file)
+
+        return config
+
+    @classmethod
+    def create_from_yaml(cls, filepath: str) -> BaseModelBackend:
+        r"""Creates and returns a model base backend instance
+        from a YAML configuration file.
+
+        Args:
+            filepath (str): Path to the YAML file containing model
+                configuration.
+
+        Returns:
+            BaseModelBackend: An instance of the model backend based on the
+                configuration.
+        """
+
+        config = cls.__load_yaml(filepath)
+        config["model_platform"] = cls.__parse_model_platform(
+            config["model_platform"]
+        )
+
+        model = ModelFactory.create(**config)
+
+        return model
+
+    @classmethod
+    def create_from_json(cls, filepath: str) -> BaseModelBackend:
+        r"""Creates and returns a base model backend instance
+        from a JSON configuration file.
+
+        Args:
+            filepath (str): Path to the JSON file containing model
+                configuration.
+
+        Returns:
+            BaseModelBackend: An instance of the model backend based on the
+                configuration.
+        """
+
+        config = cls.__load_json(filepath)
+        config["model_platform"] = cls.__parse_model_platform(
+            config["model_platform"]
+        )
+
+        model = ModelFactory.create(**config)
+
+        return model
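The bulk of the new code is the pair of public entry points `create_from_yaml` and `create_from_json`: each loads the file into a dict, coerces the `model_platform` string into a `ModelPlatformType` member, and forwards everything else to `ModelFactory.create(**config)`. A minimal sketch of how a YAML config might be consumed; the key names mirror `ModelFactory.create`'s parameters, and the platform and model values are illustrative rather than prescribed by the diff:

    # config.yaml (illustrative)
    # model_platform: OPENAI          # or "ModelPlatformType.OPENAI"
    # model_type: gpt-4o-mini
    # model_config_dict:
    #   temperature: 0.0
    # timeout: 60

    from camel.models import ModelFactory

    # __parse_model_platform accepts either "OPENAI" or
    # "ModelPlatformType.OPENAI" and rejects unknown members with the
    # ValueError shown in the diff; the remaining keys pass through
    # to ModelFactory.create unchanged.
    model = ModelFactory.create_from_yaml("config.yaml")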
camel/models/modelscope_model.py
ADDED
@@ -0,0 +1,208 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import os
+from typing import Any, Dict, List, Optional, Type, Union
+
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from pydantic import BaseModel
+
+from camel.configs import MODELSCOPE_API_PARAMS, ModelScopeConfig
+from camel.messages import OpenAIMessage
+from camel.models import BaseModelBackend
+from camel.models._utils import try_modify_message_with_format
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ModelType,
+)
+from camel.utils import (
+    BaseTokenCounter,
+    OpenAITokenCounter,
+    api_keys_required,
+)
+
+
+class ModelScopeModel(BaseModelBackend):
+    r"""ModelScope API in a unified BaseModelBackend interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created, one of ModelScope series.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into :obj:`openai.ChatCompletion.create()`. If
+            :obj:`None`, :obj:`ModelScopeConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The MODELSCOPE_SDK_TOKEN for
+            authenticating with the ModelScope service. (default: :obj:`None`)
+            refer to the following link for more details:
+            https://modelscope.cn/my/myaccesstoken
+        url (Optional[str], optional): The url to the ModelScope service.
+            (default: :obj:`https://api-inference.modelscope.cn/v1/`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
+    """
+
+    @api_keys_required(
+        [
+            ("api_key", 'MODELSCOPE_SDK_TOKEN'),
+        ]
+    )
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = ModelScopeConfig().as_dict()
+        api_key = api_key or os.environ.get("MODELSCOPE_SDK_TOKEN")
+        url = url or os.environ.get(
+            "MODELSCOPE_API_BASE_URL",
+            "https://api-inference.modelscope.cn/v1/",
+        )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter, timeout
+        )
+        self._client = OpenAI(
+            timeout=self._timeout,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+        self._async_client = AsyncOpenAI(
+            timeout=self._timeout,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of ModelScope chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+
+        response = await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+        return response
+
+    def _run(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of ModelScope chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+
+        response = self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+        return response
+
+    def _prepare_request(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Dict[str, Any]:
+        request_config = self.model_config_dict.copy()
+        if tools:
+            request_config["tools"] = tools
+        elif response_format:
+            try_modify_message_with_format(messages[-1], response_format)
+            request_config["response_format"] = {"type": "json_object"}
+
+        return request_config
+
+    @property
+    def token_counter(self) -> BaseTokenCounter:
+        r"""Initialize the token counter for the model backend.
+
+        Returns:
+            OpenAITokenCounter: The token counter following the model's
+                tokenization style.
+        """
+
+        if not self._token_counter:
+            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
+        return self._token_counter
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to ModelScope API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to ModelScope API.
+        """
+        for param in self.model_config_dict:
+            if param not in MODELSCOPE_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into ModelScope model backend."
+                )
+
+    @property
+    def stream(self) -> bool:
+        r"""Returns whether the model is in stream mode, which sends partial
+        results each time.
+
+        Returns:
+            bool: Whether the model is in stream mode.
+        """
+        return self.model_config_dict.get('stream', False)
camel/models/moonshot_model.py
CHANGED
@@ -52,6 +52,10 @@ class MoonshotModel(BaseModelBackend):
             use for the model. If not provided, :obj:`OpenAITokenCounter(
             ModelType.GPT_4)` will be used.
             (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
     """
 
     @api_keys_required([("api_key", "MOONSHOT_API_KEY")])
@@ -62,6 +66,7 @@ class MoonshotModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         if model_config_dict is None:
             model_config_dict = MoonshotConfig().as_dict()
@@ -70,12 +75,13 @@ class MoonshotModel(BaseModelBackend):
             "MOONSHOT_API_BASE_URL",
             "https://api.moonshot.cn/v1",
         )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
         )
         self._client = OpenAI(
             api_key=self._api_key,
-            timeout=
+            timeout=self._timeout,
             max_retries=3,
             base_url=self._url,
         )
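Each of the remaining backends in this release gains the same timeout plumbing shown above: an explicit `timeout` argument wins, then the `MODEL_TIMEOUT` environment variable, then a 180-second default, with the resolved value stored on the backend and handed to the OpenAI client. A short sketch of that resolution order; the platform and model id are illustrative:

    import os

    from camel.models import ModelFactory
    from camel.types import ModelPlatformType

    # 1) An explicit argument takes precedence.
    model = ModelFactory.create(
        model_platform=ModelPlatformType.MOONSHOT,
        model_type="moonshot-v1-8k",  # illustrative model id
        timeout=30,  # this backend's OpenAI client times out after 30s
    )

    # 2) Otherwise MODEL_TIMEOUT is consulted, per the diff:
    #    timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
    os.environ["MODEL_TIMEOUT"] = "60"  # affects backends created afterwards

    # 3) With neither set, the default is 180 seconds.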
camel/models/nemotron_model.py
CHANGED
@@ -36,6 +36,10 @@ class NemotronModel(BaseModelBackend):
            the Nvidia service. (default: :obj:`None`)
        url (Optional[str], optional): The url to the Nvidia service.
            (default: :obj:`https://integrate.api.nvidia.com/v1`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
 
     Notes:
         Nemotron model doesn't support additional model config like OpenAI.
@@ -51,20 +55,22 @@ class NemotronModel(BaseModelBackend):
         model_type: Union[ModelType, str],
         api_key: Optional[str] = None,
         url: Optional[str] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         url = url or os.environ.get(
             "NVIDIA_API_BASE_URL", "https://integrate.api.nvidia.com/v1"
         )
         api_key = api_key or os.environ.get("NVIDIA_API_KEY")
-
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+        super().__init__(model_type, {}, api_key, url, None, timeout)
         self._client = OpenAI(
-            timeout=
+            timeout=self._timeout,
             max_retries=3,
             base_url=self._url,
             api_key=self._api_key,
         )
         self._async_client = AsyncOpenAI(
-            timeout=
+            timeout=self._timeout,
             max_retries=3,
             base_url=self._url,
             api_key=self._api_key,
camel/models/nvidia_model.py
CHANGED
@@ -47,6 +47,10 @@ class NvidiaModel(BaseModelBackend):
             use for the model. If not provided, :obj:`OpenAITokenCounter(
             ModelType.GPT_4)` will be used.
             (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
     """
 
     @api_keys_required(
@@ -61,6 +65,7 @@ class NvidiaModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         if model_config_dict is None:
             model_config_dict = NvidiaConfig().as_dict()
@@ -68,17 +73,18 @@ class NvidiaModel(BaseModelBackend):
         url = url or os.environ.get(
             "NVIDIA_API_BASE_URL", "https://integrate.api.nvidia.com/v1"
         )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
         )
         self._client = OpenAI(
-            timeout=
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
         )
         self._async_client = AsyncOpenAI(
-            timeout=
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
camel/models/ollama_model.py
CHANGED
@@ -49,6 +49,10 @@ class OllamaModel(BaseModelBackend):
             use for the model. If not provided, :obj:`OpenAITokenCounter(
             ModelType.GPT_4O_MINI)` will be used.
             (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
 
     References:
         https://github.com/ollama/ollama/blob/main/docs/openai.md
@@ -61,24 +65,26 @@ class OllamaModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         if model_config_dict is None:
             model_config_dict = OllamaConfig().as_dict()
         url = url or os.environ.get("OLLAMA_BASE_URL")
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
         )
         if not self._url:
             self._start_server()
         # Use OpenAI client as interface call Ollama
         self._client = OpenAI(
-            timeout=
+            timeout=self._timeout,
             max_retries=3,
             api_key="Set-but-ignored",  # required but ignored
             base_url=self._url,
         )
         self._async_client = AsyncOpenAI(
-            timeout=
+            timeout=self._timeout,
             max_retries=3,
             api_key="Set-but-ignored",  # required but ignored
             base_url=self._url,
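The Ollama backend still drives a local server through the OpenAI client (the API key is required by the client but ignored), so pointing it at a running instance is enough. A sketch assuming a locally running Ollama; the URL and model tag are illustrative:

    from camel.models import ModelFactory
    from camel.types import ModelPlatformType

    # url falls back to OLLAMA_BASE_URL; with neither set, the backend
    # attempts to launch a local server itself via _start_server().
    model = ModelFactory.create(
        model_platform=ModelPlatformType.OLLAMA,
        model_type="llama3.2",  # illustrative local model tag
        url="http://localhost:11434/v1",
        timeout=120,  # forwarded to the underlying OpenAI client
    )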
camel/models/openai_audio_models.py
CHANGED
@@ -29,19 +29,21 @@ class OpenAIAudioModels(BaseAudioModel):
         self,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         r"""Initialize an instance of OpenAI."""
-        super().__init__(api_key, url)
+        super().__init__(api_key, url, timeout)
         self._url = url or os.environ.get("OPENAI_API_BASE_URL")
         self._api_key = api_key or os.environ.get("OPENAI_API_KEY")
+        self._timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         self._client = OpenAI(
-            timeout=
+            timeout=self._timeout,
             max_retries=3,
             base_url=self._url,
             api_key=self._api_key,
         )
         self._async_client = AsyncOpenAI(
-            timeout=
+            timeout=self._timeout,
             max_retries=3,
             base_url=self._url,
             api_key=self._api_key,
@@ -76,8 +78,8 @@ class OpenAIAudioModels(BaseAudioModel):
         Returns:
             Union[List[_legacy_response.HttpxBinaryResponseContent],
                 _legacy_response.HttpxBinaryResponseContent]: List of response
-                content object from OpenAI if input
-                single response content if input
+                content object from OpenAI if input characters more than 4096,
+                single response content if input characters less than 4096.
 
         Raises:
             Exception: If there's an error during the TTS API call.
camel/models/openai_compatible_model.py
CHANGED
@@ -46,6 +46,10 @@ class OpenAICompatibleModel(BaseModelBackend):
             use for the model. If not provided, :obj:`OpenAITokenCounter(
             ModelType.GPT_4O_MINI)` will be used.
             (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
     """
 
     def __init__(
@@ -55,21 +59,23 @@ class OpenAICompatibleModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         api_key = api_key or os.environ.get("OPENAI_COMPATIBILITY_API_KEY")
         url = url or os.environ.get("OPENAI_COMPATIBILITY_API_BASE_URL")
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
        )
         self._client = OpenAI(
-            timeout=
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,
         )
 
         self._async_client = AsyncOpenAI(
-            timeout=
+            timeout=self._timeout,
             max_retries=3,
             api_key=self._api_key,
             base_url=self._url,