camel-ai 0.2.25__py3-none-any.whl → 0.2.27__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +4 -4
- camel/agents/knowledge_graph_agent.py +15 -3
- camel/configs/anthropic_config.py +0 -1
- camel/datasets/base.py +219 -17
- camel/environments/base.py +16 -8
- camel/extractors/__init__.py +2 -2
- camel/extractors/base.py +86 -64
- camel/extractors/python_strategies.py +226 -0
- camel/models/__init__.py +4 -0
- camel/models/anthropic_model.py +19 -55
- camel/models/azure_openai_model.py +88 -8
- camel/models/model_factory.py +3 -0
- camel/models/openai_compatible_model.py +88 -8
- camel/models/volcano_model.py +100 -0
- camel/py.typed +0 -0
- camel/storages/graph_storages/graph_element.py +3 -1
- camel/storages/graph_storages/neo4j_graph.py +78 -4
- camel/toolkits/__init__.py +4 -2
- camel/toolkits/{web_toolkit.py → browser_toolkit.py} +143 -76
- camel/toolkits/pubmed_toolkit.py +346 -0
- camel/toolkits/search_toolkit.py +63 -0
- camel/toolkits/terminal_toolkit.py +2 -2
- camel/types/enums.py +7 -0
- {camel_ai-0.2.25.dist-info → camel_ai-0.2.27.dist-info}/METADATA +2 -1
- {camel_ai-0.2.25.dist-info → camel_ai-0.2.27.dist-info}/RECORD +28 -24
- {camel_ai-0.2.25.dist-info → camel_ai-0.2.27.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.25.dist-info → camel_ai-0.2.27.dist-info}/licenses/LICENSE +0 -0
camel/extractors/python_strategies.py
ADDED

@@ -0,0 +1,226 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import ast
+from typing import Optional
+
+from camel.extractors.base import BaseExtractorStrategy
+from camel.logger import get_logger
+
+logger = get_logger(__name__)
+
+
+class BoxedStrategy(BaseExtractorStrategy):
+    r"""Extracts content from \\boxed{} environments."""
+
+    async def extract(self, text: str) -> Optional[str]:
+        r"""Extract content from \\boxed{} environments.
+
+        Args:
+            text (str): The input text to process.
+
+        Returns:
+            Optional[str]: Content inside \\boxed{} if found, else None.
+        """
+        # Find the start of the boxed content
+        boxed_pattern = "\\boxed{"
+        if boxed_pattern not in text:
+            logger.debug("No \\boxed{} content found in the response")
+            return None
+
+        start_idx = text.find(boxed_pattern) + len(boxed_pattern)
+        if start_idx >= len(text):
+            logger.debug("Malformed \\boxed{} (no content after opening)")
+            return None
+
+        # Use stack-based approach to handle nested braces
+        stack = 1  # Start with one opening brace
+        end_idx = start_idx
+        escape_mode = False
+
+        for i in range(start_idx, len(text)):
+            char = text[i]
+
+            # Handle escape sequences
+            if escape_mode:
+                escape_mode = False
+                continue
+
+            if char == '\\':
+                escape_mode = True
+                continue
+
+            if char == '{':
+                stack += 1
+            elif char == '}':
+                stack -= 1
+
+                if stack == 0:  # Found the matching closing brace
+                    end_idx = i
+                    break
+
+        # Check if we found a complete boxed expression
+        if stack != 0:
+            logger.debug("Unbalanced braces in \\boxed{} content")
+            return None
+
+        # Extract the content
+        content = text[start_idx:end_idx].strip()
+        logger.debug(f"Extracted boxed content: {content}")
+        return content
+
+
+class PythonListStrategy(BaseExtractorStrategy):
+    r"""Extracts and normalizes Python lists."""
+
+    async def extract(self, text: str) -> Optional[str]:
+        r"""Extract and normalize a Python list.
+
+        Args:
+            text (str): The input text to process.
+
+        Returns:
+            Optional[str]: Normalized list as a string if found, else None.
+        """
+        text = text.strip()
+        if not (text.startswith('[') and text.endswith(']')):
+            logger.debug("Content is not a list format (missing brackets)")
+            return None
+
+        try:
+            # Fix any escaped quotes before parsing
+            fixed_content = text.replace('\\"', '"')
+            parsed = ast.literal_eval(fixed_content)
+            if isinstance(parsed, list):
+                # Sort the list for normalization
+                sorted_list = sorted(parsed, key=lambda x: str(x))
+                return repr(sorted_list)
+            else:
+                logger.debug(f"Content is not a list, got {type(parsed)}")
+                return None
+        except (SyntaxError, ValueError) as e:
+            logger.debug(f"Failed to parse as Python list: {e}")
+            return None
+
+
+class PythonDictStrategy(BaseExtractorStrategy):
+    r"""Extracts and normalizes Python dictionaries."""
+
+    async def extract(self, text: str) -> Optional[str]:
+        r"""Extract and normalize a Python dictionary.
+
+        Args:
+            text (str): The input text to process.
+
+        Returns:
+            Optional[str]: Normalized dictionary as a string, else None.
+        """
+        text = text.strip()
+        if not (text.startswith('{') and text.endswith('}')):
+            logger.debug("Content is not a dictionary format (missing braces)")
+            return None
+
+        try:
+            # Fix any escaped quotes before parsing
+            fixed_content = text.replace('\\"', '"')
+            parsed = ast.literal_eval(fixed_content)
+            if isinstance(parsed, dict):
+                # Sort the dictionary items for normalization
+                sorted_dict = dict(
+                    sorted(parsed.items(), key=lambda x: str(x[0]))
+                )
+                return repr(sorted_dict)
+            else:
+                logger.debug(
+                    f"Content is not a dictionary, got {type(parsed)}"
+                )
+                return None
+        except (SyntaxError, ValueError) as e:
+            logger.debug(f"Failed to parse as Python dictionary: {e}")
+            return None
+
+
+class PythonSetStrategy(BaseExtractorStrategy):
+    r"""Extracts and normalizes Python sets."""
+
+    async def extract(self, text: str) -> Optional[str]:
+        r"""Extract and normalize a Python set.
+
+        Args:
+            text (str): The input text to process.
+
+        Returns:
+            Optional[str]: Normalized set as a string if found, else None.
+        """
+        text = text.strip()
+        # Check for set syntax: {1, 2, 3} or set([1, 2, 3])
+        if not (
+            (text.startswith('{') and text.endswith('}'))
+            or (text.startswith('set(') and text.endswith(')'))
+        ):
+            logger.debug("Content is not a set format")
+            return None
+
+        try:
+            # Fix any escaped quotes before parsing
+            fixed_content = text.replace('\\"', '"')
+            parsed = ast.literal_eval(fixed_content)
+            if isinstance(parsed, set):
+                # Sort the set elements for normalization
+                sorted_set = sorted(parsed, key=lambda x: str(x))
+                return repr(set(sorted_set))
+            else:
+                logger.debug(f"Content is not a set, got {type(parsed)}")
+                return None
+        except (SyntaxError, ValueError) as e:
+            logger.debug(f"Failed to parse as Python set: {e}")
+            return None
+
+
+class PythonTupleStrategy(BaseExtractorStrategy):
+    r"""Extracts and normalizes Python tuples."""
+
+    async def extract(self, text: str) -> Optional[str]:
+        r"""Extract and normalize a Python tuple.
+
+        Args:
+            text (str): The input text to process.
+
+        Returns:
+            Optional[str]: Normalized tuple as a string if found, else None.
+        """
+        text = text.strip()
+        # Check for tuple syntax: (1, 2, 3) or (1,)
+        if not (text.startswith('(') and text.endswith(')')):
+            logger.debug("Content is not a tuple format (missing parentheses)")
+            return None
+
+        try:
+            # Fix any escaped quotes before parsing
+            fixed_content = text.replace('\\"', '"')
+            parsed = ast.literal_eval(fixed_content)
+            if isinstance(parsed, tuple):
+                # Sort the tuple elements for normalization
+                sorted_tuple = tuple(sorted(parsed, key=lambda x: str(x)))
+                return repr(sorted_tuple)
+            else:
+                logger.debug(f"Content is not a tuple, got {type(parsed)}")
+                return None
+        except (SyntaxError, ValueError) as e:
+            logger.debug(f"Failed to parse as Python tuple: {e}")
+            return None
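For orientation, a minimal usage sketch of two of the new strategies (assumes camel-ai 0.2.27 is installed; the asyncio driver and the sample strings are illustrative, not taken from the diff):

import asyncio

from camel.extractors.python_strategies import BoxedStrategy, PythonListStrategy

async def demo() -> None:
    # BoxedStrategy scans for the matching closing brace, so nested
    # braces inside \boxed{} survive extraction.
    print(await BoxedStrategy().extract(r"Final answer: \boxed{\frac{1}{2}}"))
    # -> \frac{1}{2}

    # PythonListStrategy parses via ast.literal_eval and sorts the
    # elements, so order-insensitive answers normalize identically.
    print(await PythonListStrategy().extract("[3, 1, 2]"))
    # -> [1, 2, 3]

asyncio.run(demo())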
camel/models/__init__.py
CHANGED

@@ -37,9 +37,11 @@ from .qwen_model import QwenModel
 from .reka_model import RekaModel
 from .samba_model import SambaModel
 from .sglang_model import SGLangModel
+from .siliconflow_model import SiliconFlowModel
 from .stub_model import StubModel
 from .togetherai_model import TogetherAIModel
 from .vllm_model import VLLMModel
+from .volcano_model import VolcanoModel
 from .yi_model import YiModel
 from .zhipuai_model import ZhipuAIModel

@@ -76,4 +78,6 @@ __all__ = [
     'MoonshotModel',
     'AIMLModel',
     'BaseAudioModel',
+    'SiliconFlowModel',
+    'VolcanoModel',
 ]
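Net effect: both new backends become part of the package's public surface. A one-line sanity check (assuming camel-ai 0.2.27 is installed):

from camel.models import SiliconFlowModel, VolcanoModel  # both resolve after this change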
camel/models/anthropic_model.py
CHANGED

@@ -35,13 +35,13 @@ class AnthropicModel(BaseModelBackend):
         model_type (Union[ModelType, str]): Model for which a backend is
             created, one of CLAUDE_* series.
         model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
-            that will be fed into
+            that will be fed into `openai.ChatCompletion.create()`. If
             :obj:`None`, :obj:`AnthropicConfig().as_dict()` will be used.
             (default: :obj:`None`)
         api_key (Optional[str], optional): The API key for authenticating with
             the Anthropic service. (default: :obj:`None`)
         url (Optional[str], optional): The url to the Anthropic service.
-            (default: :obj:`
+            (default: :obj:`https://api.anthropic.com/v1/`)
         token_counter (Optional[BaseTokenCounter], optional): Token counter to
             use for the model. If not provided, :obj:`AnthropicTokenCounter`
             will be used. (default: :obj:`None`)

@@ -61,43 +61,24 @@ class AnthropicModel(BaseModelBackend):
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-        from
+        from openai import AsyncOpenAI, OpenAI

         if model_config_dict is None:
             model_config_dict = AnthropicConfig().as_dict()
         api_key = api_key or os.environ.get("ANTHROPIC_API_KEY")
-        url =
+        url = (
+            url
+            or os.environ.get("ANTHROPIC_API_BASE_URL")
+            or "https://api.anthropic.com/v1/"
+        )
         super().__init__(
             model_type, model_config_dict, api_key, url, token_counter
         )
-        self.client =
-        self.async_client = AsyncAnthropic(
-            api_key=self._api_key, base_url=self._url
-        )
+        self.client = OpenAI(base_url=self._url, api_key=self._api_key)

-
-
-        obj = ChatCompletion.construct(
-            id=None,
-            choices=[
-                dict(
-                    index=0,
-                    message={
-                        "role": "assistant",
-                        "content": next(
-                            content.text
-                            for content in response.content
-                            if content.type == "text"
-                        ),
-                    },
-                    finish_reason=response.stop_reason,
-                )
-            ],
-            created=None,
-            model=response.model,
-            object="chat.completion",
+        self.async_client = AsyncOpenAI(
+            api_key=self._api_key, base_url=self._url
         )
-        return obj

     @property
     def token_counter(self) -> BaseTokenCounter:

@@ -126,22 +107,13 @@ class AnthropicModel(BaseModelBackend):
         Returns:
             ChatCompletion: Response in the OpenAI API format.
         """
-
-
-        if messages[0]["role"] == "system":
-            sys_msg = str(messages.pop(0)["content"])
-        else:
-            sys_msg = NOT_GIVEN  # type: ignore[assignment]
-        response = self.client.messages.create(
+        response = self.client.chat.completions.create(
             model=self.model_type,
-
-            messages=messages,  # type: ignore[arg-type]
+            messages=messages,
             **self.model_config_dict,
+            tools=tools,  # type: ignore[arg-type]
         )

-        # format response to openai format
-        response = self._convert_response_from_anthropic_to_openai(response)
-
         return response

     async def _arun(

@@ -159,21 +131,14 @@ class AnthropicModel(BaseModelBackend):
         Returns:
             ChatCompletion: Response in the OpenAI API format.
         """
-
-
-        if messages[0]["role"] == "system":
-            sys_msg = str(messages.pop(0)["content"])
-        else:
-            sys_msg = NOT_GIVEN  # type: ignore[assignment]
-        response = await self.async_client.messages.create(
+        response = await self.async_client.chat.completions.create(
             model=self.model_type,
-
-            messages=messages,  # type: ignore[arg-type]
+            messages=messages,
             **self.model_config_dict,
+            tools=tools,  # type: ignore[arg-type]
         )

-
-        return self._convert_response_from_anthropic_to_openai(response)
+        return response

     def check_model_config(self):
         r"""Check whether the model configuration is valid for anthropic

@@ -181,8 +146,7 @@ class AnthropicModel(BaseModelBackend):

         Raises:
             ValueError: If the model configuration dictionary contains any
-                unexpected arguments to
-                :obj:`model_path` or :obj:`server_url`.
+                unexpected arguments to Anthropic API.
         """
         for param in self.model_config_dict:
             if param not in ANTHROPIC_API_PARAMS:
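The rewrite drops the anthropic SDK client and the hand-rolled response conversion in favor of Anthropic's OpenAI-compatible endpoint, so responses arrive already in OpenAI format. A minimal sketch of the equivalent raw call (the model id and the message are illustrative; key handling mirrors the constructor above):

import os

from openai import OpenAI

# Mirrors what AnthropicModel.__init__ now does internally: a plain
# OpenAI client pointed at Anthropic's OpenAI-compatible base URL.
client = OpenAI(
    base_url=os.environ.get("ANTHROPIC_API_BASE_URL", "https://api.anthropic.com/v1/"),
    api_key=os.environ["ANTHROPIC_API_KEY"],
)
response = client.chat.completions.create(
    model="claude-3-5-sonnet-latest",  # illustrative CLAUDE_* model id
    messages=[{"role": "user", "content": "Say hello."}],
)
print(response.choices[0].message.content)  # already OpenAI-shaped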
camel/models/azure_openai_model.py
CHANGED

@@ -128,18 +128,23 @@ class AzureOpenAIModel(BaseModelBackend):
         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.

         Returns:
             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
-
-
-            model=self.azure_deployment_name,  # type:ignore[arg-type]
-            **self.model_config_dict,
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
         )
-
+        if response_format:
+            return self._request_parse(messages, response_format, tools)
+        else:
+            return self._request_chat_completion(messages, tools)

     async def _arun(
         self,

@@ -152,18 +157,93 @@ class AzureOpenAIModel(BaseModelBackend):
         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.

         Returns:
             Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
-
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
+        )
+        if response_format:
+            return await self._arequest_parse(messages, response_format, tools)
+        else:
+            return await self._arequest_chat_completion(messages, tools)
+
+    def _request_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
+
+        if tools:
+            request_config["tools"] = tools
+
+        return self._client.chat.completions.create(
+            messages=messages,
+            model=self.azure_deployment_name,  # type:ignore[arg-type]
+            **request_config,
+        )
+
+    async def _arequest_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
+
+        if tools:
+            request_config["tools"] = tools
+
+        return await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.azure_deployment_name,  # type:ignore[arg-type]
+            **request_config,
+        )
+
+    def _request_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+        request_config.pop("stream", None)
+        if tools is not None:
+            request_config["tools"] = tools
+
+        return self._client.beta.chat.completions.parse(
+            messages=messages,
+            model=self.azure_deployment_name,  # type:ignore[arg-type]
+            **request_config,
+        )
+
+    async def _arequest_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+        request_config.pop("stream", None)
+        if tools is not None:
+            request_config["tools"] = tools
+
+        return await self._async_client.beta.chat.completions.parse(
             messages=messages,
             model=self.azure_deployment_name,  # type:ignore[arg-type]
-            **
+            **request_config,
         )
-        return response

     def check_model_config(self):
         r"""Check whether the model configuration contains any
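The key behavioral detail in _request_parse/_arequest_parse is the request-config surgery: response_format is injected, stream is stripped (the parse endpoint is non-streaming), and tools are forwarded. A small self-contained sketch of that logic under an assumed config dict:

from pydantic import BaseModel

class Answer(BaseModel):  # hypothetical structured-output schema
    value: int

model_config_dict = {"temperature": 0.2, "stream": True}  # assumed config

request_config = model_config_dict.copy()
request_config["response_format"] = Answer
request_config.pop("stream", None)  # parse() does not support streaming

assert request_config == {"temperature": 0.2, "response_format": Answer}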
camel/models/model_factory.py
CHANGED

@@ -37,6 +37,7 @@ from camel.models.siliconflow_model import SiliconFlowModel
 from camel.models.stub_model import StubModel
 from camel.models.togetherai_model import TogetherAIModel
 from camel.models.vllm_model import VLLMModel
+from camel.models.volcano_model import VolcanoModel
 from camel.models.yi_model import YiModel
 from camel.models.zhipuai_model import ZhipuAIModel
 from camel.types import ModelPlatformType, ModelType, UnifiedModelType

@@ -107,6 +108,8 @@ class ModelFactory:
             model_class = SiliconFlowModel
         elif model_platform.is_aiml:
             model_class = AIMLModel
+        elif model_platform.is_volcano:
+            model_class = VolcanoModel

         elif model_platform.is_openai and model_type.is_openai:
             model_class = OpenAIModel
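A sketch of how the new branch is reached, assuming the is_volcano property and a matching VOLCANO member were added to ModelPlatformType in camel/types/enums.py (consistent with the +7 lines listed for that file); the model id below is a placeholder:

from camel.models import ModelFactory
from camel.types import ModelPlatformType

# Volcano Engine (Ark) serves OpenAI-compatible endpoints, so the new
# VolcanoModel (+100 lines above) can reuse the generic request path.
model = ModelFactory.create(
    model_platform=ModelPlatformType.VOLCANO,  # assumed new enum member
    model_type="doubao-pro-32k",               # placeholder Volcano model id
)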
camel/models/openai_compatible_model.py
CHANGED

@@ -86,18 +86,23 @@ class OpenAICompatibleModel(BaseModelBackend):
         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.

         Returns:
             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
-
-
-            model=self.model_type,
-            **self.model_config_dict,
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
         )
-
+        if response_format:
+            return self._request_parse(messages, response_format, tools)
+        else:
+            return self._request_chat_completion(messages, tools)

     async def _arun(
         self,

@@ -110,18 +115,93 @@ class OpenAICompatibleModel(BaseModelBackend):
         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.

         Returns:
             Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
         """
-
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
+        )
+        if response_format:
+            return await self._arequest_parse(messages, response_format, tools)
+        else:
+            return await self._arequest_chat_completion(messages, tools)
+
+    def _request_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
+
+        if tools:
+            request_config["tools"] = tools
+
+        return self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    async def _arequest_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
+
+        if tools:
+            request_config["tools"] = tools
+
+        return await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    def _request_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+        request_config.pop("stream", None)
+        if tools is not None:
+            request_config["tools"] = tools
+
+        return self._client.beta.chat.completions.parse(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    async def _arequest_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+        request_config.pop("stream", None)
+        if tools is not None:
+            request_config["tools"] = tools
+
+        return await self._async_client.beta.chat.completions.parse(
             messages=messages,
             model=self.model_type,
-            **
+            **request_config,
         )
-        return response

     @property
     def token_counter(self) -> BaseTokenCounter:
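With this change, any OpenAI-compatible endpoint gains per-request structured output: pass a Pydantic class as response_format and the backend routes through beta.chat.completions.parse instead of create. A hedged end-to-end sketch (the endpoint URL, key, model id, and schema are placeholders; the platform enum member exists in current camel releases):

from pydantic import BaseModel

from camel.models import ModelFactory
from camel.types import ModelPlatformType

class Verdict(BaseModel):  # hypothetical response schema
    answer: str
    confident: bool

model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
    model_type="llama-3.1-70b-instruct",  # placeholder model id
    url="https://example.com/v1",         # placeholder endpoint
    api_key="sk-placeholder",
)
# With response_format supplied, _run() dispatches to _request_parse(),
# which strips `stream` and returns a parsed ChatCompletion.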