camel-ai 0.2.21__py3-none-any.whl → 0.2.23a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of camel-ai has been flagged as potentially problematic.
- camel/__init__.py +1 -1
- camel/agents/_types.py +41 -0
- camel/agents/_utils.py +188 -0
- camel/agents/chat_agent.py +556 -965
- camel/agents/knowledge_graph_agent.py +7 -1
- camel/agents/multi_hop_generator_agent.py +1 -1
- camel/configs/base_config.py +10 -13
- camel/configs/deepseek_config.py +4 -30
- camel/configs/gemini_config.py +5 -31
- camel/configs/openai_config.py +14 -32
- camel/configs/qwen_config.py +36 -36
- camel/datagen/self_improving_cot.py +79 -1
- camel/datagen/self_instruct/filter/instruction_filter.py +19 -3
- camel/datagen/self_instruct/self_instruct.py +7 -2
- camel/datasets/__init__.py +28 -0
- camel/datasets/base.py +969 -0
- camel/embeddings/openai_embedding.py +10 -1
- camel/environments/__init__.py +16 -0
- camel/environments/base.py +503 -0
- camel/extractors/__init__.py +16 -0
- camel/extractors/base.py +263 -0
- camel/interpreters/docker/Dockerfile +12 -0
- camel/interpreters/docker_interpreter.py +19 -1
- camel/interpreters/subprocess_interpreter.py +42 -17
- camel/loaders/__init__.py +2 -0
- camel/loaders/mineru_extractor.py +250 -0
- camel/memories/agent_memories.py +16 -1
- camel/memories/blocks/chat_history_block.py +10 -2
- camel/memories/blocks/vectordb_block.py +1 -0
- camel/memories/context_creators/score_based.py +20 -3
- camel/memories/records.py +10 -0
- camel/messages/base.py +8 -8
- camel/models/_utils.py +57 -0
- camel/models/aiml_model.py +48 -17
- camel/models/anthropic_model.py +41 -3
- camel/models/azure_openai_model.py +39 -3
- camel/models/base_model.py +132 -4
- camel/models/cohere_model.py +88 -11
- camel/models/deepseek_model.py +107 -63
- camel/models/gemini_model.py +133 -15
- camel/models/groq_model.py +72 -10
- camel/models/internlm_model.py +14 -3
- camel/models/litellm_model.py +9 -2
- camel/models/mistral_model.py +42 -5
- camel/models/model_manager.py +48 -3
- camel/models/moonshot_model.py +33 -4
- camel/models/nemotron_model.py +32 -3
- camel/models/nvidia_model.py +43 -3
- camel/models/ollama_model.py +139 -17
- camel/models/openai_audio_models.py +7 -1
- camel/models/openai_compatible_model.py +37 -3
- camel/models/openai_model.py +158 -46
- camel/models/qwen_model.py +61 -4
- camel/models/reka_model.py +53 -3
- camel/models/samba_model.py +209 -4
- camel/models/sglang_model.py +153 -14
- camel/models/siliconflow_model.py +16 -3
- camel/models/stub_model.py +46 -4
- camel/models/togetherai_model.py +38 -3
- camel/models/vllm_model.py +37 -3
- camel/models/yi_model.py +36 -3
- camel/models/zhipuai_model.py +38 -3
- camel/retrievers/__init__.py +3 -0
- camel/retrievers/hybrid_retrival.py +237 -0
- camel/toolkits/__init__.py +9 -2
- camel/toolkits/arxiv_toolkit.py +2 -1
- camel/toolkits/ask_news_toolkit.py +4 -2
- camel/toolkits/base.py +22 -3
- camel/toolkits/code_execution.py +2 -0
- camel/toolkits/dappier_toolkit.py +2 -1
- camel/toolkits/data_commons_toolkit.py +38 -12
- camel/toolkits/function_tool.py +13 -0
- camel/toolkits/github_toolkit.py +5 -1
- camel/toolkits/google_maps_toolkit.py +2 -1
- camel/toolkits/google_scholar_toolkit.py +2 -0
- camel/toolkits/human_toolkit.py +0 -3
- camel/toolkits/linkedin_toolkit.py +3 -2
- camel/toolkits/meshy_toolkit.py +3 -2
- camel/toolkits/mineru_toolkit.py +178 -0
- camel/toolkits/networkx_toolkit.py +240 -0
- camel/toolkits/notion_toolkit.py +2 -0
- camel/toolkits/openbb_toolkit.py +3 -2
- camel/toolkits/reddit_toolkit.py +11 -3
- camel/toolkits/retrieval_toolkit.py +6 -1
- camel/toolkits/semantic_scholar_toolkit.py +2 -1
- camel/toolkits/stripe_toolkit.py +8 -2
- camel/toolkits/sympy_toolkit.py +44 -1
- camel/toolkits/video_toolkit.py +2 -0
- camel/toolkits/whatsapp_toolkit.py +3 -2
- camel/toolkits/zapier_toolkit.py +191 -0
- camel/types/__init__.py +2 -2
- camel/types/agents/__init__.py +16 -0
- camel/types/agents/tool_calling_record.py +52 -0
- camel/types/enums.py +3 -0
- camel/types/openai_types.py +16 -14
- camel/utils/__init__.py +2 -1
- camel/utils/async_func.py +2 -2
- camel/utils/commons.py +114 -1
- camel/verifiers/__init__.py +23 -0
- camel/verifiers/base.py +340 -0
- camel/verifiers/models.py +82 -0
- camel/verifiers/python_verifier.py +202 -0
- {camel_ai-0.2.21.dist-info → camel_ai-0.2.23a0.dist-info}/METADATA +273 -256
- {camel_ai-0.2.21.dist-info → camel_ai-0.2.23a0.dist-info}/RECORD +106 -85
- {camel_ai-0.2.21.dist-info → camel_ai-0.2.23a0.dist-info}/WHEEL +1 -1
- {camel_ai-0.2.21.dist-info → camel_ai-0.2.23a0.dist-info}/LICENSE +0 -0
camel/models/gemini_model.py
CHANGED

```diff
@@ -12,9 +12,10 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union

-from openai import OpenAI, Stream
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from pydantic import BaseModel

 from camel.configs import Gemini_API_PARAMS, GeminiConfig
 from camel.messages import OpenAIMessage
@@ -81,37 +82,154 @@ class GeminiModel(BaseModelBackend):
             api_key=self._api_key,
             base_url=self._url,
         )
+        self._async_client = AsyncOpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )

-    def
+    def _process_messages(self, messages) -> List[OpenAIMessage]:
+        r"""Process the messages for Gemini API to ensure no empty content,
+        which is not accepted by Gemini.
+        """
+        processed_messages = []
+        for msg in messages:
+            msg_copy = msg.copy()
+            if 'content' in msg_copy and msg_copy['content'] == '':
+                msg_copy['content'] = 'null'
+            processed_messages.append(msg_copy)
+        return processed_messages
+
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs inference of Gemini chat completion.

         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.

         Returns:
             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
-
-
-
-
-
-
-
-
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
+        )
+        messages = self._process_messages(messages)
+        if response_format:
+            return self._request_parse(messages, response_format)
+        else:
+            return self._request_chat_completion(messages, tools)
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI chat completion in async mode.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
+        )
+        messages = self._process_messages(messages)
+        if response_format:
+            return await self._arequest_parse(messages, response_format)
+        else:
+            return await self._arequest_chat_completion(messages, tools)
+
+    def _request_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
+
+        if tools:
+            for tool in tools:
+                function_dict = tool.get('function', {})
+                function_dict.pop("strict", None)
+            request_config["tools"] = tools
+
+        return self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    async def _arequest_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
+
+        if tools:
+            for tool in tools:
+                function_dict = tool.get('function', {})
+                function_dict.pop("strict", None)
+            request_config["tools"] = tools
+
+        return await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    def _request_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+        request_config.pop("stream", None)
+
+        return self._client.beta.chat.completions.parse(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    async def _arequest_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+        request_config.pop("stream", None)

-
-            messages=
+        return await self._async_client.beta.chat.completions.parse(
+            messages=messages,
             model=self.model_type,
-            **
+            **request_config,
         )
-        return response

     @property
     def token_counter(self) -> BaseTokenCounter:
```
camel/models/groq_model.py
CHANGED

```diff
@@ -12,13 +12,15 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union

-from openai import OpenAI, Stream
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from pydantic import BaseModel

 from camel.configs import GROQ_API_PARAMS, GroqConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
+from camel.models._utils import try_modify_message_with_format
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
@@ -51,11 +53,7 @@ class GroqModel(BaseModelBackend):
             (default: :obj:`None`)
     """

-    @api_keys_required(
-        [
-            ("api_key", "GROQ_API_KEY"),
-        ]
-    )
+    @api_keys_required([("api_key", "GROQ_API_KEY")])
     def __init__(
         self,
         model_type: Union[ModelType, str],
@@ -79,6 +77,12 @@ class GroqModel(BaseModelBackend):
             api_key=self._api_key,
             base_url=self._url,
         )
+        self._async_client = AsyncOpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )

     @property
     def token_counter(self) -> BaseTokenCounter:
@@ -92,25 +96,83 @@ class GroqModel(BaseModelBackend):
         self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
         return self._token_counter

-    def
+    def _prepare_request(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Dict[str, Any]:
+        request_config = self.model_config_dict.copy()
+        if tools:
+            request_config["tools"] = tools
+        elif response_format:
+            try_modify_message_with_format(messages[-1], response_format)
+            request_config["response_format"] = {"type": "json_object"}
+
+        return request_config
+
+    def _run(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of
+        r"""Runs inference of Groq chat completion.

         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.

         Returns:
             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+
         response = self._client.chat.completions.create(
             messages=messages,
             model=self.model_type,
-            **
+            **request_config,
+        )
+
+        return response
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of Groq chat completion asynchronously.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+
+        response = await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
         )

         return response
```
camel/models/internlm_model.py
CHANGED

```diff
@@ -13,9 +13,10 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union

-from openai import OpenAI, Stream
+from openai import AsyncStream, OpenAI, Stream
+from pydantic import BaseModel

 from camel.configs import INTERNLM_API_PARAMS, InternLMConfig
 from camel.messages import OpenAIMessage
@@ -82,9 +83,11 @@ class InternLMModel(BaseModelBackend):
             base_url=self._url,
         )

-    def
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs inference of InternLM chat completion.

@@ -104,6 +107,14 @@ class InternLMModel(BaseModelBackend):
         )
         return response

+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        raise NotImplementedError("InternLM does not support async inference.")
+
     @property
     def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.
```
camel/models/litellm_model.py
CHANGED

```diff
@@ -11,7 +11,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
+
+from pydantic import BaseModel

 from camel.configs import LITELLM_API_PARAMS, LiteLLMConfig
 from camel.messages import OpenAIMessage
@@ -106,9 +108,14 @@ class LiteLLMModel(BaseModelBackend):
         self._token_counter = LiteLLMTokenCounter(self.model_type)
         return self._token_counter

-    def
+    async def _arun(self) -> None:  # type: ignore[override]
+        raise NotImplementedError
+
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> ChatCompletion:
         r"""Runs inference of LiteLLM chat completion.

```
camel/models/mistral_model.py
CHANGED

```diff
@@ -12,7 +12,9 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union
+
+from pydantic import BaseModel

 if TYPE_CHECKING:
     from mistralai.models import (
@@ -20,10 +22,13 @@ if TYPE_CHECKING:
         Messages,
     )

+from openai import AsyncStream
+
 from camel.configs import MISTRAL_API_PARAMS, MistralConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
-from camel.
+from camel.models._utils import try_modify_message_with_format
+from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
 from camel.utils import (
     BaseTokenCounter,
     OpenAITokenCounter,
@@ -212,25 +217,42 @@ class MistralModel(BaseModelBackend):
         )
         return self._token_counter

-    def
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        raise NotImplementedError("Mistral does not support async inference.")
+
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> ChatCompletion:
         r"""Runs inference of Mistral chat completion.

         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response for this query.
+            tools (Optional[List[Dict[str, Any]]]): The tools to use for this
+                query.

         Returns:
-            ChatCompletion.
+            ChatCompletion: The response from the model.
         """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
         mistral_messages = self._to_mistral_chatmessage(messages)

         response = self._client.chat.complete(
             messages=mistral_messages,
             model=self.model_type,
-            **
+            **request_config,
         )

         openai_response = self._to_openai_response(response)  # type: ignore[arg-type]
@@ -251,6 +273,21 @@ class MistralModel(BaseModelBackend):

         return openai_response

+    def _prepare_request(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Dict[str, Any]:
+        request_config = self.model_config_dict.copy()
+        if tools:
+            request_config["tools"] = tools
+        elif response_format:
+            try_modify_message_with_format(messages[-1], response_format)
+            request_config["response_format"] = {"type": "json_object"}
+
+        return request_config
+
     def check_model_config(self):
         r"""Check whether the model configuration contains any
         unexpected arguments to Mistral API.
```
camel/models/model_manager.py
CHANGED

```diff
@@ -20,10 +20,13 @@ from typing import (
     Callable,
     Dict,
     List,
+    Optional,
+    Type,
     Union,
 )

-from openai import Stream
+from openai import AsyncStream, Stream
+from pydantic import BaseModel

 from camel.messages import OpenAIMessage
 from camel.models.base_model import BaseModelBackend
@@ -178,7 +181,10 @@ class ModelManager:
         return choice(self.models)

     def run(
-        self,
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Process a list of messages by selecting a model based on
         the scheduling strategy.
@@ -198,7 +204,46 @@ class ModelManager:

         # Pass all messages to the selected model and get the response
         try:
-            response = self.current_model.run(messages)
+            response = self.current_model.run(messages, response_format, tools)
+        except Exception as exc:
+            logger.error(f"Error processing with model: {self.current_model}")
+            if self.scheduling_strategy == self.always_first:
+                self.scheduling_strategy = self.round_robin
+                logger.warning(
+                    "The scheduling strategy has been changed to 'round_robin'"
+                )
+                # Skip already used one
+                self.current_model = self.scheduling_strategy()
+            raise exc
+        return response
+
+    async def arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Process a list of messages by selecting a model based on
+            the scheduling strategy.
+            Sends the entire list of messages to the selected model,
+            and returns a single response.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat
+                history in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        self.current_model = self.scheduling_strategy()
+
+        # Pass all messages to the selected model and get the response
+        try:
+            response = await self.current_model.arun(
+                messages, response_format, tools
+            )
         except Exception as exc:
             logger.error(f"Error processing with model: {self.current_model}")
             if self.scheduling_strategy == self.always_first:
```
camel/models/moonshot_model.py
CHANGED

```diff
@@ -13,13 +13,15 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union

-from openai import OpenAI, Stream
+from openai import AsyncStream, OpenAI, Stream
+from pydantic import BaseModel

 from camel.configs import MOONSHOT_API_PARAMS, MoonshotConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
+from camel.models._utils import try_modify_message_with_format
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
@@ -78,9 +80,24 @@ class MoonshotModel(BaseModelBackend):
             base_url=self._url,
         )

-    def
+    def _prepare_request(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Dict[str, Any]:
+        request_config = self.model_config_dict.copy()
+        if tools:
+            request_config["tools"] = tools
+        elif response_format:
+            try_modify_message_with_format(messages[-1], response_format)
+        return request_config
+
+    def _run(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs inference of Moonshot chat completion.

@@ -93,13 +110,25 @@ class MoonshotModel(BaseModelBackend):
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+
         response = self._client.chat.completions.create(
             messages=messages,
             model=self.model_type,
-            **
+            **request_config,
         )
         return response

+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        raise NotImplementedError("Moonshot does not support async inference.")
+
     @property
     def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.
```
camel/models/nemotron_model.py
CHANGED

```diff
@@ -12,9 +12,10 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union

-from openai import OpenAI
+from openai import AsyncOpenAI, OpenAI
+from pydantic import BaseModel

 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
@@ -62,10 +63,38 @@ class NemotronModel(BaseModelBackend):
             base_url=self._url,
             api_key=self._api_key,
         )
+        self._async_client = AsyncOpenAI(
+            timeout=180,
+            max_retries=3,
+            base_url=self._url,
+            api_key=self._api_key,
+        )
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        r"""Runs inference of OpenAI chat completion asynchronously.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list.
+
+        Returns:
+            ChatCompletion.
+        """
+        response = await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+        )
+        return response

-    def
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> ChatCompletion:
         r"""Runs inference of OpenAI chat completion.

```