camel-ai 0.2.22__py3-none-any.whl → 0.2.23__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/_types.py +41 -0
- camel/agents/_utils.py +188 -0
- camel/agents/chat_agent.py +570 -965
- camel/agents/knowledge_graph_agent.py +7 -1
- camel/agents/multi_hop_generator_agent.py +1 -1
- camel/configs/base_config.py +10 -13
- camel/configs/deepseek_config.py +4 -30
- camel/configs/gemini_config.py +5 -31
- camel/configs/openai_config.py +14 -32
- camel/configs/qwen_config.py +36 -36
- camel/datagen/self_improving_cot.py +81 -3
- camel/datagen/self_instruct/filter/instruction_filter.py +19 -3
- camel/datagen/self_instruct/self_instruct.py +52 -3
- camel/datasets/__init__.py +28 -0
- camel/datasets/base.py +969 -0
- camel/environments/__init__.py +16 -0
- camel/environments/base.py +503 -0
- camel/extractors/__init__.py +16 -0
- camel/extractors/base.py +263 -0
- camel/memories/agent_memories.py +16 -1
- camel/memories/blocks/chat_history_block.py +10 -2
- camel/memories/blocks/vectordb_block.py +1 -0
- camel/memories/context_creators/score_based.py +20 -3
- camel/memories/records.py +10 -0
- camel/messages/base.py +8 -8
- camel/models/__init__.py +2 -0
- camel/models/_utils.py +57 -0
- camel/models/aiml_model.py +48 -17
- camel/models/anthropic_model.py +41 -3
- camel/models/azure_openai_model.py +39 -3
- camel/models/base_audio_model.py +92 -0
- camel/models/base_model.py +88 -13
- camel/models/cohere_model.py +88 -11
- camel/models/deepseek_model.py +107 -45
- camel/models/fish_audio_model.py +18 -8
- camel/models/gemini_model.py +133 -15
- camel/models/groq_model.py +72 -10
- camel/models/internlm_model.py +14 -3
- camel/models/litellm_model.py +9 -2
- camel/models/mistral_model.py +42 -5
- camel/models/model_manager.py +57 -3
- camel/models/moonshot_model.py +33 -4
- camel/models/nemotron_model.py +32 -3
- camel/models/nvidia_model.py +43 -3
- camel/models/ollama_model.py +139 -17
- camel/models/openai_audio_models.py +87 -2
- camel/models/openai_compatible_model.py +37 -3
- camel/models/openai_model.py +158 -46
- camel/models/qwen_model.py +61 -4
- camel/models/reka_model.py +53 -3
- camel/models/samba_model.py +209 -4
- camel/models/sglang_model.py +153 -14
- camel/models/siliconflow_model.py +16 -3
- camel/models/stub_model.py +46 -4
- camel/models/togetherai_model.py +38 -3
- camel/models/vllm_model.py +37 -3
- camel/models/yi_model.py +36 -3
- camel/models/zhipuai_model.py +38 -3
- camel/retrievers/__init__.py +3 -0
- camel/retrievers/hybrid_retrival.py +237 -0
- camel/toolkits/__init__.py +15 -1
- camel/toolkits/arxiv_toolkit.py +2 -1
- camel/toolkits/ask_news_toolkit.py +4 -2
- camel/toolkits/audio_analysis_toolkit.py +238 -0
- camel/toolkits/base.py +22 -3
- camel/toolkits/code_execution.py +2 -0
- camel/toolkits/dappier_toolkit.py +2 -1
- camel/toolkits/data_commons_toolkit.py +38 -12
- camel/toolkits/excel_toolkit.py +172 -0
- camel/toolkits/function_tool.py +13 -0
- camel/toolkits/github_toolkit.py +5 -1
- camel/toolkits/google_maps_toolkit.py +2 -1
- camel/toolkits/google_scholar_toolkit.py +2 -0
- camel/toolkits/human_toolkit.py +0 -3
- camel/toolkits/image_analysis_toolkit.py +202 -0
- camel/toolkits/linkedin_toolkit.py +3 -2
- camel/toolkits/meshy_toolkit.py +3 -2
- camel/toolkits/mineru_toolkit.py +2 -2
- camel/toolkits/networkx_toolkit.py +240 -0
- camel/toolkits/notion_toolkit.py +2 -0
- camel/toolkits/openbb_toolkit.py +3 -2
- camel/toolkits/page_script.js +376 -0
- camel/toolkits/reddit_toolkit.py +11 -3
- camel/toolkits/retrieval_toolkit.py +6 -1
- camel/toolkits/semantic_scholar_toolkit.py +2 -1
- camel/toolkits/stripe_toolkit.py +8 -2
- camel/toolkits/sympy_toolkit.py +6 -1
- camel/toolkits/video_analysis_toolkit.py +407 -0
- camel/toolkits/{video_toolkit.py → video_download_toolkit.py} +21 -25
- camel/toolkits/web_toolkit.py +1307 -0
- camel/toolkits/whatsapp_toolkit.py +3 -2
- camel/toolkits/zapier_toolkit.py +191 -0
- camel/types/__init__.py +2 -2
- camel/types/agents/__init__.py +16 -0
- camel/types/agents/tool_calling_record.py +52 -0
- camel/types/enums.py +3 -0
- camel/types/openai_types.py +16 -14
- camel/utils/__init__.py +2 -1
- camel/utils/async_func.py +2 -2
- camel/utils/commons.py +114 -1
- camel/verifiers/__init__.py +23 -0
- camel/verifiers/base.py +340 -0
- camel/verifiers/models.py +82 -0
- camel/verifiers/python_verifier.py +202 -0
- camel_ai-0.2.23.dist-info/METADATA +671 -0
- {camel_ai-0.2.22.dist-info → camel_ai-0.2.23.dist-info}/RECORD +122 -97
- {camel_ai-0.2.22.dist-info → camel_ai-0.2.23.dist-info}/WHEEL +1 -1
- camel_ai-0.2.22.dist-info/METADATA +0 -527
- {camel_ai-0.2.22.dist-info → camel_ai-0.2.23.dist-info/licenses}/LICENSE +0 -0
camel/models/deepseek_model.py
CHANGED
@@ -13,13 +13,15 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union

-from openai import OpenAI, Stream
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from pydantic import BaseModel

 from camel.configs import DEEPSEEK_API_PARAMS, DeepSeekConfig
 from camel.logger import get_logger
 from camel.messages import OpenAIMessage
+from camel.models._utils import try_modify_message_with_format
 from camel.models.base_model import BaseModelBackend
 from camel.types import (
     ChatCompletion,
@@ -30,6 +32,16 @@ from camel.utils import BaseTokenCounter, OpenAITokenCounter, api_keys_required

 logger = get_logger(__name__)

+REASONSER_UNSUPPORTED_PARAMS = [
+    "temperature",
+    "top_p",
+    "presence_penalty",
+    "frequency_penalty",
+    "logprobs",
+    "top_logprobs",
+    "tools",
+]
+

 class DeepSeekModel(BaseModelBackend):
     r"""DeepSeek API in a unified BaseModelBackend interface.
@@ -84,6 +96,13 @@ class DeepSeekModel(BaseModelBackend):
             base_url=self._url,
         )

+        self._async_client = AsyncOpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+
     @property
     def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.
@@ -98,64 +117,51 @@
         )
         return self._token_counter

-    def
+    def _prepare_request(
         self,
         messages: List[OpenAIMessage],
-
-
-
-
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Dict[str, Any]:
+        request_config = self.model_config_dict.copy()

-        Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
-        """
-        # deepseek reasoner has limitations
-        # reference: https://api-docs.deepseek.com/guides/reasoning_model#api-parameters
         if self.model_type in [
             ModelType.DEEPSEEK_REASONER,
         ]:
             logger.warning(
-                "You are using
+                "Warning: You are using an DeepSeek Reasoner model, "
                 "which has certain limitations, reference: "
-                "`https://api-docs.deepseek.com/guides/reasoning_model
+                "`https://api-docs.deepseek.com/guides/reasoning_model"
+                "#api-parameters`.",
             )
+            request_config = {
+                key: value
+                for key, value in request_config.items()
+                if key not in REASONSER_UNSUPPORTED_PARAMS
+            }

-
-
-
-                "
-
-
-
-
-                "top_logprobs",
-                "tools",
-            ]
-            for key in unsupported_keys:
-                if key in self.model_config_dict:
-                    del self.model_config_dict[key]
+        if tools:
+            for tool in tools:
+                function_dict = tool.get('function', {})
+                function_dict.pop("strict", None)
+            request_config["tools"] = tools
+        elif response_format:
+            try_modify_message_with_format(messages[-1], response_format)
+            request_config["response_format"] = {"type": "json_object"}

-
-            messages=messages,
-            model=self.model_type,
-            **self.model_config_dict,
-        )
+        return request_config

-
+    def _post_handle_response(
+        self, response: ChatCompletion
+    ) -> ChatCompletion:
+        r"""Handle reasoning content with <think> tags at the beginning."""
         if (
-            self.model_type
-            in [
-                ModelType.DEEPSEEK_REASONER,
-            ]
+            self.model_type in [ModelType.DEEPSEEK_REASONER]
             and os.environ.get("GET_REASONING_CONTENT", "false").lower()
             == "true"
         ):
-            reasoning_content = response.choices[0].message.reasoning_content
-            combined_content = (
+            reasoning_content = response.choices[0].message.reasoning_content  # type: ignore[attr-defined]
+            combined_content = (  # type: ignore[operator]
                 f"<think>\n{reasoning_content}\n</think>\n"
                 if reasoning_content
                 else ""
@@ -181,9 +187,65 @@ class DeepSeekModel(BaseModelBackend):
             object="chat.completion",
             usage=response.usage,
         )
-
         return response

+    def _run(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of DeepSeek chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+
+        response = self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+        return self._post_handle_response(response)
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of DeepSeek chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+        response = await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+        return self._post_handle_response(response)
+
     def check_model_config(self):
         r"""Check whether the model configuration contains any
         unexpected arguments to DeepSeek API.
camel/models/fish_audio_model.py
CHANGED
@@ -15,8 +15,10 @@
 import os
 from typing import Any, Optional

+from camel.models.base_audio_model import BaseAudioModel

-class FishAudioModel:
+
+class FishAudioModel(BaseAudioModel):
     r"""Provides access to FishAudio's Text-to-Speech (TTS) and Speech_to_Text
     (STT) models.
     """
@@ -37,6 +39,7 @@ class FishAudioModel:
         """
         from fish_audio_sdk import Session

+        super().__init__(api_key, url)
         self._api_key = api_key or os.environ.get("FISHAUDIO_API_KEY")
         self._url = url or os.environ.get(
             "FISHAUDIO_API_BASE_URL", "https://api.fish.audio"
@@ -46,7 +49,8 @@ class FishAudioModel:
     def text_to_speech(
         self,
         input: str,
-
+        *,
+        storage_path: Optional[str] = None,
         reference_id: Optional[str] = None,
         reference_audio: Optional[str] = None,
         reference_audio_text: Optional[str] = None,
@@ -55,9 +59,9 @@ class FishAudioModel:
         r"""Convert text to speech and save the output to a file.

         Args:
-
-            storage_path (str): The file path where the resulting
-                be saved.
+            input (str): The text to convert to speech.
+            storage_path (Optional[str]): The file path where the resulting
+                speech will be saved. (default: :obj:`None`)
             reference_id (Optional[str]): An optional reference ID to
                 associate with the request. (default: :obj:`None`)
             reference_audio (Optional[str]): Path to an audio file for
@@ -68,12 +72,18 @@ class FishAudioModel:

         Raises:
             FileNotFoundError: If the reference audio file cannot be found.
+            ValueError: If storage_path is not provided or if reference_audio
+                is provided without reference_audio_text.
         """
         from fish_audio_sdk import ReferenceAudio, TTSRequest

-
-
-
+        if storage_path is None:
+            raise ValueError(
+                "storage_path must be provided for "
+                "FishAudioModel.text_to_speech"
+            )
+
+        self._ensure_directory_exists(storage_path)

         if not reference_audio:
             with open(f"{storage_path}", "wb") as f:
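With this change `FishAudioModel` inherits from the new `BaseAudioModel`, and `text_to_speech` makes `storage_path` keyword-only and optional, raising `ValueError` when it is missing. A hedged usage sketch of the new call shape; it assumes `fish_audio_sdk` is installed, a valid `FISHAUDIO_API_KEY` is set, and that `FishAudioModel` is importable from `camel.models`.

# Sketch only: keyword-only storage_path per the new signature.
from camel.models import FishAudioModel  # import path is an assumption

tts = FishAudioModel()

# Positional storage_path (the 0.2.22 call shape) no longer works; it must be
# passed by keyword, and omitting it now raises ValueError.
tts.text_to_speech(
    "Hello from CAMEL 0.2.23",
    storage_path="./hello_camel.mp3",
)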
camel/models/gemini_model.py
CHANGED
@@ -12,9 +12,10 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union

-from openai import OpenAI, Stream
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from pydantic import BaseModel

 from camel.configs import Gemini_API_PARAMS, GeminiConfig
 from camel.messages import OpenAIMessage
@@ -81,37 +82,154 @@ class GeminiModel(BaseModelBackend):
             api_key=self._api_key,
             base_url=self._url,
         )
+        self._async_client = AsyncOpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )

-    def
+    def _process_messages(self, messages) -> List[OpenAIMessage]:
+        r"""Process the messages for Gemini API to ensure no empty content,
+        which is not accepted by Gemini.
+        """
+        processed_messages = []
+        for msg in messages:
+            msg_copy = msg.copy()
+            if 'content' in msg_copy and msg_copy['content'] == '':
+                msg_copy['content'] = 'null'
+            processed_messages.append(msg_copy)
+        return processed_messages
+
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs inference of Gemini chat completion.

         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.

         Returns:
             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
-
-
-
-
-
-
-
-
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
+        )
+        messages = self._process_messages(messages)
+        if response_format:
+            return self._request_parse(messages, response_format)
+        else:
+            return self._request_chat_completion(messages, tools)
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI chat completion in async mode.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
+        )
+        messages = self._process_messages(messages)
+        if response_format:
+            return await self._arequest_parse(messages, response_format)
+        else:
+            return await self._arequest_chat_completion(messages, tools)
+
+    def _request_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
+
+        if tools:
+            for tool in tools:
+                function_dict = tool.get('function', {})
+                function_dict.pop("strict", None)
+            request_config["tools"] = tools
+
+        return self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    async def _arequest_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
+
+        if tools:
+            for tool in tools:
+                function_dict = tool.get('function', {})
+                function_dict.pop("strict", None)
+            request_config["tools"] = tools
+
+        return await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    def _request_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+        request_config.pop("stream", None)
+
+        return self._client.beta.chat.completions.parse(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    async def _arequest_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+        request_config.pop("stream", None)

-
-            messages=
+        return await self._async_client.beta.chat.completions.parse(
+            messages=messages,
             model=self.model_type,
-            **
+            **request_config,
         )
-        return response

     @property
     def token_counter(self) -> BaseTokenCounter:
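The Gemini backend now normalizes empty message content (Gemini rejects empty `content` fields), gains an `AsyncOpenAI` client, and routes any request carrying a `response_format` through `beta.chat.completions.parse`. A hedged sketch of that structured-output path follows; the enum member `ModelType.GEMINI_1_5_FLASH`, the bare constructor call, and `GEMINI_API_KEY` coming from the environment are assumptions, and `_run` is an internal entry point normally driven by the agent layer rather than called directly.

# Sketch of the structured-output path added in this release; the model type,
# constructor arguments, and API-key source are assumptions (see above).
from pydantic import BaseModel

from camel.models.gemini_model import GeminiModel
from camel.types import ModelType


class CityAnswer(BaseModel):
    city: str
    country: str


model = GeminiModel(model_type=ModelType.GEMINI_1_5_FLASH)

# Because response_format is set, _run dispatches to _request_parse, which
# calls client.beta.chat.completions.parse per the diff above.
completion = model._run(
    [{"role": "user", "content": "Name the capital of France."}],
    response_format=CityAnswer,
)
print(completion.choices[0].message.content)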
camel/models/groq_model.py
CHANGED
@@ -12,13 +12,15 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union

-from openai import OpenAI, Stream
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from pydantic import BaseModel

 from camel.configs import GROQ_API_PARAMS, GroqConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
+from camel.models._utils import try_modify_message_with_format
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
@@ -51,11 +53,7 @@ class GroqModel(BaseModelBackend):
             (default: :obj:`None`)
     """

-    @api_keys_required(
-        [
-            ("api_key", "GROQ_API_KEY"),
-        ]
-    )
+    @api_keys_required([("api_key", "GROQ_API_KEY")])
     def __init__(
         self,
         model_type: Union[ModelType, str],
@@ -79,6 +77,12 @@ class GroqModel(BaseModelBackend):
             api_key=self._api_key,
             base_url=self._url,
         )
+        self._async_client = AsyncOpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )

     @property
     def token_counter(self) -> BaseTokenCounter:
@@ -92,25 +96,83 @@ class GroqModel(BaseModelBackend):
         self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
         return self._token_counter

-    def
+    def _prepare_request(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Dict[str, Any]:
+        request_config = self.model_config_dict.copy()
+        if tools:
+            request_config["tools"] = tools
+        elif response_format:
+            try_modify_message_with_format(messages[-1], response_format)
+            request_config["response_format"] = {"type": "json_object"}
+
+        return request_config
+
+    def _run(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of
+        r"""Runs inference of Groq chat completion.

         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.

         Returns:
             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+
         response = self._client.chat.completions.create(
             messages=messages,
             model=self.model_type,
-            **
+            **request_config,
+        )
+
+        return response
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of Groq chat completion asynchronously.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+
+        response = await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
         )

         return response
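GroqModel follows the same pattern: an `AsyncOpenAI` client plus `_prepare_request`, which forwards `tools` when present and otherwise switches to JSON-object mode via `try_modify_message_with_format` when a `response_format` is supplied. A hedged sketch of the new async path; `ModelType.GROQ_LLAMA_3_3_70B`, the bare constructor, and `GROQ_API_KEY` coming from the environment are assumptions.

# Sketch of driving the async Groq path added here; see assumptions above.
import asyncio

from pydantic import BaseModel

from camel.models.groq_model import GroqModel
from camel.types import ModelType


class Answer(BaseModel):
    value: int


async def main() -> None:
    model = GroqModel(model_type=ModelType.GROQ_LLAMA_3_3_70B)
    # With a response_format and no tools, _prepare_request rewrites the last
    # message via try_modify_message_with_format and requests JSON output.
    completion = await model._arun(
        [{"role": "user", "content": "Return 6 * 7 as JSON."}],
        response_format=Answer,
    )
    print(completion.choices[0].message.content)


asyncio.run(main())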
camel/models/internlm_model.py
CHANGED
@@ -13,9 +13,10 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union

-from openai import OpenAI, Stream
+from openai import AsyncStream, OpenAI, Stream
+from pydantic import BaseModel

 from camel.configs import INTERNLM_API_PARAMS, InternLMConfig
 from camel.messages import OpenAIMessage
@@ -82,9 +83,11 @@ class InternLMModel(BaseModelBackend):
             base_url=self._url,
         )

-    def
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs inference of InternLM chat completion.

@@ -104,6 +107,14 @@ class InternLMModel(BaseModelBackend):
         )
         return response

+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        raise NotImplementedError("InternLM does not support async inference.")
+
     @property
     def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.
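For InternLM the async path is only stubbed out: the new `_arun` raises `NotImplementedError`. A small hedged sketch of how calling code might fall back to the synchronous `_run` without blocking the event loop (the helper name `safe_arun` is illustrative, not part of the package):

# Illustrative fallback for backends whose _arun is not implemented yet.
import asyncio


async def safe_arun(model, messages):
    try:
        return await model._arun(messages)
    except NotImplementedError:
        # Run the synchronous _run in a worker thread instead.
        return await asyncio.to_thread(model._run, messages)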
camel/models/litellm_model.py
CHANGED
@@ -11,7 +11,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
+
+from pydantic import BaseModel

 from camel.configs import LITELLM_API_PARAMS, LiteLLMConfig
 from camel.messages import OpenAIMessage
@@ -106,9 +108,14 @@ class LiteLLMModel(BaseModelBackend):
         self._token_counter = LiteLLMTokenCounter(self.model_type)
         return self._token_counter

-    def
+    async def _arun(self) -> None:  # type: ignore[override]
+        raise NotImplementedError
+
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> ChatCompletion:
         r"""Runs inference of LiteLLM chat completion.