camel-ai 0.2.22__py3-none-any.whl → 0.2.23a0__py3-none-any.whl

This diff shows the content of publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release. This version of camel-ai might be problematic.

Files changed (100)
  1. camel/__init__.py +1 -1
  2. camel/agents/_types.py +41 -0
  3. camel/agents/_utils.py +188 -0
  4. camel/agents/chat_agent.py +556 -965
  5. camel/agents/knowledge_graph_agent.py +7 -1
  6. camel/agents/multi_hop_generator_agent.py +1 -1
  7. camel/configs/base_config.py +10 -13
  8. camel/configs/deepseek_config.py +4 -30
  9. camel/configs/gemini_config.py +5 -31
  10. camel/configs/openai_config.py +14 -32
  11. camel/configs/qwen_config.py +36 -36
  12. camel/datagen/self_improving_cot.py +79 -1
  13. camel/datagen/self_instruct/filter/instruction_filter.py +19 -3
  14. camel/datagen/self_instruct/self_instruct.py +6 -1
  15. camel/datasets/__init__.py +28 -0
  16. camel/datasets/base.py +969 -0
  17. camel/environments/__init__.py +16 -0
  18. camel/environments/base.py +503 -0
  19. camel/extractors/__init__.py +16 -0
  20. camel/extractors/base.py +263 -0
  21. camel/memories/agent_memories.py +16 -1
  22. camel/memories/blocks/chat_history_block.py +10 -2
  23. camel/memories/blocks/vectordb_block.py +1 -0
  24. camel/memories/context_creators/score_based.py +20 -3
  25. camel/memories/records.py +10 -0
  26. camel/messages/base.py +8 -8
  27. camel/models/_utils.py +57 -0
  28. camel/models/aiml_model.py +48 -17
  29. camel/models/anthropic_model.py +41 -3
  30. camel/models/azure_openai_model.py +39 -3
  31. camel/models/base_model.py +88 -13
  32. camel/models/cohere_model.py +88 -11
  33. camel/models/deepseek_model.py +107 -45
  34. camel/models/gemini_model.py +133 -15
  35. camel/models/groq_model.py +72 -10
  36. camel/models/internlm_model.py +14 -3
  37. camel/models/litellm_model.py +9 -2
  38. camel/models/mistral_model.py +42 -5
  39. camel/models/model_manager.py +48 -3
  40. camel/models/moonshot_model.py +33 -4
  41. camel/models/nemotron_model.py +32 -3
  42. camel/models/nvidia_model.py +43 -3
  43. camel/models/ollama_model.py +139 -17
  44. camel/models/openai_audio_models.py +7 -1
  45. camel/models/openai_compatible_model.py +37 -3
  46. camel/models/openai_model.py +158 -46
  47. camel/models/qwen_model.py +61 -4
  48. camel/models/reka_model.py +53 -3
  49. camel/models/samba_model.py +209 -4
  50. camel/models/sglang_model.py +153 -14
  51. camel/models/siliconflow_model.py +16 -3
  52. camel/models/stub_model.py +46 -4
  53. camel/models/togetherai_model.py +38 -3
  54. camel/models/vllm_model.py +37 -3
  55. camel/models/yi_model.py +36 -3
  56. camel/models/zhipuai_model.py +38 -3
  57. camel/retrievers/__init__.py +3 -0
  58. camel/retrievers/hybrid_retrival.py +237 -0
  59. camel/toolkits/__init__.py +4 -0
  60. camel/toolkits/arxiv_toolkit.py +2 -1
  61. camel/toolkits/ask_news_toolkit.py +4 -2
  62. camel/toolkits/base.py +22 -3
  63. camel/toolkits/code_execution.py +2 -0
  64. camel/toolkits/dappier_toolkit.py +2 -1
  65. camel/toolkits/data_commons_toolkit.py +38 -12
  66. camel/toolkits/function_tool.py +13 -0
  67. camel/toolkits/github_toolkit.py +5 -1
  68. camel/toolkits/google_maps_toolkit.py +2 -1
  69. camel/toolkits/google_scholar_toolkit.py +2 -0
  70. camel/toolkits/human_toolkit.py +0 -3
  71. camel/toolkits/linkedin_toolkit.py +3 -2
  72. camel/toolkits/meshy_toolkit.py +3 -2
  73. camel/toolkits/mineru_toolkit.py +2 -2
  74. camel/toolkits/networkx_toolkit.py +240 -0
  75. camel/toolkits/notion_toolkit.py +2 -0
  76. camel/toolkits/openbb_toolkit.py +3 -2
  77. camel/toolkits/reddit_toolkit.py +11 -3
  78. camel/toolkits/retrieval_toolkit.py +6 -1
  79. camel/toolkits/semantic_scholar_toolkit.py +2 -1
  80. camel/toolkits/stripe_toolkit.py +8 -2
  81. camel/toolkits/sympy_toolkit.py +6 -1
  82. camel/toolkits/video_toolkit.py +2 -0
  83. camel/toolkits/whatsapp_toolkit.py +3 -2
  84. camel/toolkits/zapier_toolkit.py +191 -0
  85. camel/types/__init__.py +2 -2
  86. camel/types/agents/__init__.py +16 -0
  87. camel/types/agents/tool_calling_record.py +52 -0
  88. camel/types/enums.py +3 -0
  89. camel/types/openai_types.py +16 -14
  90. camel/utils/__init__.py +2 -1
  91. camel/utils/async_func.py +2 -2
  92. camel/utils/commons.py +114 -1
  93. camel/verifiers/__init__.py +23 -0
  94. camel/verifiers/base.py +340 -0
  95. camel/verifiers/models.py +82 -0
  96. camel/verifiers/python_verifier.py +202 -0
  97. {camel_ai-0.2.22.dist-info → camel_ai-0.2.23a0.dist-info}/METADATA +273 -255
  98. {camel_ai-0.2.22.dist-info → camel_ai-0.2.23a0.dist-info}/RECORD +100 -82
  99. {camel_ai-0.2.22.dist-info → camel_ai-0.2.23a0.dist-info}/WHEEL +1 -1
  100. {camel_ai-0.2.22.dist-info → camel_ai-0.2.23a0.dist-info}/LICENSE +0 -0
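
The common thread across the model-backend churn above is a single refactor: `BaseModelBackend` now owns the public `run`/`arun` entry points, and each backend implements private `_run`/`_arun` hooks that take optional `response_format` and `tools` arguments. A sketch of how a caller might exercise the new surface (the platform and model choices are illustrative assumptions, not taken from this diff; an API key must be set for the call to succeed):

# Sketch: exercising the new sync/async entry points in 0.2.23a0.
# Platform/model choices are illustrative assumptions.
import asyncio

from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI,
    model_type=ModelType.GPT_4O_MINI,
)

messages = [{"role": "user", "content": "Say hello."}]

# Synchronous path: run() resolves the effective tools, then calls _run().
response = model.run(messages)

# Asynchronous path: arun() awaits the backend's new _arun() hook.
response = asyncio.run(model.arun(messages))
print(response.choices[0].message.content)

The key hunks follow, grouped by file.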
camel/models/anthropic_model.py

@@ -12,7 +12,9 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
+
+from pydantic import BaseModel
 
 from camel.configs import ANTHROPIC_API_PARAMS, AnthropicConfig
 from camel.messages import OpenAIMessage
@@ -59,7 +61,7 @@ class AnthropicModel(BaseModelBackend):
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
     ) -> None:
-        from anthropic import Anthropic
+        from anthropic import Anthropic, AsyncAnthropic
 
         if model_config_dict is None:
             model_config_dict = AnthropicConfig().as_dict()
@@ -69,6 +71,9 @@ class AnthropicModel(BaseModelBackend):
             model_type, model_config_dict, api_key, url, token_counter
         )
         self.client = Anthropic(api_key=self._api_key, base_url=self._url)
+        self.async_client = AsyncAnthropic(
+            api_key=self._api_key, base_url=self._url
+        )
 
     def _convert_response_from_anthropic_to_openai(self, response):
         # openai ^1.0.0 format, reference openai/types/chat/chat_completion.py
@@ -102,9 +107,11 @@
         self._token_counter = AnthropicTokenCounter(self.model_type)
         return self._token_counter
 
-    def run(
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ):
         r"""Run inference of Anthropic chat completion.
 
@@ -133,6 +140,37 @@
 
         return response
 
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        r"""Run inference of Anthropic chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            ChatCompletion: Response in the OpenAI API format.
+        """
+        from anthropic import NOT_GIVEN
+
+        if messages[0]["role"] == "system":
+            sys_msg = str(messages.pop(0)["content"])
+        else:
+            sys_msg = NOT_GIVEN  # type: ignore[assignment]
+        response = await self.async_client.messages.create(
+            model=self.model_type,
+            system=sys_msg,
+            messages=messages,  # type: ignore[arg-type]
+            **self.model_config_dict,
+        )
+
+        # format response to openai format
+        return self._convert_response_from_anthropic_to_openai(response)
+
     def check_model_config(self):
         r"""Check whether the model configuration is valid for anthropic
         model backends.
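
The new `_arun` mirrors the sync path: a leading system message is popped into Anthropic's dedicated `system` parameter before the async client is awaited. The same pattern in a standalone sketch against the `anthropic` SDK (the model name and `max_tokens` value are assumptions, not taken from this diff; ANTHROPIC_API_KEY must be set):

# Standalone sketch of the system-message handling used in _arun above.
import asyncio

from anthropic import NOT_GIVEN, AsyncAnthropic


async def chat(messages: list) -> str:
    client = AsyncAnthropic()
    # Anthropic takes the system prompt as a dedicated parameter, so a
    # leading system message is popped out of the OpenAI-style list.
    if messages and messages[0]["role"] == "system":
        sys_msg = str(messages.pop(0)["content"])
    else:
        sys_msg = NOT_GIVEN
    response = await client.messages.create(
        model="claude-3-5-sonnet-latest",
        max_tokens=256,
        system=sys_msg,
        messages=messages,
    )
    return response.content[0].text


print(asyncio.run(chat([
    {"role": "system", "content": "Be terse."},
    {"role": "user", "content": "Hi."},
])))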
camel/models/azure_openai_model.py

@@ -12,9 +12,10 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import AzureOpenAI, Stream
+from openai import AsyncAzureOpenAI, AsyncStream, AzureOpenAI, Stream
+from pydantic import BaseModel
 
 from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig
 from camel.messages import OpenAIMessage
@@ -95,6 +96,15 @@ class AzureOpenAIModel(BaseModelBackend):
             max_retries=3,
         )
 
+        self._async_client = AsyncAzureOpenAI(
+            azure_endpoint=str(self._url),
+            azure_deployment=self.azure_deployment_name,
+            api_version=self.api_version,
+            api_key=self._api_key,
+            timeout=180,
+            max_retries=3,
+        )
+
     @property
     def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.
@@ -107,9 +117,11 @@
         self._token_counter = OpenAITokenCounter(self.model_type)
         return self._token_counter
 
-    def run(
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs inference of Azure OpenAI chat completion.
 
@@ -129,6 +141,30 @@
         )
         return response
 
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of Azure OpenAI chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        response = await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.azure_deployment_name,  # type:ignore[arg-type]
+            **self.model_config_dict,
+        )
+        return response
+
     def check_model_config(self):
         r"""Check whether the model configuration contains any
         unexpected arguments to Azure OpenAI API.
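
Note that Azure routes requests by deployment rather than by base model, which is why `_arun` passes `self.azure_deployment_name` as the `model` argument. A minimal standalone sketch of the same call (endpoint, deployment name, and `api_version` are placeholder assumptions, not values from this diff):

# Minimal async Azure OpenAI call mirroring the _arun path above.
# Endpoint, deployment name, and api_version are placeholder assumptions.
import asyncio

from openai import AsyncAzureOpenAI


async def main() -> None:
    client = AsyncAzureOpenAI(
        azure_endpoint="https://my-resource.openai.azure.com",
        azure_deployment="my-gpt4o-deployment",
        api_version="2024-06-01",
        api_key="...",
        timeout=180,
        max_retries=3,
    )
    response = await client.chat.completions.create(
        # For Azure, `model` names the deployment, not the base model.
        model="my-gpt4o-deployment",
        messages=[{"role": "user", "content": "ping"}],
    )
    print(response.choices[0].message.content)


asyncio.run(main())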
camel/models/base_model.py

@@ -14,9 +14,10 @@
 import abc
 import re
 from abc import ABC, abstractmethod
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import Stream
+from openai import AsyncStream, Stream
+from pydantic import BaseModel
 
 from camel.messages import OpenAIMessage
 from camel.types import (
@@ -102,7 +103,7 @@ class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
         self, messages: List[OpenAIMessage]
     ) -> List[OpenAIMessage]:
         r"""Preprocess messages before sending to model API.
-        Removes thinking content and other model-specific preprocessing.
+        Removes thinking content from assistant and user messages.
 
         Args:
             messages (List[OpenAIMessage]): Original messages
@@ -110,39 +111,113 @@
         Returns:
             List[OpenAIMessage]: Preprocessed messages
         """
-        # Remove thinking content from messages before sending to API
-        # This ensures only the final response is sent, excluding
-        # intermediate thought processes
-        return [
-            {  # type: ignore[misc]
-                **msg,
-                'content': re.sub(
+
+        def should_process_thinking(msg: OpenAIMessage) -> bool:
+            # Only process thinking content for assistant and user messages
+            return msg['role'] in ['assistant', 'user'] and isinstance(
+                msg['content'], str
+            )
+
+        def remove_thinking(content: str) -> str:
+            # Only remove thinking content if the tags are present
+            if '<think>' in content and '</think>' in content:
+                return re.sub(
                     r'<think>.*?</think>',
                     '',
-                    msg['content'],  # type: ignore[arg-type]
+                    content,
                     flags=re.DOTALL,
-                ).strip(),
+                ).strip()
+            return content
+
+        return [
+            {  # type: ignore[misc]
+                **msg,
+                'content': (
+                    remove_thinking(msg['content'])  # type: ignore[arg-type]
+                    if should_process_thinking(msg)
+                    else msg['content']
+                ),
             }
             for msg in messages
         ]
 
     @abstractmethod
+    def _run(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        pass
+
+    @abstractmethod
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        pass
+
     def run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs the query to the backend model.
 
         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The response format
+                to use for the model. (default: :obj:`None`)
+            tools (Optional[List[Tool]]): The schema of tools to use for the
+                model for this request. Will override the tools specified in
+                the model configuration (but not change the configuration).
+                (default: :obj:`None`)
 
         Returns:
             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
-        pass
+        # None -> use default tools
+        if tools is None:
+            tools = self.model_config_dict.get("tools", None)
+        # Empty -> use no tools
+        elif not tools:
+            tools = None
+        return self._run(messages, response_format, tools)
+
+    async def arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs the query to the backend model asynchronously.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The response format
+                to use for the model. (default: :obj:`None`)
+            tools (Optional[List[Tool]]): The schema of tools to use for the
+                model for this request. Will override the tools specified in
+                the model configuration (but not change the configuration).
+                (default: :obj:`None`)
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        if tools is None:
+            tools = self.model_config_dict.get("tools", None)
+        elif not tools:
+            tools = None
+        return await self._arun(messages, response_format, tools)
 
     @abstractmethod
     def check_model_config(self):
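
The `tools` argument threaded through `run`/`arun` is effectively three-valued: `None` falls back to whatever tools the model config already carries, an empty list disables tools for this one request, and a non-empty list overrides the config without mutating it. An isolated sketch of that dispatch, with an illustrative config and tool schema:

# Isolated sketch of the three-valued `tools` dispatch in run()/arun().
# The config dict and tool schemas are illustrative assumptions.
from typing import Any, Dict, List, Optional

model_config_dict: Dict[str, Any] = {
    "tools": [{"type": "function", "function": {"name": "search"}}],
}


def resolve_tools(
    tools: Optional[List[Dict[str, Any]]],
) -> Optional[List[Dict[str, Any]]]:
    if tools is None:
        # None -> fall back to the tools configured on the model.
        return model_config_dict.get("tools", None)
    if not tools:
        # Empty list -> explicitly run this request with no tools.
        return None
    # Non-empty -> per-request override; the config itself is untouched.
    return tools


assert resolve_tools(None) == model_config_dict["tools"]
assert resolve_tools([]) is None
override = [{"type": "function", "function": {"name": "calc"}}]
assert resolve_tools(override) == override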
camel/models/cohere_model.py

@@ -16,7 +16,9 @@ import json
 import logging
 import os
 import uuid
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union
+
+from pydantic import BaseModel
 
 if TYPE_CHECKING:
     from cohere.types import ChatMessageV2, ChatResponse
@@ -24,6 +26,7 @@ if TYPE_CHECKING:
 from camel.configs import COHERE_API_PARAMS, CohereConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
+from camel.models._utils import try_modify_message_with_format
 from camel.types import ChatCompletion, ModelType
 from camel.utils import (
     BaseTokenCounter,
@@ -67,6 +70,7 @@ class CohereModel(BaseModelBackend):
             model_type, model_config_dict, api_key, url, token_counter
         )
         self._client = cohere.ClientV2(api_key=self._api_key)
+        self._async_client = cohere.AsyncClientV2(api_key=self._api_key)
 
     def _to_openai_response(self, response: 'ChatResponse') -> ChatCompletion:
         if response.usage and response.usage.tokens:
@@ -215,7 +219,30 @@
         )
         return self._token_counter
 
-    def run(self, messages: List[OpenAIMessage]) -> ChatCompletion:
+    def _prepare_request(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Dict[str, Any]:
+        request_config = self.model_config_dict.copy()
+        if tools:
+            for tool in tools:
+                function_dict = tool.get('function', {})
+                function_dict.pop("strict", None)
+            request_config["tools"] = tools
+        elif response_format:
+            try_modify_message_with_format(messages[-1], response_format)
+            request_config["response_format"] = {"type": "json_object"}
+
+        return request_config
+
+    def _run(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
         r"""Runs inference of Cohere chat completion.
 
         Args:
@@ -226,21 +253,71 @@
         """
         from cohere.core.api_error import ApiError
 
-        cohere_messages = self._to_cohere_chatmessage(messages)
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
 
-        # Removing 'strict': True from the dictionary for
-        # cohere client
-        if self.model_config_dict.get('tools') is not None:
-            for tool in self.model_config_dict.get('tools', []):
-                function_dict = tool.get('function', {})
-                if 'strict' in function_dict:
-                    del function_dict['strict']
+        cohere_messages = self._to_cohere_chatmessage(messages)
 
         try:
             response = self._client.chat(
                 messages=cohere_messages,
                 model=self.model_type,
-                **self.model_config_dict,
+                **request_config,
+            )
+        except ApiError as e:
+            logging.error(f"Cohere API Error: {e.status_code}")
+            logging.error(f"Error body: {e.body}")
+            raise
+        except Exception as e:
+            logging.error(f"Unexpected error when calling Cohere API: {e!s}")
+            raise
+
+        openai_response = self._to_openai_response(response)
+
+        # Add AgentOps LLM Event tracking
+        if LLMEvent:
+            llm_event = LLMEvent(
+                thread_id=openai_response.id,
+                prompt=" ".join(
+                    [message.get("content") for message in messages]  # type: ignore[misc]
+                ),
+                prompt_tokens=openai_response.usage.prompt_tokens,  # type: ignore[union-attr]
+                completion=openai_response.choices[0].message.content,
+                completion_tokens=openai_response.usage.completion_tokens,  # type: ignore[union-attr]
+                model=self.model_type,
+            )
+            record(llm_event)
+
+        return openai_response
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        r"""Runs inference of Cohere chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+        Returns:
+            ChatCompletion.
+        """
+        from cohere.core.api_error import ApiError
+
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+
+        cohere_messages = self._to_cohere_chatmessage(messages)
+
+        try:
+            response = await self._async_client.chat(
+                messages=cohere_messages,
+                model=self.model_type,
+                **request_config,
             )
         except ApiError as e:
             logging.error(f"Cohere API Error: {e.status_code}")
camel/models/deepseek_model.py

@@ -13,13 +13,15 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import OpenAI, Stream
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from pydantic import BaseModel
 
 from camel.configs import DEEPSEEK_API_PARAMS, DeepSeekConfig
 from camel.logger import get_logger
 from camel.messages import OpenAIMessage
+from camel.models._utils import try_modify_message_with_format
 from camel.models.base_model import BaseModelBackend
 from camel.types import (
     ChatCompletion,
@@ -30,6 +32,16 @@ from camel.utils import BaseTokenCounter, OpenAITokenCounter, api_keys_required
 
 logger = get_logger(__name__)
 
+REASONSER_UNSUPPORTED_PARAMS = [
+    "temperature",
+    "top_p",
+    "presence_penalty",
+    "frequency_penalty",
+    "logprobs",
+    "top_logprobs",
+    "tools",
+]
+
 
 class DeepSeekModel(BaseModelBackend):
     r"""DeepSeek API in a unified BaseModelBackend interface.
@@ -84,6 +96,13 @@ class DeepSeekModel(BaseModelBackend):
             base_url=self._url,
         )
 
+        self._async_client = AsyncOpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+
     @property
     def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.
@@ -98,64 +117,51 @@
         )
         return self._token_counter
 
-    def run(
+    def _prepare_request(
         self,
         messages: List[OpenAIMessage],
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of DeepSeek chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Dict[str, Any]:
+        request_config = self.model_config_dict.copy()
 
-        Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
-        """
-        # deepseek reasoner has limitations
-        # reference: https://api-docs.deepseek.com/guides/reasoning_model#api-parameters
         if self.model_type in [
             ModelType.DEEPSEEK_REASONER,
         ]:
             logger.warning(
-                "You are using a DeepSeek Reasoner model, "
+                "Warning: You are using an DeepSeek Reasoner model, "
                 "which has certain limitations, reference: "
-                "`https://api-docs.deepseek.com/guides/reasoning_model#api-parameters`"
+                "`https://api-docs.deepseek.com/guides/reasoning_model"
+                "#api-parameters`.",
            )
+            request_config = {
+                key: value
+                for key, value in request_config.items()
+                if key not in REASONSER_UNSUPPORTED_PARAMS
+            }
 
-        # Check and remove unsupported parameters and reset the fixed
-        # parameters
-        unsupported_keys = [
-            "temperature",
-            "top_p",
-            "presence_penalty",
-            "frequency_penalty",
-            "logprobs",
-            "top_logprobs",
-            "tools",
-        ]
-        for key in unsupported_keys:
-            if key in self.model_config_dict:
-                del self.model_config_dict[key]
+        if tools:
+            for tool in tools:
+                function_dict = tool.get('function', {})
+                function_dict.pop("strict", None)
+            request_config["tools"] = tools
+        elif response_format:
+            try_modify_message_with_format(messages[-1], response_format)
+            request_config["response_format"] = {"type": "json_object"}
 
-        response = self._client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **self.model_config_dict,
-        )
+        return request_config
 
-        # Handle reasoning content with <think> tags at the beginning
+    def _post_handle_response(
+        self, response: ChatCompletion
+    ) -> ChatCompletion:
+        r"""Handle reasoning content with <think> tags at the beginning."""
         if (
-            self.model_type
-            in [
-                ModelType.DEEPSEEK_REASONER,
-            ]
+            self.model_type in [ModelType.DEEPSEEK_REASONER]
             and os.environ.get("GET_REASONING_CONTENT", "false").lower()
             == "true"
         ):
-            reasoning_content = response.choices[0].message.reasoning_content
-            combined_content = (
+            reasoning_content = response.choices[0].message.reasoning_content  # type: ignore[attr-defined]
+            combined_content = (  # type: ignore[operator]
                 f"<think>\n{reasoning_content}\n</think>\n"
                 if reasoning_content
                 else ""
@@ -181,9 +187,65 @@
             object="chat.completion",
             usage=response.usage,
         )
-
         return response
 
+    def _run(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of DeepSeek chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+
+        response = self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+        return self._post_handle_response(response)
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of DeepSeek chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+        response = await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+        return self._post_handle_response(response)
+
     def check_model_config(self):
         r"""Check whether the model configuration contains any
         unexpected arguments to DeepSeek API.
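
For the Reasoner model, `_prepare_request` drops the parameters the reasoning endpoint rejects, and `_post_handle_response` can surface the chain of thought as a `<think>` block when the `GET_REASONING_CONTENT` environment variable is `true`. The parameter filter in isolation (config values are illustrative; the constant name follows the diff's spelling):

# Isolated sketch of the reasoner parameter filter in _prepare_request above.
# Config values are illustrative; the unsupported-parameter list mirrors
# https://api-docs.deepseek.com/guides/reasoning_model#api-parameters
REASONSER_UNSUPPORTED_PARAMS = [
    "temperature",
    "top_p",
    "presence_penalty",
    "frequency_penalty",
    "logprobs",
    "top_logprobs",
    "tools",
]

request_config = {"temperature": 0.7, "max_tokens": 1024, "stream": False}

# Drop only the unsupported keys; building a new dict keeps the
# original model config intact, unlike the old in-place deletion.
request_config = {
    key: value
    for key, value in request_config.items()
    if key not in REASONSER_UNSUPPORTED_PARAMS
}

assert request_config == {"max_tokens": 1024, "stream": False}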