camel-ai 0.2.21__py3-none-any.whl → 0.2.23a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.
Files changed (106)
  1. camel/__init__.py +1 -1
  2. camel/agents/_types.py +41 -0
  3. camel/agents/_utils.py +188 -0
  4. camel/agents/chat_agent.py +556 -965
  5. camel/agents/knowledge_graph_agent.py +7 -1
  6. camel/agents/multi_hop_generator_agent.py +1 -1
  7. camel/configs/base_config.py +10 -13
  8. camel/configs/deepseek_config.py +4 -30
  9. camel/configs/gemini_config.py +5 -31
  10. camel/configs/openai_config.py +14 -32
  11. camel/configs/qwen_config.py +36 -36
  12. camel/datagen/self_improving_cot.py +79 -1
  13. camel/datagen/self_instruct/filter/instruction_filter.py +19 -3
  14. camel/datagen/self_instruct/self_instruct.py +7 -2
  15. camel/datasets/__init__.py +28 -0
  16. camel/datasets/base.py +969 -0
  17. camel/embeddings/openai_embedding.py +10 -1
  18. camel/environments/__init__.py +16 -0
  19. camel/environments/base.py +503 -0
  20. camel/extractors/__init__.py +16 -0
  21. camel/extractors/base.py +263 -0
  22. camel/interpreters/docker/Dockerfile +12 -0
  23. camel/interpreters/docker_interpreter.py +19 -1
  24. camel/interpreters/subprocess_interpreter.py +42 -17
  25. camel/loaders/__init__.py +2 -0
  26. camel/loaders/mineru_extractor.py +250 -0
  27. camel/memories/agent_memories.py +16 -1
  28. camel/memories/blocks/chat_history_block.py +10 -2
  29. camel/memories/blocks/vectordb_block.py +1 -0
  30. camel/memories/context_creators/score_based.py +20 -3
  31. camel/memories/records.py +10 -0
  32. camel/messages/base.py +8 -8
  33. camel/models/_utils.py +57 -0
  34. camel/models/aiml_model.py +48 -17
  35. camel/models/anthropic_model.py +41 -3
  36. camel/models/azure_openai_model.py +39 -3
  37. camel/models/base_model.py +132 -4
  38. camel/models/cohere_model.py +88 -11
  39. camel/models/deepseek_model.py +107 -63
  40. camel/models/gemini_model.py +133 -15
  41. camel/models/groq_model.py +72 -10
  42. camel/models/internlm_model.py +14 -3
  43. camel/models/litellm_model.py +9 -2
  44. camel/models/mistral_model.py +42 -5
  45. camel/models/model_manager.py +48 -3
  46. camel/models/moonshot_model.py +33 -4
  47. camel/models/nemotron_model.py +32 -3
  48. camel/models/nvidia_model.py +43 -3
  49. camel/models/ollama_model.py +139 -17
  50. camel/models/openai_audio_models.py +7 -1
  51. camel/models/openai_compatible_model.py +37 -3
  52. camel/models/openai_model.py +158 -46
  53. camel/models/qwen_model.py +61 -4
  54. camel/models/reka_model.py +53 -3
  55. camel/models/samba_model.py +209 -4
  56. camel/models/sglang_model.py +153 -14
  57. camel/models/siliconflow_model.py +16 -3
  58. camel/models/stub_model.py +46 -4
  59. camel/models/togetherai_model.py +38 -3
  60. camel/models/vllm_model.py +37 -3
  61. camel/models/yi_model.py +36 -3
  62. camel/models/zhipuai_model.py +38 -3
  63. camel/retrievers/__init__.py +3 -0
  64. camel/retrievers/hybrid_retrival.py +237 -0
  65. camel/toolkits/__init__.py +9 -2
  66. camel/toolkits/arxiv_toolkit.py +2 -1
  67. camel/toolkits/ask_news_toolkit.py +4 -2
  68. camel/toolkits/base.py +22 -3
  69. camel/toolkits/code_execution.py +2 -0
  70. camel/toolkits/dappier_toolkit.py +2 -1
  71. camel/toolkits/data_commons_toolkit.py +38 -12
  72. camel/toolkits/function_tool.py +13 -0
  73. camel/toolkits/github_toolkit.py +5 -1
  74. camel/toolkits/google_maps_toolkit.py +2 -1
  75. camel/toolkits/google_scholar_toolkit.py +2 -0
  76. camel/toolkits/human_toolkit.py +0 -3
  77. camel/toolkits/linkedin_toolkit.py +3 -2
  78. camel/toolkits/meshy_toolkit.py +3 -2
  79. camel/toolkits/mineru_toolkit.py +178 -0
  80. camel/toolkits/networkx_toolkit.py +240 -0
  81. camel/toolkits/notion_toolkit.py +2 -0
  82. camel/toolkits/openbb_toolkit.py +3 -2
  83. camel/toolkits/reddit_toolkit.py +11 -3
  84. camel/toolkits/retrieval_toolkit.py +6 -1
  85. camel/toolkits/semantic_scholar_toolkit.py +2 -1
  86. camel/toolkits/stripe_toolkit.py +8 -2
  87. camel/toolkits/sympy_toolkit.py +44 -1
  88. camel/toolkits/video_toolkit.py +2 -0
  89. camel/toolkits/whatsapp_toolkit.py +3 -2
  90. camel/toolkits/zapier_toolkit.py +191 -0
  91. camel/types/__init__.py +2 -2
  92. camel/types/agents/__init__.py +16 -0
  93. camel/types/agents/tool_calling_record.py +52 -0
  94. camel/types/enums.py +3 -0
  95. camel/types/openai_types.py +16 -14
  96. camel/utils/__init__.py +2 -1
  97. camel/utils/async_func.py +2 -2
  98. camel/utils/commons.py +114 -1
  99. camel/verifiers/__init__.py +23 -0
  100. camel/verifiers/base.py +340 -0
  101. camel/verifiers/models.py +82 -0
  102. camel/verifiers/python_verifier.py +202 -0
  103. {camel_ai-0.2.21.dist-info → camel_ai-0.2.23a0.dist-info}/METADATA +273 -256
  104. {camel_ai-0.2.21.dist-info → camel_ai-0.2.23a0.dist-info}/RECORD +106 -85
  105. {camel_ai-0.2.21.dist-info → camel_ai-0.2.23a0.dist-info}/WHEEL +1 -1
  106. {camel_ai-0.2.21.dist-info → camel_ai-0.2.23a0.dist-info}/LICENSE +0 -0
camel/models/base_model.py

@@ -11,10 +11,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import abc
+import re
 from abc import ABC, abstractmethod
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union

-from openai import Stream
+from openai import AsyncStream, Stream
+from pydantic import BaseModel

 from camel.messages import OpenAIMessage
 from camel.types import (
@@ -27,7 +30,30 @@ from camel.types import (
 from camel.utils import BaseTokenCounter


-class BaseModelBackend(ABC):
+class ModelBackendMeta(abc.ABCMeta):
+    r"""Metaclass that automatically preprocesses messages in run method.
+
+    Automatically wraps the run method of any class inheriting from
+    BaseModelBackend to preprocess messages (remove <think> tags) before they
+    are sent to the model.
+    """
+
+    def __new__(mcs, name, bases, namespace):
+        r"""Wraps run method with preprocessing if it exists in the class."""
+        if 'run' in namespace:
+            original_run = namespace['run']
+
+            def wrapped_run(
+                self, messages: List[OpenAIMessage], *args, **kwargs
+            ):
+                messages = self.preprocess_messages(messages)
+                return original_run(self, messages, *args, **kwargs)
+
+            namespace['run'] = wrapped_run
+        return super().__new__(mcs, name, bases, namespace)
+
+
+class BaseModelBackend(ABC, metaclass=ModelBackendMeta):
     r"""Base class for different model backends.
     It may be OpenAI API, a local LLM, a stub for unit tests, etc.

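Note (example, not part of the diff): ModelBackendMeta rewrites run at class-creation time, so every backend that defines its own run picks up message preprocessing without having to call super(). A minimal self-contained sketch of the same wrap-on-definition pattern; the names PreprocessMeta and EchoBackend are hypothetical:

import abc

class PreprocessMeta(abc.ABCMeta):
    # Mirrors ModelBackendMeta.__new__: wrap 'run' if the class defines one.
    def __new__(mcs, name, bases, namespace):
        if 'run' in namespace:
            original_run = namespace['run']

            def wrapped_run(self, messages, *args, **kwargs):
                # Preprocess first, then delegate to the original method.
                messages = self.preprocess_messages(messages)
                return original_run(self, messages, *args, **kwargs)

            namespace['run'] = wrapped_run
        return super().__new__(mcs, name, bases, namespace)

class EchoBackend(metaclass=PreprocessMeta):
    def preprocess_messages(self, messages):
        # Stand-in for the real <think>-stripping preprocessing below.
        return [m.strip() for m in messages]

    def run(self, messages):
        return messages

assert EchoBackend().run(["  hi  "]) == ["hi"]  # preprocessing ran first

Because the wrapping happens per class definition, a subclass that overrides run is wrapped again for that subclass, while classes that merely inherit run keep the already-wrapped version.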
@@ -73,23 +99,125 @@ class BaseModelBackend(ABC):
         """
         pass

+    def preprocess_messages(
+        self, messages: List[OpenAIMessage]
+    ) -> List[OpenAIMessage]:
+        r"""Preprocess messages before sending to model API.
+        Removes thinking content from assistant and user messages.
+
+        Args:
+            messages (List[OpenAIMessage]): Original messages
+
+        Returns:
+            List[OpenAIMessage]: Preprocessed messages
+        """
+
+        def should_process_thinking(msg: OpenAIMessage) -> bool:
+            # Only process thinking content for assistant and user messages
+            return msg['role'] in ['assistant', 'user'] and isinstance(
+                msg['content'], str
+            )
+
+        def remove_thinking(content: str) -> str:
+            # Only remove thinking content if the tags are present
+            if '<think>' in content and '</think>' in content:
+                return re.sub(
+                    r'<think>.*?</think>',
+                    '',
+                    content,
+                    flags=re.DOTALL,
+                ).strip()
+            return content
+
+        return [
+            {  # type: ignore[misc]
+                **msg,
+                'content': (
+                    remove_thinking(msg['content'])  # type: ignore[arg-type]
+                    if should_process_thinking(msg)
+                    else msg['content']
+                ),
+            }
+            for msg in messages
+        ]
+
+    @abstractmethod
+    def _run(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        pass
+
     @abstractmethod
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        pass
+
     def run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs the query to the backend model.

         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The response format
+                to use for the model. (default: :obj:`None`)
+            tools (Optional[List[Tool]]): The schema of tools to use for the
+                model for this request. Will override the tools specified in
+                the model configuration (but not change the configuration).
+                (default: :obj:`None`)

         Returns:
             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
-        pass
+        # None -> use default tools
+        if tools is None:
+            tools = self.model_config_dict.get("tools", None)
+        # Empty -> use no tools
+        elif not tools:
+            tools = None
+        return self._run(messages, response_format, tools)
+
+    async def arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs the query to the backend model asynchronously.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The response format
+                to use for the model. (default: :obj:`None`)
+            tools (Optional[List[Tool]]): The schema of tools to use for the
+                model for this request. Will override the tools specified in
+                the model configuration (but not change the configuration).
+                (default: :obj:`None`)
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        if tools is None:
+            tools = self.model_config_dict.get("tools", None)
+        elif not tools:
+            tools = None
+        return await self._arun(messages, response_format, tools)

     @abstractmethod
     def check_model_config(self):
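Note (example, not part of the diff): preprocess_messages strips <think> blocks only when both tags are present, and only from string-typed user and assistant content. A quick standalone check of the regex the diff uses:

import re

def remove_thinking(content: str) -> str:
    # Non-greedy match; DOTALL lets a block span multiple lines.
    if '<think>' in content and '</think>' in content:
        return re.sub(
            r'<think>.*?</think>', '', content, flags=re.DOTALL
        ).strip()
    return content

text = "<think>\nfirst draft reasoning\n</think>\nThe answer is 42."
assert remove_thinking(text) == "The answer is 42."
assert remove_thinking("no tags here") == "no tags here"

The new run/arun bodies also give tools three states: None falls back to the tools stored in model_config_dict, an empty list explicitly disables tools for the request, and a non-empty list overrides the configured tools without mutating the configuration.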
camel/models/cohere_model.py

@@ -16,7 +16,9 @@ import json
 import logging
 import os
 import uuid
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union
+
+from pydantic import BaseModel

 if TYPE_CHECKING:
     from cohere.types import ChatMessageV2, ChatResponse
@@ -24,6 +26,7 @@ if TYPE_CHECKING:
 from camel.configs import COHERE_API_PARAMS, CohereConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
+from camel.models._utils import try_modify_message_with_format
 from camel.types import ChatCompletion, ModelType
 from camel.utils import (
     BaseTokenCounter,
@@ -67,6 +70,7 @@ class CohereModel(BaseModelBackend):
             model_type, model_config_dict, api_key, url, token_counter
         )
         self._client = cohere.ClientV2(api_key=self._api_key)
+        self._async_client = cohere.AsyncClientV2(api_key=self._api_key)

     def _to_openai_response(self, response: 'ChatResponse') -> ChatCompletion:
         if response.usage and response.usage.tokens:
@@ -215,7 +219,30 @@
             )
         return self._token_counter

-    def run(self, messages: List[OpenAIMessage]) -> ChatCompletion:
+    def _prepare_request(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Dict[str, Any]:
+        request_config = self.model_config_dict.copy()
+        if tools:
+            for tool in tools:
+                function_dict = tool.get('function', {})
+                function_dict.pop("strict", None)
+            request_config["tools"] = tools
+        elif response_format:
+            try_modify_message_with_format(messages[-1], response_format)
+            request_config["response_format"] = {"type": "json_object"}
+
+        return request_config
+
+    def _run(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
         r"""Runs inference of Cohere chat completion.

         Args:
@@ -226,21 +253,71 @@
         """
         from cohere.core.api_error import ApiError

-        cohere_messages = self._to_cohere_chatmessage(messages)
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )

-        # Removing 'strict': True from the dictionary for
-        # cohere client
-        if self.model_config_dict.get('tools') is not None:
-            for tool in self.model_config_dict.get('tools', []):
-                function_dict = tool.get('function', {})
-                if 'strict' in function_dict:
-                    del function_dict['strict']
+        cohere_messages = self._to_cohere_chatmessage(messages)

         try:
             response = self._client.chat(
                 messages=cohere_messages,
                 model=self.model_type,
-                **self.model_config_dict,
+                **request_config,
+            )
+        except ApiError as e:
+            logging.error(f"Cohere API Error: {e.status_code}")
+            logging.error(f"Error body: {e.body}")
+            raise
+        except Exception as e:
+            logging.error(f"Unexpected error when calling Cohere API: {e!s}")
+            raise
+
+        openai_response = self._to_openai_response(response)
+
+        # Add AgentOps LLM Event tracking
+        if LLMEvent:
+            llm_event = LLMEvent(
+                thread_id=openai_response.id,
+                prompt=" ".join(
+                    [message.get("content") for message in messages]  # type: ignore[misc]
+                ),
+                prompt_tokens=openai_response.usage.prompt_tokens,  # type: ignore[union-attr]
+                completion=openai_response.choices[0].message.content,
+                completion_tokens=openai_response.usage.completion_tokens,  # type: ignore[union-attr]
+                model=self.model_type,
+            )
+            record(llm_event)
+
+        return openai_response
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        r"""Runs inference of Cohere chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+        Returns:
+            ChatCompletion.
+        """
+        from cohere.core.api_error import ApiError
+
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+
+        cohere_messages = self._to_cohere_chatmessage(messages)
+
+        try:
+            response = await self._async_client.chat(
+                messages=cohere_messages,
+                model=self.model_type,
+                **request_config,
             )
         except ApiError as e:
             logging.error(f"Cohere API Error: {e.status_code}")
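Note (example, not part of the diff): _prepare_request now centralizes per-request configuration for both the sync and async Cohere paths. A simplified standalone sketch (the helper name prepare_request and the want_json flag are hypothetical) of the two branches, where tool schemas take precedence over a structured response format:

from typing import Any, Dict, List, Optional

def prepare_request(
    config: Dict[str, Any],
    tools: Optional[List[Dict[str, Any]]] = None,
    want_json: bool = False,
) -> Dict[str, Any]:
    request_config = config.copy()  # per-request overrides leave config intact
    if tools:
        for tool in tools:
            # OpenAI-style schemas may carry a "strict" flag that the
            # Cohere client rejects, so drop it from each function schema.
            tool.get('function', {}).pop('strict', None)
        request_config['tools'] = tools
    elif want_json:
        request_config['response_format'] = {'type': 'json_object'}
    return request_config

tools = [{'function': {'name': 'add', 'strict': True}}]
print(prepare_request({'temperature': 0.3}, tools=tools))
# {'temperature': 0.3, 'tools': [{'function': {'name': 'add'}}]}

In the real method the response_format branch additionally calls try_modify_message_with_format(messages[-1], response_format) before switching the response mode to json_object, and the per-request copy replaces the 0.2.21 behaviour of deleting 'strict' from the tool dicts stored in the model configuration.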
camel/models/deepseek_model.py

@@ -13,13 +13,15 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union

-from openai import OpenAI, Stream
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from pydantic import BaseModel

 from camel.configs import DEEPSEEK_API_PARAMS, DeepSeekConfig
 from camel.logger import get_logger
 from camel.messages import OpenAIMessage
+from camel.models._utils import try_modify_message_with_format
 from camel.models.base_model import BaseModelBackend
 from camel.types import (
     ChatCompletion,
@@ -30,6 +32,16 @@ from camel.utils import BaseTokenCounter, OpenAITokenCounter, api_keys_required

 logger = get_logger(__name__)

+REASONSER_UNSUPPORTED_PARAMS = [
+    "temperature",
+    "top_p",
+    "presence_penalty",
+    "frequency_penalty",
+    "logprobs",
+    "top_logprobs",
+    "tools",
+]
+

 class DeepSeekModel(BaseModelBackend):
     r"""DeepSeek API in a unified BaseModelBackend interface.
@@ -84,6 +96,13 @@ class DeepSeekModel(BaseModelBackend):
             base_url=self._url,
         )

+        self._async_client = AsyncOpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+
     @property
     def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.
@@ -98,82 +117,51 @@
         )
         return self._token_counter

-    def run(
+    def _prepare_request(
         self,
         messages: List[OpenAIMessage],
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of DeepSeek chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Dict[str, Any]:
+        request_config = self.model_config_dict.copy()

-        Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
-        """
-        # deepseek reasoner has limitations
-        # reference: https://api-docs.deepseek.com/guides/reasoning_model#api-parameters
         if self.model_type in [
             ModelType.DEEPSEEK_REASONER,
         ]:
-            import re
-
             logger.warning(
-                "You are using a DeepSeek Reasoner model, "
+                "Warning: You are using an DeepSeek Reasoner model, "
                 "which has certain limitations, reference: "
-                "`https://api-docs.deepseek.com/guides/reasoning_model#api-parameters`"
+                "`https://api-docs.deepseek.com/guides/reasoning_model"
+                "#api-parameters`.",
             )
+            request_config = {
+                key: value
+                for key, value in request_config.items()
+                if key not in REASONSER_UNSUPPORTED_PARAMS
+            }

-            # Check and remove unsupported parameters and reset the fixed
-            # parameters
-            unsupported_keys = [
-                "temperature",
-                "top_p",
-                "presence_penalty",
-                "frequency_penalty",
-                "logprobs",
-                "top_logprobs",
-                "tools",
-            ]
-            for key in unsupported_keys:
-                if key in self.model_config_dict:
-                    del self.model_config_dict[key]
-
-            # Remove thinking content from messages before sending to API
-            # This ensures only the final response is sent, excluding
-            # intermediate thought processes
-            messages = [
-                {  # type: ignore[misc]
-                    **msg,
-                    'content': re.sub(
-                        r'<think>.*?</think>',
-                        '',
-                        msg['content'],  # type: ignore[arg-type]
-                        flags=re.DOTALL,
-                    ).strip(),
-                }
-                for msg in messages
-            ]
+        if tools:
+            for tool in tools:
+                function_dict = tool.get('function', {})
+                function_dict.pop("strict", None)
+            request_config["tools"] = tools
+        elif response_format:
+            try_modify_message_with_format(messages[-1], response_format)
+            request_config["response_format"] = {"type": "json_object"}

-        response = self._client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **self.model_config_dict,
-        )
+        return request_config

-        # Handle reasoning content with <think> tags at the beginning
+    def _post_handle_response(
+        self, response: ChatCompletion
+    ) -> ChatCompletion:
+        r"""Handle reasoning content with <think> tags at the beginning."""
         if (
-            self.model_type
-            in [
-                ModelType.DEEPSEEK_REASONER,
-            ]
+            self.model_type in [ModelType.DEEPSEEK_REASONER]
             and os.environ.get("GET_REASONING_CONTENT", "false").lower()
             == "true"
         ):
-            reasoning_content = response.choices[0].message.reasoning_content
-            combined_content = (
+            reasoning_content = response.choices[0].message.reasoning_content  # type: ignore[attr-defined]
+            combined_content = (  # type: ignore[operator]
                 f"<think>\n{reasoning_content}\n</think>\n"
                 if reasoning_content
                 else ""
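Note (example, not part of the diff): for DEEPSEEK_REASONER the request config is now filtered against the module-level REASONSER_UNSUPPORTED_PARAMS list instead of deleting keys from self.model_config_dict, which in 0.2.21 permanently stripped temperature, tools, and the other listed keys from the backend after the first reasoner call. A standalone sketch of both reasoner behaviours, with illustrative config values:

REASONSER_UNSUPPORTED_PARAMS = [
    "temperature", "top_p", "presence_penalty", "frequency_penalty",
    "logprobs", "top_logprobs", "tools",
]

config = {"temperature": 0.7, "max_tokens": 512, "tools": []}
# Dict-comprehension copy: the stored config dict is left untouched.
request_config = {
    k: v for k, v in config.items() if k not in REASONSER_UNSUPPORTED_PARAMS
}
print(request_config)  # {'max_tokens': 512}

# With GET_REASONING_CONTENT=true, _post_handle_response folds the reasoning
# text back into the visible content, wrapped in the same <think> tags that
# BaseModelBackend.preprocess_messages strips from outgoing messages on the
# next turn:
reasoning_content = "intermediate chain of thought"
combined_content = (
    f"<think>\n{reasoning_content}\n</think>\n" if reasoning_content else ""
)
print(combined_content + "final answer")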
@@ -199,9 +187,65 @@ class DeepSeekModel(BaseModelBackend):
             object="chat.completion",
             usage=response.usage,
         )
-
         return response

+    def _run(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of DeepSeek chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+
+        response = self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+        return self._post_handle_response(response)
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of DeepSeek chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+        response = await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+        return self._post_handle_response(response)
+
     def check_model_config(self):
         r"""Check whether the model configuration contains any
         unexpected arguments to DeepSeek API.
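Note (example, not part of the diff): taken together, concrete backends now implement only _run/_arun, while callers keep the public run and gain arun, response_format, and per-request tools. A usage sketch against the new surface; the model argument stands for any concrete backend instance, and the prompt and Answer schema are invented:

from pydantic import BaseModel

class Answer(BaseModel):
    value: int

def sync_demo(model) -> None:
    messages = [{"role": "user", "content": "What is 6 x 7?"}]
    # tools=[] explicitly disables tools for this request;
    # tools=None would fall back to the tools in the model config.
    completion = model.run(messages, response_format=Answer, tools=[])
    print(completion.choices[0].message.content)

async def async_demo(model) -> None:
    completion = await model.arun(
        [{"role": "user", "content": "What is 6 x 7?"}]
    )
    print(completion.choices[0].message.content)

# Drive the async path with e.g. asyncio.run(async_demo(model)).

One detail worth noting: ModelBackendMeta wraps only a method literally named run, so the sync call above passes through preprocess_messages automatically, while arun as diffed here is not wrapped by the metaclass.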