camel-ai 0.2.21__py3-none-any.whl → 0.2.23a0__py3-none-any.whl

This diff compares the contents of publicly available package versions as released to a supported public registry. It is provided for informational purposes only and reflects the changes between versions exactly as they appear in that registry.

This version of camel-ai has been flagged as potentially problematic.

Files changed (106)
  1. camel/__init__.py +1 -1
  2. camel/agents/_types.py +41 -0
  3. camel/agents/_utils.py +188 -0
  4. camel/agents/chat_agent.py +556 -965
  5. camel/agents/knowledge_graph_agent.py +7 -1
  6. camel/agents/multi_hop_generator_agent.py +1 -1
  7. camel/configs/base_config.py +10 -13
  8. camel/configs/deepseek_config.py +4 -30
  9. camel/configs/gemini_config.py +5 -31
  10. camel/configs/openai_config.py +14 -32
  11. camel/configs/qwen_config.py +36 -36
  12. camel/datagen/self_improving_cot.py +79 -1
  13. camel/datagen/self_instruct/filter/instruction_filter.py +19 -3
  14. camel/datagen/self_instruct/self_instruct.py +7 -2
  15. camel/datasets/__init__.py +28 -0
  16. camel/datasets/base.py +969 -0
  17. camel/embeddings/openai_embedding.py +10 -1
  18. camel/environments/__init__.py +16 -0
  19. camel/environments/base.py +503 -0
  20. camel/extractors/__init__.py +16 -0
  21. camel/extractors/base.py +263 -0
  22. camel/interpreters/docker/Dockerfile +12 -0
  23. camel/interpreters/docker_interpreter.py +19 -1
  24. camel/interpreters/subprocess_interpreter.py +42 -17
  25. camel/loaders/__init__.py +2 -0
  26. camel/loaders/mineru_extractor.py +250 -0
  27. camel/memories/agent_memories.py +16 -1
  28. camel/memories/blocks/chat_history_block.py +10 -2
  29. camel/memories/blocks/vectordb_block.py +1 -0
  30. camel/memories/context_creators/score_based.py +20 -3
  31. camel/memories/records.py +10 -0
  32. camel/messages/base.py +8 -8
  33. camel/models/_utils.py +57 -0
  34. camel/models/aiml_model.py +48 -17
  35. camel/models/anthropic_model.py +41 -3
  36. camel/models/azure_openai_model.py +39 -3
  37. camel/models/base_model.py +132 -4
  38. camel/models/cohere_model.py +88 -11
  39. camel/models/deepseek_model.py +107 -63
  40. camel/models/gemini_model.py +133 -15
  41. camel/models/groq_model.py +72 -10
  42. camel/models/internlm_model.py +14 -3
  43. camel/models/litellm_model.py +9 -2
  44. camel/models/mistral_model.py +42 -5
  45. camel/models/model_manager.py +48 -3
  46. camel/models/moonshot_model.py +33 -4
  47. camel/models/nemotron_model.py +32 -3
  48. camel/models/nvidia_model.py +43 -3
  49. camel/models/ollama_model.py +139 -17
  50. camel/models/openai_audio_models.py +7 -1
  51. camel/models/openai_compatible_model.py +37 -3
  52. camel/models/openai_model.py +158 -46
  53. camel/models/qwen_model.py +61 -4
  54. camel/models/reka_model.py +53 -3
  55. camel/models/samba_model.py +209 -4
  56. camel/models/sglang_model.py +153 -14
  57. camel/models/siliconflow_model.py +16 -3
  58. camel/models/stub_model.py +46 -4
  59. camel/models/togetherai_model.py +38 -3
  60. camel/models/vllm_model.py +37 -3
  61. camel/models/yi_model.py +36 -3
  62. camel/models/zhipuai_model.py +38 -3
  63. camel/retrievers/__init__.py +3 -0
  64. camel/retrievers/hybrid_retrival.py +237 -0
  65. camel/toolkits/__init__.py +9 -2
  66. camel/toolkits/arxiv_toolkit.py +2 -1
  67. camel/toolkits/ask_news_toolkit.py +4 -2
  68. camel/toolkits/base.py +22 -3
  69. camel/toolkits/code_execution.py +2 -0
  70. camel/toolkits/dappier_toolkit.py +2 -1
  71. camel/toolkits/data_commons_toolkit.py +38 -12
  72. camel/toolkits/function_tool.py +13 -0
  73. camel/toolkits/github_toolkit.py +5 -1
  74. camel/toolkits/google_maps_toolkit.py +2 -1
  75. camel/toolkits/google_scholar_toolkit.py +2 -0
  76. camel/toolkits/human_toolkit.py +0 -3
  77. camel/toolkits/linkedin_toolkit.py +3 -2
  78. camel/toolkits/meshy_toolkit.py +3 -2
  79. camel/toolkits/mineru_toolkit.py +178 -0
  80. camel/toolkits/networkx_toolkit.py +240 -0
  81. camel/toolkits/notion_toolkit.py +2 -0
  82. camel/toolkits/openbb_toolkit.py +3 -2
  83. camel/toolkits/reddit_toolkit.py +11 -3
  84. camel/toolkits/retrieval_toolkit.py +6 -1
  85. camel/toolkits/semantic_scholar_toolkit.py +2 -1
  86. camel/toolkits/stripe_toolkit.py +8 -2
  87. camel/toolkits/sympy_toolkit.py +44 -1
  88. camel/toolkits/video_toolkit.py +2 -0
  89. camel/toolkits/whatsapp_toolkit.py +3 -2
  90. camel/toolkits/zapier_toolkit.py +191 -0
  91. camel/types/__init__.py +2 -2
  92. camel/types/agents/__init__.py +16 -0
  93. camel/types/agents/tool_calling_record.py +52 -0
  94. camel/types/enums.py +3 -0
  95. camel/types/openai_types.py +16 -14
  96. camel/utils/__init__.py +2 -1
  97. camel/utils/async_func.py +2 -2
  98. camel/utils/commons.py +114 -1
  99. camel/verifiers/__init__.py +23 -0
  100. camel/verifiers/base.py +340 -0
  101. camel/verifiers/models.py +82 -0
  102. camel/verifiers/python_verifier.py +202 -0
  103. {camel_ai-0.2.21.dist-info → camel_ai-0.2.23a0.dist-info}/METADATA +273 -256
  104. {camel_ai-0.2.21.dist-info → camel_ai-0.2.23a0.dist-info}/RECORD +106 -85
  105. {camel_ai-0.2.21.dist-info → camel_ai-0.2.23a0.dist-info}/WHEEL +1 -1
  106. {camel_ai-0.2.21.dist-info → camel_ai-0.2.23a0.dist-info}/LICENSE +0 -0
camel/models/nvidia_model.py

@@ -13,13 +13,14 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import OpenAI, Stream
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
 from openai.types.chat import (
     ChatCompletion,
     ChatCompletionChunk,
 )
+from pydantic import BaseModel
 
 from camel.configs import NVIDIA_API_PARAMS, NvidiaConfig
 from camel.messages import OpenAIMessage
@@ -76,10 +77,49 @@ class NvidiaModel(BaseModelBackend):
             api_key=self._api_key,
             base_url=self._url,
         )
+        self._async_client = AsyncOpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of NVIDIA chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+
+        # Remove tool-related parameters if no tools are specified
+        config = dict(self.model_config_dict)
+        if not config.get("tools"):  # None or empty list
+            config.pop("tools", None)
+            config.pop("tool_choice", None)
+
+        response = await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **config,
+        )
+        return response
 
-    def run(
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs inference of NVIDIA chat completion.
 
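To make the new async entry point concrete, here is a minimal sketch of how it might be driven, assuming the public `arun()` wrapper added to `BaseModelBackend` in this release (see `base_model.py` in the file list) dispatches to the `_arun()` above; the platform and model identifiers and the config values are illustrative, not taken from this diff:

import asyncio

from camel.models import ModelFactory
from camel.types import ModelPlatformType

async def main() -> None:
    # Illustrative setup; the NVIDIA model name is a placeholder.
    model = ModelFactory.create(
        model_platform=ModelPlatformType.NVIDIA,
        model_type="meta/llama3-70b-instruct",
        model_config_dict={"temperature": 0.2},
    )
    # arun() is assumed to forward to _arun(); since the config carries no
    # "tools" entry, _arun() strips "tools"/"tool_choice" before the call.
    response = await model.arun([{"role": "user", "content": "Say hello."}])
    print(response.choices[0].message.content)

asyncio.run(main())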
camel/models/ollama_model.py

@@ -13,13 +13,15 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
 import subprocess
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import OpenAI, Stream
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from pydantic import BaseModel
 
 from camel.configs import OLLAMA_API_PARAMS, OllamaConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
+from camel.models._utils import try_modify_message_with_format
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
@@ -75,6 +77,12 @@ class OllamaModel(BaseModelBackend):
             api_key="Set-but-ignored",  # required but ignored
             base_url=self._url,
         )
+        self._async_client = AsyncOpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key="Set-but-ignored",  # required but ignored
+            base_url=self._url,
+        )
 
     def _start_server(self) -> None:
         r"""Starts the Ollama server in a subprocess."""
@@ -119,40 +127,154 @@
             "input into Ollama model backend."
         )
 
-    def run(
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of OpenAI chat completion.
+        r"""Runs inference of Ollama chat completion.
 
         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
 
         Returns:
             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
-        if self.model_config_dict.get("response_format"):
-            # stream is not supported in beta.chat.completions.parse
-            if "stream" in self.model_config_dict:
-                del self.model_config_dict["stream"]
-
-            response = self._client.beta.chat.completions.parse(
-                messages=messages,
-                model=self.model_type,
-                **self.model_config_dict,
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
+        )
+        # For Ollama, the tool calling will be broken with response_format
+        if response_format and not tools:
+            return self._request_parse(messages, response_format, tools)
+        else:
+            return self._request_chat_completion(
+                messages, response_format, tools
+            )
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of Ollama chat completion in async mode.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
+        )
+        if response_format:
+            return await self._arequest_parse(messages, response_format, tools)
+        else:
+            return await self._arequest_chat_completion(
+                messages, response_format, tools
             )
 
-        return self._to_chat_completion(response)
+    def _prepare_chat_completion_config(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Dict[str, Any]:
+        request_config = self.model_config_dict.copy()
+
+        if tools:
+            request_config["tools"] = tools
+        if response_format:
+            try_modify_message_with_format(messages[-1], response_format)
+            request_config["response_format"] = {"type": "json_object"}
+
+        return request_config
+
+    def _request_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        request_config = self._prepare_chat_completion_config(
+            messages, response_format, tools
+        )
+
+        return self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    async def _arequest_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        request_config = self._prepare_chat_completion_config(
+            messages, response_format, tools
+        )
+
+        return await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    def _request_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+        request_config.pop("stream", None)
+        if tools is not None:
+            request_config["tools"] = tools
+
+        return self._client.beta.chat.completions.parse(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    async def _arequest_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+        request_config.pop("stream", None)
+        if tools is not None:
+            request_config["tools"] = tools
 
-        response = self._client.chat.completions.create(
+        return await self._async_client.beta.chat.completions.parse(
             messages=messages,
             model=self.model_type,
-            **self.model_config_dict,
+            **request_config,
         )
-        return response
 
     @property
     def stream(self) -> bool:
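Two design points stand out in this hunk: structured output is implemented by injecting the Pydantic schema into the last message (`try_modify_message_with_format`) and requesting plain `{"type": "json_object"}` output, and `_run()` deliberately skips the parse path whenever tools are present, since Ollama's tool calling breaks when combined with `response_format`. A minimal sketch of the parse path under stated assumptions (a local Ollama server, a pulled model tag, and a public `run()` wrapper that forwards `response_format` to `_run()`):

from pydantic import BaseModel

from camel.models import ModelFactory
from camel.types import ModelPlatformType

class Pet(BaseModel):  # illustrative schema, not from this diff
    name: str
    species: str

model = ModelFactory.create(
    model_platform=ModelPlatformType.OLLAMA,
    model_type="llama3.2",  # placeholder: any locally pulled model tag
    url="http://localhost:11434/v1",
)

# With response_format set and no tools, _run() routes to _request_parse(),
# which drops "stream" and calls beta.chat.completions.parse().
completion = model.run(
    [{"role": "user", "content": "Invent a pet and describe it."}],
    response_format=Pet,
)
print(completion.choices[0].message.content)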
camel/models/openai_audio_models.py

@@ -14,7 +14,7 @@
 import os
 from typing import Any, List, Optional, Union
 
-from openai import OpenAI, _legacy_response
+from openai import AsyncOpenAI, OpenAI, _legacy_response
 
 from camel.types import AudioModelType, VoiceType
 
@@ -37,6 +37,12 @@ class OpenAIAudioModels:
             base_url=self._url,
             api_key=self._api_key,
         )
+        self._async_client = AsyncOpenAI(
+            timeout=120,
+            max_retries=3,
+            base_url=self._url,
+            api_key=self._api_key,
+        )
 
     def text_to_speech(
         self,
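This hunk only constructs the async client; the diff does not show async wrappers for the speech endpoints, so presumably the client is consumed elsewhere. For reference, a sketch of what such a client enables, using only the stock OpenAI SDK API (not a method of `OpenAIAudioModels`):

import asyncio

from openai import AsyncOpenAI

async def main() -> None:
    # Mirrors the construction added above; credentials come from the
    # OPENAI_API_KEY environment variable here.
    async_client = AsyncOpenAI(timeout=120, max_retries=3)
    response = await async_client.audio.speech.create(
        model="tts-1",
        voice="alloy",
        input="Hello from the async client.",
    )
    # The SDK returns a binary response object with a write_to_file helper.
    response.write_to_file("hello.mp3")

asyncio.run(main())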
camel/models/openai_compatible_model.py

@@ -13,9 +13,10 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import OpenAI, Stream
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from pydantic import BaseModel
 
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
@@ -67,9 +68,18 @@ class OpenAICompatibleModel(BaseModelBackend):
             base_url=self._url,
         )
 
-    def run(
+        self._async_client = AsyncOpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs inference of OpenAI chat completion.
 
@@ -89,6 +99,30 @@ class OpenAICompatibleModel(BaseModelBackend):
         )
         return response
 
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI chat completion in async mode.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        response = await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **self.model_config_dict,
+        )
+        return response
+
 @property
     def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.
camel/models/openai_model.py

@@ -13,9 +13,10 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
 import warnings
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import OpenAI, Stream
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from pydantic import BaseModel
 
 from camel.configs import OPENAI_API_PARAMS, ChatGPTConfig
 from camel.messages import OpenAIMessage
@@ -31,6 +32,16 @@ from camel.utils import (
     api_keys_required,
 )
 
+UNSUPPORTED_PARAMS = {
+    "temperature",
+    "top_p",
+    "presence_penalty",
+    "frequency_penalty",
+    "logprobs",
+    "top_logprobs",
+    "logit_bias",
+}
+
 
 class OpenAIModel(BaseModelBackend):
     r"""OpenAI API in a unified BaseModelBackend interface.
@@ -68,15 +79,45 @@ class OpenAIModel(BaseModelBackend):
             model_config_dict = ChatGPTConfig().as_dict()
         api_key = api_key or os.environ.get("OPENAI_API_KEY")
         url = url or os.environ.get("OPENAI_API_BASE_URL")
+
         super().__init__(
             model_type, model_config_dict, api_key, url, token_counter
         )
+
         self._client = OpenAI(
             timeout=180,
             max_retries=3,
             base_url=self._url,
             api_key=self._api_key,
         )
+        self._async_client = AsyncOpenAI(
+            timeout=180,
+            max_retries=3,
+            base_url=self._url,
+            api_key=self._api_key,
+        )
+
+    def _sanitize_config(self, config_dict: Dict[str, Any]) -> Dict[str, Any]:
+        """Sanitize the model configuration for O1 models."""
+
+        if self.model_type in [
+            ModelType.O1,
+            ModelType.O1_MINI,
+            ModelType.O1_PREVIEW,
+            ModelType.O3_MINI,
+        ]:
+            warnings.warn(
+                "Warning: You are using an O1 model (O1_MINI or O1_PREVIEW), "
+                "which has certain limitations, reference: "
+                "`https://platform.openai.com/docs/guides/reasoning`.",
+                UserWarning,
+            )
+            return {
+                k: v
+                for k, v in config_dict.items()
+                if k not in UNSUPPORTED_PARAMS
+            }
+        return config_dict
 
     @property
     def token_counter(self) -> BaseTokenCounter:
@@ -90,70 +131,141 @@
             self._token_counter = OpenAITokenCounter(self.model_type)
         return self._token_counter
 
-    def run(
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs inference of OpenAI chat completion.
 
         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
 
         Returns:
             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
-        # o1-preview and o1-mini have Beta limitations
-        # reference: https://platform.openai.com/docs/guides/reasoning
-        if self.model_type in [
-            ModelType.O1,
-            ModelType.O1_MINI,
-            ModelType.O1_PREVIEW,
-            ModelType.O3_MINI,
-        ]:
-            warnings.warn(
-                "Warning: You are using an O1 model (O1_MINI or O1_PREVIEW), "
-                "which has certain limitations, reference: "
-                "`https://platform.openai.com/docs/guides/reasoning`.",
-                UserWarning,
-            )
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
+        )
+        if response_format:
+            return self._request_parse(messages, response_format, tools)
+        else:
+            return self._request_chat_completion(messages, tools)
 
-        # Check and remove unsupported parameters and reset the fixed
-        # parameters
-        unsupported_keys = [
-            "temperature",
-            "top_p",
-            "presence_penalty",
-            "frequency_penalty",
-            "logprobs",
-            "top_logprobs",
-            "logit_bias",
-        ]
-        for key in unsupported_keys:
-            if key in self.model_config_dict:
-                del self.model_config_dict[key]
-
-        if self.model_config_dict.get("response_format"):
-            # stream is not supported in beta.chat.completions.parse
-            if "stream" in self.model_config_dict:
-                del self.model_config_dict["stream"]
-
-            response = self._client.beta.chat.completions.parse(
-                messages=messages,
-                model=self.model_type,
-                **self.model_config_dict,
-            )
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI chat completion in async mode.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
+        )
+        if response_format:
+            return await self._arequest_parse(messages, response_format, tools)
+        else:
+            return await self._arequest_chat_completion(messages, tools)
+
+    def _request_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
+
+        if tools:
+            request_config["tools"] = tools
+
+        request_config = self._sanitize_config(request_config)
+
+        return self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    async def _arequest_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
+
+        if tools:
+            request_config["tools"] = tools
+
+        request_config = self._sanitize_config(request_config)
+
+        return await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    def _request_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+        request_config.pop("stream", None)
+        if tools is not None:
+            request_config["tools"] = tools
+
+        request_config = self._sanitize_config(request_config)
+
+        return self._client.beta.chat.completions.parse(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    async def _arequest_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+        request_config.pop("stream", None)
+        if tools is not None:
+            request_config["tools"] = tools
 
-        return self._to_chat_completion(response)
+        request_config = self._sanitize_config(request_config)
 
-        response = self._client.chat.completions.create(
+        return await self._async_client.beta.chat.completions.parse(
             messages=messages,
             model=self.model_type,
-            **self.model_config_dict,
+            **request_config,
        )
-        return response
 
     def check_model_config(self):
         r"""Check whether the model configuration contains any