camel-ai 0.2.21__py3-none-any.whl → 0.2.23__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (116)
  1. camel/__init__.py +1 -1
  2. camel/agents/_types.py +41 -0
  3. camel/agents/_utils.py +188 -0
  4. camel/agents/chat_agent.py +570 -965
  5. camel/agents/knowledge_graph_agent.py +7 -1
  6. camel/agents/multi_hop_generator_agent.py +1 -1
  7. camel/configs/base_config.py +10 -13
  8. camel/configs/deepseek_config.py +4 -30
  9. camel/configs/gemini_config.py +5 -31
  10. camel/configs/openai_config.py +14 -32
  11. camel/configs/qwen_config.py +36 -36
  12. camel/datagen/self_improving_cot.py +81 -3
  13. camel/datagen/self_instruct/filter/instruction_filter.py +19 -3
  14. camel/datagen/self_instruct/self_instruct.py +53 -4
  15. camel/datasets/__init__.py +28 -0
  16. camel/datasets/base.py +969 -0
  17. camel/embeddings/openai_embedding.py +10 -1
  18. camel/environments/__init__.py +16 -0
  19. camel/environments/base.py +503 -0
  20. camel/extractors/__init__.py +16 -0
  21. camel/extractors/base.py +263 -0
  22. camel/interpreters/docker/Dockerfile +12 -0
  23. camel/interpreters/docker_interpreter.py +19 -1
  24. camel/interpreters/subprocess_interpreter.py +42 -17
  25. camel/loaders/__init__.py +2 -0
  26. camel/loaders/mineru_extractor.py +250 -0
  27. camel/memories/agent_memories.py +16 -1
  28. camel/memories/blocks/chat_history_block.py +10 -2
  29. camel/memories/blocks/vectordb_block.py +1 -0
  30. camel/memories/context_creators/score_based.py +20 -3
  31. camel/memories/records.py +10 -0
  32. camel/messages/base.py +8 -8
  33. camel/models/__init__.py +2 -0
  34. camel/models/_utils.py +57 -0
  35. camel/models/aiml_model.py +48 -17
  36. camel/models/anthropic_model.py +41 -3
  37. camel/models/azure_openai_model.py +39 -3
  38. camel/models/base_audio_model.py +92 -0
  39. camel/models/base_model.py +132 -4
  40. camel/models/cohere_model.py +88 -11
  41. camel/models/deepseek_model.py +107 -63
  42. camel/models/fish_audio_model.py +18 -8
  43. camel/models/gemini_model.py +133 -15
  44. camel/models/groq_model.py +72 -10
  45. camel/models/internlm_model.py +14 -3
  46. camel/models/litellm_model.py +9 -2
  47. camel/models/mistral_model.py +42 -5
  48. camel/models/model_manager.py +57 -3
  49. camel/models/moonshot_model.py +33 -4
  50. camel/models/nemotron_model.py +32 -3
  51. camel/models/nvidia_model.py +43 -3
  52. camel/models/ollama_model.py +139 -17
  53. camel/models/openai_audio_models.py +87 -2
  54. camel/models/openai_compatible_model.py +37 -3
  55. camel/models/openai_model.py +158 -46
  56. camel/models/qwen_model.py +61 -4
  57. camel/models/reka_model.py +53 -3
  58. camel/models/samba_model.py +209 -4
  59. camel/models/sglang_model.py +153 -14
  60. camel/models/siliconflow_model.py +16 -3
  61. camel/models/stub_model.py +46 -4
  62. camel/models/togetherai_model.py +38 -3
  63. camel/models/vllm_model.py +37 -3
  64. camel/models/yi_model.py +36 -3
  65. camel/models/zhipuai_model.py +38 -3
  66. camel/retrievers/__init__.py +3 -0
  67. camel/retrievers/hybrid_retrival.py +237 -0
  68. camel/toolkits/__init__.py +20 -3
  69. camel/toolkits/arxiv_toolkit.py +2 -1
  70. camel/toolkits/ask_news_toolkit.py +4 -2
  71. camel/toolkits/audio_analysis_toolkit.py +238 -0
  72. camel/toolkits/base.py +22 -3
  73. camel/toolkits/code_execution.py +2 -0
  74. camel/toolkits/dappier_toolkit.py +2 -1
  75. camel/toolkits/data_commons_toolkit.py +38 -12
  76. camel/toolkits/excel_toolkit.py +172 -0
  77. camel/toolkits/function_tool.py +13 -0
  78. camel/toolkits/github_toolkit.py +5 -1
  79. camel/toolkits/google_maps_toolkit.py +2 -1
  80. camel/toolkits/google_scholar_toolkit.py +2 -0
  81. camel/toolkits/human_toolkit.py +0 -3
  82. camel/toolkits/image_analysis_toolkit.py +202 -0
  83. camel/toolkits/linkedin_toolkit.py +3 -2
  84. camel/toolkits/meshy_toolkit.py +3 -2
  85. camel/toolkits/mineru_toolkit.py +178 -0
  86. camel/toolkits/networkx_toolkit.py +240 -0
  87. camel/toolkits/notion_toolkit.py +2 -0
  88. camel/toolkits/openbb_toolkit.py +3 -2
  89. camel/toolkits/page_script.js +376 -0
  90. camel/toolkits/reddit_toolkit.py +11 -3
  91. camel/toolkits/retrieval_toolkit.py +6 -1
  92. camel/toolkits/semantic_scholar_toolkit.py +2 -1
  93. camel/toolkits/stripe_toolkit.py +8 -2
  94. camel/toolkits/sympy_toolkit.py +44 -1
  95. camel/toolkits/video_analysis_toolkit.py +407 -0
  96. camel/toolkits/{video_toolkit.py → video_download_toolkit.py} +21 -25
  97. camel/toolkits/web_toolkit.py +1307 -0
  98. camel/toolkits/whatsapp_toolkit.py +3 -2
  99. camel/toolkits/zapier_toolkit.py +191 -0
  100. camel/types/__init__.py +2 -2
  101. camel/types/agents/__init__.py +16 -0
  102. camel/types/agents/tool_calling_record.py +52 -0
  103. camel/types/enums.py +3 -0
  104. camel/types/openai_types.py +16 -14
  105. camel/utils/__init__.py +2 -1
  106. camel/utils/async_func.py +2 -2
  107. camel/utils/commons.py +114 -1
  108. camel/verifiers/__init__.py +23 -0
  109. camel/verifiers/base.py +340 -0
  110. camel/verifiers/models.py +82 -0
  111. camel/verifiers/python_verifier.py +202 -0
  112. camel_ai-0.2.23.dist-info/METADATA +671 -0
  113. {camel_ai-0.2.21.dist-info → camel_ai-0.2.23.dist-info}/RECORD +127 -99
  114. {camel_ai-0.2.21.dist-info → camel_ai-0.2.23.dist-info}/WHEEL +1 -1
  115. camel_ai-0.2.21.dist-info/METADATA +0 -528
  116. {camel_ai-0.2.21.dist-info → camel_ai-0.2.23.dist-info/licenses}/LICENSE +0 -0
--- a/camel/models/nvidia_model.py
+++ b/camel/models/nvidia_model.py
@@ -13,13 +13,14 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import OpenAI, Stream
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
 from openai.types.chat import (
     ChatCompletion,
     ChatCompletionChunk,
 )
+from pydantic import BaseModel
 
 from camel.configs import NVIDIA_API_PARAMS, NvidiaConfig
 from camel.messages import OpenAIMessage
@@ -76,10 +77,49 @@ class NvidiaModel(BaseModelBackend):
             api_key=self._api_key,
             base_url=self._url,
         )
+        self._async_client = AsyncOpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of NVIDIA chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+
+        # Remove tool-related parameters if no tools are specified
+        config = dict(self.model_config_dict)
+        if not config.get("tools"):  # None or empty list
+            config.pop("tools", None)
+            config.pop("tool_choice", None)
+
+        response = await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **config,
+        )
+        return response
 
-    def run(
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs inference of NVIDIA chat completion.
 
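This pattern repeats across the model backends in this release: each gains an `AsyncOpenAI` client and an `_arun` coroutine mirroring the synchronous `_run`. A minimal sketch of exercising the async path, assuming the reworked `BaseModelBackend` (`camel/models/base_model.py`, +132 lines above) exposes a public `arun` wrapper that delegates to `_arun`, and that `ModelFactory` accepts the NVIDIA platform as in prior releases; the model name is illustrative:

```python
import asyncio

from camel.models import ModelFactory
from camel.types import ModelPlatformType


async def main() -> None:
    # Assumed wiring: NVIDIA_API_KEY is read from the environment.
    model = ModelFactory.create(
        model_platform=ModelPlatformType.NVIDIA,
        model_type="meta/llama3-70b-instruct",
    )
    messages = [{"role": "user", "content": "Reply with one word."}]
    response = await model.arun(messages)  # assumed public wrapper for _arun
    print(response.choices[0].message.content)


asyncio.run(main())
```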
--- a/camel/models/ollama_model.py
+++ b/camel/models/ollama_model.py
@@ -13,13 +13,15 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
 import subprocess
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import OpenAI, Stream
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from pydantic import BaseModel
 
 from camel.configs import OLLAMA_API_PARAMS, OllamaConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
+from camel.models._utils import try_modify_message_with_format
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
@@ -75,6 +77,12 @@ class OllamaModel(BaseModelBackend):
             api_key="Set-but-ignored",  # required but ignored
             base_url=self._url,
         )
+        self._async_client = AsyncOpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key="Set-but-ignored",  # required but ignored
+            base_url=self._url,
+        )
 
     def _start_server(self) -> None:
         r"""Starts the Ollama server in a subprocess."""
@@ -119,40 +127,154 @@
                 "input into Ollama model backend."
             )
 
-    def run(
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of OpenAI chat completion.
+        r"""Runs inference of Ollama chat completion.
 
         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
 
         Returns:
             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
-        if self.model_config_dict.get("response_format"):
-            # stream is not supported in beta.chat.completions.parse
-            if "stream" in self.model_config_dict:
-                del self.model_config_dict["stream"]
-
-            response = self._client.beta.chat.completions.parse(
-                messages=messages,
-                model=self.model_type,
-                **self.model_config_dict,
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
+        )
+        # For Ollama, the tool calling will be broken with response_format
+        if response_format and not tools:
+            return self._request_parse(messages, response_format, tools)
+        else:
+            return self._request_chat_completion(
+                messages, response_format, tools
+            )
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of Ollama chat completion in async mode.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
+        )
+        if response_format:
+            return await self._arequest_parse(messages, response_format, tools)
+        else:
+            return await self._arequest_chat_completion(
+                messages, response_format, tools
             )
 
-        return self._to_chat_completion(response)
+    def _prepare_chat_completion_config(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Dict[str, Any]:
+        request_config = self.model_config_dict.copy()
+
+        if tools:
+            request_config["tools"] = tools
+        if response_format:
+            try_modify_message_with_format(messages[-1], response_format)
+            request_config["response_format"] = {"type": "json_object"}
+
+        return request_config
+
+    def _request_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        request_config = self._prepare_chat_completion_config(
+            messages, response_format, tools
+        )
+
+        return self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    async def _arequest_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        request_config = self._prepare_chat_completion_config(
+            messages, response_format, tools
+        )
+
+        return await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    def _request_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+        request_config.pop("stream", None)
+        if tools is not None:
+            request_config["tools"] = tools
+
+        return self._client.beta.chat.completions.parse(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    async def _arequest_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+        request_config.pop("stream", None)
+        if tools is not None:
+            request_config["tools"] = tools
 
-        response = self._client.chat.completions.create(
+        return await self._async_client.beta.chat.completions.parse(
             messages=messages,
             model=self.model_type,
-            **self.model_config_dict,
+            **request_config,
         )
-        return response
 
     @property
     def stream(self) -> bool:
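The structured-output split above is worth spelling out: with a Pydantic `response_format` and no tools, `_run` goes through `beta.chat.completions.parse`; otherwise `_prepare_chat_completion_config` falls back to JSON mode and calls `try_modify_message_with_format` (defined in `camel/models/_utils.py`, not shown in this diff) to steer the model toward the target schema. A sketch of that fallback, using a hypothetical stand-in for the helper:

```python
from typing import Any, Dict, Type

from pydantic import BaseModel


class CityInfo(BaseModel):
    city: str
    population: int


def append_format_hint(
    message: Dict[str, Any], response_format: Type[BaseModel]
) -> None:
    # Hypothetical stand-in for try_modify_message_with_format: embed the
    # target JSON schema in the last message so a JSON-mode model knows
    # what shape to emit.
    schema = response_format.model_json_schema()
    message["content"] += (
        f"\n\nRespond only with JSON matching this schema:\n{schema}"
    )


messages = [{"role": "user", "content": "Tell me about Paris."}]
append_format_hint(messages[-1], CityInfo)
request_config = {"response_format": {"type": "json_object"}}  # as in the hunk
print(messages[-1]["content"])
```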
--- a/camel/models/openai_audio_models.py
+++ b/camel/models/openai_audio_models.py
@@ -11,15 +11,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import base64
 import os
 from typing import Any, List, Optional, Union
 
-from openai import OpenAI, _legacy_response
+from openai import AsyncOpenAI, OpenAI, _legacy_response
 
+from camel.models.base_audio_model import BaseAudioModel
 from camel.types import AudioModelType, VoiceType
 
 
-class OpenAIAudioModels:
+class OpenAIAudioModels(BaseAudioModel):
     r"""Provides access to OpenAI's Text-to-Speech (TTS) and Speech_to_Text
     (STT) models."""
 
@@ -29,6 +31,7 @@ class OpenAIAudioModels:
         url: Optional[str] = None,
     ) -> None:
         r"""Initialize an instance of OpenAI."""
+        super().__init__(api_key, url)
         self._url = url or os.environ.get("OPENAI_API_BASE_URL")
         self._api_key = api_key or os.environ.get("OPENAI_API_KEY")
         self._client = OpenAI(
@@ -37,10 +40,17 @@
             base_url=self._url,
             api_key=self._api_key,
         )
+        self._async_client = AsyncOpenAI(
+            timeout=120,
+            max_retries=3,
+            base_url=self._url,
+            api_key=self._api_key,
+        )
 
     def text_to_speech(
         self,
         input: str,
+        *,
         model_type: AudioModelType = AudioModelType.TTS_1,
         voice: VoiceType = VoiceType.ALLOY,
         storage_path: Optional[str] = None,
@@ -105,6 +115,8 @@
                 new_storage_path = (
                     f"{file_name}_{chunk_index}{file_extension}"
                 )
+                # Ensure directory exists
+                self._ensure_directory_exists(new_storage_path)
                 response.write_to_file(new_storage_path)
                 chunk_index += 1
         except Exception as e:
@@ -125,6 +137,8 @@
 
         if storage_path:
             try:
+                # Ensure directory exists
+                self._ensure_directory_exists(storage_path)
                 response.write_to_file(storage_path)
             except Exception as e:
                 raise Exception("Error during write the file") from e
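`_ensure_directory_exists` comes from the new `BaseAudioModel` (`camel/models/base_audio_model.py`, +92 lines above), whose body this diff does not show. A plausible sketch of what such a helper does, offered only as an assumption:

```python
import os


def ensure_directory_exists(file_path: str) -> None:
    # Assumed behavior: create the parent directory of the target file if
    # it does not already exist, so write_to_file() cannot fail on a
    # missing path.
    directory = os.path.dirname(file_path)
    if directory:
        os.makedirs(directory, exist_ok=True)


ensure_directory_exists("out/audio/speech_0.mp3")  # creates out/audio/
```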
@@ -257,3 +271,74 @@
             return transcription.text
         except Exception as e:
             raise Exception("Error during STT API call") from e
+
+    def audio_question_answering(
+        self,
+        audio_file_path: str,
+        question: str,
+        model: str = "gpt-4o-mini-audio-preview",
+        **kwargs: Any,
+    ) -> str:
+        r"""Answer a question directly using the audio content.
+
+        Args:
+            audio_file_path (str): The path to the audio file.
+            question (str): The question to ask about the audio content.
+            model (str, optional): The model to use for audio question
+                answering. (default: :obj:`"gpt-4o-mini-audio-preview"`)
+            **kwargs (Any): Extra keyword arguments passed to the chat
+                completions API.
+
+        Returns:
+            str: The model's response to the question.
+
+        Raises:
+            Exception: If there's an error during the API call.
+        """
+        try:
+            # Read and encode the audio file
+            with open(audio_file_path, "rb") as audio_file:
+                audio_data = audio_file.read()
+
+            encoded_string = base64.b64encode(audio_data).decode('utf-8')
+
+            # Get file format
+            file_suffix = os.path.splitext(audio_file_path)[1]
+            file_format = file_suffix[1:].lower()
+
+            # Prepare the prompt
+            text_prompt = "Answer the following question based on the "
+            f"given audio information:\n\n{question}"
+
+            # Call the OpenAI API
+            completion = self._client.chat.completions.create(
+                model=model,
+                messages=[
+                    {
+                        "role": "system",
+                        "content": "You are a helpful assistant "
+                        "specializing in audio analysis.",
+                    },
+                    {  # type: ignore[misc, list-item]
+                        "role": "user",
+                        "content": [
+                            {"type": "text", "text": text_prompt},
+                            {
+                                "type": "input_audio",
+                                "input_audio": {
+                                    "data": encoded_string,
+                                    "format": file_format,
+                                },
+                            },
+                        ],
+                    },
+                ],
+                **kwargs,
+            )
+
+            response = str(completion.choices[0].message.content)
+            return response
+        except Exception as e:
+            raise Exception(
+                "Error during audio question answering API call"
+            ) from e
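One caveat in the method as released: the assignment to `text_prompt` ends at the first string literal, and the following bare f-string (the part carrying `{question}`) is a standalone expression, so the question text never reaches the prompt. Usage is otherwise straightforward; a sketch with a hypothetical file path, assuming `OPENAI_API_KEY` is set in the environment:

```python
from camel.models import OpenAIAudioModels

audio_models = OpenAIAudioModels()

# "meeting.wav" is a hypothetical path; the format is inferred from the
# file suffix, so any format the audio-preview models accept should work.
answer = audio_models.audio_question_answering(
    audio_file_path="meeting.wav",
    question="What decisions were made in this recording?",
)
print(answer)
```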
--- a/camel/models/openai_compatible_model.py
+++ b/camel/models/openai_compatible_model.py
@@ -13,9 +13,10 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import OpenAI, Stream
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from pydantic import BaseModel
 
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
@@ -67,9 +68,18 @@ class OpenAICompatibleModel(BaseModelBackend):
             base_url=self._url,
         )
 
-    def run(
+        self._async_client = AsyncOpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs inference of OpenAI chat completion.
 
@@ -89,6 +99,30 @@ class OpenAICompatibleModel(BaseModelBackend):
         )
         return response
 
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI chat completion in async mode.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        response = await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **self.model_config_dict,
+        )
+        return response
+
     @property
     def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.