camel-ai 0.2.22__py3-none-any.whl → 0.2.23__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of camel-ai might be problematic.

Files changed (110)
  1. camel/__init__.py +1 -1
  2. camel/agents/_types.py +41 -0
  3. camel/agents/_utils.py +188 -0
  4. camel/agents/chat_agent.py +570 -965
  5. camel/agents/knowledge_graph_agent.py +7 -1
  6. camel/agents/multi_hop_generator_agent.py +1 -1
  7. camel/configs/base_config.py +10 -13
  8. camel/configs/deepseek_config.py +4 -30
  9. camel/configs/gemini_config.py +5 -31
  10. camel/configs/openai_config.py +14 -32
  11. camel/configs/qwen_config.py +36 -36
  12. camel/datagen/self_improving_cot.py +81 -3
  13. camel/datagen/self_instruct/filter/instruction_filter.py +19 -3
  14. camel/datagen/self_instruct/self_instruct.py +52 -3
  15. camel/datasets/__init__.py +28 -0
  16. camel/datasets/base.py +969 -0
  17. camel/environments/__init__.py +16 -0
  18. camel/environments/base.py +503 -0
  19. camel/extractors/__init__.py +16 -0
  20. camel/extractors/base.py +263 -0
  21. camel/memories/agent_memories.py +16 -1
  22. camel/memories/blocks/chat_history_block.py +10 -2
  23. camel/memories/blocks/vectordb_block.py +1 -0
  24. camel/memories/context_creators/score_based.py +20 -3
  25. camel/memories/records.py +10 -0
  26. camel/messages/base.py +8 -8
  27. camel/models/__init__.py +2 -0
  28. camel/models/_utils.py +57 -0
  29. camel/models/aiml_model.py +48 -17
  30. camel/models/anthropic_model.py +41 -3
  31. camel/models/azure_openai_model.py +39 -3
  32. camel/models/base_audio_model.py +92 -0
  33. camel/models/base_model.py +88 -13
  34. camel/models/cohere_model.py +88 -11
  35. camel/models/deepseek_model.py +107 -45
  36. camel/models/fish_audio_model.py +18 -8
  37. camel/models/gemini_model.py +133 -15
  38. camel/models/groq_model.py +72 -10
  39. camel/models/internlm_model.py +14 -3
  40. camel/models/litellm_model.py +9 -2
  41. camel/models/mistral_model.py +42 -5
  42. camel/models/model_manager.py +57 -3
  43. camel/models/moonshot_model.py +33 -4
  44. camel/models/nemotron_model.py +32 -3
  45. camel/models/nvidia_model.py +43 -3
  46. camel/models/ollama_model.py +139 -17
  47. camel/models/openai_audio_models.py +87 -2
  48. camel/models/openai_compatible_model.py +37 -3
  49. camel/models/openai_model.py +158 -46
  50. camel/models/qwen_model.py +61 -4
  51. camel/models/reka_model.py +53 -3
  52. camel/models/samba_model.py +209 -4
  53. camel/models/sglang_model.py +153 -14
  54. camel/models/siliconflow_model.py +16 -3
  55. camel/models/stub_model.py +46 -4
  56. camel/models/togetherai_model.py +38 -3
  57. camel/models/vllm_model.py +37 -3
  58. camel/models/yi_model.py +36 -3
  59. camel/models/zhipuai_model.py +38 -3
  60. camel/retrievers/__init__.py +3 -0
  61. camel/retrievers/hybrid_retrival.py +237 -0
  62. camel/toolkits/__init__.py +15 -1
  63. camel/toolkits/arxiv_toolkit.py +2 -1
  64. camel/toolkits/ask_news_toolkit.py +4 -2
  65. camel/toolkits/audio_analysis_toolkit.py +238 -0
  66. camel/toolkits/base.py +22 -3
  67. camel/toolkits/code_execution.py +2 -0
  68. camel/toolkits/dappier_toolkit.py +2 -1
  69. camel/toolkits/data_commons_toolkit.py +38 -12
  70. camel/toolkits/excel_toolkit.py +172 -0
  71. camel/toolkits/function_tool.py +13 -0
  72. camel/toolkits/github_toolkit.py +5 -1
  73. camel/toolkits/google_maps_toolkit.py +2 -1
  74. camel/toolkits/google_scholar_toolkit.py +2 -0
  75. camel/toolkits/human_toolkit.py +0 -3
  76. camel/toolkits/image_analysis_toolkit.py +202 -0
  77. camel/toolkits/linkedin_toolkit.py +3 -2
  78. camel/toolkits/meshy_toolkit.py +3 -2
  79. camel/toolkits/mineru_toolkit.py +2 -2
  80. camel/toolkits/networkx_toolkit.py +240 -0
  81. camel/toolkits/notion_toolkit.py +2 -0
  82. camel/toolkits/openbb_toolkit.py +3 -2
  83. camel/toolkits/page_script.js +376 -0
  84. camel/toolkits/reddit_toolkit.py +11 -3
  85. camel/toolkits/retrieval_toolkit.py +6 -1
  86. camel/toolkits/semantic_scholar_toolkit.py +2 -1
  87. camel/toolkits/stripe_toolkit.py +8 -2
  88. camel/toolkits/sympy_toolkit.py +6 -1
  89. camel/toolkits/video_analysis_toolkit.py +407 -0
  90. camel/toolkits/{video_toolkit.py → video_download_toolkit.py} +21 -25
  91. camel/toolkits/web_toolkit.py +1307 -0
  92. camel/toolkits/whatsapp_toolkit.py +3 -2
  93. camel/toolkits/zapier_toolkit.py +191 -0
  94. camel/types/__init__.py +2 -2
  95. camel/types/agents/__init__.py +16 -0
  96. camel/types/agents/tool_calling_record.py +52 -0
  97. camel/types/enums.py +3 -0
  98. camel/types/openai_types.py +16 -14
  99. camel/utils/__init__.py +2 -1
  100. camel/utils/async_func.py +2 -2
  101. camel/utils/commons.py +114 -1
  102. camel/verifiers/__init__.py +23 -0
  103. camel/verifiers/base.py +340 -0
  104. camel/verifiers/models.py +82 -0
  105. camel/verifiers/python_verifier.py +202 -0
  106. camel_ai-0.2.23.dist-info/METADATA +671 -0
  107. {camel_ai-0.2.22.dist-info → camel_ai-0.2.23.dist-info}/RECORD +122 -97
  108. {camel_ai-0.2.22.dist-info → camel_ai-0.2.23.dist-info}/WHEEL +1 -1
  109. camel_ai-0.2.22.dist-info/METADATA +0 -527
  110. {camel_ai-0.2.22.dist-info → camel_ai-0.2.23.dist-info/licenses}/LICENSE +0 -0
camel/models/mistral_model.py

@@ -12,7 +12,9 @@
  # limitations under the License.
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
  import os
- from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union
+
+ from pydantic import BaseModel

  if TYPE_CHECKING:
      from mistralai.models import (
@@ -20,10 +22,13 @@ if TYPE_CHECKING:
          Messages,
      )

+ from openai import AsyncStream
+
  from camel.configs import MISTRAL_API_PARAMS, MistralConfig
  from camel.messages import OpenAIMessage
  from camel.models import BaseModelBackend
- from camel.types import ChatCompletion, ModelType
+ from camel.models._utils import try_modify_message_with_format
+ from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
  from camel.utils import (
      BaseTokenCounter,
      OpenAITokenCounter,
@@ -212,25 +217,42 @@ class MistralModel(BaseModelBackend):
              )
          return self._token_counter

-     def run(
+     async def _arun(
+         self,
+         messages: List[OpenAIMessage],
+         response_format: Optional[Type[BaseModel]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+         raise NotImplementedError("Mistral does not support async inference.")
+
+     def _run(
          self,
          messages: List[OpenAIMessage],
+         response_format: Optional[Type[BaseModel]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
      ) -> ChatCompletion:
          r"""Runs inference of Mistral chat completion.

          Args:
              messages (List[OpenAIMessage]): Message list with the chat history
                  in OpenAI API format.
+             response_format (Optional[Type[BaseModel]]): The format of the
+                 response for this query.
+             tools (Optional[List[Dict[str, Any]]]): The tools to use for this
+                 query.

          Returns:
-             ChatCompletion.
+             ChatCompletion: The response from the model.
          """
+         request_config = self._prepare_request(
+             messages, response_format, tools
+         )
          mistral_messages = self._to_mistral_chatmessage(messages)

          response = self._client.chat.complete(
              messages=mistral_messages,
              model=self.model_type,
-             **self.model_config_dict,
+             **request_config,
          )

          openai_response = self._to_openai_response(response) # type: ignore[arg-type]
@@ -251,6 +273,21 @@ class MistralModel(BaseModelBackend):

          return openai_response

+     def _prepare_request(
+         self,
+         messages: List[OpenAIMessage],
+         response_format: Optional[Type[BaseModel]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> Dict[str, Any]:
+         request_config = self.model_config_dict.copy()
+         if tools:
+             request_config["tools"] = tools
+         elif response_format:
+             try_modify_message_with_format(messages[-1], response_format)
+             request_config["response_format"] = {"type": "json_object"}
+
+         return request_config
+
      def check_model_config(self):
          r"""Check whether the model configuration contains any
          unexpected arguments to Mistral API.
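Note: the new `_prepare_request` helper gives `tools` precedence over `response_format`; when only a response format is supplied, the last message is rewritten via `try_modify_message_with_format` and Mistral is switched to JSON-object mode. Below is a minimal, self-contained sketch of that precedence logic — not the CAMEL code itself, and the schema-injection wording is an assumption standing in for `try_modify_message_with_format`:

from typing import Any, Dict, List, Optional, Type
from pydantic import BaseModel

def prepare_request_sketch(
    model_config: Dict[str, Any],
    last_message: Dict[str, Any],
    response_format: Optional[Type[BaseModel]] = None,
    tools: Optional[List[Dict[str, Any]]] = None,
) -> Dict[str, Any]:
    # Mirrors the precedence in MistralModel._prepare_request: tools win,
    # otherwise a response format triggers JSON-object mode.
    request = model_config.copy()
    if tools:
        request["tools"] = tools
    elif response_format:
        # Stand-in for try_modify_message_with_format: append the schema so
        # the model knows which JSON shape to return.
        last_message["content"] += (
            "\nReturn JSON matching this schema: "
            f"{response_format.model_json_schema()}"
        )
        request["response_format"] = {"type": "json_object"}
    return request

class CityInfo(BaseModel):
    city: str
    population: int

msg = {"role": "user", "content": "Name a city and its population."}
print(prepare_request_sketch({"temperature": 0.3}, msg, response_format=CityInfo))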
camel/models/model_manager.py

@@ -20,10 +20,13 @@ from typing import (
      Callable,
      Dict,
      List,
+     Optional,
+     Type,
      Union,
  )

- from openai import Stream
+ from openai import AsyncStream, Stream
+ from pydantic import BaseModel

  from camel.messages import OpenAIMessage
  from camel.models.base_model import BaseModelBackend
@@ -114,6 +117,15 @@ class ModelManager:
          """
          return self.models.index(self.current_model)

+     @property
+     def num_models(self) -> int:
+         r"""Return the number of models in the manager.
+
+         Returns:
+             int: The number of models available in the model manager.
+         """
+         return len(self.models)
+
      @property
      def token_limit(self):
          r"""Returns the maximum token limit for current model.
@@ -178,7 +190,10 @@
          return choice(self.models)

      def run(
-         self, messages: List[OpenAIMessage]
+         self,
+         messages: List[OpenAIMessage],
+         response_format: Optional[Type[BaseModel]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
      ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
          r"""Process a list of messages by selecting a model based on
          the scheduling strategy.
@@ -198,7 +213,46 @@

          # Pass all messages to the selected model and get the response
          try:
-             response = self.current_model.run(messages)
+             response = self.current_model.run(messages, response_format, tools)
+         except Exception as exc:
+             logger.error(f"Error processing with model: {self.current_model}")
+             if self.scheduling_strategy == self.always_first:
+                 self.scheduling_strategy = self.round_robin
+                 logger.warning(
+                     "The scheduling strategy has been changed to 'round_robin'"
+                 )
+                 # Skip already used one
+                 self.current_model = self.scheduling_strategy()
+             raise exc
+         return response
+
+     async def arun(
+         self,
+         messages: List[OpenAIMessage],
+         response_format: Optional[Type[BaseModel]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+         r"""Process a list of messages by selecting a model based on
+         the scheduling strategy.
+         Sends the entire list of messages to the selected model,
+         and returns a single response.
+
+         Args:
+             messages (List[OpenAIMessage]): Message list with the chat
+                 history in OpenAI API format.
+
+         Returns:
+             Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                 `ChatCompletion` in the non-stream mode, or
+                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
+         """
+         self.current_model = self.scheduling_strategy()
+
+         # Pass all messages to the selected model and get the response
+         try:
+             response = await self.current_model.arun(
+                 messages, response_format, tools
+             )
          except Exception as exc:
              logger.error(f"Error processing with model: {self.current_model}")
              if self.scheduling_strategy == self.always_first:
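Note: `ModelManager.run` now threads `response_format` and `tools` through to the selected backend, a `num_models` property is exposed, and an async `arun` is added with the same failover behaviour: if the current model raises while the strategy is `always_first`, the manager switches to round-robin before re-raising. A self-contained sketch of that failover pattern, using stand-in classes rather than the CAMEL ones:

import asyncio
from itertools import cycle

class StubModel:
    def __init__(self, name: str, fail: bool = False):
        self.name, self.fail = name, fail

    async def arun(self, messages, response_format=None, tools=None):
        if self.fail:
            raise RuntimeError(f"{self.name} is unavailable")
        return f"{self.name}: ok"

class ManagerSketch:
    def __init__(self, models):
        self.models = models
        self._round_robin = cycle(models)
        self.strategy = "always_first"

    @property
    def num_models(self) -> int:  # mirrors the new num_models property
        return len(self.models)

    def _pick(self):
        if self.strategy == "always_first":
            return self.models[0]
        return next(self._round_robin)

    async def arun(self, messages, response_format=None, tools=None):
        current = self._pick()
        try:
            return await current.arun(messages, response_format, tools)
        except Exception:
            if self.strategy == "always_first":
                self.strategy = "round_robin"  # failover, as in the diff
                next(self._round_robin)        # skip the model that just failed
            raise

async def main():
    manager = ManagerSketch([StubModel("primary", fail=True), StubModel("backup")])
    try:
        await manager.arun([{"role": "user", "content": "hi"}])
    except RuntimeError:
        pass  # first call fails; the strategy flips to round-robin
    print(await manager.arun([{"role": "user", "content": "hi"}]))  # backup: ok

asyncio.run(main())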
camel/models/moonshot_model.py

@@ -13,13 +13,15 @@
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

  import os
- from typing import Any, Dict, List, Optional, Union
+ from typing import Any, Dict, List, Optional, Type, Union

- from openai import OpenAI, Stream
+ from openai import AsyncStream, OpenAI, Stream
+ from pydantic import BaseModel

  from camel.configs import MOONSHOT_API_PARAMS, MoonshotConfig
  from camel.messages import OpenAIMessage
  from camel.models import BaseModelBackend
+ from camel.models._utils import try_modify_message_with_format
  from camel.types import (
      ChatCompletion,
      ChatCompletionChunk,
@@ -78,9 +80,24 @@ class MoonshotModel(BaseModelBackend):
              base_url=self._url,
          )

-     def run(
+     def _prepare_request(
          self,
          messages: List[OpenAIMessage],
+         response_format: Optional[Type[BaseModel]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> Dict[str, Any]:
+         request_config = self.model_config_dict.copy()
+         if tools:
+             request_config["tools"] = tools
+         elif response_format:
+             try_modify_message_with_format(messages[-1], response_format)
+         return request_config
+
+     def _run(
+         self,
+         messages: List[OpenAIMessage],
+         response_format: Optional[Type[BaseModel]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
      ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
          r"""Runs inference of Moonshot chat completion.
@@ -93,13 +110,25 @@ class MoonshotModel(BaseModelBackend):
                  `ChatCompletion` in the non-stream mode, or
                  `Stream[ChatCompletionChunk]` in the stream mode.
          """
+         request_config = self._prepare_request(
+             messages, response_format, tools
+         )
+
          response = self._client.chat.completions.create(
              messages=messages,
              model=self.model_type,
-             **self.model_config_dict,
+             **request_config,
          )
          return response

+     async def _arun(
+         self,
+         messages: List[OpenAIMessage],
+         response_format: Optional[Type[BaseModel]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+         raise NotImplementedError("Moonshot does not support async inference.")
+
      @property
      def token_counter(self) -> BaseTokenCounter:
          r"""Initialize the token counter for the model backend.
camel/models/nemotron_model.py

@@ -12,9 +12,10 @@
  # limitations under the License.
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
  import os
- from typing import List, Optional, Union
+ from typing import Any, Dict, List, Optional, Type, Union

- from openai import OpenAI
+ from openai import AsyncOpenAI, OpenAI
+ from pydantic import BaseModel

  from camel.messages import OpenAIMessage
  from camel.models import BaseModelBackend
@@ -62,10 +63,38 @@ class NemotronModel(BaseModelBackend):
              base_url=self._url,
              api_key=self._api_key,
          )
+         self._async_client = AsyncOpenAI(
+             timeout=180,
+             max_retries=3,
+             base_url=self._url,
+             api_key=self._api_key,
+         )
+
+     async def _arun(
+         self,
+         messages: List[OpenAIMessage],
+         response_format: Optional[Type[BaseModel]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> ChatCompletion:
+         r"""Runs inference of OpenAI chat completion asynchronously.
+
+         Args:
+             messages (List[OpenAIMessage]): Message list.
+
+         Returns:
+             ChatCompletion.
+         """
+         response = await self._async_client.chat.completions.create(
+             messages=messages,
+             model=self.model_type,
+         )
+         return response

-     def run(
+     def _run(
          self,
          messages: List[OpenAIMessage],
+         response_format: Optional[Type[BaseModel]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
      ) -> ChatCompletion:
          r"""Runs inference of OpenAI chat completion.
camel/models/nvidia_model.py

@@ -13,13 +13,14 @@
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

  import os
- from typing import Any, Dict, List, Optional, Union
+ from typing import Any, Dict, List, Optional, Type, Union

- from openai import OpenAI, Stream
+ from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
  from openai.types.chat import (
      ChatCompletion,
      ChatCompletionChunk,
  )
+ from pydantic import BaseModel

  from camel.configs import NVIDIA_API_PARAMS, NvidiaConfig
  from camel.messages import OpenAIMessage
@@ -76,10 +77,49 @@ class NvidiaModel(BaseModelBackend):
              api_key=self._api_key,
              base_url=self._url,
          )
+         self._async_client = AsyncOpenAI(
+             timeout=180,
+             max_retries=3,
+             api_key=self._api_key,
+             base_url=self._url,
+         )
+
+     async def _arun(
+         self,
+         messages: List[OpenAIMessage],
+         response_format: Optional[Type[BaseModel]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+         r"""Runs inference of NVIDIA chat completion.
+
+         Args:
+             messages (List[OpenAIMessage]): Message list with the chat history
+                 in OpenAI API format.
+
+         Returns:
+             Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                 `ChatCompletion` in the non-stream mode, or
+                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
+         """
+
+         # Remove tool-related parameters if no tools are specified
+         config = dict(self.model_config_dict)
+         if not config.get("tools"): # None or empty list
+             config.pop("tools", None)
+             config.pop("tool_choice", None)
+
+         response = await self._async_client.chat.completions.create(
+             messages=messages,
+             model=self.model_type,
+             **config,
+         )
+         return response

-     def run(
+     def _run(
          self,
          messages: List[OpenAIMessage],
+         response_format: Optional[Type[BaseModel]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
      ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
          r"""Runs inference of NVIDIA chat completion.
camel/models/ollama_model.py

@@ -13,13 +13,15 @@
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
  import os
  import subprocess
- from typing import Any, Dict, List, Optional, Union
+ from typing import Any, Dict, List, Optional, Type, Union

- from openai import OpenAI, Stream
+ from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+ from pydantic import BaseModel

  from camel.configs import OLLAMA_API_PARAMS, OllamaConfig
  from camel.messages import OpenAIMessage
  from camel.models import BaseModelBackend
+ from camel.models._utils import try_modify_message_with_format
  from camel.types import (
      ChatCompletion,
      ChatCompletionChunk,
@@ -75,6 +77,12 @@ class OllamaModel(BaseModelBackend):
              api_key="Set-but-ignored", # required but ignored
              base_url=self._url,
          )
+         self._async_client = AsyncOpenAI(
+             timeout=180,
+             max_retries=3,
+             api_key="Set-but-ignored", # required but ignored
+             base_url=self._url,
+         )

      def _start_server(self) -> None:
          r"""Starts the Ollama server in a subprocess."""
@@ -119,40 +127,154 @@
                  "input into Ollama model backend."
              )

-     def run(
+     def _run(
          self,
          messages: List[OpenAIMessage],
+         response_format: Optional[Type[BaseModel]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
      ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-         r"""Runs inference of OpenAI chat completion.
+         r"""Runs inference of Ollama chat completion.

          Args:
              messages (List[OpenAIMessage]): Message list with the chat history
                  in OpenAI API format.
+             response_format (Optional[Type[BaseModel]]): The format of the
+                 response.
+             tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                 use for the request.

          Returns:
              Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                  `ChatCompletion` in the non-stream mode, or
                  `Stream[ChatCompletionChunk]` in the stream mode.
          """
-         if self.model_config_dict.get("response_format"):
-             # stream is not supported in beta.chat.completions.parse
-             if "stream" in self.model_config_dict:
-                 del self.model_config_dict["stream"]
-
-             response = self._client.beta.chat.completions.parse(
-                 messages=messages,
-                 model=self.model_type,
-                 **self.model_config_dict,
+         response_format = response_format or self.model_config_dict.get(
+             "response_format", None
+         )
+         # For Ollama, the tool calling will be broken with response_format
+         if response_format and not tools:
+             return self._request_parse(messages, response_format, tools)
+         else:
+             return self._request_chat_completion(
+                 messages, response_format, tools
+             )
+
+     async def _arun(
+         self,
+         messages: List[OpenAIMessage],
+         response_format: Optional[Type[BaseModel]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+         r"""Runs inference of Ollama chat completion in async mode.
+
+         Args:
+             messages (List[OpenAIMessage]): Message list with the chat history
+                 in OpenAI API format.
+             response_format (Optional[Type[BaseModel]]): The format of the
+                 response.
+             tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                 use for the request.
+
+         Returns:
+             Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                 `ChatCompletion` in the non-stream mode, or
+                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
+         """
+         response_format = response_format or self.model_config_dict.get(
+             "response_format", None
+         )
+         if response_format:
+             return await self._arequest_parse(messages, response_format, tools)
+         else:
+             return await self._arequest_chat_completion(
+                 messages, response_format, tools
              )

-         return self._to_chat_completion(response)
+     def _prepare_chat_completion_config(
+         self,
+         messages: List[OpenAIMessage],
+         response_format: Optional[Type[BaseModel]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> Dict[str, Any]:
+         request_config = self.model_config_dict.copy()
+
+         if tools:
+             request_config["tools"] = tools
+         if response_format:
+             try_modify_message_with_format(messages[-1], response_format)
+             request_config["response_format"] = {"type": "json_object"}
+
+         return request_config
+
+     def _request_chat_completion(
+         self,
+         messages: List[OpenAIMessage],
+         response_format: Optional[Type[BaseModel]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+         request_config = self._prepare_chat_completion_config(
+             messages, response_format, tools
+         )
+
+         return self._client.chat.completions.create(
+             messages=messages,
+             model=self.model_type,
+             **request_config,
+         )
+
+     async def _arequest_chat_completion(
+         self,
+         messages: List[OpenAIMessage],
+         response_format: Optional[Type[BaseModel]] = None,
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+         request_config = self._prepare_chat_completion_config(
+             messages, response_format, tools
+         )
+
+         return await self._async_client.chat.completions.create(
+             messages=messages,
+             model=self.model_type,
+             **request_config,
+         )
+
+     def _request_parse(
+         self,
+         messages: List[OpenAIMessage],
+         response_format: Type[BaseModel],
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> ChatCompletion:
+         request_config = self.model_config_dict.copy()
+
+         request_config["response_format"] = response_format
+         request_config.pop("stream", None)
+         if tools is not None:
+             request_config["tools"] = tools
+
+         return self._client.beta.chat.completions.parse(
+             messages=messages,
+             model=self.model_type,
+             **request_config,
+         )
+
+     async def _arequest_parse(
+         self,
+         messages: List[OpenAIMessage],
+         response_format: Type[BaseModel],
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> ChatCompletion:
+         request_config = self.model_config_dict.copy()
+
+         request_config["response_format"] = response_format
+         request_config.pop("stream", None)
+         if tools is not None:
+             request_config["tools"] = tools

-         response = self._client.chat.completions.create(
+         return await self._async_client.beta.chat.completions.parse(
              messages=messages,
              model=self.model_type,
-             **self.model_config_dict,
+             **request_config,
          )
-         return response

      @property
      def stream(self) -> bool:
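Note: the Ollama backend now routes structured output through `beta.chat.completions.parse` (with streaming removed) whenever a `response_format` is given and, in the sync path, no tools are passed, since Ollama tool calling breaks when a response format is also set; everything else goes through the ordinary chat-completions path with the format folded into the request config. A hedged sketch of what the new async parse path does under the hood, using the OpenAI SDK pointed at a local Ollama server — the URL and model name are assumptions for illustration:

import asyncio
from openai import AsyncOpenAI
from pydantic import BaseModel

class Plan(BaseModel):
    steps: list[str]

async def main():
    # Mirrors OllamaModel._arequest_parse: an AsyncOpenAI client against the
    # local Ollama endpoint, a pydantic model as response_format, and no
    # streaming (parse does not support it).
    client = AsyncOpenAI(base_url="http://localhost:11434/v1", api_key="ignored")
    completion = await client.beta.chat.completions.parse(
        model="llama3.2",
        messages=[{"role": "user", "content": "Plan a three-step morning routine."}],
        response_format=Plan,
    )
    print(completion.choices[0].message.parsed)

asyncio.run(main())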