camel-ai 0.2.21__py3-none-any.whl → 0.2.23__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Note: this version of camel-ai has been flagged as potentially problematic.

Files changed (116)
  1. camel/__init__.py +1 -1
  2. camel/agents/_types.py +41 -0
  3. camel/agents/_utils.py +188 -0
  4. camel/agents/chat_agent.py +570 -965
  5. camel/agents/knowledge_graph_agent.py +7 -1
  6. camel/agents/multi_hop_generator_agent.py +1 -1
  7. camel/configs/base_config.py +10 -13
  8. camel/configs/deepseek_config.py +4 -30
  9. camel/configs/gemini_config.py +5 -31
  10. camel/configs/openai_config.py +14 -32
  11. camel/configs/qwen_config.py +36 -36
  12. camel/datagen/self_improving_cot.py +81 -3
  13. camel/datagen/self_instruct/filter/instruction_filter.py +19 -3
  14. camel/datagen/self_instruct/self_instruct.py +53 -4
  15. camel/datasets/__init__.py +28 -0
  16. camel/datasets/base.py +969 -0
  17. camel/embeddings/openai_embedding.py +10 -1
  18. camel/environments/__init__.py +16 -0
  19. camel/environments/base.py +503 -0
  20. camel/extractors/__init__.py +16 -0
  21. camel/extractors/base.py +263 -0
  22. camel/interpreters/docker/Dockerfile +12 -0
  23. camel/interpreters/docker_interpreter.py +19 -1
  24. camel/interpreters/subprocess_interpreter.py +42 -17
  25. camel/loaders/__init__.py +2 -0
  26. camel/loaders/mineru_extractor.py +250 -0
  27. camel/memories/agent_memories.py +16 -1
  28. camel/memories/blocks/chat_history_block.py +10 -2
  29. camel/memories/blocks/vectordb_block.py +1 -0
  30. camel/memories/context_creators/score_based.py +20 -3
  31. camel/memories/records.py +10 -0
  32. camel/messages/base.py +8 -8
  33. camel/models/__init__.py +2 -0
  34. camel/models/_utils.py +57 -0
  35. camel/models/aiml_model.py +48 -17
  36. camel/models/anthropic_model.py +41 -3
  37. camel/models/azure_openai_model.py +39 -3
  38. camel/models/base_audio_model.py +92 -0
  39. camel/models/base_model.py +132 -4
  40. camel/models/cohere_model.py +88 -11
  41. camel/models/deepseek_model.py +107 -63
  42. camel/models/fish_audio_model.py +18 -8
  43. camel/models/gemini_model.py +133 -15
  44. camel/models/groq_model.py +72 -10
  45. camel/models/internlm_model.py +14 -3
  46. camel/models/litellm_model.py +9 -2
  47. camel/models/mistral_model.py +42 -5
  48. camel/models/model_manager.py +57 -3
  49. camel/models/moonshot_model.py +33 -4
  50. camel/models/nemotron_model.py +32 -3
  51. camel/models/nvidia_model.py +43 -3
  52. camel/models/ollama_model.py +139 -17
  53. camel/models/openai_audio_models.py +87 -2
  54. camel/models/openai_compatible_model.py +37 -3
  55. camel/models/openai_model.py +158 -46
  56. camel/models/qwen_model.py +61 -4
  57. camel/models/reka_model.py +53 -3
  58. camel/models/samba_model.py +209 -4
  59. camel/models/sglang_model.py +153 -14
  60. camel/models/siliconflow_model.py +16 -3
  61. camel/models/stub_model.py +46 -4
  62. camel/models/togetherai_model.py +38 -3
  63. camel/models/vllm_model.py +37 -3
  64. camel/models/yi_model.py +36 -3
  65. camel/models/zhipuai_model.py +38 -3
  66. camel/retrievers/__init__.py +3 -0
  67. camel/retrievers/hybrid_retrival.py +237 -0
  68. camel/toolkits/__init__.py +20 -3
  69. camel/toolkits/arxiv_toolkit.py +2 -1
  70. camel/toolkits/ask_news_toolkit.py +4 -2
  71. camel/toolkits/audio_analysis_toolkit.py +238 -0
  72. camel/toolkits/base.py +22 -3
  73. camel/toolkits/code_execution.py +2 -0
  74. camel/toolkits/dappier_toolkit.py +2 -1
  75. camel/toolkits/data_commons_toolkit.py +38 -12
  76. camel/toolkits/excel_toolkit.py +172 -0
  77. camel/toolkits/function_tool.py +13 -0
  78. camel/toolkits/github_toolkit.py +5 -1
  79. camel/toolkits/google_maps_toolkit.py +2 -1
  80. camel/toolkits/google_scholar_toolkit.py +2 -0
  81. camel/toolkits/human_toolkit.py +0 -3
  82. camel/toolkits/image_analysis_toolkit.py +202 -0
  83. camel/toolkits/linkedin_toolkit.py +3 -2
  84. camel/toolkits/meshy_toolkit.py +3 -2
  85. camel/toolkits/mineru_toolkit.py +178 -0
  86. camel/toolkits/networkx_toolkit.py +240 -0
  87. camel/toolkits/notion_toolkit.py +2 -0
  88. camel/toolkits/openbb_toolkit.py +3 -2
  89. camel/toolkits/page_script.js +376 -0
  90. camel/toolkits/reddit_toolkit.py +11 -3
  91. camel/toolkits/retrieval_toolkit.py +6 -1
  92. camel/toolkits/semantic_scholar_toolkit.py +2 -1
  93. camel/toolkits/stripe_toolkit.py +8 -2
  94. camel/toolkits/sympy_toolkit.py +44 -1
  95. camel/toolkits/video_analysis_toolkit.py +407 -0
  96. camel/toolkits/{video_toolkit.py → video_download_toolkit.py} +21 -25
  97. camel/toolkits/web_toolkit.py +1307 -0
  98. camel/toolkits/whatsapp_toolkit.py +3 -2
  99. camel/toolkits/zapier_toolkit.py +191 -0
  100. camel/types/__init__.py +2 -2
  101. camel/types/agents/__init__.py +16 -0
  102. camel/types/agents/tool_calling_record.py +52 -0
  103. camel/types/enums.py +3 -0
  104. camel/types/openai_types.py +16 -14
  105. camel/utils/__init__.py +2 -1
  106. camel/utils/async_func.py +2 -2
  107. camel/utils/commons.py +114 -1
  108. camel/verifiers/__init__.py +23 -0
  109. camel/verifiers/base.py +340 -0
  110. camel/verifiers/models.py +82 -0
  111. camel/verifiers/python_verifier.py +202 -0
  112. camel_ai-0.2.23.dist-info/METADATA +671 -0
  113. {camel_ai-0.2.21.dist-info → camel_ai-0.2.23.dist-info}/RECORD +127 -99
  114. {camel_ai-0.2.21.dist-info → camel_ai-0.2.23.dist-info}/WHEEL +1 -1
  115. camel_ai-0.2.21.dist-info/METADATA +0 -528
  116. {camel_ai-0.2.21.dist-info → camel_ai-0.2.23.dist-info/licenses}/LICENSE +0 -0

camel/models/gemini_model.py

@@ -12,9 +12,10 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import OpenAI, Stream
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from pydantic import BaseModel
 
 from camel.configs import Gemini_API_PARAMS, GeminiConfig
 from camel.messages import OpenAIMessage
@@ -81,37 +82,154 @@ class GeminiModel(BaseModelBackend):
             api_key=self._api_key,
             base_url=self._url,
         )
+        self._async_client = AsyncOpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
 
-    def run(
+    def _process_messages(self, messages) -> List[OpenAIMessage]:
+        r"""Process the messages for Gemini API to ensure no empty content,
+        which is not accepted by Gemini.
+        """
+        processed_messages = []
+        for msg in messages:
+            msg_copy = msg.copy()
+            if 'content' in msg_copy and msg_copy['content'] == '':
+                msg_copy['content'] = 'null'
+            processed_messages.append(msg_copy)
+        return processed_messages
+
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs inference of Gemini chat completion.
 
         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
 
         Returns:
             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
-        # Process messages to ensure no empty content, it's not accepeted by
-        # Gemini
-        processed_messages = []
-        for msg in messages:
-            msg_copy = msg.copy()
-            if 'content' in msg_copy and msg_copy['content'] == '':
-                msg_copy['content'] = 'null'
-            processed_messages.append(msg_copy)
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
+        )
+        messages = self._process_messages(messages)
+        if response_format:
+            return self._request_parse(messages, response_format)
+        else:
+            return self._request_chat_completion(messages, tools)
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI chat completion in async mode.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
+        )
+        messages = self._process_messages(messages)
+        if response_format:
+            return await self._arequest_parse(messages, response_format)
+        else:
+            return await self._arequest_chat_completion(messages, tools)
+
+    def _request_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
+
+        if tools:
+            for tool in tools:
+                function_dict = tool.get('function', {})
+                function_dict.pop("strict", None)
+            request_config["tools"] = tools
+
+        return self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    async def _arequest_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
+
+        if tools:
+            for tool in tools:
+                function_dict = tool.get('function', {})
+                function_dict.pop("strict", None)
+            request_config["tools"] = tools
+
+        return await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    def _request_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+        request_config.pop("stream", None)
+
+        return self._client.beta.chat.completions.parse(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    async def _arequest_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+        request_config.pop("stream", None)
 
-        response = self._client.chat.completions.create(
-            messages=processed_messages,
+        return await self._async_client.beta.chat.completions.parse(
+            messages=messages,
             model=self.model_type,
-            **self.model_config_dict,
+            **request_config,
         )
-        return response
 
     @property
     def token_counter(self) -> BaseTokenCounter:
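
The main change above is that GeminiModel now routes structured-output requests through the OpenAI-compatible beta.chat.completions.parse endpoint whenever a Pydantic model is supplied (per call or via response_format in the model config), and gains an async client mirroring the sync path. A minimal usage sketch follows; it assumes the public run() wrapper added in base_model.py dispatches to _run() and that the constructor accepts the usual model_type and api_key keywords, neither of which is shown in this diff:

    # Hedged usage sketch. GeminiModel's constructor keywords and the public
    # run() wrapper (added in base_model.py in this release) are assumptions.
    from pydantic import BaseModel

    from camel.models.gemini_model import GeminiModel


    class CityInfo(BaseModel):
        name: str
        population: int


    model = GeminiModel(model_type="gemini-1.5-flash", api_key="<GEMINI_API_KEY>")
    messages = [{"role": "user", "content": "Give me basic facts about Paris."}]

    # With a response_format, _run() takes the _request_parse() branch, which
    # drops any "stream" flag from the config and calls
    # beta.chat.completions.parse() on the OpenAI-compatible client.
    completion = model.run(messages, response_format=CityInfo)
    print(completion.choices[0].message)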

camel/models/groq_model.py

@@ -12,13 +12,15 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import OpenAI, Stream
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from pydantic import BaseModel
 
 from camel.configs import GROQ_API_PARAMS, GroqConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
+from camel.models._utils import try_modify_message_with_format
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
@@ -51,11 +53,7 @@ class GroqModel(BaseModelBackend):
             (default: :obj:`None`)
     """
 
-    @api_keys_required(
-        [
-            ("api_key", "GROQ_API_KEY"),
-        ]
-    )
+    @api_keys_required([("api_key", "GROQ_API_KEY")])
     def __init__(
         self,
         model_type: Union[ModelType, str],
@@ -79,6 +77,12 @@ class GroqModel(BaseModelBackend):
             api_key=self._api_key,
             base_url=self._url,
         )
+        self._async_client = AsyncOpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
 
     @property
     def token_counter(self) -> BaseTokenCounter:
@@ -92,25 +96,83 @@ class GroqModel(BaseModelBackend):
         self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
         return self._token_counter
 
-    def run(
+    def _prepare_request(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Dict[str, Any]:
+        request_config = self.model_config_dict.copy()
+        if tools:
+            request_config["tools"] = tools
+        elif response_format:
+            try_modify_message_with_format(messages[-1], response_format)
+            request_config["response_format"] = {"type": "json_object"}
+
+        return request_config
+
+    def _run(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of OpenAI chat completion.
+        r"""Runs inference of Groq chat completion.
 
         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
 
         Returns:
             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+
         response = self._client.chat.completions.create(
             messages=messages,
             model=self.model_type,
-            **self.model_config_dict,
+            **request_config,
+        )
+
+        return response
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of Groq chat completion asynchronously.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+
+        response = await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
         )
 
         return response
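
Unlike the Gemini backend, GroqModel has no native parse endpoint: _prepare_request() either forwards tool schemas or, for structured output, rewrites the last message with try_modify_message_with_format (new in camel/models/_utils.py) and requests plain JSON mode. The helper's body is not part of this diff; below is a rough sketch of what such a helper plausibly does, under the assumption that it appends the target schema to the message content:

    # Assumed behavior only: the real helper lives in camel/models/_utils.py
    # and is not shown in this diff. The signature matches the call site
    # try_modify_message_with_format(messages[-1], response_format).
    import json
    from typing import Optional, Type

    from pydantic import BaseModel


    def try_modify_message_with_format(
        message: dict, response_format: Optional[Type[BaseModel]]
    ) -> None:
        if response_format is None:
            return
        # Pydantic v2: serialize the model's JSON schema as an instruction.
        schema = json.dumps(response_format.model_json_schema())
        if isinstance(message.get("content"), str):
            message["content"] += (
                "\nPlease respond in JSON matching this schema:\n" + schema
            )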

camel/models/internlm_model.py

@@ -13,9 +13,10 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import OpenAI, Stream
+from openai import AsyncStream, OpenAI, Stream
+from pydantic import BaseModel
 
 from camel.configs import INTERNLM_API_PARAMS, InternLMConfig
 from camel.messages import OpenAIMessage
@@ -82,9 +83,11 @@ class InternLMModel(BaseModelBackend):
             base_url=self._url,
         )
 
-    def run(
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs inference of InternLM chat completion.
 
@@ -104,6 +107,14 @@ class InternLMModel(BaseModelBackend):
         )
         return response
 
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        raise NotImplementedError("InternLM does not support async inference.")
+
     @property
     def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.
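
InternLM is one of several backends in this release (together with LiteLLM, Mistral, and Moonshot below) whose _arun() simply raises NotImplementedError. Callers that mix backends may therefore want to fall back to the synchronous path; a small sketch, assuming the public run()/arun() wrappers on BaseModelBackend:

    # Hedged sketch: degrade gracefully when a backend lacks async support.
    # The public run()/arun() wrappers on BaseModelBackend are assumed here.
    import asyncio


    async def safe_arun(model, messages):
        try:
            return await model.arun(messages)
        except NotImplementedError:
            # e.g. InternLMModel, MistralModel, MoonshotModel in this release:
            # run the blocking sync call in a worker thread instead.
            return await asyncio.to_thread(model.run, messages)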

camel/models/litellm_model.py

@@ -11,7 +11,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
+
+from pydantic import BaseModel
 
 from camel.configs import LITELLM_API_PARAMS, LiteLLMConfig
 from camel.messages import OpenAIMessage
@@ -106,9 +108,14 @@ class LiteLLMModel(BaseModelBackend):
         self._token_counter = LiteLLMTokenCounter(self.model_type)
         return self._token_counter
 
-    def run(
+    async def _arun(self) -> None:  # type: ignore[override]
+        raise NotImplementedError
+
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> ChatCompletion:
         r"""Runs inference of LiteLLM chat completion.
 

camel/models/mistral_model.py

@@ -12,7 +12,9 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
+from typing import TYPE_CHECKING, Any, Dict, List, Optional, Type, Union
+
+from pydantic import BaseModel
 
 if TYPE_CHECKING:
     from mistralai.models import (
@@ -20,10 +22,13 @@ if TYPE_CHECKING:
         Messages,
     )
 
+    from openai import AsyncStream
+
 from camel.configs import MISTRAL_API_PARAMS, MistralConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
-from camel.types import ChatCompletion, ModelType
+from camel.models._utils import try_modify_message_with_format
+from camel.types import ChatCompletion, ChatCompletionChunk, ModelType
 from camel.utils import (
     BaseTokenCounter,
     OpenAITokenCounter,
@@ -212,25 +217,42 @@ class MistralModel(BaseModelBackend):
         )
         return self._token_counter
 
-    def run(
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        raise NotImplementedError("Mistral does not support async inference.")
+
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> ChatCompletion:
         r"""Runs inference of Mistral chat completion.
 
         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response for this query.
+            tools (Optional[List[Dict[str, Any]]]): The tools to use for this
+                query.
 
         Returns:
-            ChatCompletion.
+            ChatCompletion: The response from the model.
         """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
         mistral_messages = self._to_mistral_chatmessage(messages)
 
         response = self._client.chat.complete(
             messages=mistral_messages,
             model=self.model_type,
-            **self.model_config_dict,
+            **request_config,
         )
 
         openai_response = self._to_openai_response(response)  # type: ignore[arg-type]
@@ -251,6 +273,21 @@ class MistralModel(BaseModelBackend):
 
         return openai_response
 
+    def _prepare_request(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Dict[str, Any]:
+        request_config = self.model_config_dict.copy()
+        if tools:
+            request_config["tools"] = tools
+        elif response_format:
+            try_modify_message_with_format(messages[-1], response_format)
+            request_config["response_format"] = {"type": "json_object"}
+
+        return request_config
+
     def check_model_config(self):
         r"""Check whether the model configuration contains any
             unexpected arguments to Mistral API.
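
Note that in _prepare_request() (shared in shape across the Groq, Mistral, and Moonshot backends) tools take precedence over response_format: the elif means a single request carries either tool schemas or a JSON-format instruction, never both. A hedged sketch of passing a standard OpenAI-style function-tool schema through the public run() wrapper; the backend forwards the list untouched as request_config["tools"]:

    # Standard OpenAI function-tool schema; get_weather is a hypothetical tool.
    weather_tool = {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Look up the current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }

    messages = [{"role": "user", "content": "What's the weather in Tokyo?"}]
    # With tools set, any response_format argument is ignored by these
    # backends, so tool calling and JSON-mode output are mutually exclusive.
    # Given a configured backend instance `model`:
    # response = model.run(messages, tools=[weather_tool])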

camel/models/model_manager.py

@@ -20,10 +20,13 @@ from typing import (
     Callable,
     Dict,
     List,
+    Optional,
+    Type,
     Union,
 )
 
-from openai import Stream
+from openai import AsyncStream, Stream
+from pydantic import BaseModel
 
 from camel.messages import OpenAIMessage
 from camel.models.base_model import BaseModelBackend
@@ -114,6 +117,15 @@ class ModelManager:
         """
        return self.models.index(self.current_model)
 
+    @property
+    def num_models(self) -> int:
+        r"""Return the number of models in the manager.
+
+        Returns:
+            int: The number of models available in the model manager.
+        """
+        return len(self.models)
+
     @property
     def token_limit(self):
         r"""Returns the maximum token limit for current model.
@@ -178,7 +190,10 @@
         return choice(self.models)
 
     def run(
-        self, messages: List[OpenAIMessage]
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Process a list of messages by selecting a model based on
             the scheduling strategy.
@@ -198,7 +213,46 @@
 
         # Pass all messages to the selected model and get the response
         try:
-            response = self.current_model.run(messages)
+            response = self.current_model.run(messages, response_format, tools)
+        except Exception as exc:
+            logger.error(f"Error processing with model: {self.current_model}")
+            if self.scheduling_strategy == self.always_first:
+                self.scheduling_strategy = self.round_robin
+                logger.warning(
+                    "The scheduling strategy has been changed to 'round_robin'"
+                )
+                # Skip already used one
+                self.current_model = self.scheduling_strategy()
+            raise exc
+        return response
+
+    async def arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Process a list of messages by selecting a model based on
+            the scheduling strategy.
+            Sends the entire list of messages to the selected model,
+            and returns a single response.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat
+                history in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        self.current_model = self.scheduling_strategy()
+
+        # Pass all messages to the selected model and get the response
+        try:
+            response = await self.current_model.arun(
+                messages, response_format, tools
+            )
         except Exception as exc:
             logger.error(f"Error processing with model: {self.current_model}")
             if self.scheduling_strategy == self.always_first:
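
ModelManager.arun() mirrors run(): it re-selects a backend via the scheduling strategy, awaits the backend's arun(), and on failure demotes always_first to round_robin before re-raising. A minimal async sketch, assuming a manager has already been constructed with one or more backends and that the response follows the usual ChatCompletion message shape:

    # Hedged sketch of driving the new async path; manager construction and
    # the response attribute access are assumptions, not shown in this diff.
    import asyncio

    from camel.models.model_manager import ModelManager


    async def ask(manager: ModelManager, prompt: str) -> str:
        messages = [{"role": "user", "content": prompt}]
        # arun() picks a backend and awaits its arun(); backends without
        # async support raise NotImplementedError.
        response = await manager.arun(messages)
        return response.choices[0].message.content


    # Given a configured manager: asyncio.run(ask(manager, "Hello"))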

camel/models/moonshot_model.py

@@ -13,13 +13,15 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import OpenAI, Stream
+from openai import AsyncStream, OpenAI, Stream
+from pydantic import BaseModel
 
 from camel.configs import MOONSHOT_API_PARAMS, MoonshotConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
+from camel.models._utils import try_modify_message_with_format
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
@@ -78,9 +80,24 @@ class MoonshotModel(BaseModelBackend):
             base_url=self._url,
         )
 
-    def run(
+    def _prepare_request(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Dict[str, Any]:
+        request_config = self.model_config_dict.copy()
+        if tools:
+            request_config["tools"] = tools
+        elif response_format:
+            try_modify_message_with_format(messages[-1], response_format)
+        return request_config
+
+    def _run(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs inference of Moonshot chat completion.
 
@@ -93,13 +110,25 @@ class MoonshotModel(BaseModelBackend):
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+
         response = self._client.chat.completions.create(
             messages=messages,
             model=self.model_type,
-            **self.model_config_dict,
+            **request_config,
        )
         return response
 
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        raise NotImplementedError("Moonshot does not support async inference.")
+
     @property
     def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.

camel/models/nemotron_model.py

@@ -12,9 +12,10 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import OpenAI
+from openai import AsyncOpenAI, OpenAI
+from pydantic import BaseModel
 
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
@@ -62,10 +63,38 @@ class NemotronModel(BaseModelBackend):
             base_url=self._url,
             api_key=self._api_key,
         )
+        self._async_client = AsyncOpenAI(
+            timeout=180,
+            max_retries=3,
+            base_url=self._url,
+            api_key=self._api_key,
+        )
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> ChatCompletion:
+        r"""Runs inference of OpenAI chat completion asynchronously.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list.
+
+        Returns:
+            ChatCompletion.
+        """
+        response = await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+        )
+        return response
 
-    def run(
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> ChatCompletion:
         r"""Runs inference of OpenAI chat completion.