camel-ai 0.2.22__py3-none-any.whl → 0.2.23__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of camel-ai might be problematic.
Files changed (110)
  1. camel/__init__.py +1 -1
  2. camel/agents/_types.py +41 -0
  3. camel/agents/_utils.py +188 -0
  4. camel/agents/chat_agent.py +570 -965
  5. camel/agents/knowledge_graph_agent.py +7 -1
  6. camel/agents/multi_hop_generator_agent.py +1 -1
  7. camel/configs/base_config.py +10 -13
  8. camel/configs/deepseek_config.py +4 -30
  9. camel/configs/gemini_config.py +5 -31
  10. camel/configs/openai_config.py +14 -32
  11. camel/configs/qwen_config.py +36 -36
  12. camel/datagen/self_improving_cot.py +81 -3
  13. camel/datagen/self_instruct/filter/instruction_filter.py +19 -3
  14. camel/datagen/self_instruct/self_instruct.py +52 -3
  15. camel/datasets/__init__.py +28 -0
  16. camel/datasets/base.py +969 -0
  17. camel/environments/__init__.py +16 -0
  18. camel/environments/base.py +503 -0
  19. camel/extractors/__init__.py +16 -0
  20. camel/extractors/base.py +263 -0
  21. camel/memories/agent_memories.py +16 -1
  22. camel/memories/blocks/chat_history_block.py +10 -2
  23. camel/memories/blocks/vectordb_block.py +1 -0
  24. camel/memories/context_creators/score_based.py +20 -3
  25. camel/memories/records.py +10 -0
  26. camel/messages/base.py +8 -8
  27. camel/models/__init__.py +2 -0
  28. camel/models/_utils.py +57 -0
  29. camel/models/aiml_model.py +48 -17
  30. camel/models/anthropic_model.py +41 -3
  31. camel/models/azure_openai_model.py +39 -3
  32. camel/models/base_audio_model.py +92 -0
  33. camel/models/base_model.py +88 -13
  34. camel/models/cohere_model.py +88 -11
  35. camel/models/deepseek_model.py +107 -45
  36. camel/models/fish_audio_model.py +18 -8
  37. camel/models/gemini_model.py +133 -15
  38. camel/models/groq_model.py +72 -10
  39. camel/models/internlm_model.py +14 -3
  40. camel/models/litellm_model.py +9 -2
  41. camel/models/mistral_model.py +42 -5
  42. camel/models/model_manager.py +57 -3
  43. camel/models/moonshot_model.py +33 -4
  44. camel/models/nemotron_model.py +32 -3
  45. camel/models/nvidia_model.py +43 -3
  46. camel/models/ollama_model.py +139 -17
  47. camel/models/openai_audio_models.py +87 -2
  48. camel/models/openai_compatible_model.py +37 -3
  49. camel/models/openai_model.py +158 -46
  50. camel/models/qwen_model.py +61 -4
  51. camel/models/reka_model.py +53 -3
  52. camel/models/samba_model.py +209 -4
  53. camel/models/sglang_model.py +153 -14
  54. camel/models/siliconflow_model.py +16 -3
  55. camel/models/stub_model.py +46 -4
  56. camel/models/togetherai_model.py +38 -3
  57. camel/models/vllm_model.py +37 -3
  58. camel/models/yi_model.py +36 -3
  59. camel/models/zhipuai_model.py +38 -3
  60. camel/retrievers/__init__.py +3 -0
  61. camel/retrievers/hybrid_retrival.py +237 -0
  62. camel/toolkits/__init__.py +15 -1
  63. camel/toolkits/arxiv_toolkit.py +2 -1
  64. camel/toolkits/ask_news_toolkit.py +4 -2
  65. camel/toolkits/audio_analysis_toolkit.py +238 -0
  66. camel/toolkits/base.py +22 -3
  67. camel/toolkits/code_execution.py +2 -0
  68. camel/toolkits/dappier_toolkit.py +2 -1
  69. camel/toolkits/data_commons_toolkit.py +38 -12
  70. camel/toolkits/excel_toolkit.py +172 -0
  71. camel/toolkits/function_tool.py +13 -0
  72. camel/toolkits/github_toolkit.py +5 -1
  73. camel/toolkits/google_maps_toolkit.py +2 -1
  74. camel/toolkits/google_scholar_toolkit.py +2 -0
  75. camel/toolkits/human_toolkit.py +0 -3
  76. camel/toolkits/image_analysis_toolkit.py +202 -0
  77. camel/toolkits/linkedin_toolkit.py +3 -2
  78. camel/toolkits/meshy_toolkit.py +3 -2
  79. camel/toolkits/mineru_toolkit.py +2 -2
  80. camel/toolkits/networkx_toolkit.py +240 -0
  81. camel/toolkits/notion_toolkit.py +2 -0
  82. camel/toolkits/openbb_toolkit.py +3 -2
  83. camel/toolkits/page_script.js +376 -0
  84. camel/toolkits/reddit_toolkit.py +11 -3
  85. camel/toolkits/retrieval_toolkit.py +6 -1
  86. camel/toolkits/semantic_scholar_toolkit.py +2 -1
  87. camel/toolkits/stripe_toolkit.py +8 -2
  88. camel/toolkits/sympy_toolkit.py +6 -1
  89. camel/toolkits/video_analysis_toolkit.py +407 -0
  90. camel/toolkits/{video_toolkit.py → video_download_toolkit.py} +21 -25
  91. camel/toolkits/web_toolkit.py +1307 -0
  92. camel/toolkits/whatsapp_toolkit.py +3 -2
  93. camel/toolkits/zapier_toolkit.py +191 -0
  94. camel/types/__init__.py +2 -2
  95. camel/types/agents/__init__.py +16 -0
  96. camel/types/agents/tool_calling_record.py +52 -0
  97. camel/types/enums.py +3 -0
  98. camel/types/openai_types.py +16 -14
  99. camel/utils/__init__.py +2 -1
  100. camel/utils/async_func.py +2 -2
  101. camel/utils/commons.py +114 -1
  102. camel/verifiers/__init__.py +23 -0
  103. camel/verifiers/base.py +340 -0
  104. camel/verifiers/models.py +82 -0
  105. camel/verifiers/python_verifier.py +202 -0
  106. camel_ai-0.2.23.dist-info/METADATA +671 -0
  107. {camel_ai-0.2.22.dist-info → camel_ai-0.2.23.dist-info}/RECORD +122 -97
  108. {camel_ai-0.2.22.dist-info → camel_ai-0.2.23.dist-info}/WHEEL +1 -1
  109. camel_ai-0.2.22.dist-info/METADATA +0 -527
  110. {camel_ai-0.2.22.dist-info → camel_ai-0.2.23.dist-info/licenses}/LICENSE +0 -0
camel/models/deepseek_model.py

@@ -13,13 +13,15 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import OpenAI, Stream
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from pydantic import BaseModel
 
 from camel.configs import DEEPSEEK_API_PARAMS, DeepSeekConfig
 from camel.logger import get_logger
 from camel.messages import OpenAIMessage
+from camel.models._utils import try_modify_message_with_format
 from camel.models.base_model import BaseModelBackend
 from camel.types import (
     ChatCompletion,
@@ -30,6 +32,16 @@ from camel.utils import BaseTokenCounter, OpenAITokenCounter, api_keys_required
 
 logger = get_logger(__name__)
 
+REASONSER_UNSUPPORTED_PARAMS = [
+    "temperature",
+    "top_p",
+    "presence_penalty",
+    "frequency_penalty",
+    "logprobs",
+    "top_logprobs",
+    "tools",
+]
+
 
 class DeepSeekModel(BaseModelBackend):
     r"""DeepSeek API in a unified BaseModelBackend interface.
@@ -84,6 +96,13 @@ class DeepSeekModel(BaseModelBackend):
             base_url=self._url,
         )
 
+        self._async_client = AsyncOpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
+
     @property
     def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.
@@ -98,64 +117,51 @@ class DeepSeekModel(BaseModelBackend):
             )
         return self._token_counter
 
-    def run(
+    def _prepare_request(
         self,
         messages: List[OpenAIMessage],
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of DeepSeek chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Dict[str, Any]:
+        request_config = self.model_config_dict.copy()
 
-        Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
-        """
-        # deepseek reasoner has limitations
-        # reference: https://api-docs.deepseek.com/guides/reasoning_model#api-parameters
         if self.model_type in [
             ModelType.DEEPSEEK_REASONER,
         ]:
             logger.warning(
-                "You are using a DeepSeek Reasoner model, "
+                "Warning: You are using an DeepSeek Reasoner model, "
                 "which has certain limitations, reference: "
-                "`https://api-docs.deepseek.com/guides/reasoning_model#api-parameters`"
+                "`https://api-docs.deepseek.com/guides/reasoning_model"
+                "#api-parameters`.",
             )
+            request_config = {
+                key: value
+                for key, value in request_config.items()
+                if key not in REASONSER_UNSUPPORTED_PARAMS
+            }
 
-        # Check and remove unsupported parameters and reset the fixed
-        # parameters
-        unsupported_keys = [
-            "temperature",
-            "top_p",
-            "presence_penalty",
-            "frequency_penalty",
-            "logprobs",
-            "top_logprobs",
-            "tools",
-        ]
-        for key in unsupported_keys:
-            if key in self.model_config_dict:
-                del self.model_config_dict[key]
+        if tools:
+            for tool in tools:
+                function_dict = tool.get('function', {})
+                function_dict.pop("strict", None)
+            request_config["tools"] = tools
+        elif response_format:
+            try_modify_message_with_format(messages[-1], response_format)
+            request_config["response_format"] = {"type": "json_object"}
 
-        response = self._client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **self.model_config_dict,
-        )
+        return request_config
 
-        # Handle reasoning content with <think> tags at the beginning
+    def _post_handle_response(
+        self, response: ChatCompletion
+    ) -> ChatCompletion:
+        r"""Handle reasoning content with <think> tags at the beginning."""
        if (
-            self.model_type
-            in [
-                ModelType.DEEPSEEK_REASONER,
-            ]
+            self.model_type in [ModelType.DEEPSEEK_REASONER]
             and os.environ.get("GET_REASONING_CONTENT", "false").lower()
             == "true"
         ):
-            reasoning_content = response.choices[0].message.reasoning_content
-            combined_content = (
+            reasoning_content = response.choices[0].message.reasoning_content  # type: ignore[attr-defined]
+            combined_content = (  # type: ignore[operator]
                 f"<think>\n{reasoning_content}\n</think>\n"
                 if reasoning_content
                 else ""
@@ -181,9 +187,65 @@ class DeepSeekModel(BaseModelBackend):
                 object="chat.completion",
                 usage=response.usage,
             )
-
         return response
 
+    def _run(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        r"""Runs inference of DeepSeek chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `Stream[ChatCompletionChunk]` in the stream mode.
+        """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+
+        response = self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+        return self._post_handle_response(response)
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of DeepSeek chat completion.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+        response = await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+        return self._post_handle_response(response)
+
     def check_model_config(self):
         r"""Check whether the model configuration contains any
         unexpected arguments to DeepSeek API.
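A hedged sketch of how the new split between _prepare_request, _run, and _arun might be exercised. It assumes the reworked BaseModelBackend in 0.2.23 exposes public run()/arun() wrappers that delegate to _run()/_arun() (the wrapper name is an assumption, not shown in this diff) and that DEEPSEEK_API_KEY is set in the environment.

# Illustrative sketch only: driving the new async DeepSeek path.
# Assumes a public `arun()` wrapper around `_arun()` and DEEPSEEK_API_KEY set.
import asyncio

from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType


async def main() -> None:
    model = ModelFactory.create(
        model_platform=ModelPlatformType.DEEPSEEK,
        model_type=ModelType.DEEPSEEK_CHAT,
    )
    messages = [{"role": "user", "content": "Say hello in one sentence."}]
    response = await model.arun(messages)  # assumed async wrapper
    print(response.choices[0].message.content)


asyncio.run(main())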
camel/models/fish_audio_model.py

@@ -15,8 +15,10 @@
 import os
 from typing import Any, Optional
 
+from camel.models.base_audio_model import BaseAudioModel
 
-class FishAudioModel:
+
+class FishAudioModel(BaseAudioModel):
     r"""Provides access to FishAudio's Text-to-Speech (TTS) and Speech_to_Text
     (STT) models.
     """
@@ -37,6 +39,7 @@ class FishAudioModel:
         """
         from fish_audio_sdk import Session
 
+        super().__init__(api_key, url)
         self._api_key = api_key or os.environ.get("FISHAUDIO_API_KEY")
         self._url = url or os.environ.get(
             "FISHAUDIO_API_BASE_URL", "https://api.fish.audio"
@@ -46,7 +49,8 @@ class FishAudioModel:
     def text_to_speech(
         self,
         input: str,
-        storage_path: str,
+        *,
+        storage_path: Optional[str] = None,
         reference_id: Optional[str] = None,
         reference_audio: Optional[str] = None,
         reference_audio_text: Optional[str] = None,
@@ -55,9 +59,9 @@ class FishAudioModel:
        r"""Convert text to speech and save the output to a file.
 
         Args:
-            input_text (str): The text to convert to speech.
-            storage_path (str): The file path where the resulting speech will
-                be saved.
+            input (str): The text to convert to speech.
+            storage_path (Optional[str]): The file path where the resulting
+                speech will be saved. (default: :obj:`None`)
             reference_id (Optional[str]): An optional reference ID to
                 associate with the request. (default: :obj:`None`)
             reference_audio (Optional[str]): Path to an audio file for
@@ -68,12 +72,18 @@ class FishAudioModel:
 
         Raises:
             FileNotFoundError: If the reference audio file cannot be found.
+            ValueError: If storage_path is not provided or if reference_audio
+                is provided without reference_audio_text.
         """
         from fish_audio_sdk import ReferenceAudio, TTSRequest
 
-        directory = os.path.dirname(storage_path)
-        if directory and not os.path.exists(directory):
-            os.makedirs(directory)
+        if storage_path is None:
+            raise ValueError(
+                "storage_path must be provided for "
+                "FishAudioModel.text_to_speech"
+            )
+
+        self._ensure_directory_exists(storage_path)
 
         if not reference_audio:
             with open(f"{storage_path}", "wb") as f:
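Note that storage_path is now keyword-only and optional: positional calls that worked against 0.2.22 raise a TypeError, and omitting the argument raises ValueError. A minimal sketch of the new call shape, assuming FishAudioModel is importable from camel.models and FISHAUDIO_API_KEY is set:

# Minimal sketch of the new keyword-only signature (assumptions noted above).
from camel.models import FishAudioModel

tts = FishAudioModel()
# 0.2.22 style `tts.text_to_speech("Hello", "out/hello.mp3")` is now a TypeError.
tts.text_to_speech(input="Hello from CAMEL.", storage_path="out/hello.mp3")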
camel/models/gemini_model.py

@@ -12,9 +12,10 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import OpenAI, Stream
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from pydantic import BaseModel
 
 from camel.configs import Gemini_API_PARAMS, GeminiConfig
 from camel.messages import OpenAIMessage
@@ -81,37 +82,154 @@ class GeminiModel(BaseModelBackend):
             api_key=self._api_key,
             base_url=self._url,
         )
+        self._async_client = AsyncOpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
 
-    def run(
+    def _process_messages(self, messages) -> List[OpenAIMessage]:
+        r"""Process the messages for Gemini API to ensure no empty content,
+        which is not accepted by Gemini.
+        """
+        processed_messages = []
+        for msg in messages:
+            msg_copy = msg.copy()
+            if 'content' in msg_copy and msg_copy['content'] == '':
+                msg_copy['content'] = 'null'
+            processed_messages.append(msg_copy)
+        return processed_messages
+
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs inference of Gemini chat completion.
 
         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
 
         Returns:
             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
-        # Process messages to ensure no empty content, it's not accepeted by
-        # Gemini
-        processed_messages = []
-        for msg in messages:
-            msg_copy = msg.copy()
-            if 'content' in msg_copy and msg_copy['content'] == '':
-                msg_copy['content'] = 'null'
-            processed_messages.append(msg_copy)
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
+        )
+        messages = self._process_messages(messages)
+        if response_format:
+            return self._request_parse(messages, response_format)
+        else:
+            return self._request_chat_completion(messages, tools)
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of OpenAI chat completion in async mode.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        response_format = response_format or self.model_config_dict.get(
+            "response_format", None
+        )
+        messages = self._process_messages(messages)
+        if response_format:
+            return await self._arequest_parse(messages, response_format)
+        else:
+            return await self._arequest_chat_completion(messages, tools)
+
+    def _request_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
+
+        if tools:
+            for tool in tools:
+                function_dict = tool.get('function', {})
+                function_dict.pop("strict", None)
+            request_config["tools"] = tools
+
+        return self._client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    async def _arequest_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
+
+        if tools:
+            for tool in tools:
+                function_dict = tool.get('function', {})
+                function_dict.pop("strict", None)
+            request_config["tools"] = tools
+
+        return await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    def _request_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+        request_config.pop("stream", None)
+
+        return self._client.beta.chat.completions.parse(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+
+    async def _arequest_parse(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Type[BaseModel],
+    ) -> ChatCompletion:
+        request_config = self.model_config_dict.copy()
+
+        request_config["response_format"] = response_format
+        request_config.pop("stream", None)
 
-        response = self._client.chat.completions.create(
-            messages=processed_messages,
+        return await self._async_client.beta.chat.completions.parse(
+            messages=messages,
             model=self.model_type,
-            **self.model_config_dict,
+            **request_config,
         )
-        return response
 
     @property
     def token_counter(self) -> BaseTokenCounter:
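The structured-output path above routes any response_format through beta.chat.completions.parse. A hedged sketch of how that might be driven, assuming the public run() wrapper in 0.2.23 forwards response_format to _run() (the wrapper signature is an assumption, not shown in this diff) and GEMINI_API_KEY is set:

# Hedged sketch: Pydantic-typed output via the new parse path.
from pydantic import BaseModel

from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType


class CityInfo(BaseModel):
    city: str
    country: str


model = ModelFactory.create(
    model_platform=ModelPlatformType.GEMINI,
    model_type=ModelType.GEMINI_1_5_FLASH,
)
messages = [{"role": "user", "content": "Name one city and its country."}]
response = model.run(messages, response_format=CityInfo)  # assumed wrapper signature
print(response.choices[0].message.content)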
camel/models/groq_model.py

@@ -12,13 +12,15 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import OpenAI, Stream
+from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from pydantic import BaseModel
 
 from camel.configs import GROQ_API_PARAMS, GroqConfig
 from camel.messages import OpenAIMessage
 from camel.models import BaseModelBackend
+from camel.models._utils import try_modify_message_with_format
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
@@ -51,11 +53,7 @@ class GroqModel(BaseModelBackend):
             (default: :obj:`None`)
     """
 
-    @api_keys_required(
-        [
-            ("api_key", "GROQ_API_KEY"),
-        ]
-    )
+    @api_keys_required([("api_key", "GROQ_API_KEY")])
     def __init__(
         self,
         model_type: Union[ModelType, str],
@@ -79,6 +77,12 @@ class GroqModel(BaseModelBackend):
             api_key=self._api_key,
             base_url=self._url,
         )
+        self._async_client = AsyncOpenAI(
+            timeout=180,
+            max_retries=3,
+            api_key=self._api_key,
+            base_url=self._url,
+        )
 
     @property
     def token_counter(self) -> BaseTokenCounter:
@@ -92,25 +96,83 @@ class GroqModel(BaseModelBackend):
             self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
         return self._token_counter
 
-    def run(
+    def _prepare_request(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Dict[str, Any]:
+        request_config = self.model_config_dict.copy()
+        if tools:
+            request_config["tools"] = tools
+        elif response_format:
+            try_modify_message_with_format(messages[-1], response_format)
+            request_config["response_format"] = {"type": "json_object"}
+
+        return request_config
+
+    def _run(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of OpenAI chat completion.
+        r"""Runs inference of Groq chat completion.
 
         Args:
             messages (List[OpenAIMessage]): Message list with the chat history
                 in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
 
         Returns:
             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
                 `ChatCompletion` in the non-stream mode, or
                 `Stream[ChatCompletionChunk]` in the stream mode.
         """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+
         response = self._client.chat.completions.create(
             messages=messages,
             model=self.model_type,
-            **self.model_config_dict,
+            **request_config,
+        )
+
+        return response
+
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        r"""Runs inference of Groq chat completion asynchronously.
+
+        Args:
+            messages (List[OpenAIMessage]): Message list with the chat history
+                in OpenAI API format.
+            response_format (Optional[Type[BaseModel]]): The format of the
+                response.
+            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
+                use for the request.
+
+        Returns:
+            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+                `ChatCompletion` in the non-stream mode, or
+                `AsyncStream[ChatCompletionChunk]` in the stream mode.
+        """
+        request_config = self._prepare_request(
+            messages, response_format, tools
+        )
+
+        response = await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
         )
 
         return response
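GroqModel._prepare_request gives a tools schema precedence and otherwise falls back to Groq's JSON mode: the last message is rewritten by try_modify_message_with_format (new in camel/models/_utils.py) and response_format is set to {"type": "json_object"}. A small illustration of that fallback; what exactly the helper appends to the message is an assumption, not shown in this diff.

# Illustration of the response_format fallback used by GroqModel._prepare_request.
# The exact text appended by try_modify_message_with_format is an assumption.
from pydantic import BaseModel

from camel.models._utils import try_modify_message_with_format


class Answer(BaseModel):
    value: int


message = {"role": "user", "content": "What is 2 + 2?"}
try_modify_message_with_format(message, Answer)  # presumably appends JSON-format instructions
request_config = {"response_format": {"type": "json_object"}}
print(message["content"])
print(request_config)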
camel/models/internlm_model.py

@@ -13,9 +13,10 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import OpenAI, Stream
+from openai import AsyncStream, OpenAI, Stream
+from pydantic import BaseModel
 
 from camel.configs import INTERNLM_API_PARAMS, InternLMConfig
 from camel.messages import OpenAIMessage
@@ -82,9 +83,11 @@ class InternLMModel(BaseModelBackend):
             base_url=self._url,
         )
 
-    def run(
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
         r"""Runs inference of InternLM chat completion.
 
@@ -104,6 +107,14 @@ class InternLMModel(BaseModelBackend):
         )
         return response
 
+    async def _arun(
+        self,
+        messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        raise NotImplementedError("InternLM does not support async inference.")
+
     @property
     def token_counter(self) -> BaseTokenCounter:
         r"""Initialize the token counter for the model backend.
camel/models/litellm_model.py

@@ -11,7 +11,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Type, Union
+
+from pydantic import BaseModel
 
 from camel.configs import LITELLM_API_PARAMS, LiteLLMConfig
 from camel.messages import OpenAIMessage
@@ -106,9 +108,14 @@ class LiteLLMModel(BaseModelBackend):
             self._token_counter = LiteLLMTokenCounter(self.model_type)
         return self._token_counter
 
-    def run(
+    async def _arun(self) -> None:  # type: ignore[override]
+        raise NotImplementedError
+
+    def _run(
         self,
         messages: List[OpenAIMessage],
+        response_format: Optional[Type[BaseModel]] = None,
+        tools: Optional[List[Dict[str, Any]]] = None,
     ) -> ChatCompletion:
         r"""Runs inference of LiteLLM chat completion.