camel-ai 0.2.45__py3-none-any.whl → 0.2.46__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (42)
  1. camel/__init__.py +1 -1
  2. camel/configs/__init__.py +6 -0
  3. camel/configs/bedrock_config.py +73 -0
  4. camel/configs/lmstudio_config.py +94 -0
  5. camel/configs/qwen_config.py +3 -3
  6. camel/models/__init__.py +4 -0
  7. camel/models/aiml_model.py +11 -104
  8. camel/models/anthropic_model.py +11 -76
  9. camel/models/aws_bedrock_model.py +112 -0
  10. camel/models/deepseek_model.py +11 -44
  11. camel/models/gemini_model.py +10 -72
  12. camel/models/groq_model.py +11 -131
  13. camel/models/internlm_model.py +11 -61
  14. camel/models/lmstudio_model.py +82 -0
  15. camel/models/model_factory.py +7 -1
  16. camel/models/modelscope_model.py +11 -122
  17. camel/models/moonshot_model.py +10 -76
  18. camel/models/nemotron_model.py +4 -60
  19. camel/models/nvidia_model.py +11 -111
  20. camel/models/ollama_model.py +12 -205
  21. camel/models/openai_compatible_model.py +51 -12
  22. camel/models/openrouter_model.py +12 -131
  23. camel/models/ppio_model.py +10 -99
  24. camel/models/qwen_model.py +11 -122
  25. camel/models/reka_model.py +1 -1
  26. camel/models/sglang_model.py +5 -3
  27. camel/models/siliconflow_model.py +10 -58
  28. camel/models/togetherai_model.py +10 -177
  29. camel/models/vllm_model.py +11 -218
  30. camel/models/volcano_model.py +1 -15
  31. camel/models/yi_model.py +11 -98
  32. camel/models/zhipuai_model.py +11 -102
  33. camel/toolkits/__init__.py +2 -0
  34. camel/toolkits/pyautogui_toolkit.py +428 -0
  35. camel/toolkits/video_analysis_toolkit.py +215 -80
  36. camel/toolkits/video_download_toolkit.py +10 -3
  37. camel/types/enums.py +64 -0
  38. camel/types/unified_model_type.py +10 -0
  39. {camel_ai-0.2.45.dist-info → camel_ai-0.2.46.dist-info}/METADATA +2 -1
  40. {camel_ai-0.2.45.dist-info → camel_ai-0.2.46.dist-info}/RECORD +42 -37
  41. {camel_ai-0.2.45.dist-info → camel_ai-0.2.46.dist-info}/WHEEL +0 -0
  42. {camel_ai-0.2.45.dist-info → camel_ai-0.2.46.dist-info}/licenses/LICENSE +0 -0
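
The common thread across the diffs below: several provider backends (QwenModel, SiliconFlowModel, TogetherAIModel, and others in the list above) stop hand-rolling their own OpenAI clients, request methods, and token counters, and instead inherit all of that from OpenAICompatibleModel, whose own file grows by +51 -12 to absorb the shared logic. After the refactor each backend reduces to a constructor plus a provider-specific check_model_config. A minimal sketch of the resulting shape, condensed from the hunks below (the class name here is hypothetical, not part of the release):

import os
from typing import Any, Dict, Optional, Union

from camel.models.openai_compatible_model import OpenAICompatibleModel
from camel.types import ModelType
from camel.utils import BaseTokenCounter


class ExampleProviderModel(OpenAICompatibleModel):
    def __init__(
        self,
        model_type: Union[ModelType, str],
        model_config_dict: Optional[Dict[str, Any]] = None,
        api_key: Optional[str] = None,
        url: Optional[str] = None,
        token_counter: Optional[BaseTokenCounter] = None,
        timeout: Optional[float] = None,
    ) -> None:
        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
        # Client construction, _run/_arun, token_counter, and the stream
        # property now live in OpenAICompatibleModel; the subclass only
        # forwards its configuration.
        super().__init__(
            model_type=model_type,
            model_config_dict=model_config_dict,
            api_key=api_key,
            url=url,
            token_counter=token_counter,
            timeout=timeout,
        )
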
camel/models/qwen_model.py
@@ -13,29 +13,19 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-from typing import Any, Dict, List, Optional, Type, Union
-
-from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
-from pydantic import BaseModel
+from typing import Any, Dict, Optional, Union
 
 from camel.configs import QWEN_API_PARAMS, QwenConfig
-from camel.messages import OpenAIMessage
-from camel.models import BaseModelBackend
-from camel.models._utils import try_modify_message_with_format
-from camel.types import (
-    ChatCompletion,
-    ChatCompletionChunk,
-    ModelType,
-)
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
 from camel.utils import (
     BaseTokenCounter,
-    OpenAITokenCounter,
     api_keys_required,
 )
 
 
-class QwenModel(BaseModelBackend):
-    r"""Qwen API in a unified BaseModelBackend interface.
+class QwenModel(OpenAICompatibleModel):
+    r"""Qwen API in a unified OpenAICompatibleModel interface.
 
     Args:
         model_type (Union[ModelType, str]): Model for which a backend is
@@ -81,104 +71,13 @@ class QwenModel(BaseModelBackend):
         )
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter, timeout
-        )
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-        self._async_client = AsyncOpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-
-    async def _arun(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-        r"""Runs inference of Qwen chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-
-        Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `AsyncStream[ChatCompletionChunk]` in the stream mode.
-        """
-        request_config = self._prepare_request(
-            messages, response_format, tools
-        )
-
-        response = await self._async_client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-        return response
-
-    def _run(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of Qwen chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-
-        Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
-        """
-        request_config = self._prepare_request(
-            messages, response_format, tools
-        )
-
-        response = self._client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
         )
-        return response
-
-    def _prepare_request(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Dict[str, Any]:
-        request_config = self.model_config_dict.copy()
-        if tools:
-            request_config["tools"] = tools
-        elif response_format:
-            try_modify_message_with_format(messages[-1], response_format)
-            request_config["response_format"] = {"type": "json_object"}
-
-        return request_config
-
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        r"""Initialize the token counter for the model backend.
-
-        Returns:
-            OpenAITokenCounter: The token counter following the model's
-                tokenization style.
-        """
-
-        if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-        return self._token_counter
 
     def check_model_config(self):
         r"""Check whether the model configuration contains any
@@ -194,13 +93,3 @@ class QwenModel(BaseModelBackend):
                 f"Unexpected argument `{param}` is "
                 "input into Qwen model backend."
             )
-
-    @property
-    def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode, which sends partial
-        results each time.
-
-        Returns:
-            bool: Whether the model is in stream mode.
-        """
-        return self.model_config_dict.get('stream', False)
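
Note what moves rather than disappears here: the deleted _prepare_request held QwenModel's structured-output logic, and presumably equivalent handling now lives in the shared OpenAICompatibleModel. For reference, the removed behavior condenses to the sketch below (a standalone restatement, not the parent class's actual code):

from typing import Any, Dict, List, Optional, Type

from pydantic import BaseModel

from camel.messages import OpenAIMessage
from camel.models._utils import try_modify_message_with_format


def prepare_request(
    model_config_dict: Dict[str, Any],
    messages: List[OpenAIMessage],
    response_format: Optional[Type[BaseModel]] = None,
    tools: Optional[List[Dict[str, Any]]] = None,
) -> Dict[str, Any]:
    request_config = model_config_dict.copy()
    if tools:
        # Explicit tool schemas take precedence over structured output.
        request_config["tools"] = tools
    elif response_format:
        # Rewrite the last message with formatting instructions, then ask
        # the API for a JSON object instead of free text.
        try_modify_message_with_format(messages[-1], response_format)
        request_config["response_format"] = {"type": "json_object"}
    return request_config
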
camel/models/reka_model.py
@@ -41,7 +41,7 @@ except (ImportError, AttributeError):
 
 
 class RekaModel(BaseModelBackend):
-    r"""Reka API in a unified BaseModelBackend interface.
+    r"""Reka API in a unified OpenAICompatibleModel interface.
 
     Args:
         model_type (Union[ModelType, str]): Model for which a backend is

camel/models/sglang_model.py
@@ -119,7 +119,9 @@ class SGLangModel(BaseModelBackend):
             )
 
         server_process = _execute_shell_command(cmd)
-        _wait_for_server("http://localhost:30000")
+        _wait_for_server(
+            base_url="http://localhost:30000", timeout=self._timeout
+        )
         self._url = "http://127.0.0.1:30000/v1"
         self.server_process = server_process  # type: ignore[assignment]
         # Start the inactivity monitor in a background thread
@@ -356,12 +358,12 @@ def _execute_shell_command(command: str) -> subprocess.Popen:
     return subprocess.Popen(parts, text=True, stderr=subprocess.STDOUT)
 
 
-def _wait_for_server(base_url: str, timeout: Optional[int] = 30) -> None:
+def _wait_for_server(base_url: str, timeout: Optional[float] = 30) -> None:
     r"""Wait for the server to be ready by polling the /v1/models endpoint.
 
     Args:
         base_url (str): The base URL of the server
-        timeout (Optional[int]): Maximum time to wait in seconds.
+        timeout (Optional[float]): Maximum time to wait in seconds.
             (default: :obj:`30`)
     """
    import requests
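
The _wait_for_server change threads the backend's own timeout through instead of relying on the hard-coded 30-second default, and the annotation widens from int to float to match self._timeout. Given the signature and docstring, the helper plausibly behaves like this sketch (the polling interval and exception type are assumptions, not the library's exact code):

import time
from typing import Optional

import requests


def wait_for_server(base_url: str, timeout: Optional[float] = 30) -> None:
    """Poll {base_url}/v1/models until the server responds or time runs out."""
    start = time.time()
    while True:
        try:
            if requests.get(f"{base_url}/v1/models", timeout=5).ok:
                return  # server is up
        except requests.RequestException:
            pass  # not listening yet; keep polling
        if timeout is not None and time.time() - start > timeout:
            raise TimeoutError(
                f"Server at {base_url} not ready within {timeout} seconds"
            )
        time.sleep(1)
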
camel/models/siliconflow_model.py
@@ -14,12 +14,12 @@
 import os
 from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import AsyncStream, OpenAI, Stream
+from openai import AsyncStream
 from pydantic import BaseModel
 
 from camel.configs import SILICONFLOW_API_PARAMS, SiliconFlowConfig
 from camel.messages import OpenAIMessage
-from camel.models.base_model import BaseModelBackend
+from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
@@ -27,13 +27,12 @@ from camel.types import (
 )
 from camel.utils import (
     BaseTokenCounter,
-    OpenAITokenCounter,
     api_keys_required,
 )
 
 
-class SiliconFlowModel(BaseModelBackend):
-    r"""SiliconFlow API in a unified BaseModelBackend interface.
+class SiliconFlowModel(OpenAICompatibleModel):
+    r"""SiliconFlow API in a unified OpenAICompatibleModel interface.
 
     Args:
         model_type (Union[ModelType, str]): Model for which a backend is
@@ -80,38 +79,13 @@ class SiliconFlowModel(BaseModelBackend):
         )
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter, timeout
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
         )
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-
-    def _run(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of SiliconFlow chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-
-        Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
-        """
-        response = self._client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **self.model_config_dict,
-        )
-        return response
 
     async def _arun(
         self,
@@ -123,18 +97,6 @@ class SiliconFlowModel(BaseModelBackend):
             "SiliconFlow does not support async inference."
         )
 
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        r"""Initialize the token counter for the model backend.
-
-        Returns:
-            BaseTokenCounter: The token counter following the model's
-                tokenization style.
-        """
-        if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-        return self._token_counter
-
     def check_model_config(self):
         r"""Check whether the model configuration contains any
             unexpected arguments to SiliconFlow API.
@@ -149,13 +111,3 @@ class SiliconFlowModel(BaseModelBackend):
                 f"Unexpected argument `{param}` is "
                 "input into SiliconFlow model backend."
             )
-
-    @property
-    def stream(self) -> bool:
-        """Returns whether the model is in stream mode, which sends partial
-        results each time.
-
-        Returns:
-            bool: Whether the model is in stream mode.
-        """
-        return self.model_config_dict.get('stream', False)
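
Unlike the other migrated backends, SiliconFlowModel keeps one override: it inherits the synchronous path from OpenAICompatibleModel but explicitly disables async inference, as the _arun context lines above show. Condensed into a sketch (the class name is hypothetical and type annotations are trimmed for brevity):

from camel.models.openai_compatible_model import OpenAICompatibleModel


class SiliconFlowLikeModel(OpenAICompatibleModel):
    # Mirrors the override SiliconFlowModel retains after the refactor.
    async def _arun(self, messages, response_format=None, tools=None):
        # The provider is treated as sync-only, so the inherited async
        # client path is blocked outright.
        raise NotImplementedError(
            "SiliconFlow does not support async inference."
        )
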
camel/models/togetherai_model.py
@@ -13,27 +13,18 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-from typing import Any, Dict, List, Optional, Type, Union
-
-from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
-from pydantic import BaseModel
+from typing import Any, Dict, Optional, Union
 
 from camel.configs import TOGETHERAI_API_PARAMS, TogetherAIConfig
-from camel.messages import OpenAIMessage
-from camel.models import BaseModelBackend
-from camel.types import (
-    ChatCompletion,
-    ChatCompletionChunk,
-    ModelType,
-)
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
 from camel.utils import (
     BaseTokenCounter,
-    OpenAITokenCounter,
     api_keys_required,
 )
 
 
-class TogetherAIModel(BaseModelBackend):
+class TogetherAIModel(OpenAICompatibleModel):
     r"""Constructor for Together AI backend with OpenAI compatibility.
 
     Args:
@@ -80,160 +71,12 @@ class TogetherAIModel(BaseModelBackend):
         )
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter, timeout
-        )
-
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-        self._async_client = AsyncOpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        r"""Initialize the token counter for the model backend.
-
-        Returns:
-            BaseTokenCounter: The token counter following the model's
-                tokenization style.
-        """
-        if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-        return self._token_counter
-
-    def _run(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of OpenAI chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-            response_format (Optional[Type[BaseModel]]): The format of the
-                response.
-            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
-                use for the request.
-
-        Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
-        """
-        response_format = response_format or self.model_config_dict.get(
-            "response_format", None
-        )
-        if response_format:
-            return self._request_parse(messages, response_format, tools)
-        else:
-            return self._request_chat_completion(messages, tools)
-
-    async def _arun(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-        r"""Runs inference of OpenAI chat completion in async mode.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-            response_format (Optional[Type[BaseModel]]): The format of the
-                response.
-            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
-                use for the request.
-
-        Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `AsyncStream[ChatCompletionChunk]` in the stream mode.
-        """
-        response_format = response_format or self.model_config_dict.get(
-            "response_format", None
-        )
-        if response_format:
-            return await self._arequest_parse(messages, response_format, tools)
-        else:
-            return await self._arequest_chat_completion(messages, tools)
-
-    def _request_chat_completion(
-        self,
-        messages: List[OpenAIMessage],
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        request_config = self.model_config_dict.copy()
-
-        if tools:
-            request_config["tools"] = tools
-
-        return self._client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-
-    async def _arequest_chat_completion(
-        self,
-        messages: List[OpenAIMessage],
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-        request_config = self.model_config_dict.copy()
-
-        if tools:
-            request_config["tools"] = tools
-
-        return await self._async_client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-
-    def _request_parse(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Type[BaseModel],
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> ChatCompletion:
-        request_config = self.model_config_dict.copy()
-
-        request_config["response_format"] = response_format
-
-        if tools is not None:
-            request_config["tools"] = tools
-
-        return self._client.beta.chat.completions.parse(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-
-    async def _arequest_parse(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Type[BaseModel],
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> ChatCompletion:
-        request_config = self.model_config_dict.copy()
-
-        request_config["response_format"] = response_format
-
-        if tools is not None:
-            request_config["tools"] = tools
-
-        return await self._async_client.beta.chat.completions.parse(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
         )
 
     def check_model_config(self):
@@ -250,13 +93,3 @@ class TogetherAIModel(BaseModelBackend):
                 f"Unexpected argument `{param}` is "
                 "input into TogetherAI model backend."
             )
-
-    @property
-    def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode, which sends partial
-        results each time.
-
-        Returns:
-            bool: Whether the model is in stream mode.
-        """
-        return self.model_config_dict.get('stream', False)
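
The _request_parse/_arequest_parse helpers deleted here wrapped the OpenAI SDK's structured-output endpoint; since openai_compatible_model.py gains 51 lines in this release, that wrapper presumably now lives in the parent class. The underlying call pattern, as a standalone sketch (endpoint, model name, and schema are illustrative, not TogetherAI specifics):

from openai import OpenAI
from pydantic import BaseModel


class Answer(BaseModel):
    # Illustrative schema; any Pydantic model can serve as response_format.
    text: str


client = OpenAI(api_key="...", base_url="https://api.together.xyz/v1")

# parse() validates the completion against the Pydantic schema and exposes
# the typed result as .message.parsed rather than a raw JSON string.
completion = client.beta.chat.completions.parse(
    messages=[{"role": "user", "content": "Reply with one word."}],
    model="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
    response_format=Answer,
)
print(completion.choices[0].message.parsed)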