camel-ai 0.2.45 → 0.2.46 (py3-none-any.whl)

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Note: this version of camel-ai has been flagged as potentially problematic.

Files changed (42)
  1. camel/__init__.py +1 -1
  2. camel/configs/__init__.py +6 -0
  3. camel/configs/bedrock_config.py +73 -0
  4. camel/configs/lmstudio_config.py +94 -0
  5. camel/configs/qwen_config.py +3 -3
  6. camel/models/__init__.py +4 -0
  7. camel/models/aiml_model.py +11 -104
  8. camel/models/anthropic_model.py +11 -76
  9. camel/models/aws_bedrock_model.py +112 -0
  10. camel/models/deepseek_model.py +11 -44
  11. camel/models/gemini_model.py +10 -72
  12. camel/models/groq_model.py +11 -131
  13. camel/models/internlm_model.py +11 -61
  14. camel/models/lmstudio_model.py +82 -0
  15. camel/models/model_factory.py +7 -1
  16. camel/models/modelscope_model.py +11 -122
  17. camel/models/moonshot_model.py +10 -76
  18. camel/models/nemotron_model.py +4 -60
  19. camel/models/nvidia_model.py +11 -111
  20. camel/models/ollama_model.py +12 -205
  21. camel/models/openai_compatible_model.py +51 -12
  22. camel/models/openrouter_model.py +12 -131
  23. camel/models/ppio_model.py +10 -99
  24. camel/models/qwen_model.py +11 -122
  25. camel/models/reka_model.py +1 -1
  26. camel/models/sglang_model.py +5 -3
  27. camel/models/siliconflow_model.py +10 -58
  28. camel/models/togetherai_model.py +10 -177
  29. camel/models/vllm_model.py +11 -218
  30. camel/models/volcano_model.py +1 -15
  31. camel/models/yi_model.py +11 -98
  32. camel/models/zhipuai_model.py +11 -102
  33. camel/toolkits/__init__.py +2 -0
  34. camel/toolkits/pyautogui_toolkit.py +428 -0
  35. camel/toolkits/video_analysis_toolkit.py +215 -80
  36. camel/toolkits/video_download_toolkit.py +10 -3
  37. camel/types/enums.py +64 -0
  38. camel/types/unified_model_type.py +10 -0
  39. {camel_ai-0.2.45.dist-info → camel_ai-0.2.46.dist-info}/METADATA +2 -1
  40. {camel_ai-0.2.45.dist-info → camel_ai-0.2.46.dist-info}/RECORD +42 -37
  41. {camel_ai-0.2.45.dist-info → camel_ai-0.2.46.dist-info}/WHEEL +0 -0
  42. {camel_ai-0.2.45.dist-info → camel_ai-0.2.46.dist-info}/licenses/LICENSE +0 -0
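
The bulk of this release is a refactor: some twenty provider backends (aiml, anthropic, deepseek, gemini, groq, ..., zhipuai) each drop 40-220 lines by inheriting their OpenAI client setup, `_run`/`_arun`, token counter, and `stream` property from the expanded `OpenAICompatibleModel` (+51/−12 above). The diffs below show four representative files. A minimal sketch of the resulting subclass shape — `ExampleModel`, `EXAMPLE_API_PARAMS`, the endpoint URL, and the env var are hypothetical placeholders, not part of this release:

    import os
    from typing import Any, Dict, Optional, Union

    from camel.models.openai_compatible_model import OpenAICompatibleModel
    from camel.types import ModelType
    from camel.utils import BaseTokenCounter

    EXAMPLE_API_PARAMS = {"temperature", "top_p", "stream"}  # hypothetical


    class ExampleModel(OpenAICompatibleModel):
        def __init__(
            self,
            model_type: Union[ModelType, str],
            model_config_dict: Optional[Dict[str, Any]] = None,
            api_key: Optional[str] = None,
            url: Optional[str] = None,
            token_counter: Optional[BaseTokenCounter] = None,
            timeout: Optional[float] = None,
        ) -> None:
            url = url or "https://api.example.com/v1"  # hypothetical endpoint
            api_key = api_key or os.environ.get("EXAMPLE_API_KEY")  # hypothetical env var
            timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
            super().__init__(
                model_type=model_type,
                model_config_dict=model_config_dict or {},
                api_key=api_key,
                url=url,
                token_counter=token_counter,
                timeout=timeout,
            )

        def check_model_config(self):
            # Only the provider-specific config validation survives the refactor;
            # clients, _run/_arun, and the stream property come from the base class.
            for param in self.model_config_dict:
                if param not in EXAMPLE_API_PARAMS:
                    raise ValueError(
                        f"Unexpected argument `{param}` is "
                        "input into Example model backend."
                    )

Keeping only the constructor defaults and `check_model_config` per provider concentrates the OpenAI-protocol plumbing in one place, which is why so many files in the list above shrink to roughly eleven changed lines.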
camel/models/modelscope_model.py
@@ -13,29 +13,19 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-from typing import Any, Dict, List, Optional, Type, Union
-
-from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
-from pydantic import BaseModel
+from typing import Any, Dict, Optional, Union
 
 from camel.configs import MODELSCOPE_API_PARAMS, ModelScopeConfig
-from camel.messages import OpenAIMessage
-from camel.models import BaseModelBackend
-from camel.models._utils import try_modify_message_with_format
-from camel.types import (
-    ChatCompletion,
-    ChatCompletionChunk,
-    ModelType,
-)
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
 from camel.utils import (
     BaseTokenCounter,
-    OpenAITokenCounter,
     api_keys_required,
 )
 
 
-class ModelScopeModel(BaseModelBackend):
-    r"""ModelScope API in a unified BaseModelBackend interface.
+class ModelScopeModel(OpenAICompatibleModel):
+    r"""ModelScope API in a unified OpenAICompatibleModel interface.
 
     Args:
         model_type (Union[ModelType, str]): Model for which a backend is
@@ -83,104 +73,13 @@ class ModelScopeModel(BaseModelBackend):
         )
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter, timeout
-        )
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-        self._async_client = AsyncOpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-
-    async def _arun(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-        r"""Runs inference of ModelScope chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-
-        Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `AsyncStream[ChatCompletionChunk]` in the stream mode.
-        """
-        request_config = self._prepare_request(
-            messages, response_format, tools
-        )
-
-        response = await self._async_client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-        return response
-
-    def _run(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of ModelScope chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-
-        Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
-        """
-        request_config = self._prepare_request(
-            messages, response_format, tools
-        )
-
-        response = self._client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
         )
-        return response
-
-    def _prepare_request(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Dict[str, Any]:
-        request_config = self.model_config_dict.copy()
-        if tools:
-            request_config["tools"] = tools
-        elif response_format:
-            try_modify_message_with_format(messages[-1], response_format)
-            request_config["response_format"] = {"type": "json_object"}
-
-        return request_config
-
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        r"""Initialize the token counter for the model backend.
-
-        Returns:
-            OpenAITokenCounter: The token counter following the model's
-                tokenization style.
-        """
-
-        if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-        return self._token_counter
 
     def check_model_config(self):
         r"""Check whether the model configuration contains any
@@ -196,13 +95,3 @@ class ModelScopeModel(BaseModelBackend):
                     f"Unexpected argument `{param}` is "
                     "input into ModelScope model backend."
                 )
-
-    @property
-    def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode, which sends partial
-        results each time.
-
-        Returns:
-            bool: Whether the model is in stream mode.
-        """
-        return self.model_config_dict.get('stream', False)
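
Every member deleted above (`_run`, `_arun`, `_prepare_request`, and the `token_counter` and `stream` properties) is presumably now supplied by `OpenAICompatibleModel`, so calling code should be unaffected. A hedged usage sketch — the model id and key are placeholders, and `run()` is assumed to be the public wrapper around the inherited `_run()`:

    from camel.models.modelscope_model import ModelScopeModel

    model = ModelScopeModel(
        model_type="Qwen/Qwen2.5-72B-Instruct",  # placeholder ModelScope model id
        model_config_dict={"temperature": 0.2},
        api_key="sk-...",  # placeholder API key
    )
    # run() is assumed to delegate to the _run() now inherited from
    # OpenAICompatibleModel; non-stream mode returns a ChatCompletion.
    response = model.run([{"role": "user", "content": "Hello"}])
    print(response.choices[0].message.content)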
camel/models/moonshot_model.py
@@ -15,13 +15,12 @@
 import os
 from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import AsyncStream, OpenAI, Stream
+from openai import AsyncStream
 from pydantic import BaseModel
 
 from camel.configs import MOONSHOT_API_PARAMS, MoonshotConfig
 from camel.messages import OpenAIMessage
-from camel.models import BaseModelBackend
-from camel.models._utils import try_modify_message_with_format
+from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
@@ -29,13 +28,12 @@ from camel.types import (
 )
 from camel.utils import (
     BaseTokenCounter,
-    OpenAITokenCounter,
     api_keys_required,
 )
 
 
-class MoonshotModel(BaseModelBackend):
-    r"""Moonshot API in a unified BaseModelBackend interface.
+class MoonshotModel(OpenAICompatibleModel):
+    r"""Moonshot API in a unified OpenAICompatibleModel interface.
 
     Args:
         model_type (Union[ModelType, str]): Model for which a backend is
@@ -77,55 +75,13 @@ class MoonshotModel(BaseModelBackend):
         )
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter, timeout
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
         )
-        self._client = OpenAI(
-            api_key=self._api_key,
-            timeout=self._timeout,
-            max_retries=3,
-            base_url=self._url,
-        )
-
-    def _prepare_request(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Dict[str, Any]:
-        request_config = self.model_config_dict.copy()
-        if tools:
-            request_config["tools"] = tools
-        elif response_format:
-            try_modify_message_with_format(messages[-1], response_format)
-        return request_config
-
-    def _run(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of Moonshot chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-
-        Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
-        """
-        request_config = self._prepare_request(
-            messages, response_format, tools
-        )
-
-        response = self._client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-        return response
 
     async def _arun(
         self,
@@ -135,18 +91,6 @@ class MoonshotModel(BaseModelBackend):
     ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
         raise NotImplementedError("Moonshot does not support async inference.")
 
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        r"""Initialize the token counter for the model backend.
-
-        Returns:
-            OpenAITokenCounter: The token counter following the model's
-                tokenization style.
-        """
-        if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-        return self._token_counter
-
     def check_model_config(self):
         r"""Check whether the model configuration contains any
         unexpected arguments to Moonshot API.
@@ -161,13 +105,3 @@ class MoonshotModel(BaseModelBackend):
                     f"Unexpected argument `{param}` is "
                     "input into Moonshot model backend."
                 )
-
-    @property
-    def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode, which sends partial
-        results each time.
-
-        Returns:
-            bool: Whether the model is in stream mode.
-        """
-        return self.model_config_dict.get('stream', False)
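
Unlike the other refactored backends, Moonshot keeps an explicit `_arun` override that raises `NotImplementedError`, deliberately opting out of the async path its new base class would otherwise supply. A hedged sketch of how a caller might cope, assuming `arun()`/`run()` are the public wrappers around `_arun()`/`_run()`:

    import asyncio


    async def safe_arun(model, messages):
        # MoonshotModel._arun raises NotImplementedError (see the hunk above),
        # so fall back to running the synchronous path in a worker thread.
        try:
            return await model.arun(messages)
        except NotImplementedError:
            return await asyncio.to_thread(model.run, messages)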
camel/models/nemotron_model.py
@@ -12,21 +12,17 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Optional, Type, Union
+from typing import Optional, Union
 
-from openai import AsyncOpenAI, OpenAI
-from pydantic import BaseModel
-
-from camel.messages import OpenAIMessage
-from camel.models import BaseModelBackend
-from camel.types import ChatCompletion, ModelType
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
 from camel.utils import (
     BaseTokenCounter,
     api_keys_required,
 )
 
 
-class NemotronModel(BaseModelBackend):
+class NemotronModel(OpenAICompatibleModel):
     r"""Nemotron model API backend with OpenAI compatibility.
 
     Args:
@@ -63,58 +59,6 @@ class NemotronModel(BaseModelBackend):
         api_key = api_key or os.environ.get("NVIDIA_API_KEY")
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(model_type, {}, api_key, url, None, timeout)
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            base_url=self._url,
-            api_key=self._api_key,
-        )
-        self._async_client = AsyncOpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            base_url=self._url,
-            api_key=self._api_key,
-        )
-
-    async def _arun(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> ChatCompletion:
-        r"""Runs inference of OpenAI chat completion asynchronously.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list.
-
-        Returns:
-            ChatCompletion.
-        """
-        response = await self._async_client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-        )
-        return response
-
-    def _run(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> ChatCompletion:
-        r"""Runs inference of OpenAI chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list.
-
-        Returns:
-            ChatCompletion.
-        """
-        response = self._client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-        )
-        return response
 
     @property
     def token_counter(self) -> BaseTokenCounter:
camel/models/nvidia_model.py
@@ -13,24 +13,16 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-from typing import Any, Dict, List, Optional, Type, Union
-
-from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
-from openai.types.chat import (
-    ChatCompletion,
-    ChatCompletionChunk,
-)
-from pydantic import BaseModel
+from typing import Any, Dict, Optional, Union
 
 from camel.configs import NVIDIA_API_PARAMS, NvidiaConfig
-from camel.messages import OpenAIMessage
-from camel.models import BaseModelBackend
+from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import ModelType
-from camel.utils import BaseTokenCounter, OpenAITokenCounter, api_keys_required
+from camel.utils import BaseTokenCounter, api_keys_required
 
 
-class NvidiaModel(BaseModelBackend):
-    r"""NVIDIA API in a unified BaseModelBackend interface.
+class NvidiaModel(OpenAICompatibleModel):
+    r"""NVIDIA API in a unified OpenAICompatibleModel interface.
 
     Args:
         model_type (Union[ModelType, str]): Model for which a backend is
@@ -75,95 +67,13 @@ class NvidiaModel(BaseModelBackend):
         )
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter, timeout
-        )
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-        self._async_client = AsyncOpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-
-    async def _arun(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-        r"""Runs inference of NVIDIA chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-
-        Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `AsyncStream[ChatCompletionChunk]` in the stream mode.
-        """
-
-        # Remove tool-related parameters if no tools are specified
-        config = dict(self.model_config_dict)
-        if not config.get("tools"):  # None or empty list
-            config.pop("tools", None)
-            config.pop("tool_choice", None)
-
-        response = await self._async_client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **config,
-        )
-        return response
-
-    def _run(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of NVIDIA chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-
-        Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
-        """
-
-        # Remove tool-related parameters if no tools are specified
-        config = dict(self.model_config_dict)
-        if not config.get('tools'):  # None or empty list
-            config.pop('tools', None)
-            config.pop('tool_choice', None)
-
-        response = self._client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **config,
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
         )
-        return response
-
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        r"""Initialize the token counter for the model backend.
-
-        Returns:
-            OpenAITokenCounter: The token counter following the model's
-                tokenization style.
-        """
-
-        if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-        return self._token_counter
 
     def check_model_config(self):
         r"""Check whether the model configuration contains any
@@ -179,13 +89,3 @@ class NvidiaModel(BaseModelBackend):
                     f"Unexpected argument `{param}` is "
                     "input into NVIDIA model backend."
                 )
-
-    @property
-    def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode, which sends partial
-        results each time.
-
-        Returns:
-            bool: Whether the model is in stream mode.
-        """
-        return self.model_config_dict.get('stream', False)
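
One behavior worth noting: the deleted `_run`/`_arun` bodies both stripped `tools` and `tool_choice` from the request config when no tools were configured, since some OpenAI-compatible endpoints reject an empty `tools` list. Whether the shared `OpenAICompatibleModel` preserves that sanitization is not visible in this hunk. The removed step, restated as a standalone sketch:

    from typing import Any, Dict


    def strip_empty_tool_params(model_config_dict: Dict[str, Any]) -> Dict[str, Any]:
        # Copy the config, then drop tool-related keys if no tools are set
        # (None or empty list), exactly as the removed _run/_arun did.
        config = dict(model_config_dict)
        if not config.get("tools"):
            config.pop("tools", None)
            config.pop("tool_choice", None)
        return config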