camel-ai 0.2.45__py3-none-any.whl → 0.2.46__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (42)
  1. camel/__init__.py +1 -1
  2. camel/configs/__init__.py +6 -0
  3. camel/configs/bedrock_config.py +73 -0
  4. camel/configs/lmstudio_config.py +94 -0
  5. camel/configs/qwen_config.py +3 -3
  6. camel/models/__init__.py +4 -0
  7. camel/models/aiml_model.py +11 -104
  8. camel/models/anthropic_model.py +11 -76
  9. camel/models/aws_bedrock_model.py +112 -0
  10. camel/models/deepseek_model.py +11 -44
  11. camel/models/gemini_model.py +10 -72
  12. camel/models/groq_model.py +11 -131
  13. camel/models/internlm_model.py +11 -61
  14. camel/models/lmstudio_model.py +82 -0
  15. camel/models/model_factory.py +7 -1
  16. camel/models/modelscope_model.py +11 -122
  17. camel/models/moonshot_model.py +10 -76
  18. camel/models/nemotron_model.py +4 -60
  19. camel/models/nvidia_model.py +11 -111
  20. camel/models/ollama_model.py +12 -205
  21. camel/models/openai_compatible_model.py +51 -12
  22. camel/models/openrouter_model.py +12 -131
  23. camel/models/ppio_model.py +10 -99
  24. camel/models/qwen_model.py +11 -122
  25. camel/models/reka_model.py +1 -1
  26. camel/models/sglang_model.py +5 -3
  27. camel/models/siliconflow_model.py +10 -58
  28. camel/models/togetherai_model.py +10 -177
  29. camel/models/vllm_model.py +11 -218
  30. camel/models/volcano_model.py +1 -15
  31. camel/models/yi_model.py +11 -98
  32. camel/models/zhipuai_model.py +11 -102
  33. camel/toolkits/__init__.py +2 -0
  34. camel/toolkits/pyautogui_toolkit.py +428 -0
  35. camel/toolkits/video_analysis_toolkit.py +215 -80
  36. camel/toolkits/video_download_toolkit.py +10 -3
  37. camel/types/enums.py +64 -0
  38. camel/types/unified_model_type.py +10 -0
  39. {camel_ai-0.2.45.dist-info → camel_ai-0.2.46.dist-info}/METADATA +2 -1
  40. {camel_ai-0.2.45.dist-info → camel_ai-0.2.46.dist-info}/RECORD +42 -37
  41. {camel_ai-0.2.45.dist-info → camel_ai-0.2.46.dist-info}/WHEEL +0 -0
  42. {camel_ai-0.2.45.dist-info → camel_ai-0.2.46.dist-info}/licenses/LICENSE +0 -0
camel/models/deepseek_model.py

@@ -15,20 +15,20 @@
 import os
 from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from openai import AsyncStream, Stream
 from pydantic import BaseModel
 
 from camel.configs import DEEPSEEK_API_PARAMS, DeepSeekConfig
 from camel.logger import get_logger
 from camel.messages import OpenAIMessage
 from camel.models._utils import try_modify_message_with_format
-from camel.models.base_model import BaseModelBackend
+from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
     ModelType,
 )
-from camel.utils import BaseTokenCounter, OpenAITokenCounter, api_keys_required
+from camel.utils import BaseTokenCounter, api_keys_required
 
 logger = get_logger(__name__)
 
@@ -43,8 +43,8 @@ REASONSER_UNSUPPORTED_PARAMS = [
 ]
 
 
-class DeepSeekModel(BaseModelBackend):
-    r"""DeepSeek API in a unified BaseModelBackend interface.
+class DeepSeekModel(OpenAICompatibleModel):
+    r"""DeepSeek API in a unified OpenAICompatibleModel interface.
 
     Args:
         model_type (Union[ModelType, str]): Model for which a backend is
@@ -92,37 +92,14 @@ class DeepSeekModel(BaseModelBackend):
         )
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter, timeout
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
         )
 
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-
-        self._async_client = AsyncOpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        r"""Initialize the token counter for the model backend.
-
-        Returns:
-            BaseTokenCounter: The token counter following the model's
-                tokenization style.
-        """
-        if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(
-                model=ModelType.GPT_4O_MINI
-            )
-        return self._token_counter
-
     def _prepare_request(
         self,
         messages: List[OpenAIMessage],
@@ -270,13 +247,3 @@ class DeepSeekModel(BaseModelBackend):
                     f"Unexpected argument `{param}` is "
                     "input into DeepSeek model backend."
                 )
-
-    @property
-    def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode, which sends partial
-        results each time.
-
-        Returns:
-            bool: Whether the model is in stream mode.
-        """
-        return self.model_config_dict.get("stream", False)
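Note: the DeepSeek hunks above show the refactoring pattern that repeats in the Gemini, Groq, and InternLM hunks below. Each backend now subclasses OpenAICompatibleModel and drops its own copy of the OpenAI/AsyncOpenAI client setup, the token_counter property, and the stream property. A minimal sketch of what the shared base class is assumed to provide, reconstructed from the code deleted above (the actual implementation lives in camel/models/openai_compatible_model.py, changed +51 -12 in this release):

    # Sketch only: mirrors the deleted subclass code, not necessarily the
    # real openai_compatible_model.py.
    from typing import Any, Dict, Optional

    from openai import AsyncOpenAI, OpenAI


    class OpenAICompatibleModelSketch:
        def __init__(
            self,
            model_type: str,
            model_config_dict: Optional[Dict[str, Any]] = None,
            api_key: Optional[str] = None,
            url: Optional[str] = None,
            token_counter: Optional[Any] = None,
            timeout: Optional[float] = None,
        ) -> None:
            self.model_type = model_type
            self.model_config_dict = model_config_dict or {}
            self._api_key = api_key
            self._url = url
            self._token_counter = token_counter
            self._timeout = timeout
            # The sync/async clients are built once here instead of being
            # copy-pasted into every provider backend, as the deleted
            # blocks above were.
            self._client = OpenAI(
                timeout=self._timeout,
                max_retries=3,
                api_key=self._api_key,
                base_url=self._url,
            )
            self._async_client = AsyncOpenAI(
                timeout=self._timeout,
                max_retries=3,
                api_key=self._api_key,
                base_url=self._url,
            )

        @property
        def stream(self) -> bool:
            # Identical to the `stream` property removed from each subclass.
            return self.model_config_dict.get("stream", False)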
camel/models/gemini_model.py

@@ -14,12 +14,12 @@
 import os
 from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from openai import AsyncStream, Stream
 from pydantic import BaseModel
 
 from camel.configs import Gemini_API_PARAMS, GeminiConfig
 from camel.messages import OpenAIMessage
-from camel.models import BaseModelBackend
+from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
@@ -27,13 +27,12 @@ from camel.types import (
 )
 from camel.utils import (
     BaseTokenCounter,
-    OpenAITokenCounter,
     api_keys_required,
 )
 
 
-class GeminiModel(BaseModelBackend):
-    r"""Gemini API in a unified BaseModelBackend interface.
+class GeminiModel(OpenAICompatibleModel):
+    r"""Gemini API in a unified OpenAICompatibleModel interface.
 
     Args:
         model_type (Union[ModelType, str]): Model for which a backend is
@@ -80,19 +79,12 @@ class GeminiModel(BaseModelBackend):
         )
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter, timeout
-        )
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-        self._async_client = AsyncOpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
         )
 
     def _process_messages(self, messages) -> List[OpenAIMessage]:
@@ -247,50 +239,6 @@ class GeminiModel(BaseModelBackend):
             **request_config,
         )
 
-    def _request_parse(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Type[BaseModel],
-    ) -> ChatCompletion:
-        request_config = self.model_config_dict.copy()
-
-        request_config["response_format"] = response_format
-        request_config.pop("stream", None)
-
-        return self._client.beta.chat.completions.parse(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-
-    async def _arequest_parse(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Type[BaseModel],
-    ) -> ChatCompletion:
-        request_config = self.model_config_dict.copy()
-
-        request_config["response_format"] = response_format
-        request_config.pop("stream", None)
-
-        return await self._async_client.beta.chat.completions.parse(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        r"""Initialize the token counter for the model backend.
-
-        Returns:
-            BaseTokenCounter: The token counter following the model's
-                tokenization style.
-        """
-        if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-        return self._token_counter
-
     def check_model_config(self):
         r"""Check whether the model configuration contains any
         unexpected arguments to Gemini API.
@@ -305,13 +253,3 @@ class GeminiModel(BaseModelBackend):
                     f"Unexpected argument `{param}` is "
                     "input into Gemini model backend."
                 )
-
-    @property
-    def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode, which sends partial
-        results each time.
-
-        Returns:
-            bool: Whether the model is in stream mode.
-        """
-        return self.model_config_dict.get('stream', False)
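The deleted _request_parse / _arequest_parse pair is likewise assumed to move into OpenAICompatibleModel, so structured output should be unchanged from the caller's side. A hypothetical call (the CityReport schema is illustrative, and run() is the public wrapper BaseModelBackend puts around _run):

    from pydantic import BaseModel

    from camel.models import ModelFactory
    from camel.types import ModelPlatformType, ModelType


    class CityReport(BaseModel):
        # Illustrative schema, not part of this release.
        name: str
        population: int


    model = ModelFactory.create(
        model_platform=ModelPlatformType.GEMINI,
        model_type=ModelType.GEMINI_1_5_FLASH,
    )
    # Per the code removed above, response_format is routed to
    # client.beta.chat.completions.parse() with `stream` popped first.
    response = model.run(
        messages=[{"role": "user", "content": "Describe Tokyo as JSON."}],
        response_format=CityReport,
    )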
camel/models/groq_model.py

@@ -12,29 +12,19 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Optional, Type, Union
-
-from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
-from pydantic import BaseModel
+from typing import Any, Dict, Optional, Union
 
 from camel.configs import GROQ_API_PARAMS, GroqConfig
-from camel.messages import OpenAIMessage
-from camel.models import BaseModelBackend
-from camel.models._utils import try_modify_message_with_format
-from camel.types import (
-    ChatCompletion,
-    ChatCompletionChunk,
-    ModelType,
-)
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
 from camel.utils import (
     BaseTokenCounter,
-    OpenAITokenCounter,
     api_keys_required,
 )
 
 
-class GroqModel(BaseModelBackend):
-    r"""LLM API served by Groq in a unified BaseModelBackend interface.
+class GroqModel(OpenAICompatibleModel):
+    r"""LLM API served by Groq in a unified OpenAICompatibleModel interface.
 
     Args:
         model_type (Union[ModelType, str]): Model for which a backend is
@@ -75,114 +65,14 @@ class GroqModel(BaseModelBackend):
         )
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter, timeout
-        )
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-        self._async_client = AsyncOpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        r"""Initialize the token counter for the model backend.
-
-        Returns:
-            BaseTokenCounter: The token counter following the model's
-                tokenization style.
-        """
-        if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-        return self._token_counter
-
-    def _prepare_request(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Dict[str, Any]:
-        request_config = self.model_config_dict.copy()
-        if tools:
-            request_config["tools"] = tools
-        elif response_format:
-            try_modify_message_with_format(messages[-1], response_format)
-            request_config["response_format"] = {"type": "json_object"}
-
-        return request_config
-
-    def _run(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of Groq chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-            response_format (Optional[Type[BaseModel]]): The format of the
-                response.
-            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
-                use for the request.
-
-        Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
-        """
-        request_config = self._prepare_request(
-            messages, response_format, tools
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
         )
 
-        response = self._client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-
-        return response
-
-    async def _arun(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-        r"""Runs inference of Groq chat completion asynchronously.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-            response_format (Optional[Type[BaseModel]]): The format of the
-                response.
-            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
-                use for the request.
-
-        Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `AsyncStream[ChatCompletionChunk]` in the stream mode.
-        """
-        request_config = self._prepare_request(
-            messages, response_format, tools
-        )
-
-        response = await self._async_client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-
-        return response
-
     def check_model_config(self):
         r"""Check whether the model configuration contains any unexpected
         arguments to Groq API. But Groq API does not have any additional
@@ -198,13 +88,3 @@ class GroqModel(BaseModelBackend):
                     f"Unexpected argument `{param}` is "
                     "input into Groq model backend."
                 )
-
-    @property
-    def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode, which sends partial
-        results each time.
-
-        Returns:
-            bool: Whether the model is in stream mode.
-        """
-        return self.model_config_dict.get("stream", False)
camel/models/internlm_model.py

@@ -15,12 +15,12 @@
 import os
 from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import AsyncStream, OpenAI, Stream
+from openai import AsyncStream
 from pydantic import BaseModel
 
 from camel.configs import INTERNLM_API_PARAMS, InternLMConfig
 from camel.messages import OpenAIMessage
-from camel.models import BaseModelBackend
+from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
@@ -28,13 +28,12 @@ from camel.types import (
 )
 from camel.utils import (
     BaseTokenCounter,
-    OpenAITokenCounter,
     api_keys_required,
 )
 
 
-class InternLMModel(BaseModelBackend):
-    r"""InternLM API in a unified BaseModelBackend interface.
+class InternLMModel(OpenAICompatibleModel):
+    r"""InternLM API in a unified OpenAICompatibleModel interface.
 
     Args:
         model_type (Union[ModelType, str]): Model for which a backend is
@@ -71,8 +70,7 @@ class InternLMModel(BaseModelBackend):
         token_counter: Optional[BaseTokenCounter] = None,
         timeout: Optional[float] = None,
     ) -> None:
-        if model_config_dict is None:
-            model_config_dict = InternLMConfig().as_dict()
+        self.model_config = model_config_dict or InternLMConfig().as_dict()
         api_key = api_key or os.environ.get("INTERNLM_API_KEY")
         url = url or os.environ.get(
             "INTERNLM_API_BASE_URL",
@@ -80,39 +78,14 @@ class InternLMModel(BaseModelBackend):
         )
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter, timeout
-        )
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
+            model_type=model_type,
+            model_config_dict=self.model_config,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
         )
 
-    def _run(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of InternLM chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-
-        Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
-        """
-        response = self._client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **self.model_config_dict,
-        )
-        return response
-
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -121,19 +94,6 @@ class InternLMModel(BaseModelBackend):
     ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
         raise NotImplementedError("InternLM does not support async inference.")
 
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        r"""Initialize the token counter for the model backend.
-
-        Returns:
-            OpenAITokenCounter: The token counter following the model's
-                tokenization style.
-        """
-
-        if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-        return self._token_counter
-
     def check_model_config(self):
         r"""Check whether the model configuration contains any
         unexpected arguments to InternLM API.
@@ -148,13 +108,3 @@ class InternLMModel(BaseModelBackend):
                     f"Unexpected argument `{param}` is "
                     "input into InternLM model backend."
                 )
-
-    @property
-    def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode, which sends partial
-        results each time.
-
-        Returns:
-            bool: Whether the model is in stream mode.
-        """
-        return self.model_config_dict.get('stream', False)
camel/models/lmstudio_model.py (new file)

@@ -0,0 +1,82 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
+from typing import Any, Dict, Optional, Union
+
+from camel.configs import LMSTUDIO_API_PARAMS, LMStudioConfig
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
+from camel.utils import BaseTokenCounter
+
+
+class LMStudioModel(OpenAICompatibleModel):
+    r"""LLM served by LMStudio in a unified OpenAICompatibleModel interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into:obj:`openai.ChatCompletion.create()`.
+            If:obj:`None`, :obj:`LMStudioConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the model service. LMStudio doesn't need API key, it would be
+            ignored if set. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the LMStudio service.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
+    """
+
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = LMStudioConfig().as_dict()
+        api_key = "NA"
+        url = url or os.environ.get(
+            "LMSTUDIO_API_BASE_URL", "http://localhost:1234/v1"
+        )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+        super().__init__(
+            model_type, model_config_dict, api_key, url, token_counter, timeout
+        )
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any unexpected
+        arguments to LMStudio API. But LMStudio API does not have any
+        additional arguments to check.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to LMStudio API.
+        """
+        for param in self.model_config_dict:
+            if param not in LMSTUDIO_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into LMStudio model backend."
+                )
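A hypothetical smoke test for the new backend against a locally running LMStudio server; the URL and the ignored API key match the defaults hard-coded above, while the model name is a placeholder for whatever model the server has loaded:

    from camel.models.lmstudio_model import LMStudioModel

    model = LMStudioModel(
        model_type="qwen2.5-7b-instruct",  # placeholder local model name
        url="http://localhost:1234/v1",  # LMSTUDIO_API_BASE_URL default
    )
    # run() is the public wrapper BaseModelBackend puts around _run().
    response = model.run(
        messages=[{"role": "user", "content": "Say hello."}]
    )
    print(response.choices[0].message.content)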
camel/models/model_factory.py

@@ -18,6 +18,7 @@ import yaml
 
 from camel.models.aiml_model import AIMLModel
 from camel.models.anthropic_model import AnthropicModel
+from camel.models.aws_bedrock_model import AWSBedrockModel
 from camel.models.azure_openai_model import AzureOpenAIModel
 from camel.models.base_model import BaseModelBackend
 from camel.models.cohere_model import CohereModel
@@ -26,6 +27,7 @@ from camel.models.gemini_model import GeminiModel
 from camel.models.groq_model import GroqModel
 from camel.models.internlm_model import InternLMModel
 from camel.models.litellm_model import LiteLLMModel
+from camel.models.lmstudio_model import LMStudioModel
 from camel.models.mistral_model import MistralModel
 from camel.models.modelscope_model import ModelScopeModel
 from camel.models.moonshot_model import MoonshotModel
@@ -65,7 +67,7 @@ class ModelFactory:
         token_counter: Optional[BaseTokenCounter] = None,
         api_key: Optional[str] = None,
         url: Optional[str] = None,
-        timeout: Optional[int] = None,
+        timeout: Optional[float] = None,
     ) -> BaseModelBackend:
         r"""Creates an instance of `BaseModelBackend` of the specified type.
 
@@ -111,6 +113,8 @@
             model_class = TogetherAIModel
         elif model_platform.is_litellm:
             model_class = LiteLLMModel
+        elif model_platform.is_aws_bedrock:
+            model_class = AWSBedrockModel
         elif model_platform.is_nvidia:
             model_class = NvidiaModel
         elif model_platform.is_siliconflow:
@@ -128,6 +132,8 @@
             model_class = AnthropicModel
         elif model_platform.is_groq and model_type.is_groq:
             model_class = GroqModel
+        elif model_platform.is_lmstudio and model_type.is_lmstudio:
+            model_class = LMStudioModel
         elif model_platform.is_openrouter and model_type.is_openrouter:
             model_class = OpenRouterModel
         elif model_platform.is_zhipuai and model_type.is_zhipuai:
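Assuming the enum members added in camel/types/enums.py (+64 -0) follow the naming of the is_aws_bedrock / is_lmstudio checks above, the new factory routes would be exercised roughly like this (the member names and the Bedrock model ID are inferred, not confirmed by this diff):

    from camel.models import ModelFactory
    from camel.types import ModelPlatformType

    bedrock = ModelFactory.create(
        model_platform=ModelPlatformType.AWS_BEDROCK,  # assumed member name
        model_type="anthropic.claude-3-5-sonnet-20240620-v1:0",  # placeholder ID
        timeout=60.0,  # now accepted as Optional[float] rather than Optional[int]
    )
    lmstudio = ModelFactory.create(
        model_platform=ModelPlatformType.LMSTUDIO,  # assumed member name
        model_type="qwen2.5-7b-instruct",  # placeholder local model name
    )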