camel-ai 0.2.44__py3-none-any.whl → 0.2.46__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.

Files changed (60)
  1. camel/__init__.py +1 -1
  2. camel/configs/__init__.py +6 -0
  3. camel/configs/bedrock_config.py +73 -0
  4. camel/configs/lmstudio_config.py +94 -0
  5. camel/configs/qwen_config.py +3 -3
  6. camel/models/__init__.py +4 -0
  7. camel/models/aiml_model.py +11 -104
  8. camel/models/anthropic_model.py +11 -76
  9. camel/models/aws_bedrock_model.py +112 -0
  10. camel/models/cohere_model.py +32 -4
  11. camel/models/deepseek_model.py +11 -44
  12. camel/models/gemini_model.py +10 -72
  13. camel/models/groq_model.py +11 -131
  14. camel/models/internlm_model.py +11 -61
  15. camel/models/litellm_model.py +11 -4
  16. camel/models/lmstudio_model.py +82 -0
  17. camel/models/mistral_model.py +14 -2
  18. camel/models/model_factory.py +7 -1
  19. camel/models/modelscope_model.py +11 -122
  20. camel/models/moonshot_model.py +10 -76
  21. camel/models/nemotron_model.py +4 -60
  22. camel/models/nvidia_model.py +11 -111
  23. camel/models/ollama_model.py +12 -205
  24. camel/models/openai_compatible_model.py +51 -12
  25. camel/models/openrouter_model.py +12 -131
  26. camel/models/ppio_model.py +10 -99
  27. camel/models/qwen_model.py +11 -122
  28. camel/models/reka_model.py +12 -4
  29. camel/models/sglang_model.py +5 -3
  30. camel/models/siliconflow_model.py +10 -58
  31. camel/models/togetherai_model.py +10 -177
  32. camel/models/vllm_model.py +11 -218
  33. camel/models/volcano_model.py +8 -17
  34. camel/models/yi_model.py +11 -98
  35. camel/models/zhipuai_model.py +11 -102
  36. camel/runtime/__init__.py +2 -0
  37. camel/runtime/ubuntu_docker_runtime.py +340 -0
  38. camel/toolkits/__init__.py +2 -0
  39. camel/toolkits/audio_analysis_toolkit.py +21 -17
  40. camel/toolkits/browser_toolkit.py +2 -1
  41. camel/toolkits/dalle_toolkit.py +15 -0
  42. camel/toolkits/excel_toolkit.py +14 -1
  43. camel/toolkits/image_analysis_toolkit.py +9 -1
  44. camel/toolkits/mcp_toolkit.py +2 -0
  45. camel/toolkits/networkx_toolkit.py +5 -0
  46. camel/toolkits/openai_agent_toolkit.py +5 -1
  47. camel/toolkits/pyautogui_toolkit.py +428 -0
  48. camel/toolkits/searxng_toolkit.py +7 -0
  49. camel/toolkits/slack_toolkit.py +15 -2
  50. camel/toolkits/video_analysis_toolkit.py +218 -78
  51. camel/toolkits/video_download_toolkit.py +10 -3
  52. camel/toolkits/weather_toolkit.py +14 -1
  53. camel/toolkits/zapier_toolkit.py +6 -2
  54. camel/types/enums.py +73 -0
  55. camel/types/unified_model_type.py +10 -0
  56. camel/verifiers/base.py +14 -0
  57. {camel_ai-0.2.44.dist-info → camel_ai-0.2.46.dist-info}/METADATA +6 -5
  58. {camel_ai-0.2.44.dist-info → camel_ai-0.2.46.dist-info}/RECORD +60 -54
  59. {camel_ai-0.2.44.dist-info → camel_ai-0.2.46.dist-info}/WHEEL +0 -0
  60. {camel_ai-0.2.44.dist-info → camel_ai-0.2.46.dist-info}/licenses/LICENSE +0 -0
camel/models/lmstudio_model.py
@@ -0,0 +1,82 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ import os
+ from typing import Any, Dict, Optional, Union
+
+ from camel.configs import LMSTUDIO_API_PARAMS, LMStudioConfig
+ from camel.models.openai_compatible_model import OpenAICompatibleModel
+ from camel.types import ModelType
+ from camel.utils import BaseTokenCounter
+
+
+ class LMStudioModel(OpenAICompatibleModel):
+     r"""LLM served by LMStudio in a unified OpenAICompatibleModel interface.
+
+     Args:
+         model_type (Union[ModelType, str]): Model for which a backend is
+             created.
+         model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+             that will be fed into :obj:`openai.ChatCompletion.create()`.
+             If :obj:`None`, :obj:`LMStudioConfig().as_dict()` will be used.
+             (default: :obj:`None`)
+         api_key (Optional[str], optional): The API key for authenticating with
+             the model service. LMStudio doesn't need an API key; it will be
+             ignored if set. (default: :obj:`None`)
+         url (Optional[str], optional): The URL to the LMStudio service.
+             (default: :obj:`None`)
+         token_counter (Optional[BaseTokenCounter], optional): Token counter to
+             use for the model. If not provided, :obj:`OpenAITokenCounter(
+             ModelType.GPT_4O_MINI)` will be used.
+             (default: :obj:`None`)
+         timeout (Optional[float], optional): The timeout value in seconds for
+             API calls. If not provided, will fall back to the MODEL_TIMEOUT
+             environment variable or default to 180 seconds.
+             (default: :obj:`None`)
+     """
+
+     def __init__(
+         self,
+         model_type: Union[ModelType, str],
+         model_config_dict: Optional[Dict[str, Any]] = None,
+         api_key: Optional[str] = None,
+         url: Optional[str] = None,
+         token_counter: Optional[BaseTokenCounter] = None,
+         timeout: Optional[float] = None,
+     ) -> None:
+         if model_config_dict is None:
+             model_config_dict = LMStudioConfig().as_dict()
+         api_key = "NA"
+         url = url or os.environ.get(
+             "LMSTUDIO_API_BASE_URL", "http://localhost:1234/v1"
+         )
+         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+         super().__init__(
+             model_type, model_config_dict, api_key, url, token_counter, timeout
+         )
+
+     def check_model_config(self):
+         r"""Check whether the model configuration contains any unexpected
+         arguments to LMStudio API. The LMStudio API has no additional
+         arguments to check.
+
+         Raises:
+             ValueError: If the model configuration dictionary contains any
+                 unexpected arguments to LMStudio API.
+         """
+         for param in self.model_config_dict:
+             if param not in LMSTUDIO_API_PARAMS:
+                 raise ValueError(
+                     f"Unexpected argument `{param}` is "
+                     "input into LMStudio model backend."
+                 )
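For orientation, a minimal usage sketch of the new backend (not part of the diff). It assumes a local LMStudio server is serving a model; the model name is a placeholder, and run() is the public inference entry point inherited from the base backend.

from camel.models.lmstudio_model import LMStudioModel

# url falls back to LMSTUDIO_API_BASE_URL, then http://localhost:1234/v1;
# any api_key passed in is ignored, since __init__ forces it to "NA".
model = LMStudioModel(model_type="qwen2.5-7b-instruct")  # placeholder name
response = model.run([{"role": "user", "content": "Say hello."}])
# with the default non-stream config, a ChatCompletion comes back
print(response.choices[0].message.content)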
camel/models/mistral_model.py
@@ -62,6 +62,10 @@ class MistralModel(BaseModelBackend):
          token_counter (Optional[BaseTokenCounter], optional): Token counter to
              use for the model. If not provided, :obj:`OpenAITokenCounter` will
              be used. (default: :obj:`None`)
+         timeout (Optional[float], optional): The timeout value in seconds for
+             API calls. If not provided, will fall back to the MODEL_TIMEOUT
+             environment variable or default to 180 seconds.
+             (default: :obj:`None`)
      """

      @api_keys_required(
@@ -77,6 +81,7 @@ class MistralModel(BaseModelBackend):
          api_key: Optional[str] = None,
          url: Optional[str] = None,
          token_counter: Optional[BaseTokenCounter] = None,
+         timeout: Optional[float] = None,
      ) -> None:
          from mistralai import Mistral

@@ -85,10 +90,17 @@

          api_key = api_key or os.environ.get("MISTRAL_API_KEY")
          url = url or os.environ.get("MISTRAL_API_BASE_URL")
+         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
          super().__init__(
-             model_type, model_config_dict, api_key, url, token_counter
+             model_type, model_config_dict, api_key, url, token_counter, timeout
+         )
+         self._client = Mistral(
+             timeout_ms=int(self._timeout)
+             if self._timeout is not None
+             else None,
+             api_key=self._api_key,
+             server_url=self._url,
          )
-         self._client = Mistral(api_key=self._api_key, server_url=self._url)

      def _to_openai_response(
          self, response: 'ChatCompletionResponse'
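Nearly every backend touched in this release adopts the timeout resolution shown above. Isolated as a standalone sketch, the order of precedence is: explicit argument, then the MODEL_TIMEOUT environment variable, then a 180-second default.

import os
from typing import Optional

def resolve_timeout(timeout: Optional[float] = None) -> float:
    # mirrors `timeout or float(os.environ.get("MODEL_TIMEOUT", 180))`
    return timeout or float(os.environ.get("MODEL_TIMEOUT", 180))

assert resolve_timeout(30.0) == 30.0   # explicit argument wins
os.environ["MODEL_TIMEOUT"] = "60"
assert resolve_timeout() == 60.0       # env var fallback
del os.environ["MODEL_TIMEOUT"]
assert resolve_timeout() == 180.0      # built-in default

Note that because the pattern uses `or` rather than an explicit None check, a caller passing 0 also falls through to the fallback.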
camel/models/model_factory.py
@@ -18,6 +18,7 @@ import yaml

  from camel.models.aiml_model import AIMLModel
  from camel.models.anthropic_model import AnthropicModel
+ from camel.models.aws_bedrock_model import AWSBedrockModel
  from camel.models.azure_openai_model import AzureOpenAIModel
  from camel.models.base_model import BaseModelBackend
  from camel.models.cohere_model import CohereModel
@@ -26,6 +27,7 @@ from camel.models.gemini_model import GeminiModel
  from camel.models.groq_model import GroqModel
  from camel.models.internlm_model import InternLMModel
  from camel.models.litellm_model import LiteLLMModel
+ from camel.models.lmstudio_model import LMStudioModel
  from camel.models.mistral_model import MistralModel
  from camel.models.modelscope_model import ModelScopeModel
  from camel.models.moonshot_model import MoonshotModel
@@ -65,7 +67,7 @@ class ModelFactory:
          token_counter: Optional[BaseTokenCounter] = None,
          api_key: Optional[str] = None,
          url: Optional[str] = None,
-         timeout: Optional[int] = None,
+         timeout: Optional[float] = None,
      ) -> BaseModelBackend:
          r"""Creates an instance of `BaseModelBackend` of the specified type.

@@ -111,6 +113,8 @@
              model_class = TogetherAIModel
          elif model_platform.is_litellm:
              model_class = LiteLLMModel
+         elif model_platform.is_aws_bedrock:
+             model_class = AWSBedrockModel
          elif model_platform.is_nvidia:
              model_class = NvidiaModel
          elif model_platform.is_siliconflow:
@@ -128,6 +132,8 @@
              model_class = AnthropicModel
          elif model_platform.is_groq and model_type.is_groq:
              model_class = GroqModel
+         elif model_platform.is_lmstudio and model_type.is_lmstudio:
+             model_class = LMStudioModel
          elif model_platform.is_openrouter and model_type.is_openrouter:
              model_class = OpenRouterModel
          elif model_platform.is_zhipuai and model_type.is_zhipuai:
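A hedged sketch of reaching the two new backends through the factory. The ModelPlatformType member names are assumptions inferred from the is_aws_bedrock/is_lmstudio checks above, and the model ids are placeholders.

from camel.models import ModelFactory
from camel.types import ModelPlatformType

bedrock = ModelFactory.create(
    model_platform=ModelPlatformType.AWS_BEDROCK,  # assumed member name
    model_type="anthropic.claude-3-5-sonnet-20240620-v1:0",  # placeholder id
)
lmstudio = ModelFactory.create(
    model_platform=ModelPlatformType.LMSTUDIO,  # assumed member name
    model_type="qwen2.5-7b-instruct",  # placeholder local model
)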
camel/models/modelscope_model.py
@@ -13,29 +13,19 @@
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

  import os
- from typing import Any, Dict, List, Optional, Type, Union
-
- from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
- from pydantic import BaseModel
+ from typing import Any, Dict, Optional, Union

  from camel.configs import MODELSCOPE_API_PARAMS, ModelScopeConfig
- from camel.messages import OpenAIMessage
- from camel.models import BaseModelBackend
- from camel.models._utils import try_modify_message_with_format
- from camel.types import (
-     ChatCompletion,
-     ChatCompletionChunk,
-     ModelType,
- )
+ from camel.models.openai_compatible_model import OpenAICompatibleModel
+ from camel.types import ModelType
  from camel.utils import (
      BaseTokenCounter,
-     OpenAITokenCounter,
      api_keys_required,
  )


- class ModelScopeModel(BaseModelBackend):
-     r"""ModelScope API in a unified BaseModelBackend interface.
+ class ModelScopeModel(OpenAICompatibleModel):
+     r"""ModelScope API in a unified OpenAICompatibleModel interface.

      Args:
          model_type (Union[ModelType, str]): Model for which a backend is
@@ -83,104 +73,13 @@ class ModelScopeModel(BaseModelBackend):
          )
          timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
          super().__init__(
-             model_type, model_config_dict, api_key, url, token_counter, timeout
-         )
-         self._client = OpenAI(
-             timeout=self._timeout,
-             max_retries=3,
-             api_key=self._api_key,
-             base_url=self._url,
-         )
-         self._async_client = AsyncOpenAI(
-             timeout=self._timeout,
-             max_retries=3,
-             api_key=self._api_key,
-             base_url=self._url,
-         )
-
-     async def _arun(
-         self,
-         messages: List[OpenAIMessage],
-         response_format: Optional[Type[BaseModel]] = None,
-         tools: Optional[List[Dict[str, Any]]] = None,
-     ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-         r"""Runs inference of ModelScope chat completion.
-
-         Args:
-             messages (List[OpenAIMessage]): Message list with the chat history
-                 in OpenAI API format.
-
-         Returns:
-             Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-                 `ChatCompletion` in the non-stream mode, or
-                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
-         """
-         request_config = self._prepare_request(
-             messages, response_format, tools
-         )
-
-         response = await self._async_client.chat.completions.create(
-             messages=messages,
-             model=self.model_type,
-             **request_config,
-         )
-         return response
-
-     def _run(
-         self,
-         messages: List[OpenAIMessage],
-         response_format: Optional[Type[BaseModel]] = None,
-         tools: Optional[List[Dict[str, Any]]] = None,
-     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-         r"""Runs inference of ModelScope chat completion.
-
-         Args:
-             messages (List[OpenAIMessage]): Message list with the chat history
-                 in OpenAI API format.
-
-         Returns:
-             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                 `ChatCompletion` in the non-stream mode, or
-                 `Stream[ChatCompletionChunk]` in the stream mode.
-         """
-         request_config = self._prepare_request(
-             messages, response_format, tools
-         )
-
-         response = self._client.chat.completions.create(
-             messages=messages,
-             model=self.model_type,
-             **request_config,
+             model_type=model_type,
+             model_config_dict=model_config_dict,
+             api_key=api_key,
+             url=url,
+             token_counter=token_counter,
+             timeout=timeout,
          )
-         return response
-
-     def _prepare_request(
-         self,
-         messages: List[OpenAIMessage],
-         response_format: Optional[Type[BaseModel]] = None,
-         tools: Optional[List[Dict[str, Any]]] = None,
-     ) -> Dict[str, Any]:
-         request_config = self.model_config_dict.copy()
-         if tools:
-             request_config["tools"] = tools
-         elif response_format:
-             try_modify_message_with_format(messages[-1], response_format)
-             request_config["response_format"] = {"type": "json_object"}
-
-         return request_config
-
-     @property
-     def token_counter(self) -> BaseTokenCounter:
-         r"""Initialize the token counter for the model backend.
-
-         Returns:
-             OpenAITokenCounter: The token counter following the model's
-                 tokenization style.
-         """
-
-         if not self._token_counter:
-             self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-         return self._token_counter

      def check_model_config(self):
          r"""Check whether the model configuration contains any
@@ -196,13 +95,3 @@ class ModelScopeModel(BaseModelBackend):
                      f"Unexpected argument `{param}` is "
                      "input into ModelScope model backend."
                  )
-
-     @property
-     def stream(self) -> bool:
-         r"""Returns whether the model is in stream mode, which sends partial
-         results each time.
-
-         Returns:
-             bool: Whether the model is in stream mode.
-         """
-         return self.model_config_dict.get('stream', False)
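The ModelScope change above is the template for most of this release's refactors (Moonshot, Nemotron, and NVIDIA below follow it): the hand-rolled OpenAI clients, _run/_arun, token_counter, and stream all move into OpenAICompatibleModel, leaving each subclass with little more than credential handling and config validation. A condensed sketch of the resulting shape, with hypothetical names (ExampleModel, EXAMPLE_API_PARAMS, EXAMPLE_API_KEY):

import os
from typing import Any, Dict, Optional, Union

from camel.models.openai_compatible_model import OpenAICompatibleModel
from camel.types import ModelType
from camel.utils import BaseTokenCounter

EXAMPLE_API_PARAMS = {"temperature", "top_p", "stream"}  # hypothetical


class ExampleModel(OpenAICompatibleModel):
    # _run/_arun, token_counter, and stream are all inherited from the base.
    def __init__(
        self,
        model_type: Union[ModelType, str],
        model_config_dict: Optional[Dict[str, Any]] = None,
        api_key: Optional[str] = None,
        url: Optional[str] = None,
        token_counter: Optional[BaseTokenCounter] = None,
        timeout: Optional[float] = None,
    ) -> None:
        api_key = api_key or os.environ.get("EXAMPLE_API_KEY")  # hypothetical
        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
        super().__init__(
            model_type=model_type,
            model_config_dict=model_config_dict or {},
            api_key=api_key,
            url=url,
            token_counter=token_counter,
            timeout=timeout,
        )

    def check_model_config(self):
        # reject config keys the hypothetical API would not accept
        for param in self.model_config_dict:
            if param not in EXAMPLE_API_PARAMS:
                raise ValueError(
                    f"Unexpected argument `{param}` is "
                    "input into Example model backend."
                )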
camel/models/moonshot_model.py
@@ -15,13 +15,12 @@
  import os
  from typing import Any, Dict, List, Optional, Type, Union

- from openai import AsyncStream, OpenAI, Stream
+ from openai import AsyncStream
  from pydantic import BaseModel

  from camel.configs import MOONSHOT_API_PARAMS, MoonshotConfig
  from camel.messages import OpenAIMessage
- from camel.models import BaseModelBackend
- from camel.models._utils import try_modify_message_with_format
+ from camel.models.openai_compatible_model import OpenAICompatibleModel
  from camel.types import (
      ChatCompletion,
      ChatCompletionChunk,
@@ -29,13 +28,12 @@ from camel.types import (
  )
  from camel.utils import (
      BaseTokenCounter,
-     OpenAITokenCounter,
      api_keys_required,
  )


- class MoonshotModel(BaseModelBackend):
-     r"""Moonshot API in a unified BaseModelBackend interface.
+ class MoonshotModel(OpenAICompatibleModel):
+     r"""Moonshot API in a unified OpenAICompatibleModel interface.

      Args:
          model_type (Union[ModelType, str]): Model for which a backend is
@@ -77,55 +75,13 @@ class MoonshotModel(BaseModelBackend):
          )
          timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
          super().__init__(
-             model_type, model_config_dict, api_key, url, token_counter, timeout
+             model_type=model_type,
+             model_config_dict=model_config_dict,
+             api_key=api_key,
+             url=url,
+             token_counter=token_counter,
+             timeout=timeout,
          )
-         self._client = OpenAI(
-             api_key=self._api_key,
-             timeout=self._timeout,
-             max_retries=3,
-             base_url=self._url,
-         )
-
-     def _prepare_request(
-         self,
-         messages: List[OpenAIMessage],
-         response_format: Optional[Type[BaseModel]] = None,
-         tools: Optional[List[Dict[str, Any]]] = None,
-     ) -> Dict[str, Any]:
-         request_config = self.model_config_dict.copy()
-         if tools:
-             request_config["tools"] = tools
-         elif response_format:
-             try_modify_message_with_format(messages[-1], response_format)
-         return request_config
-
-     def _run(
-         self,
-         messages: List[OpenAIMessage],
-         response_format: Optional[Type[BaseModel]] = None,
-         tools: Optional[List[Dict[str, Any]]] = None,
-     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-         r"""Runs inference of Moonshot chat completion.
-
-         Args:
-             messages (List[OpenAIMessage]): Message list with the chat history
-                 in OpenAI API format.
-
-         Returns:
-             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                 `ChatCompletion` in the non-stream mode, or
-                 `Stream[ChatCompletionChunk]` in the stream mode.
-         """
-         request_config = self._prepare_request(
-             messages, response_format, tools
-         )
-
-         response = self._client.chat.completions.create(
-             messages=messages,
-             model=self.model_type,
-             **request_config,
-         )
-         return response

      async def _arun(
          self,
@@ -135,18 +91,6 @@ class MoonshotModel(BaseModelBackend):
      ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
          raise NotImplementedError("Moonshot does not support async inference.")

-     @property
-     def token_counter(self) -> BaseTokenCounter:
-         r"""Initialize the token counter for the model backend.
-
-         Returns:
-             OpenAITokenCounter: The token counter following the model's
-                 tokenization style.
-         """
-         if not self._token_counter:
-             self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-         return self._token_counter
-
      def check_model_config(self):
          r"""Check whether the model configuration contains any
          unexpected arguments to Moonshot API.
@@ -161,13 +105,3 @@ class MoonshotModel(BaseModelBackend):
                      f"Unexpected argument `{param}` is "
                      "input into Moonshot model backend."
                  )
-
-     @property
-     def stream(self) -> bool:
-         r"""Returns whether the model is in stream mode, which sends partial
-         results each time.
-
-         Returns:
-             bool: Whether the model is in stream mode.
-         """
-         return self.model_config_dict.get('stream', False)
camel/models/nemotron_model.py
@@ -12,21 +12,17 @@
  # limitations under the License.
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
  import os
- from typing import Any, Dict, List, Optional, Type, Union
+ from typing import Optional, Union

- from openai import AsyncOpenAI, OpenAI
- from pydantic import BaseModel
-
- from camel.messages import OpenAIMessage
- from camel.models import BaseModelBackend
- from camel.types import ChatCompletion, ModelType
+ from camel.models.openai_compatible_model import OpenAICompatibleModel
+ from camel.types import ModelType
  from camel.utils import (
      BaseTokenCounter,
      api_keys_required,
  )


- class NemotronModel(BaseModelBackend):
+ class NemotronModel(OpenAICompatibleModel):
      r"""Nemotron model API backend with OpenAI compatibility.

      Args:
@@ -63,58 +59,6 @@ class NemotronModel(BaseModelBackend):
          api_key = api_key or os.environ.get("NVIDIA_API_KEY")
          timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
          super().__init__(model_type, {}, api_key, url, None, timeout)
-         self._client = OpenAI(
-             timeout=self._timeout,
-             max_retries=3,
-             base_url=self._url,
-             api_key=self._api_key,
-         )
-         self._async_client = AsyncOpenAI(
-             timeout=self._timeout,
-             max_retries=3,
-             base_url=self._url,
-             api_key=self._api_key,
-         )
-
-     async def _arun(
-         self,
-         messages: List[OpenAIMessage],
-         response_format: Optional[Type[BaseModel]] = None,
-         tools: Optional[List[Dict[str, Any]]] = None,
-     ) -> ChatCompletion:
-         r"""Runs inference of OpenAI chat completion asynchronously.
-
-         Args:
-             messages (List[OpenAIMessage]): Message list.
-
-         Returns:
-             ChatCompletion.
-         """
-         response = await self._async_client.chat.completions.create(
-             messages=messages,
-             model=self.model_type,
-         )
-         return response
-
-     def _run(
-         self,
-         messages: List[OpenAIMessage],
-         response_format: Optional[Type[BaseModel]] = None,
-         tools: Optional[List[Dict[str, Any]]] = None,
-     ) -> ChatCompletion:
-         r"""Runs inference of OpenAI chat completion.
-
-         Args:
-             messages (List[OpenAIMessage]): Message list.
-
-         Returns:
-             ChatCompletion.
-         """
-         response = self._client.chat.completions.create(
-             messages=messages,
-             model=self.model_type,
-         )
-         return response

      @property
      def token_counter(self) -> BaseTokenCounter:
camel/models/nvidia_model.py
@@ -13,24 +13,16 @@
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========

  import os
- from typing import Any, Dict, List, Optional, Type, Union
-
- from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
- from openai.types.chat import (
-     ChatCompletion,
-     ChatCompletionChunk,
- )
- from pydantic import BaseModel
+ from typing import Any, Dict, Optional, Union

  from camel.configs import NVIDIA_API_PARAMS, NvidiaConfig
- from camel.messages import OpenAIMessage
- from camel.models import BaseModelBackend
+ from camel.models.openai_compatible_model import OpenAICompatibleModel
  from camel.types import ModelType
- from camel.utils import BaseTokenCounter, OpenAITokenCounter, api_keys_required
+ from camel.utils import BaseTokenCounter, api_keys_required


- class NvidiaModel(BaseModelBackend):
-     r"""NVIDIA API in a unified BaseModelBackend interface.
+ class NvidiaModel(OpenAICompatibleModel):
+     r"""NVIDIA API in a unified OpenAICompatibleModel interface.

      Args:
          model_type (Union[ModelType, str]): Model for which a backend is
@@ -75,95 +67,13 @@ class NvidiaModel(BaseModelBackend):
          )
          timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
          super().__init__(
-             model_type, model_config_dict, api_key, url, token_counter, timeout
-         )
-         self._client = OpenAI(
-             timeout=self._timeout,
-             max_retries=3,
-             api_key=self._api_key,
-             base_url=self._url,
-         )
-         self._async_client = AsyncOpenAI(
-             timeout=self._timeout,
-             max_retries=3,
-             api_key=self._api_key,
-             base_url=self._url,
-         )
-
-     async def _arun(
-         self,
-         messages: List[OpenAIMessage],
-         response_format: Optional[Type[BaseModel]] = None,
-         tools: Optional[List[Dict[str, Any]]] = None,
-     ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-         r"""Runs inference of NVIDIA chat completion.
-
-         Args:
-             messages (List[OpenAIMessage]): Message list with the chat history
-                 in OpenAI API format.
-
-         Returns:
-             Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-                 `ChatCompletion` in the non-stream mode, or
-                 `AsyncStream[ChatCompletionChunk]` in the stream mode.
-         """
-
-         # Remove tool-related parameters if no tools are specified
-         config = dict(self.model_config_dict)
-         if not config.get("tools"):  # None or empty list
-             config.pop("tools", None)
-             config.pop("tool_choice", None)
-
-         response = await self._async_client.chat.completions.create(
-             messages=messages,
-             model=self.model_type,
-             **config,
-         )
-         return response
-
-     def _run(
-         self,
-         messages: List[OpenAIMessage],
-         response_format: Optional[Type[BaseModel]] = None,
-         tools: Optional[List[Dict[str, Any]]] = None,
-     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-         r"""Runs inference of NVIDIA chat completion.
-
-         Args:
-             messages (List[OpenAIMessage]): Message list with the chat history
-                 in OpenAI API format.
-
-         Returns:
-             Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                 `ChatCompletion` in the non-stream mode, or
-                 `Stream[ChatCompletionChunk]` in the stream mode.
-         """
-
-         # Remove tool-related parameters if no tools are specified
-         config = dict(self.model_config_dict)
-         if not config.get('tools'):  # None or empty list
-             config.pop('tools', None)
-             config.pop('tool_choice', None)
-
-         response = self._client.chat.completions.create(
-             messages=messages,
-             model=self.model_type,
-             **config,
+             model_type=model_type,
+             model_config_dict=model_config_dict,
+             api_key=api_key,
+             url=url,
+             token_counter=token_counter,
+             timeout=timeout,
          )
-         return response
-
-     @property
-     def token_counter(self) -> BaseTokenCounter:
-         r"""Initialize the token counter for the model backend.
-
-         Returns:
-             OpenAITokenCounter: The token counter following the model's
-                 tokenization style.
-         """
-
-         if not self._token_counter:
-             self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-         return self._token_counter

      def check_model_config(self):
          r"""Check whether the model configuration contains any
@@ -179,13 +89,3 @@ class NvidiaModel(BaseModelBackend):
                      f"Unexpected argument `{param}` is "
                      "input into NVIDIA model backend."
                  )
-
-     @property
-     def stream(self) -> bool:
-         r"""Returns whether the model is in stream mode, which sends partial
-         results each time.
-
-         Returns:
-             bool: Whether the model is in stream mode.
-         """
-         return self.model_config_dict.get('stream', False)