camel-ai 0.2.44__py3-none-any.whl → 0.2.46__py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as published to their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of camel-ai has been flagged as possibly problematic.

Files changed (60)
  1. camel/__init__.py +1 -1
  2. camel/configs/__init__.py +6 -0
  3. camel/configs/bedrock_config.py +73 -0
  4. camel/configs/lmstudio_config.py +94 -0
  5. camel/configs/qwen_config.py +3 -3
  6. camel/models/__init__.py +4 -0
  7. camel/models/aiml_model.py +11 -104
  8. camel/models/anthropic_model.py +11 -76
  9. camel/models/aws_bedrock_model.py +112 -0
  10. camel/models/cohere_model.py +32 -4
  11. camel/models/deepseek_model.py +11 -44
  12. camel/models/gemini_model.py +10 -72
  13. camel/models/groq_model.py +11 -131
  14. camel/models/internlm_model.py +11 -61
  15. camel/models/litellm_model.py +11 -4
  16. camel/models/lmstudio_model.py +82 -0
  17. camel/models/mistral_model.py +14 -2
  18. camel/models/model_factory.py +7 -1
  19. camel/models/modelscope_model.py +11 -122
  20. camel/models/moonshot_model.py +10 -76
  21. camel/models/nemotron_model.py +4 -60
  22. camel/models/nvidia_model.py +11 -111
  23. camel/models/ollama_model.py +12 -205
  24. camel/models/openai_compatible_model.py +51 -12
  25. camel/models/openrouter_model.py +12 -131
  26. camel/models/ppio_model.py +10 -99
  27. camel/models/qwen_model.py +11 -122
  28. camel/models/reka_model.py +12 -4
  29. camel/models/sglang_model.py +5 -3
  30. camel/models/siliconflow_model.py +10 -58
  31. camel/models/togetherai_model.py +10 -177
  32. camel/models/vllm_model.py +11 -218
  33. camel/models/volcano_model.py +8 -17
  34. camel/models/yi_model.py +11 -98
  35. camel/models/zhipuai_model.py +11 -102
  36. camel/runtime/__init__.py +2 -0
  37. camel/runtime/ubuntu_docker_runtime.py +340 -0
  38. camel/toolkits/__init__.py +2 -0
  39. camel/toolkits/audio_analysis_toolkit.py +21 -17
  40. camel/toolkits/browser_toolkit.py +2 -1
  41. camel/toolkits/dalle_toolkit.py +15 -0
  42. camel/toolkits/excel_toolkit.py +14 -1
  43. camel/toolkits/image_analysis_toolkit.py +9 -1
  44. camel/toolkits/mcp_toolkit.py +2 -0
  45. camel/toolkits/networkx_toolkit.py +5 -0
  46. camel/toolkits/openai_agent_toolkit.py +5 -1
  47. camel/toolkits/pyautogui_toolkit.py +428 -0
  48. camel/toolkits/searxng_toolkit.py +7 -0
  49. camel/toolkits/slack_toolkit.py +15 -2
  50. camel/toolkits/video_analysis_toolkit.py +218 -78
  51. camel/toolkits/video_download_toolkit.py +10 -3
  52. camel/toolkits/weather_toolkit.py +14 -1
  53. camel/toolkits/zapier_toolkit.py +6 -2
  54. camel/types/enums.py +73 -0
  55. camel/types/unified_model_type.py +10 -0
  56. camel/verifiers/base.py +14 -0
  57. {camel_ai-0.2.44.dist-info → camel_ai-0.2.46.dist-info}/METADATA +6 -5
  58. {camel_ai-0.2.44.dist-info → camel_ai-0.2.46.dist-info}/RECORD +60 -54
  59. {camel_ai-0.2.44.dist-info → camel_ai-0.2.46.dist-info}/WHEEL +0 -0
  60. {camel_ai-0.2.44.dist-info → camel_ai-0.2.46.dist-info}/licenses/LICENSE +0 -0
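
A pattern that recurs across the model backends in this release is new timeout handling: an explicit `timeout` argument wins, then the `MODEL_TIMEOUT` environment variable, then a 180-second default. A minimal sketch of that resolution order (the `resolve_timeout` helper is ours for illustration; the package inlines this expression in each constructor):

import os
from typing import Optional

def resolve_timeout(timeout: Optional[float] = None) -> float:
    # Resolution order used throughout the diffs below: explicit argument,
    # then the MODEL_TIMEOUT environment variable, then 180 seconds.
    return timeout or float(os.environ.get("MODEL_TIMEOUT", 180))

assert resolve_timeout(30.0) == 30.0   # explicit argument wins
os.environ["MODEL_TIMEOUT"] = "60"
assert resolve_timeout() == 60.0       # falls back to the environment
del os.environ["MODEL_TIMEOUT"]
assert resolve_timeout() == 180.0      # final default

Note that `timeout or ...` also replaces an explicit `timeout=0`, since `0` is falsy in Python.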
camel/models/cohere_model.py

@@ -44,7 +44,28 @@ except (ImportError, AttributeError):
 
 
 class CohereModel(BaseModelBackend):
-    r"""Cohere API in a unified BaseModelBackend interface."""
+    r"""Cohere API in a unified BaseModelBackend interface.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created, one of Cohere series.
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into:obj:`cohere.ClientV2().chat()`. If
+            :obj:`None`, :obj:`CohereConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the Cohere service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the Cohere service.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+            (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
+    """
 
     @api_keys_required(
         [
@@ -58,6 +79,7 @@ class CohereModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ):
         import cohere
 
@@ -66,11 +88,17 @@ class CohereModel(BaseModelBackend):
 
         api_key = api_key or os.environ.get("COHERE_API_KEY")
         url = url or os.environ.get("COHERE_API_BASE_URL")
+
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
+        )
+        self._client = cohere.ClientV2(
+            timeout=self._timeout, api_key=self._api_key
+        )
+        self._async_client = cohere.AsyncClientV2(
+            timeout=self._timeout, api_key=self._api_key
         )
-        self._client = cohere.ClientV2(api_key=self._api_key)
-        self._async_client = cohere.AsyncClientV2(api_key=self._api_key)
 
     def _to_openai_response(self, response: 'ChatResponse') -> ChatCompletion:
         if response.usage and response.usage.tokens:
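
The timeout is now forwarded to both `cohere.ClientV2` and `cohere.AsyncClientV2`, which previously received only the API key. For illustration, a sketch of constructing the updated backend; the `ModelType.COHERE_COMMAND_R` member and the placeholder key are assumptions on our part, not shown in this diff:

import os

from camel.models.cohere_model import CohereModel
from camel.types import ModelType

# @api_keys_required needs a key; substitute a real one.
os.environ.setdefault("COHERE_API_KEY", "<your-key>")

model = CohereModel(
    model_type=ModelType.COHERE_COMMAND_R,  # assumed enum member
    timeout=30.0,  # new in 0.2.46; else MODEL_TIMEOUT env var, else 180 s
)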
camel/models/deepseek_model.py

@@ -15,20 +15,20 @@
 import os
 from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from openai import AsyncStream, Stream
 from pydantic import BaseModel
 
 from camel.configs import DEEPSEEK_API_PARAMS, DeepSeekConfig
 from camel.logger import get_logger
 from camel.messages import OpenAIMessage
 from camel.models._utils import try_modify_message_with_format
-from camel.models.base_model import BaseModelBackend
+from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
     ModelType,
 )
-from camel.utils import BaseTokenCounter, OpenAITokenCounter, api_keys_required
+from camel.utils import BaseTokenCounter, api_keys_required
 
 logger = get_logger(__name__)
 
@@ -43,8 +43,8 @@ REASONSER_UNSUPPORTED_PARAMS = [
 ]
 
 
-class DeepSeekModel(BaseModelBackend):
-    r"""DeepSeek API in a unified BaseModelBackend interface.
+class DeepSeekModel(OpenAICompatibleModel):
+    r"""DeepSeek API in a unified OpenAICompatibleModel interface.
 
     Args:
         model_type (Union[ModelType, str]): Model for which a backend is
@@ -92,37 +92,14 @@ class DeepSeekModel(BaseModelBackend):
         )
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter, timeout
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
         )
 
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-
-        self._async_client = AsyncOpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        r"""Initialize the token counter for the model backend.
-
-        Returns:
-            BaseTokenCounter: The token counter following the model's
-                tokenization style.
-        """
-        if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(
-                model=ModelType.GPT_4O_MINI
-            )
-        return self._token_counter
-
     def _prepare_request(
         self,
         messages: List[OpenAIMessage],
@@ -270,13 +247,3 @@ class DeepSeekModel(BaseModelBackend):
                 f"Unexpected argument `{param}` is "
                 "input into DeepSeek model backend."
             )
-
-    @property
-    def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode, which sends partial
-        results each time.
-
-        Returns:
-            bool: Whether the model is in stream mode.
-        """
-        return self.model_config_dict.get("stream", False)
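
The deleted client construction, `token_counter` property, and `stream` property are not gone; they now come from `OpenAICompatibleModel` (whose own change, `openai_compatible_model.py +51 -12`, appears in the file list above). A sketch of the shape these backends now share; `ExampleModel` and its environment variables are hypothetical:

import os
from typing import Any, Dict, Optional, Union

from camel.models.openai_compatible_model import OpenAICompatibleModel
from camel.types import ModelType
from camel.utils import BaseTokenCounter

class ExampleModel(OpenAICompatibleModel):
    # Hypothetical backend following the pattern in this diff.
    def __init__(
        self,
        model_type: Union[ModelType, str],
        model_config_dict: Optional[Dict[str, Any]] = None,
        api_key: Optional[str] = None,
        url: Optional[str] = None,
        token_counter: Optional[BaseTokenCounter] = None,
        timeout: Optional[float] = None,
    ) -> None:
        api_key = api_key or os.environ.get("EXAMPLE_API_KEY")
        url = url or os.environ.get("EXAMPLE_API_BASE_URL")
        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
        # The base class builds the OpenAI/AsyncOpenAI clients and provides
        # token_counter and stream, so subclasses no longer define them.
        super().__init__(
            model_type=model_type,
            model_config_dict=model_config_dict,
            api_key=api_key,
            url=url,
            token_counter=token_counter,
            timeout=timeout,
        )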
camel/models/gemini_model.py

@@ -14,12 +14,12 @@
 import os
 from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
+from openai import AsyncStream, Stream
 from pydantic import BaseModel
 
 from camel.configs import Gemini_API_PARAMS, GeminiConfig
 from camel.messages import OpenAIMessage
-from camel.models import BaseModelBackend
+from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
@@ -27,13 +27,12 @@ from camel.types import (
 )
 from camel.utils import (
     BaseTokenCounter,
-    OpenAITokenCounter,
     api_keys_required,
 )
 
 
-class GeminiModel(BaseModelBackend):
-    r"""Gemini API in a unified BaseModelBackend interface.
+class GeminiModel(OpenAICompatibleModel):
+    r"""Gemini API in a unified OpenAICompatibleModel interface.
 
     Args:
         model_type (Union[ModelType, str]): Model for which a backend is
@@ -80,19 +79,12 @@ class GeminiModel(BaseModelBackend):
         )
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter, timeout
-        )
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-        self._async_client = AsyncOpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
         )
 
     def _process_messages(self, messages) -> List[OpenAIMessage]:
@@ -247,50 +239,6 @@ class GeminiModel(BaseModelBackend):
             **request_config,
         )
 
-    def _request_parse(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Type[BaseModel],
-    ) -> ChatCompletion:
-        request_config = self.model_config_dict.copy()
-
-        request_config["response_format"] = response_format
-        request_config.pop("stream", None)
-
-        return self._client.beta.chat.completions.parse(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-
-    async def _arequest_parse(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Type[BaseModel],
-    ) -> ChatCompletion:
-        request_config = self.model_config_dict.copy()
-
-        request_config["response_format"] = response_format
-        request_config.pop("stream", None)
-
-        return await self._async_client.beta.chat.completions.parse(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        r"""Initialize the token counter for the model backend.
-
-        Returns:
-            BaseTokenCounter: The token counter following the model's
-                tokenization style.
-        """
-        if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-        return self._token_counter
-
     def check_model_config(self):
         r"""Check whether the model configuration contains any
         unexpected arguments to Gemini API.
@@ -305,13 +253,3 @@ class GeminiModel(BaseModelBackend):
                 f"Unexpected argument `{param}` is "
                 "input into Gemini model backend."
            )
-
-    @property
-    def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode, which sends partial
-        results each time.
-
-        Returns:
-            bool: Whether the model is in stream mode.
-        """
-        return self.model_config_dict.get('stream', False)
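
The structured-output helpers `_request_parse`/`_arequest_parse` deleted here presumably move to the shared base class as well. For reference, the config handling they implemented, reconstructed from the removed lines as a standalone sketch:

from typing import Any, Dict, Type

from pydantic import BaseModel

def build_parse_config(
    model_config_dict: Dict[str, Any],
    response_format: Type[BaseModel],
) -> Dict[str, Any]:
    # Mirrors the deleted _request_parse: copy the model config, set the
    # pydantic schema as response_format, and drop `stream`, since
    # beta.chat.completions.parse() is non-streaming.
    request_config = model_config_dict.copy()
    request_config["response_format"] = response_format
    request_config.pop("stream", None)
    return request_config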
camel/models/groq_model.py

@@ -12,29 +12,19 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Optional, Type, Union
-
-from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
-from pydantic import BaseModel
+from typing import Any, Dict, Optional, Union
 
 from camel.configs import GROQ_API_PARAMS, GroqConfig
-from camel.messages import OpenAIMessage
-from camel.models import BaseModelBackend
-from camel.models._utils import try_modify_message_with_format
-from camel.types import (
-    ChatCompletion,
-    ChatCompletionChunk,
-    ModelType,
-)
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
 from camel.utils import (
     BaseTokenCounter,
-    OpenAITokenCounter,
     api_keys_required,
 )
 
 
-class GroqModel(BaseModelBackend):
-    r"""LLM API served by Groq in a unified BaseModelBackend interface.
+class GroqModel(OpenAICompatibleModel):
+    r"""LLM API served by Groq in a unified OpenAICompatibleModel interface.
 
     Args:
         model_type (Union[ModelType, str]): Model for which a backend is
@@ -75,114 +65,14 @@ class GroqModel(BaseModelBackend):
         )
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter, timeout
-        )
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-        self._async_client = AsyncOpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        r"""Initialize the token counter for the model backend.
-
-        Returns:
-            BaseTokenCounter: The token counter following the model's
-                tokenization style.
-        """
-        if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-        return self._token_counter
-
-    def _prepare_request(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Dict[str, Any]:
-        request_config = self.model_config_dict.copy()
-        if tools:
-            request_config["tools"] = tools
-        elif response_format:
-            try_modify_message_with_format(messages[-1], response_format)
-            request_config["response_format"] = {"type": "json_object"}
-
-        return request_config
-
-    def _run(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of Groq chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-            response_format (Optional[Type[BaseModel]]): The format of the
-                response.
-            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
-                use for the request.
-
-        Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
-        """
-        request_config = self._prepare_request(
-            messages, response_format, tools
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
         )
 
-        response = self._client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-
-        return response
-
-    async def _arun(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-        r"""Runs inference of Groq chat completion asynchronously.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-            response_format (Optional[Type[BaseModel]]): The format of the
-                response.
-            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
-                use for the request.
-
-        Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `AsyncStream[ChatCompletionChunk]` in the stream mode.
-        """
-        request_config = self._prepare_request(
-            messages, response_format, tools
-        )
-
-        response = await self._async_client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-
-        return response
-
     def check_model_config(self):
         r"""Check whether the model configuration contains any unexpected
         arguments to Groq API. But Groq API does not have any additional
@@ -198,13 +88,3 @@ class GroqModel(BaseModelBackend):
                 f"Unexpected argument `{param}` is "
                 "input into Groq model backend."
             )
-
-    @property
-    def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode, which sends partial
-        results each time.
-
-        Returns:
-            bool: Whether the model is in stream mode.
-        """
-        return self.model_config_dict.get("stream", False)
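
The removed `_prepare_request`/`_run`/`_arun` bodies match the generic OpenAI-compatible flow, which is the point of the refactor. Their tool/format precedence, restated as a standalone sketch grounded in the deleted lines:

from typing import Any, Dict, List, Optional, Type

from pydantic import BaseModel

from camel.messages import OpenAIMessage
from camel.models._utils import try_modify_message_with_format

def prepare_request_config(
    model_config_dict: Dict[str, Any],
    messages: List[OpenAIMessage],
    response_format: Optional[Type[BaseModel]] = None,
    tools: Optional[List[Dict[str, Any]]] = None,
) -> Dict[str, Any]:
    # Same precedence as the deleted GroqModel._prepare_request: explicit
    # tools win; otherwise a response format rewrites the last message and
    # switches the request into JSON mode.
    request_config = model_config_dict.copy()
    if tools:
        request_config["tools"] = tools
    elif response_format:
        try_modify_message_with_format(messages[-1], response_format)
        request_config["response_format"] = {"type": "json_object"}
    return request_config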
camel/models/internlm_model.py

@@ -15,12 +15,12 @@
 import os
 from typing import Any, Dict, List, Optional, Type, Union
 
-from openai import AsyncStream, OpenAI, Stream
+from openai import AsyncStream
 from pydantic import BaseModel
 
 from camel.configs import INTERNLM_API_PARAMS, InternLMConfig
 from camel.messages import OpenAIMessage
-from camel.models import BaseModelBackend
+from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
@@ -28,13 +28,12 @@ from camel.types import (
 )
 from camel.utils import (
     BaseTokenCounter,
-    OpenAITokenCounter,
     api_keys_required,
 )
 
 
-class InternLMModel(BaseModelBackend):
-    r"""InternLM API in a unified BaseModelBackend interface.
+class InternLMModel(OpenAICompatibleModel):
+    r"""InternLM API in a unified OpenAICompatibleModel interface.
 
     Args:
         model_type (Union[ModelType, str]): Model for which a backend is
@@ -71,8 +70,7 @@ class InternLMModel(BaseModelBackend):
         token_counter: Optional[BaseTokenCounter] = None,
         timeout: Optional[float] = None,
     ) -> None:
-        if model_config_dict is None:
-            model_config_dict = InternLMConfig().as_dict()
+        self.model_config = model_config_dict or InternLMConfig().as_dict()
         api_key = api_key or os.environ.get("INTERNLM_API_KEY")
         url = url or os.environ.get(
             "INTERNLM_API_BASE_URL",
@@ -80,39 +78,14 @@ class InternLMModel(BaseModelBackend):
         )
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter, timeout
-        )
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
+            model_type=model_type,
+            model_config_dict=self.model_config,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
         )
 
-    def _run(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of InternLM chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-
-        Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
-        """
-        response = self._client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **self.model_config_dict,
-        )
-        return response
-
     async def _arun(
         self,
         messages: List[OpenAIMessage],
@@ -121,19 +94,6 @@ class InternLMModel(BaseModelBackend):
     ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
         raise NotImplementedError("InternLM does not support async inference.")
 
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        r"""Initialize the token counter for the model backend.
-
-        Returns:
-            OpenAITokenCounter: The token counter following the model's
-                tokenization style.
-        """
-
-        if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-        return self._token_counter
-
     def check_model_config(self):
         r"""Check whether the model configuration contains any
         unexpected arguments to InternLM API.
@@ -148,13 +108,3 @@ class InternLMModel(BaseModelBackend):
                 f"Unexpected argument `{param}` is "
                 "input into InternLM model backend."
             )
-
-    @property
-    def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode, which sends partial
-        results each time.
-
-        Returns:
-            bool: Whether the model is in stream mode.
-        """
-        return self.model_config_dict.get('stream', False)
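
One behavioral nuance in this hunk: the old code substituted defaults only when `model_config_dict is None`, while the new `or` expression also replaces an explicitly passed empty dict. A small sketch of the difference:

from camel.configs import InternLMConfig

model_config_dict: dict = {}  # caller explicitly asks for "no parameters"

old_style = (
    model_config_dict
    if model_config_dict is not None
    else InternLMConfig().as_dict()
)
new_style = model_config_dict or InternLMConfig().as_dict()

print(old_style)  # {} (the empty dict is preserved)
print(new_style == InternLMConfig().as_dict())  # True (defaults substituted)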
camel/models/litellm_model.py

@@ -11,6 +11,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+import os
 from typing import Any, Dict, List, Optional, Type, Union
 
 from pydantic import BaseModel
@@ -33,8 +34,8 @@ class LiteLLMModel(BaseModelBackend):
         model_type (Union[ModelType, str]): Model for which a backend is
             created, such as GPT-3.5-turbo, Claude-2, etc.
         model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
-            that will be fed into:obj:`openai.ChatCompletion.create()`.
-            If:obj:`None`, :obj:`LiteLLMConfig().as_dict()` will be used.
+            that will be fed into:obj:`completion()`. If:obj:`None`,
+            :obj:`LiteLLMConfig().as_dict()` will be used.
             (default: :obj:`None`)
         api_key (Optional[str], optional): The API key for authenticating with
             the model service. (default: :obj:`None`)
@@ -43,6 +44,10 @@ class LiteLLMModel(BaseModelBackend):
         token_counter (Optional[BaseTokenCounter], optional): Token counter to
             use for the model. If not provided, :obj:`LiteLLMTokenCounter` will
             be used. (default: :obj:`None`)
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
     """
 
     # NOTE: Currently stream mode is not supported.
@@ -55,14 +60,15 @@ class LiteLLMModel(BaseModelBackend):
         api_key: Optional[str] = None,
         url: Optional[str] = None,
         token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
     ) -> None:
         from litellm import completion
 
         if model_config_dict is None:
             model_config_dict = LiteLLMConfig().as_dict()
-
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter
+            model_type, model_config_dict, api_key, url, token_counter, timeout
         )
         self.client = completion
 
@@ -127,6 +133,7 @@ class LiteLLMModel(BaseModelBackend):
             ChatCompletion
         """
         response = self.client(
+            timeout=self._timeout,
            api_key=self._api_key,
            base_url=self._url,
            model=self.model_type,