camel-ai 0.2.44__py3-none-any.whl → 0.2.46__py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of camel-ai might be problematic.

Files changed (60)
  1. camel/__init__.py +1 -1
  2. camel/configs/__init__.py +6 -0
  3. camel/configs/bedrock_config.py +73 -0
  4. camel/configs/lmstudio_config.py +94 -0
  5. camel/configs/qwen_config.py +3 -3
  6. camel/models/__init__.py +4 -0
  7. camel/models/aiml_model.py +11 -104
  8. camel/models/anthropic_model.py +11 -76
  9. camel/models/aws_bedrock_model.py +112 -0
  10. camel/models/cohere_model.py +32 -4
  11. camel/models/deepseek_model.py +11 -44
  12. camel/models/gemini_model.py +10 -72
  13. camel/models/groq_model.py +11 -131
  14. camel/models/internlm_model.py +11 -61
  15. camel/models/litellm_model.py +11 -4
  16. camel/models/lmstudio_model.py +82 -0
  17. camel/models/mistral_model.py +14 -2
  18. camel/models/model_factory.py +7 -1
  19. camel/models/modelscope_model.py +11 -122
  20. camel/models/moonshot_model.py +10 -76
  21. camel/models/nemotron_model.py +4 -60
  22. camel/models/nvidia_model.py +11 -111
  23. camel/models/ollama_model.py +12 -205
  24. camel/models/openai_compatible_model.py +51 -12
  25. camel/models/openrouter_model.py +12 -131
  26. camel/models/ppio_model.py +10 -99
  27. camel/models/qwen_model.py +11 -122
  28. camel/models/reka_model.py +12 -4
  29. camel/models/sglang_model.py +5 -3
  30. camel/models/siliconflow_model.py +10 -58
  31. camel/models/togetherai_model.py +10 -177
  32. camel/models/vllm_model.py +11 -218
  33. camel/models/volcano_model.py +8 -17
  34. camel/models/yi_model.py +11 -98
  35. camel/models/zhipuai_model.py +11 -102
  36. camel/runtime/__init__.py +2 -0
  37. camel/runtime/ubuntu_docker_runtime.py +340 -0
  38. camel/toolkits/__init__.py +2 -0
  39. camel/toolkits/audio_analysis_toolkit.py +21 -17
  40. camel/toolkits/browser_toolkit.py +2 -1
  41. camel/toolkits/dalle_toolkit.py +15 -0
  42. camel/toolkits/excel_toolkit.py +14 -1
  43. camel/toolkits/image_analysis_toolkit.py +9 -1
  44. camel/toolkits/mcp_toolkit.py +2 -0
  45. camel/toolkits/networkx_toolkit.py +5 -0
  46. camel/toolkits/openai_agent_toolkit.py +5 -1
  47. camel/toolkits/pyautogui_toolkit.py +428 -0
  48. camel/toolkits/searxng_toolkit.py +7 -0
  49. camel/toolkits/slack_toolkit.py +15 -2
  50. camel/toolkits/video_analysis_toolkit.py +218 -78
  51. camel/toolkits/video_download_toolkit.py +10 -3
  52. camel/toolkits/weather_toolkit.py +14 -1
  53. camel/toolkits/zapier_toolkit.py +6 -2
  54. camel/types/enums.py +73 -0
  55. camel/types/unified_model_type.py +10 -0
  56. camel/verifiers/base.py +14 -0
  57. {camel_ai-0.2.44.dist-info → camel_ai-0.2.46.dist-info}/METADATA +6 -5
  58. {camel_ai-0.2.44.dist-info → camel_ai-0.2.46.dist-info}/RECORD +60 -54
  59. {camel_ai-0.2.44.dist-info → camel_ai-0.2.46.dist-info}/WHEEL +0 -0
  60. {camel_ai-0.2.44.dist-info → camel_ai-0.2.46.dist-info}/licenses/LICENSE +0 -0
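
The dominant change in this release is a consolidation of the model backends: provider wrappers such as OllamaModel, OpenRouterModel, and PPIOModel now subclass OpenAICompatibleModel instead of BaseModelBackend, dropping their duplicated OpenAI client setup, request methods, and token_counter/stream properties, while OpenAICompatibleModel itself gains a retry fallback for structured output. Representative per-file diffs follow.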
camel/models/ollama_model.py

@@ -13,24 +13,15 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
 import subprocess
-from typing import Any, Dict, List, Optional, Type, Union
-
-from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
-from pydantic import BaseModel
+from typing import Any, Dict, Optional, Union
 
 from camel.configs import OLLAMA_API_PARAMS, OllamaConfig
-from camel.messages import OpenAIMessage
-from camel.models import BaseModelBackend
-from camel.models._utils import try_modify_message_with_format
-from camel.types import (
-    ChatCompletion,
-    ChatCompletionChunk,
-    ModelType,
-)
-from camel.utils import BaseTokenCounter, OpenAITokenCounter
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
+from camel.utils import BaseTokenCounter
 
 
-class OllamaModel(BaseModelBackend):
+class OllamaModel(OpenAICompatibleModel):
     r"""Ollama service interface.
 
     Args:
@@ -72,23 +63,16 @@ class OllamaModel(BaseModelBackend):
         url = url or os.environ.get("OLLAMA_BASE_URL")
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter, timeout
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
         )
+
         if not self._url:
             self._start_server()
-        # Use OpenAI client as interface call Ollama
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key="Set-but-ignored",  # required but ignored
-            base_url=self._url,
-        )
-        self._async_client = AsyncOpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key="Set-but-ignored",  # required but ignored
-            base_url=self._url,
-        )
 
     def _start_server(self) -> None:
         r"""Starts the Ollama server in a subprocess."""
@@ -106,18 +90,6 @@ class OllamaModel(BaseModelBackend):
         except Exception as e:
             print(f"Failed to start Ollama server: {e}.")
 
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        r"""Initialize the token counter for the model backend.
-
-        Returns:
-            BaseTokenCounter: The token counter following the model's
-                tokenization style.
-        """
-        if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-        return self._token_counter
-
     def check_model_config(self):
         r"""Check whether the model configuration contains any
         unexpected arguments to Ollama API.
@@ -132,168 +104,3 @@ class OllamaModel(BaseModelBackend):
                     f"Unexpected argument `{param}` is "
                     "input into Ollama model backend."
                 )
-
-    def _run(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of Ollama chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-            response_format (Optional[Type[BaseModel]]): The format of the
-                response.
-            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
-                use for the request.
-
-        Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
-        """
-        response_format = response_format or self.model_config_dict.get(
-            "response_format", None
-        )
-        # For Ollama, the tool calling will be broken with response_format
-        if response_format and not tools:
-            return self._request_parse(messages, response_format, tools)
-        else:
-            return self._request_chat_completion(
-                messages, response_format, tools
-            )
-
-    async def _arun(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-        r"""Runs inference of Ollama chat completion in async mode.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-            response_format (Optional[Type[BaseModel]]): The format of the
-                response.
-            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
-                use for the request.
-
-        Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `AsyncStream[ChatCompletionChunk]` in the stream mode.
-        """
-        response_format = response_format or self.model_config_dict.get(
-            "response_format", None
-        )
-        if response_format:
-            return await self._arequest_parse(messages, response_format, tools)
-        else:
-            return await self._arequest_chat_completion(
-                messages, response_format, tools
-            )
-
-    def _prepare_chat_completion_config(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Dict[str, Any]:
-        request_config = self.model_config_dict.copy()
-
-        if tools:
-            request_config["tools"] = tools
-        if response_format:
-            try_modify_message_with_format(messages[-1], response_format)
-            request_config["response_format"] = {"type": "json_object"}
-
-        return request_config
-
-    def _request_chat_completion(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        request_config = self._prepare_chat_completion_config(
-            messages, response_format, tools
-        )
-
-        return self._client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-
-    async def _arequest_chat_completion(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-        request_config = self._prepare_chat_completion_config(
-            messages, response_format, tools
-        )
-
-        return await self._async_client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-
-    def _request_parse(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Type[BaseModel],
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> ChatCompletion:
-        import copy
-
-        request_config = copy.deepcopy(self.model_config_dict)
-        # Remove stream from request_config since Ollama does not support it
-        # when structured response is used
-        request_config["response_format"] = response_format
-        request_config.pop("stream", None)
-        if tools is not None:
-            request_config["tools"] = tools
-
-        return self._client.beta.chat.completions.parse(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-
-    async def _arequest_parse(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Type[BaseModel],
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> ChatCompletion:
-        import copy
-
-        request_config = copy.deepcopy(self.model_config_dict)
-        # Remove stream from request_config since Ollama does not support it
-        # when structured response is used
-        request_config["response_format"] = response_format
-        request_config.pop("stream", None)
-        if tools is not None:
-            request_config["tools"] = tools
-
-        return await self._async_client.beta.chat.completions.parse(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-
-    @property
-    def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode, which sends partial
-        results each time.
-
-        Returns:
-            bool: Whether the model is in stream mode.
-        """
-        return self.model_config_dict.get('stream', False)
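
After this refactor, OllamaModel retains only server startup and config validation; request handling, token counting, and the stream flag are inherited from OpenAICompatibleModel. A minimal usage sketch, assuming a locally running Ollama server and a pulled llama3.2 tag (both assumptions for illustration, not part of the diff):

    from camel.models import ModelFactory
    from camel.types import ModelPlatformType

    # The factory resolves this platform/type pair to OllamaModel.
    model = ModelFactory.create(
        model_platform=ModelPlatformType.OLLAMA,
        model_type="llama3.2",            # any locally pulled model tag
        url="http://localhost:11434/v1",  # default Ollama OpenAI endpoint
        model_config_dict={"temperature": 0.4},
    )

    # run() is the public entry point on the base backend.
    response = model.run([{"role": "user", "content": "Say hi."}])
    print(response.choices[0].message.content)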
camel/models/openai_compatible_model.py

@@ -13,13 +13,16 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
+from json import JSONDecodeError
 from typing import Any, Dict, List, Optional, Type, Union
 
 from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
-from pydantic import BaseModel
+from pydantic import BaseModel, ValidationError
 
+from camel.logger import get_logger
 from camel.messages import OpenAIMessage
-from camel.models import BaseModelBackend
+from camel.models._utils import try_modify_message_with_format
+from camel.models.base_model import BaseModelBackend
 from camel.types import (
     ChatCompletion,
     ChatCompletionChunk,
@@ -30,6 +33,8 @@ from camel.utils import (
     OpenAITokenCounter,
 )
 
+logger = get_logger(__name__)
+
 
 class OpenAICompatibleModel(BaseModelBackend):
     r"""Constructor for model backend supporting OpenAI compatibility.
@@ -187,11 +192,28 @@ class OpenAICompatibleModel(BaseModelBackend):
         if tools is not None:
             request_config["tools"] = tools
 
-        return self._client.beta.chat.completions.parse(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
+        try:
+            return self._client.beta.chat.completions.parse(
+                messages=messages,
+                model=self.model_type,
+                **request_config,
+            )
+        except (ValidationError, JSONDecodeError) as e:
+            logger.warning(
+                f"Format validation error: {e}. "
+                f"Attempting fallback with JSON format."
+            )
+            try_modify_message_with_format(messages[-1], response_format)
+            request_config["response_format"] = {"type": "json_object"}
+            try:
+                return self._client.beta.chat.completions.parse(
+                    messages=messages,
+                    model=self.model_type,
+                    **request_config,
+                )
+            except Exception as e:
+                logger.error(f"Fallback attempt also failed: {e}")
+                raise
 
     async def _arequest_parse(
         self,
@@ -209,11 +231,28 @@ class OpenAICompatibleModel(BaseModelBackend):
         if tools is not None:
             request_config["tools"] = tools
 
-        return await self._async_client.beta.chat.completions.parse(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
+        try:
+            return await self._async_client.beta.chat.completions.parse(
+                messages=messages,
+                model=self.model_type,
+                **request_config,
+            )
+        except (ValidationError, JSONDecodeError) as e:
+            logger.warning(
+                f"Format validation error: {e}. "
+                f"Attempting fallback with JSON format."
+            )
+            try_modify_message_with_format(messages[-1], response_format)
+            request_config["response_format"] = {"type": "json_object"}
+            try:
+                return await self._async_client.beta.chat.completions.parse(
+                    messages=messages,
+                    model=self.model_type,
+                    **request_config,
+                )
+            except Exception as e:
+                logger.error(f"Fallback attempt also failed: {e}")
+                raise
 
     @property
     def token_counter(self) -> BaseTokenCounter:
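
This fallback is the substantive behavior change of the release: _request_parse and _arequest_parse no longer fail outright when strict structured parsing raises a ValidationError or JSONDecodeError; they rewrite the last message with format instructions, downgrade response_format to plain JSON-object mode, and retry once. A standalone sketch of that control flow, with illustrative names rather than camel's API:

    from json import JSONDecodeError

    from pydantic import ValidationError


    def parse_with_json_fallback(send_parse, downgrade_to_json_mode):
        # send_parse: zero-arg callable issuing the parse request.
        # downgrade_to_json_mode: zero-arg callable that appends format
        # instructions and relaxes response_format to json_object.
        try:
            return send_parse()
        except (ValidationError, JSONDecodeError):
            downgrade_to_json_mode()
            return send_parse()  # a second failure propagates to the caller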
camel/models/openrouter_model.py

@@ -12,29 +12,20 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Optional, Type, Union
-
-from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
-from pydantic import BaseModel
+from typing import Any, Dict, Optional, Union
 
 from camel.configs import OPENROUTER_API_PARAMS, OpenRouterConfig
-from camel.messages import OpenAIMessage
-from camel.models import BaseModelBackend
-from camel.models._utils import try_modify_message_with_format
-from camel.types import (
-    ChatCompletion,
-    ChatCompletionChunk,
-    ModelType,
-)
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
 from camel.utils import (
     BaseTokenCounter,
-    OpenAITokenCounter,
     api_keys_required,
 )
 
 
-class OpenRouterModel(BaseModelBackend):
-    r"""LLM API served by OpenRouter in a unified BaseModelBackend interface.
+class OpenRouterModel(OpenAICompatibleModel):
+    r"""LLM API served by OpenRouter in a unified OpenAICompatibleModel
+    interface.
 
     Args:
         model_type (Union[ModelType, str]): Model for which a backend is
@@ -75,114 +66,14 @@ class OpenRouterModel(BaseModelBackend):
         )
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter, timeout
-        )
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-        self._async_client = AsyncOpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        r"""Initialize the token counter for the model backend.
-
-        Returns:
-            BaseTokenCounter: The token counter following the model's
-                tokenization style.
-        """
-        if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-        return self._token_counter
-
-    def _prepare_request(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Dict[str, Any]:
-        request_config = self.model_config_dict.copy()
-        if tools:
-            request_config["tools"] = tools
-        elif response_format:
-            try_modify_message_with_format(messages[-1], response_format)
-            request_config["response_format"] = {"type": "json_object"}
-
-        return request_config
-
-    def _run(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of OpenAI chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-            response_format (Optional[Type[BaseModel]]): The format of the
-                response.
-            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
-                use for the request.
-
-        Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
-        """
-        request_config = self._prepare_request(
-            messages, response_format, tools
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
         )
 
-        response = self._client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-
-        return response
-
-    async def _arun(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-        r"""Runs inference of OpenRouter chat completion asynchronously.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-            response_format (Optional[Type[BaseModel]]): The format of the
-                response.
-            tools (Optional[List[Dict[str, Any]]]): The schema of the tools to
-                use for the request.
-
-        Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `AsyncStream[ChatCompletionChunk]` in the stream mode.
-        """
-        request_config = self._prepare_request(
-            messages, response_format, tools
-        )
-
-        response = await self._async_client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **request_config,
-        )
-
-        return response
-
     def check_model_config(self):
         r"""Check whether the model configuration contains any unexpected
         arguments to OpenRouter API. But OpenRouter API does not have any
@@ -198,13 +89,3 @@ class OpenRouterModel(BaseModelBackend):
                     f"Unexpected argument `{param}` is "
                     "input into OpenRouter model backend."
                 )
-
-    @property
-    def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode, which sends partial
-        results each time.
-
-        Returns:
-            bool: Whether the model is in stream mode.
-        """
-        return self.model_config_dict.get("stream", False)
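
OpenRouterModel is now a thin shell: __init__ resolves environment defaults and delegates to the base class, and only check_model_config stays provider-specific. A hypothetical provider following the same shape (the Example names and environment variables are invented for illustration):

    import os

    from camel.models.openai_compatible_model import OpenAICompatibleModel


    class ExampleModel(OpenAICompatibleModel):  # hypothetical provider
        def __init__(self, model_type, model_config_dict=None,
                     api_key=None, url=None, token_counter=None,
                     timeout=None):
            model_config_dict = model_config_dict or {}
            api_key = api_key or os.environ.get("EXAMPLE_API_KEY")
            url = url or os.environ.get("EXAMPLE_API_BASE_URL")
            timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
            super().__init__(
                model_type=model_type,
                model_config_dict=model_config_dict,
                api_key=api_key,
                url=url,
                token_counter=token_counter,
                timeout=timeout,
            )

        def check_model_config(self):
            # Validate model_config_dict against the provider's allowed
            # parameters, mirroring OpenRouterModel.check_model_config.
            pass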
camel/models/ppio_model.py

@@ -13,27 +13,18 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-from typing import Any, Dict, List, Optional, Type, Union
-
-from openai import AsyncOpenAI, AsyncStream, OpenAI, Stream
-from pydantic import BaseModel
+from typing import Any, Dict, Optional, Union
 
 from camel.configs import PPIO_API_PARAMS, PPIOConfig
-from camel.messages import OpenAIMessage
-from camel.models import BaseModelBackend
-from camel.types import (
-    ChatCompletion,
-    ChatCompletionChunk,
-    ModelType,
-)
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
 from camel.utils import (
     BaseTokenCounter,
-    OpenAITokenCounter,
     api_keys_required,
 )
 
 
-class PPIOModel(BaseModelBackend):
+class PPIOModel(OpenAICompatibleModel):
     r"""Constructor for PPIO backend with OpenAI compatibility.
 
     Args:
@@ -79,85 +70,15 @@ class PPIOModel(BaseModelBackend):
             "PPIO_API_BASE_URL", "https://api.ppinfra.com/v3/openai"
         )
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
-
         super().__init__(
-            model_type, model_config_dict, api_key, url, token_counter, timeout
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
        )
 
-        self._client = OpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-        self._async_client = AsyncOpenAI(
-            timeout=self._timeout,
-            max_retries=3,
-            api_key=self._api_key,
-            base_url=self._url,
-        )
-
-    async def _arun(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-        r"""Runs inference of OpenAI chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-
-        Returns:
-            Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `AsyncStream[ChatCompletionChunk]` in the stream mode.
-        """
-        response = await self._async_client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **self.model_config_dict,
-        )
-        return response
-
-    def _run(
-        self,
-        messages: List[OpenAIMessage],
-        response_format: Optional[Type[BaseModel]] = None,
-        tools: Optional[List[Dict[str, Any]]] = None,
-    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-        r"""Runs inference of OpenAI chat completion.
-
-        Args:
-            messages (List[OpenAIMessage]): Message list with the chat history
-                in OpenAI API format.
-
-        Returns:
-            Union[ChatCompletion, Stream[ChatCompletionChunk]]:
-                `ChatCompletion` in the non-stream mode, or
-                `Stream[ChatCompletionChunk]` in the stream mode.
-        """
-        response = self._client.chat.completions.create(
-            messages=messages,
-            model=self.model_type,
-            **self.model_config_dict,
-        )
-        return response
-
-    @property
-    def token_counter(self) -> BaseTokenCounter:
-        r"""Initialize the token counter for the model backend.
-
-        Returns:
-            OpenAITokenCounter: The token counter following the model's
-                tokenization style.
-        """
-
-        if not self._token_counter:
-            self._token_counter = OpenAITokenCounter(ModelType.GPT_4O_MINI)
-        return self._token_counter
-
     def check_model_config(self):
         r"""Check whether the model configuration contains any
         unexpected arguments to PPIO API.
@@ -172,13 +93,3 @@ class PPIOModel(BaseModelBackend):
                     f"Unexpected argument `{param}` is "
                     "input into PPIO model backend."
                 )
-
-    @property
-    def stream(self) -> bool:
-        r"""Returns whether the model is in stream mode, which sends partial
-        results each time.
-
-        Returns:
-            bool: Whether the model is in stream mode.
-        """
-        return self.model_config_dict.get('stream', False)
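
As with OllamaModel and OpenRouterModel above, the members deleted from PPIOModel (_run, _arun, token_counter, stream) are not lost: equivalent implementations now live once on OpenAICompatibleModel, so each backend's behavior is intended to be unchanged while roughly a hundred lines of duplication per provider are removed.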