camel-ai 0.2.48__py3-none-any.whl → 0.2.50__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of camel-ai might be problematic.

@@ -31,6 +31,8 @@ from camel.models.lmstudio_model import LMStudioModel
  from camel.models.mistral_model import MistralModel
  from camel.models.modelscope_model import ModelScopeModel
  from camel.models.moonshot_model import MoonshotModel
+ from camel.models.netmind_model import NetmindModel
+ from camel.models.novita_model import NovitaModel
  from camel.models.nvidia_model import NvidiaModel
  from camel.models.ollama_model import OllamaModel
  from camel.models.openai_compatible_model import OpenAICompatibleModel
@@ -46,6 +48,7 @@ from camel.models.stub_model import StubModel
  from camel.models.togetherai_model import TogetherAIModel
  from camel.models.vllm_model import VLLMModel
  from camel.models.volcano_model import VolcanoModel
+ from camel.models.watsonx_model import WatsonXModel
  from camel.models.yi_model import YiModel
  from camel.models.zhipuai_model import ZhipuAIModel
  from camel.types import ModelPlatformType, ModelType, UnifiedModelType
@@ -61,8 +64,8 @@ class ModelFactory:
 
      @staticmethod
      def create(
-         model_platform: ModelPlatformType,
-         model_type: Union[ModelType, str],
+         model_platform: Union[ModelPlatformType, str],
+         model_type: Union[ModelType, str, UnifiedModelType],
          model_config_dict: Optional[Dict] = None,
          token_counter: Optional[BaseTokenCounter] = None,
          api_key: Optional[str] = None,
@@ -72,10 +75,12 @@ class ModelFactory:
          r"""Creates an instance of `BaseModelBackend` of the specified type.
 
          Args:
-             model_platform (ModelPlatformType): Platform from which the model
-                 originates.
-             model_type (Union[ModelType, str]): Model for which a
-                 backend is created. Can be a `str` for open source platforms.
+             model_platform (Union[ModelPlatformType, str]): Platform from
+                 which the model originates. Can be a string or
+                 ModelPlatformType enum.
+             model_type (Union[ModelType, str, UnifiedModelType]): Model for
+                 which a backend is created. Can be a string, ModelType enum, or
+                 UnifiedModelType.
              model_config_dict (Optional[Dict]): A dictionary that will be fed
                  into the backend constructor. (default: :obj:`None`)
              token_counter (Optional[BaseTokenCounter], optional): Token
@@ -96,6 +101,21 @@
          Raises:
              ValueError: If there is no backend for the model.
          """
+         # Convert string to ModelPlatformType enum if needed
+         if isinstance(model_platform, str):
+             try:
+                 model_platform = ModelPlatformType(model_platform)
+             except ValueError:
+                 raise ValueError(f"Unknown model platform: {model_platform}")
+
+         # Convert string to ModelType enum or UnifiedModelType if needed
+         if isinstance(model_type, str):
+             try:
+                 model_type = ModelType(model_type)
+             except ValueError:
+                 # If not in ModelType, create a UnifiedModelType
+                 model_type = UnifiedModelType(model_type)
+
          model_class: Optional[Type[BaseModelBackend]] = None
          model_type = UnifiedModelType(model_type)
 
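
With the conversion above, `ModelFactory.create` now accepts plain strings in addition to enums. A minimal sketch of the new call pattern, assuming `OPENAI_API_KEY` is set; the platform and model names are illustrative, not taken from the diff:

```python
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

# Enum-based call (unchanged behavior).
model = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI,
    model_type=ModelType.GPT_4O_MINI,
)

# String-based call: "openai" is parsed into ModelPlatformType, and a model
# name missing from ModelType falls back to UnifiedModelType.
model = ModelFactory.create(
    model_platform="openai",
    model_type="gpt-4o-mini",
)
```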
@@ -123,6 +143,8 @@
              model_class = AIMLModel
          elif model_platform.is_volcano:
              model_class = VolcanoModel
+         elif model_platform.is_netmind:
+             model_class = NetmindModel
 
          elif model_platform.is_openai and model_type.is_openai:
              model_class = OpenAIModel
@@ -160,8 +182,12 @@
              model_class = MoonshotModel
          elif model_platform.is_modelscope:
              model_class = ModelScopeModel
+         elif model_platform.is_novita:
+             model_class = NovitaModel
          elif model_type == ModelType.STUB:
              model_class = StubModel
+         elif model_type.is_watsonx:
+             model_class = WatsonXModel
 
          if model_class is None:
              raise ValueError(
@@ -13,11 +13,19 @@
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
  import os
- from typing import Any, Dict, Optional, Union
+ import time
+ from typing import Any, Dict, List, Optional, Union
+
+ from openai import AsyncStream, Stream
 
  from camel.configs import MODELSCOPE_API_PARAMS, ModelScopeConfig
+ from camel.messages import OpenAIMessage
  from camel.models.openai_compatible_model import OpenAICompatibleModel
- from camel.types import ModelType
+ from camel.types import (
+     ChatCompletion,
+     ChatCompletionChunk,
+     ModelType,
+ )
  from camel.utils import (
      BaseTokenCounter,
      api_keys_required,
@@ -81,6 +89,171 @@ class ModelScopeModel(OpenAICompatibleModel):
              timeout=timeout,
          )
 
+     def _post_handle_response(
+         self, response: Union[ChatCompletion, Stream[ChatCompletionChunk]]
+     ) -> ChatCompletion:
+         r"""Handle reasoning content with <think> tags at the beginning."""
+         if not isinstance(response, Stream):
+             # Handle non-streaming response (existing logic)
+             if self.model_config_dict.get("extra_body", {}).get(
+                 "enable_thinking", False
+             ):
+                 reasoning_content = response.choices[
+                     0
+                 ].message.reasoning_content  # type: ignore[attr-defined]
+                 combined_content = (
+                     f"<think>\n{reasoning_content}\n</think>\n"
+                     if reasoning_content
+                     else ""
+                 )
+                 response_content = response.choices[0].message.content or ""
+                 combined_content += response_content
+
+                 # Construct a new ChatCompletion with combined content
+                 return ChatCompletion.construct(
+                     id=response.id,
+                     choices=[
+                         dict(
+                             finish_reason=response.choices[0].finish_reason,
+                             index=response.choices[0].index,
+                             logprobs=response.choices[0].logprobs,
+                             message=dict(
+                                 role=response.choices[0].message.role,
+                                 content=combined_content,
+                             ),
+                         )
+                     ],
+                     created=response.created,
+                     model=response.model,
+                     object="chat.completion",
+                     system_fingerprint=response.system_fingerprint,
+                     usage=response.usage,
+                 )
+             else:
+                 return response  # Return original if no thinking enabled
+
+         # Handle streaming response
+         accumulated_reasoning = ""
+         accumulated_content = ""
+         final_chunk = None
+         usage_data = None  # Initialize usage data
+         role = "assistant"  # Default role
+
+         for chunk in response:
+             final_chunk = chunk  # Keep track of the last chunk for metadata
+             if chunk.choices:
+                 delta = chunk.choices[0].delta
+                 if delta.role:
+                     role = delta.role  # Update role if provided
+                 if (
+                     hasattr(delta, 'reasoning_content')
+                     and delta.reasoning_content
+                 ):
+                     accumulated_reasoning += delta.reasoning_content
+                 if delta.content:
+                     accumulated_content += delta.content
+
+             if hasattr(chunk, 'usage') and chunk.usage:
+                 usage_data = chunk.usage
+
+         combined_content = (
+             f"<think>\n{accumulated_reasoning}\n</think>\n"
+             if accumulated_reasoning
+             else ""
+         ) + accumulated_content
+
+         # Construct the final ChatCompletion object from accumulated
+         # stream data
+         if final_chunk:
+             finish_reason = "stop"  # Default finish reason
+             logprobs = None
+             if final_chunk.choices:
+                 finish_reason = (
+                     final_chunk.choices[0].finish_reason or finish_reason
+                 )
+                 if hasattr(final_chunk.choices[0], 'logprobs'):
+                     logprobs = final_chunk.choices[0].logprobs
+
+             return ChatCompletion.construct(
+                 # Use data from the final chunk or defaults
+                 id=final_chunk.id
+                 if hasattr(final_chunk, 'id')
+                 else "streamed-completion",
+                 choices=[
+                     dict(
+                         finish_reason=finish_reason,
+                         index=0,
+                         logprobs=logprobs,
+                         message=dict(
+                             role=role,
+                             content=combined_content,
+                         ),
+                     )
+                 ],
+                 created=final_chunk.created
+                 if hasattr(final_chunk, 'created')
+                 else int(time.time()),
+                 model=final_chunk.model
+                 if hasattr(final_chunk, 'model')
+                 else self.model_type,
+                 object="chat.completion",
+                 system_fingerprint=final_chunk.system_fingerprint
+                 if hasattr(final_chunk, 'system_fingerprint')
+                 else None,
+                 usage=usage_data,
+             )
+         else:
+             # Handle cases where the stream was empty or invalid
+             return ChatCompletion.construct(
+                 id="empty-stream",
+                 choices=[
+                     dict(
+                         finish_reason="error",
+                         index=0,
+                         message=dict(role="assistant", content=""),
+                     )
+                 ],
+                 created=int(time.time()),
+                 model=self.model_type,
+                 object="chat.completion",
+                 usage=usage_data,
+             )
+
+     def _request_chat_completion(
+         self,
+         messages: List[OpenAIMessage],
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+         request_config = self.model_config_dict.copy()
+
+         if tools:
+             request_config["tools"] = tools
+
+         return self._post_handle_response(
+             self._client.chat.completions.create(
+                 messages=messages,
+                 model=self.model_type,
+                 **request_config,
+             )
+         )
+
+     async def _arequest_chat_completion(
+         self,
+         messages: List[OpenAIMessage],
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+         request_config = self.model_config_dict.copy()
+
+         if tools:
+             request_config["tools"] = tools
+
+         response = await self._async_client.chat.completions.create(
+             messages=messages,
+             model=self.model_type,
+             **request_config,
+         )
+         return self._post_handle_response(response)
+
      def check_model_config(self):
          r"""Check whether the model configuration contains any
          unexpected arguments to ModelScope API.
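
The `<think>` handling above only activates when `enable_thinking` is passed through `extra_body`, which the handler reads back out of `model_config_dict`. A hedged usage sketch; the model id is illustrative, the ModelScope API key is assumed to be set, and `stream`/`extra_body` are assumed to be accepted by `MODELSCOPE_API_PARAMS` (the handler reading `extra_body` suggests they are):

```python
from camel.models import ModelFactory
from camel.types import ModelPlatformType

# Streamed chunks are folded back into a single ChatCompletion whose content
# starts with <think>...</think> whenever reasoning tokens were emitted.
model = ModelFactory.create(
    model_platform=ModelPlatformType.MODELSCOPE,
    model_type="Qwen/QwQ-32B",  # illustrative id
    model_config_dict={
        "stream": True,
        "extra_body": {"enable_thinking": True},
    },
)
```

Note that `_post_handle_response` only iterates synchronous `Stream` objects; `AsyncStream` responses from `_arequest_chat_completion` are not accumulated by this loop.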
@@ -0,0 +1,96 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+ import os
+ from typing import Any, Dict, Optional, Union
+
+ from camel.configs import NETMIND_API_PARAMS, NetmindConfig
+ from camel.models.openai_compatible_model import OpenAICompatibleModel
+ from camel.types import ModelType
+ from camel.utils import (
+     BaseTokenCounter,
+     api_keys_required,
+ )
+
+
+ class NetmindModel(OpenAICompatibleModel):
+     r"""Constructor for Netmind backend with OpenAI compatibility.
+
+     Args:
+         model_type (Union[ModelType, str]): Model for which a backend is
+             created; supported models can be found here:
+             https://www.netmind.ai/modelsLibrary?expandList=Chat
+         model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+             that will be fed into :obj:`openai.ChatCompletion.create()`. If
+             :obj:`None`, :obj:`NetmindConfig().as_dict()` will be used.
+             (default: :obj:`None`)
+         api_key (Optional[str], optional): The API key for authenticating with
+             the Netmind service. (default: :obj:`None`)
+         url (Optional[str], optional): The url to the Netmind service.
+             If not provided, "https://api.netmind.ai/inference-api/openai/v1"
+             will be used. (default: :obj:`None`)
+         token_counter (Optional[BaseTokenCounter], optional): Token counter to
+             use for the model. If not provided, :obj:`OpenAITokenCounter(
+             ModelType.GPT_4O_MINI)` will be used.
+         timeout (Optional[float], optional): The timeout value in seconds for
+             API calls. If not provided, will fall back to the MODEL_TIMEOUT
+             environment variable or default to 180 seconds.
+             (default: :obj:`None`)
+     """
+
+     @api_keys_required(
+         [
+             ("api_key", 'NETMIND_API_KEY'),
+         ]
+     )
+     def __init__(
+         self,
+         model_type: Union[ModelType, str],
+         model_config_dict: Optional[Dict[str, Any]] = None,
+         api_key: Optional[str] = None,
+         url: Optional[str] = None,
+         token_counter: Optional[BaseTokenCounter] = None,
+         timeout: Optional[float] = None,
+     ) -> None:
+         if model_config_dict is None:
+             model_config_dict = NetmindConfig().as_dict()
+         api_key = api_key or os.environ.get("NETMIND_API_KEY")
+         url = url or os.environ.get(
+             "NETMIND_API_BASE_URL",
+             "https://api.netmind.ai/inference-api/openai/v1",
+         )
+         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+         super().__init__(
+             model_type=model_type,
+             model_config_dict=model_config_dict,
+             api_key=api_key,
+             url=url,
+             token_counter=token_counter,
+             timeout=timeout,
+         )
+
+     def check_model_config(self):
+         r"""Check whether the model configuration contains any
+         unexpected arguments to NETMIND API.
+
+         Raises:
+             ValueError: If the model configuration dictionary contains any
+             unexpected arguments to NETMIND API.
+         """
+         for param in self.model_config_dict:
+             if param not in NETMIND_API_PARAMS:
+                 raise ValueError(
+                     f"Unexpected argument `{param}` is "
+                     "input into NETMIND model backend."
+                 )
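
A minimal usage sketch for the new backend. `NETMIND_API_KEY` must be set (enforced by `@api_keys_required`); the model id is illustrative, and a `NETMIND` member on `ModelPlatformType` is assumed from the `is_netmind` dispatch above:

```python
from camel.models import ModelFactory
from camel.types import ModelPlatformType

# Routed to NetmindModel via the new `is_netmind` branch in ModelFactory.
model = ModelFactory.create(
    model_platform=ModelPlatformType.NETMIND,
    model_type="meta-llama/Llama-3.3-70B-Instruct",  # illustrative id
)
response = model.run([{"role": "user", "content": "Hello"}])
```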
@@ -0,0 +1,95 @@
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+ import os
+ from typing import Any, Dict, Optional, Union
+
+ from camel.configs import NOVITA_API_PARAMS, NovitaConfig
+ from camel.models.openai_compatible_model import OpenAICompatibleModel
+ from camel.types import ModelType
+ from camel.utils import (
+     BaseTokenCounter,
+     api_keys_required,
+ )
+
+
+ class NovitaModel(OpenAICompatibleModel):
+     r"""Constructor for Novita backend with OpenAI compatibility.
+
+     Args:
+         model_type (Union[ModelType, str]): Model for which a backend is
+             created; supported models can be found here:
+             https://novita.ai/models?utm_source=github_owl&utm_campaign=github_link
+         model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+             that will be fed into :obj:`openai.ChatCompletion.create()`. If
+             :obj:`None`, :obj:`NovitaConfig().as_dict()` will be used.
+             (default: :obj:`None`)
+         api_key (Optional[str], optional): The API key for authenticating with
+             the Novita service. (default: :obj:`None`)
+         url (Optional[str], optional): The url to the Novita service.
+             If not provided, "https://api.novita.ai/v3/openai" will be used.
+             (default: :obj:`None`)
+         token_counter (Optional[BaseTokenCounter], optional): Token counter to
+             use for the model. If not provided, :obj:`OpenAITokenCounter(
+             ModelType.GPT_4O_MINI)` will be used.
+         timeout (Optional[float], optional): The timeout value in seconds for
+             API calls. If not provided, will fall back to the MODEL_TIMEOUT
+             environment variable or default to 180 seconds.
+             (default: :obj:`None`)
+     """
+
+     @api_keys_required(
+         [
+             ("api_key", 'NOVITA_API_KEY'),
+         ]
+     )
+     def __init__(
+         self,
+         model_type: Union[ModelType, str],
+         model_config_dict: Optional[Dict[str, Any]] = None,
+         api_key: Optional[str] = None,
+         url: Optional[str] = None,
+         token_counter: Optional[BaseTokenCounter] = None,
+         timeout: Optional[float] = None,
+     ) -> None:
+         if model_config_dict is None:
+             model_config_dict = NovitaConfig().as_dict()
+         api_key = api_key or os.environ.get("NOVITA_API_KEY")
+         url = url or os.environ.get(
+             "NOVITA_API_BASE_URL", "https://api.novita.ai/v3/openai"
+         )
+         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+         super().__init__(
+             model_type=model_type,
+             model_config_dict=model_config_dict,
+             api_key=api_key,
+             url=url,
+             token_counter=token_counter,
+             timeout=timeout,
+         )
+
+     def check_model_config(self):
+         r"""Check whether the model configuration contains any
+         unexpected arguments to Novita API.
+
+         Raises:
+             ValueError: If the model configuration dictionary contains any
+             unexpected arguments to Novita API.
+         """
+         for param in self.model_config_dict:
+             if param not in NOVITA_API_PARAMS:
+                 raise ValueError(
+                     f"Unexpected argument `{param}` is "
+                     "input into Novita model backend."
+                 )
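
The same pattern applies to Novita; a sketch using direct construction rather than the factory (model id illustrative):

```python
from camel.models.novita_model import NovitaModel

# api_key falls back to the NOVITA_API_KEY environment variable, and url to
# NOVITA_API_BASE_URL or the documented default endpoint.
model = NovitaModel(model_type="deepseek/deepseek-r1")  # illustrative id
response = model.run([{"role": "user", "content": "Hello"}])
```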
@@ -13,11 +13,19 @@
  # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
  import os
- from typing import Any, Dict, Optional, Union
+ import time
+ from typing import Any, Dict, List, Optional, Union
+
+ from openai import AsyncStream, Stream
 
  from camel.configs import QWEN_API_PARAMS, QwenConfig
+ from camel.messages import OpenAIMessage
  from camel.models.openai_compatible_model import OpenAICompatibleModel
- from camel.types import ModelType
+ from camel.types import (
+     ChatCompletion,
+     ChatCompletionChunk,
+     ModelType,
+ )
  from camel.utils import (
      BaseTokenCounter,
      api_keys_required,
@@ -79,6 +87,171 @@ class QwenModel(OpenAICompatibleModel):
              timeout=timeout,
          )
 
+     def _post_handle_response(
+         self, response: Union[ChatCompletion, Stream[ChatCompletionChunk]]
+     ) -> ChatCompletion:
+         r"""Handle reasoning content with <think> tags at the beginning."""
+         if not isinstance(response, Stream):
+             # Handle non-streaming response (existing logic)
+             if self.model_config_dict.get("extra_body", {}).get(
+                 "enable_thinking", False
+             ):
+                 reasoning_content = response.choices[
+                     0
+                 ].message.reasoning_content  # type: ignore[attr-defined]
+                 combined_content = (
+                     f"<think>\n{reasoning_content}\n</think>\n"
+                     if reasoning_content
+                     else ""
+                 )
+                 response_content = response.choices[0].message.content or ""
+                 combined_content += response_content
+
+                 # Construct a new ChatCompletion with combined content
+                 return ChatCompletion.construct(
+                     id=response.id,
+                     choices=[
+                         dict(
+                             finish_reason=response.choices[0].finish_reason,
+                             index=response.choices[0].index,
+                             logprobs=response.choices[0].logprobs,
+                             message=dict(
+                                 role=response.choices[0].message.role,
+                                 content=combined_content,
+                             ),
+                         )
+                     ],
+                     created=response.created,
+                     model=response.model,
+                     object="chat.completion",
+                     system_fingerprint=response.system_fingerprint,
+                     usage=response.usage,
+                 )
+             else:
+                 return response  # Return original if no thinking enabled
+
+         # Handle streaming response
+         accumulated_reasoning = ""
+         accumulated_content = ""
+         final_chunk = None
+         usage_data = None  # Initialize usage data
+         role = "assistant"  # Default role
+
+         for chunk in response:
+             final_chunk = chunk  # Keep track of the last chunk for metadata
+             if chunk.choices:
+                 delta = chunk.choices[0].delta
+                 if delta.role:
+                     role = delta.role  # Update role if provided
+                 if (
+                     hasattr(delta, 'reasoning_content')
+                     and delta.reasoning_content
+                 ):
+                     accumulated_reasoning += delta.reasoning_content
+                 if delta.content:
+                     accumulated_content += delta.content
+
+             if hasattr(chunk, 'usage') and chunk.usage:
+                 usage_data = chunk.usage
+
+         combined_content = (
+             f"<think>\n{accumulated_reasoning}\n</think>\n"
+             if accumulated_reasoning
+             else ""
+         ) + accumulated_content
+
+         # Construct the final ChatCompletion object from accumulated
+         # stream data
+         if final_chunk:
+             finish_reason = "stop"  # Default finish reason
+             logprobs = None
+             if final_chunk.choices:
+                 finish_reason = (
+                     final_chunk.choices[0].finish_reason or finish_reason
+                 )
+                 if hasattr(final_chunk.choices[0], 'logprobs'):
+                     logprobs = final_chunk.choices[0].logprobs
+
+             return ChatCompletion.construct(
+                 # Use data from the final chunk or defaults
+                 id=final_chunk.id
+                 if hasattr(final_chunk, 'id')
+                 else "streamed-completion",
+                 choices=[
+                     dict(
+                         finish_reason=finish_reason,
+                         index=0,
+                         logprobs=logprobs,
+                         message=dict(
+                             role=role,
+                             content=combined_content,
+                         ),
+                     )
+                 ],
+                 created=final_chunk.created
+                 if hasattr(final_chunk, 'created')
+                 else int(time.time()),
+                 model=final_chunk.model
+                 if hasattr(final_chunk, 'model')
+                 else self.model_type,
+                 object="chat.completion",
+                 system_fingerprint=final_chunk.system_fingerprint
+                 if hasattr(final_chunk, 'system_fingerprint')
+                 else None,
+                 usage=usage_data,
+             )
+         else:
+             # Handle cases where the stream was empty or invalid
+             return ChatCompletion.construct(
+                 id="empty-stream",
+                 choices=[
+                     dict(
+                         finish_reason="error",
+                         index=0,
+                         message=dict(role="assistant", content=""),
+                     )
+                 ],
+                 created=int(time.time()),
+                 model=self.model_type,
+                 object="chat.completion",
+                 usage=usage_data,
+             )
+
+     def _request_chat_completion(
+         self,
+         messages: List[OpenAIMessage],
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+         request_config = self.model_config_dict.copy()
+
+         if tools:
+             request_config["tools"] = tools
+
+         return self._post_handle_response(
+             self._client.chat.completions.create(
+                 messages=messages,
+                 model=self.model_type,
+                 **request_config,
+             )
+         )
+
+     async def _arequest_chat_completion(
+         self,
+         messages: List[OpenAIMessage],
+         tools: Optional[List[Dict[str, Any]]] = None,
+     ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+         request_config = self.model_config_dict.copy()
+
+         if tools:
+             request_config["tools"] = tools
+
+         response = await self._async_client.chat.completions.create(
+             messages=messages,
+             model=self.model_type,
+             **request_config,
+         )
+         return self._post_handle_response(response)
+
      def check_model_config(self):
          r"""Check whether the model configuration contains any
          unexpected arguments to Qwen API.
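
QwenModel receives the identical `<think>` post-processing as ModelScopeModel above. A hedged sketch mirroring the earlier example; the model id is illustrative, `QWEN_API_KEY` is assumed set, and `stream`/`extra_body` are assumed present in `QWEN_API_PARAMS`:

```python
from camel.models import ModelFactory
from camel.types import ModelPlatformType

model = ModelFactory.create(
    model_platform=ModelPlatformType.QWEN,
    model_type="qwen3-32b",  # illustrative id
    model_config_dict={
        "stream": True,
        "extra_body": {"enable_thinking": True},
    },
)
# Reasoning tokens arrive as `reasoning_content` deltas and are re-emitted
# inside <think>...</think> at the start of the final message content.
response = model.run([{"role": "user", "content": "Why is the sky blue?"}])
```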