camel-ai 0.2.49__py3-none-any.whl → 0.2.51__py3-none-any.whl
This diff shows the contents of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +159 -15
- camel/configs/__init__.py +6 -0
- camel/configs/modelscope_config.py +4 -1
- camel/configs/novita_config.py +102 -0
- camel/configs/qwen_config.py +0 -7
- camel/configs/watsonx_config.py +96 -0
- camel/environments/single_step.py +79 -11
- camel/models/__init__.py +4 -0
- camel/models/azure_openai_model.py +27 -9
- camel/models/model_factory.py +29 -6
- camel/models/modelscope_model.py +175 -2
- camel/models/novita_model.py +95 -0
- camel/models/ollama_model.py +15 -10
- camel/models/qwen_model.py +175 -2
- camel/models/vllm_model.py +15 -9
- camel/models/watsonx_model.py +253 -0
- camel/societies/workforce/prompts.py +31 -4
- camel/societies/workforce/workforce.py +1 -1
- camel/toolkits/browser_toolkit.py +53 -55
- camel/types/enums.py +226 -1
- camel/types/unified_model_type.py +10 -0
- camel/utils/__init__.py +2 -0
- camel/utils/filename.py +80 -0
- camel/verifiers/__init__.py +2 -0
- camel/verifiers/physics_verifier.py +881 -0
- camel/verifiers/python_verifier.py +16 -31
- {camel_ai-0.2.49.dist-info → camel_ai-0.2.51.dist-info}/METADATA +4 -1
- {camel_ai-0.2.49.dist-info → camel_ai-0.2.51.dist-info}/RECORD +31 -25
- {camel_ai-0.2.49.dist-info → camel_ai-0.2.51.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.49.dist-info → camel_ai-0.2.51.dist-info}/licenses/LICENSE +0 -0
camel/models/azure_openai_model.py
CHANGED

@@ -12,7 +12,7 @@
 # limitations under the License.
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
-from typing import Any, Dict, List, Optional, Type, Union
+from typing import Any, Callable, Dict, List, Optional, Type, Union
 
 from openai import AsyncAzureOpenAI, AsyncStream, AzureOpenAI, Stream
 from pydantic import BaseModel

@@ -27,6 +27,8 @@ from camel.types import (
 )
 from camel.utils import BaseTokenCounter, OpenAITokenCounter
 
+AzureADTokenProvider = Callable[[], str]
+
 
 class AzureOpenAIModel(BaseModelBackend):
     r"""Azure OpenAI API in a unified BaseModelBackend interface.
@@ -46,6 +48,12 @@ class AzureOpenAIModel(BaseModelBackend):
             (default: :obj:`None`)
         azure_deployment_name (Optional[str], optional): The deployment name
             you chose when you deployed an azure model. (default: :obj:`None`)
+        azure_ad_token (Optional[str], optional): Your Azure Active Directory
+            token, https://www.microsoft.com/en-us/security/business/
+            identity-access/microsoft-entra-id. (default: :obj:`None`)
+        azure_ad_token_provider (Optional[AzureADTokenProvider], optional): A
+            function that returns an Azure Active Directory token, will be
+            invoked on every request. (default: :obj:`None`)
         token_counter (Optional[BaseTokenCounter], optional): Token counter to
             use for the model. If not provided, :obj:`OpenAITokenCounter`
             will be used. (default: :obj:`None`)

@@ -68,6 +76,8 @@ class AzureOpenAIModel(BaseModelBackend):
         token_counter: Optional[BaseTokenCounter] = None,
         api_version: Optional[str] = None,
         azure_deployment_name: Optional[str] = None,
+        azure_ad_token_provider: Optional["AzureADTokenProvider"] = None,
+        azure_ad_token: Optional[str] = None,
     ) -> None:
         if model_config_dict is None:
             model_config_dict = ChatGPTConfig().as_dict()
@@ -79,15 +89,19 @@ class AzureOpenAIModel(BaseModelBackend):
         )
 
         self.api_version = api_version or os.environ.get("AZURE_API_VERSION")
-        self.azure_deployment_name = azure_deployment_name or os.environ.get(
+        self._azure_deployment_name = azure_deployment_name or os.environ.get(
             "AZURE_DEPLOYMENT_NAME"
         )
+        self._azure_ad_token = azure_ad_token or os.environ.get(
+            "AZURE_AD_TOKEN"
+        )
+        self.azure_ad_token_provider = azure_ad_token_provider
         if self.api_version is None:
             raise ValueError(
                 "Must provide either the `api_version` argument "
                 "or `AZURE_API_VERSION` environment variable."
             )
-        if self.azure_deployment_name is None:
+        if self._azure_deployment_name is None:
             raise ValueError(
                 "Must provide either the `azure_deployment_name` argument "
                 "or `AZURE_DEPLOYMENT_NAME` environment variable."

@@ -95,18 +109,22 @@ class AzureOpenAIModel(BaseModelBackend):
 
         self._client = AzureOpenAI(
             azure_endpoint=str(self._url),
-            azure_deployment=self.azure_deployment_name,
+            azure_deployment=self._azure_deployment_name,
             api_version=self.api_version,
             api_key=self._api_key,
+            azure_ad_token=self._azure_ad_token,
+            azure_ad_token_provider=self.azure_ad_token_provider,
             timeout=self._timeout,
             max_retries=3,
         )
 
         self._async_client = AsyncAzureOpenAI(
             azure_endpoint=str(self._url),
-            azure_deployment=self.azure_deployment_name,
+            azure_deployment=self._azure_deployment_name,
             api_version=self.api_version,
             api_key=self._api_key,
+            azure_ad_token=self._azure_ad_token,
+            azure_ad_token_provider=self.azure_ad_token_provider,
             timeout=self._timeout,
             max_retries=3,
         )
@@ -193,7 +211,7 @@ class AzureOpenAIModel(BaseModelBackend):
 
         return self._client.chat.completions.create(
             messages=messages,
-            model=self.azure_deployment_name,  # type:ignore[arg-type]
+            model=self._azure_deployment_name,  # type:ignore[arg-type]
             **request_config,
         )
 

@@ -209,7 +227,7 @@ class AzureOpenAIModel(BaseModelBackend):
 
         return await self._async_client.chat.completions.create(
             messages=messages,
-            model=self.azure_deployment_name,  # type:ignore[arg-type]
+            model=self._azure_deployment_name,  # type:ignore[arg-type]
             **request_config,
         )
 

@@ -232,7 +250,7 @@ class AzureOpenAIModel(BaseModelBackend):
 
         return self._client.beta.chat.completions.parse(
             messages=messages,
-            model=self.azure_deployment_name,  # type:ignore[arg-type]
+            model=self._azure_deployment_name,  # type:ignore[arg-type]
             **request_config,
         )
 

@@ -255,7 +273,7 @@ class AzureOpenAIModel(BaseModelBackend):
 
         return await self._async_client.beta.chat.completions.parse(
             messages=messages,
-            model=self.azure_deployment_name,  # type:ignore[arg-type]
+            model=self._azure_deployment_name,  # type:ignore[arg-type]
             **request_config,
         )
 
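For context, a minimal usage sketch of the new Entra ID path. Everything not shown in the hunks above is an assumption: the `azure-identity` package, the Cognitive Services scope string, and the endpoint, deployment, and version values are illustrative placeholders.

```python
# Sketch only: Entra ID (Azure AD) auth for AzureOpenAIModel without an API
# key. `azure-identity` and the scope string are assumptions, not part of
# this diff; endpoint/deployment/version values are placeholders.
from azure.identity import DefaultAzureCredential, get_bearer_token_provider

from camel.models import AzureOpenAIModel
from camel.types import ModelType

# Returns a Callable[[], str], matching the new AzureADTokenProvider alias;
# per the docstring above, it is invoked on every request.
token_provider = get_bearer_token_provider(
    DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
)

model = AzureOpenAIModel(
    model_type=ModelType.GPT_4O_MINI,
    url="https://my-resource.openai.azure.com",  # placeholder endpoint
    api_version="2024-06-01",  # or the AZURE_API_VERSION env var
    azure_deployment_name="my-deployment",  # or AZURE_DEPLOYMENT_NAME
    azure_ad_token_provider=token_provider,
)
```

A static token can be passed instead via `azure_ad_token=` or the new `AZURE_AD_TOKEN` environment variable.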
camel/models/model_factory.py
CHANGED
@@ -32,6 +32,7 @@ from camel.models.mistral_model import MistralModel
 from camel.models.modelscope_model import ModelScopeModel
 from camel.models.moonshot_model import MoonshotModel
 from camel.models.netmind_model import NetmindModel
+from camel.models.novita_model import NovitaModel
 from camel.models.nvidia_model import NvidiaModel
 from camel.models.ollama_model import OllamaModel
 from camel.models.openai_compatible_model import OpenAICompatibleModel

@@ -47,6 +48,7 @@ from camel.models.stub_model import StubModel
 from camel.models.togetherai_model import TogetherAIModel
 from camel.models.vllm_model import VLLMModel
 from camel.models.volcano_model import VolcanoModel
+from camel.models.watsonx_model import WatsonXModel
 from camel.models.yi_model import YiModel
 from camel.models.zhipuai_model import ZhipuAIModel
 from camel.types import ModelPlatformType, ModelType, UnifiedModelType
@@ -62,8 +64,8 @@ class ModelFactory:
 
     @staticmethod
     def create(
-        model_platform: ModelPlatformType,
-        model_type: Union[ModelType, str],
+        model_platform: Union[ModelPlatformType, str],
+        model_type: Union[ModelType, str, UnifiedModelType],
         model_config_dict: Optional[Dict] = None,
         token_counter: Optional[BaseTokenCounter] = None,
         api_key: Optional[str] = None,

@@ -73,10 +75,12 @@ class ModelFactory:
         r"""Creates an instance of `BaseModelBackend` of the specified type.
 
         Args:
-            model_platform (ModelPlatformType): Platform from which the model
-                originates.
-            model_type (Union[ModelType, str]): Model for which a backend is
-                created.
+            model_platform (Union[ModelPlatformType, str]): Platform from
+                which the model originates. Can be a string or
+                ModelPlatformType enum.
+            model_type (Union[ModelType, str, UnifiedModelType]): Model for
+                which a backend is created. Can be a string, ModelType enum, or
+                UnifiedModelType.
             model_config_dict (Optional[Dict]): A dictionary that will be fed
                 into the backend constructor. (default: :obj:`None`)
             token_counter (Optional[BaseTokenCounter], optional): Token
@@ -97,6 +101,21 @@ class ModelFactory:
         Raises:
             ValueError: If there is no backend for the model.
         """
+        # Convert string to ModelPlatformType enum if needed
+        if isinstance(model_platform, str):
+            try:
+                model_platform = ModelPlatformType(model_platform)
+            except ValueError:
+                raise ValueError(f"Unknown model platform: {model_platform}")
+
+        # Convert string to ModelType enum or UnifiedModelType if needed
+        if isinstance(model_type, str):
+            try:
+                model_type = ModelType(model_type)
+            except ValueError:
+                # If not in ModelType, create a UnifiedModelType
+                model_type = UnifiedModelType(model_type)
+
         model_class: Optional[Type[BaseModelBackend]] = None
         model_type = UnifiedModelType(model_type)

@@ -163,8 +182,12 @@ class ModelFactory:
             model_class = MoonshotModel
         elif model_platform.is_modelscope:
             model_class = ModelScopeModel
+        elif model_platform.is_novita:
+            model_class = NovitaModel
         elif model_type == ModelType.STUB:
             model_class = StubModel
+        elif model_type.is_watsonx:
+            model_class = WatsonXModel
 
         if model_class is None:
             raise ValueError(
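A short sketch of what the widened `create` signature enables; the strings below are the standard enum values for the OpenAI platform and are used only as an example.

```python
# Sketch: ModelFactory.create now accepts plain strings for both arguments.
from camel.models import ModelFactory

# Strings matching enum values are converted to the enums.
model = ModelFactory.create(
    model_platform="openai",  # -> ModelPlatformType("openai")
    model_type="gpt-4o-mini",  # -> ModelType("gpt-4o-mini")
)

# A model-type string with no matching ModelType member is wrapped in
# UnifiedModelType rather than rejected; an unknown platform string still
# raises ValueError("Unknown model platform: ...").
```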
camel/models/modelscope_model.py
CHANGED
@@ -13,11 +13,19 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 
 import os
-from typing import Any, Dict, Optional, Union
+import time
+from typing import Any, Dict, List, Optional, Union
+
+from openai import AsyncStream, Stream
 
 from camel.configs import MODELSCOPE_API_PARAMS, ModelScopeConfig
+from camel.messages import OpenAIMessage
 from camel.models.openai_compatible_model import OpenAICompatibleModel
-from camel.types import ModelType
+from camel.types import (
+    ChatCompletion,
+    ChatCompletionChunk,
+    ModelType,
+)
 from camel.utils import (
     BaseTokenCounter,
     api_keys_required,
@@ -81,6 +89,171 @@ class ModelScopeModel(OpenAICompatibleModel):
             timeout=timeout,
         )
 
+    def _post_handle_response(
+        self, response: Union[ChatCompletion, Stream[ChatCompletionChunk]]
+    ) -> ChatCompletion:
+        r"""Handle reasoning content with <think> tags at the beginning."""
+        if not isinstance(response, Stream):
+            # Handle non-streaming response (existing logic)
+            if self.model_config_dict.get("extra_body", {}).get(
+                "enable_thinking", False
+            ):
+                reasoning_content = response.choices[
+                    0
+                ].message.reasoning_content  # type: ignore[attr-defined]
+                combined_content = (
+                    f"<think>\n{reasoning_content}\n</think>\n"
+                    if reasoning_content
+                    else ""
+                )
+                response_content = response.choices[0].message.content or ""
+                combined_content += response_content
+
+                # Construct a new ChatCompletion with combined content
+                return ChatCompletion.construct(
+                    id=response.id,
+                    choices=[
+                        dict(
+                            finish_reason=response.choices[0].finish_reason,
+                            index=response.choices[0].index,
+                            logprobs=response.choices[0].logprobs,
+                            message=dict(
+                                role=response.choices[0].message.role,
+                                content=combined_content,
+                            ),
+                        )
+                    ],
+                    created=response.created,
+                    model=response.model,
+                    object="chat.completion",
+                    system_fingerprint=response.system_fingerprint,
+                    usage=response.usage,
+                )
+            else:
+                return response  # Return original if no thinking enabled
+
+        # Handle streaming response
+        accumulated_reasoning = ""
+        accumulated_content = ""
+        final_chunk = None
+        usage_data = None  # Initialize usage data
+        role = "assistant"  # Default role
+
+        for chunk in response:
+            final_chunk = chunk  # Keep track of the last chunk for metadata
+            if chunk.choices:
+                delta = chunk.choices[0].delta
+                if delta.role:
+                    role = delta.role  # Update role if provided
+                if (
+                    hasattr(delta, 'reasoning_content')
+                    and delta.reasoning_content
+                ):
+                    accumulated_reasoning += delta.reasoning_content
+                if delta.content:
+                    accumulated_content += delta.content
+
+            if hasattr(chunk, 'usage') and chunk.usage:
+                usage_data = chunk.usage
+
+        combined_content = (
+            f"<think>\n{accumulated_reasoning}\n</think>\n"
+            if accumulated_reasoning
+            else ""
+        ) + accumulated_content
+
+        # Construct the final ChatCompletion object from accumulated
+        # stream data
+        if final_chunk:
+            finish_reason = "stop"  # Default finish reason
+            logprobs = None
+            if final_chunk.choices:
+                finish_reason = (
+                    final_chunk.choices[0].finish_reason or finish_reason
+                )
+                if hasattr(final_chunk.choices[0], 'logprobs'):
+                    logprobs = final_chunk.choices[0].logprobs
+
+            return ChatCompletion.construct(
+                # Use data from the final chunk or defaults
+                id=final_chunk.id
+                if hasattr(final_chunk, 'id')
+                else "streamed-completion",
+                choices=[
+                    dict(
+                        finish_reason=finish_reason,
+                        index=0,
+                        logprobs=logprobs,
+                        message=dict(
+                            role=role,
+                            content=combined_content,
+                        ),
+                    )
+                ],
+                created=final_chunk.created
+                if hasattr(final_chunk, 'created')
+                else int(time.time()),
+                model=final_chunk.model
+                if hasattr(final_chunk, 'model')
+                else self.model_type,
+                object="chat.completion",
+                system_fingerprint=final_chunk.system_fingerprint
+                if hasattr(final_chunk, 'system_fingerprint')
+                else None,
+                usage=usage_data,
+            )
+        else:
+            # Handle cases where the stream was empty or invalid
+            return ChatCompletion.construct(
+                id="empty-stream",
+                choices=[
+                    dict(
+                        finish_reason="error",
+                        index=0,
+                        message=dict(role="assistant", content=""),
+                    )
+                ],
+                created=int(time.time()),
+                model=self.model_type,
+                object="chat.completion",
+                usage=usage_data,
+            )
+
+    def _request_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, Stream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
+
+        if tools:
+            request_config["tools"] = tools
+
+        return self._post_handle_response(
+            self._client.chat.completions.create(
+                messages=messages,
+                model=self.model_type,
+                **request_config,
+            )
+        )
+
+    async def _arequest_chat_completion(
+        self,
+        messages: List[OpenAIMessage],
+        tools: Optional[List[Dict[str, Any]]] = None,
+    ) -> Union[ChatCompletion, AsyncStream[ChatCompletionChunk]]:
+        request_config = self.model_config_dict.copy()
+
+        if tools:
+            request_config["tools"] = tools
+
+        response = await self._async_client.chat.completions.create(
+            messages=messages,
+            model=self.model_type,
+            **request_config,
+        )
+        return self._post_handle_response(response)
+
     def check_model_config(self):
         r"""Check whether the model configuration contains any
         unexpected arguments to ModelScope API.
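The effect of the new `_post_handle_response`: when `extra_body.enable_thinking` is set, the model's `reasoning_content` is folded back into the message as a leading `<think>` block, and streamed responses are re-assembled into a single `ChatCompletion`. A hedged usage sketch; the model id and any config fields other than `extra_body` are assumptions:

```python
# Sketch: enable ModelScope "thinking" so reasoning arrives inline.
from camel.models import ModelFactory
from camel.types import ModelPlatformType

model = ModelFactory.create(
    model_platform=ModelPlatformType.MODELSCOPE,
    model_type="Qwen/Qwen3-32B",  # illustrative ModelScope model id
    model_config_dict={
        "stream": True,  # streamed chunks are re-assembled by the backend
        "extra_body": {"enable_thinking": True},
    },
)
# The response then carries one message whose content looks like:
#   <think>\n...reasoning...\n</think>\n...final answer...
```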
camel/models/novita_model.py
ADDED

@@ -0,0 +1,95 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import os
+from typing import Any, Dict, Optional, Union
+
+from camel.configs import NOVITA_API_PARAMS, NovitaConfig
+from camel.models.openai_compatible_model import OpenAICompatibleModel
+from camel.types import ModelType
+from camel.utils import (
+    BaseTokenCounter,
+    api_keys_required,
+)
+
+
+class NovitaModel(OpenAICompatibleModel):
+    r"""Constructor for Novita backend with OpenAI compatibility.
+
+    Args:
+        model_type (Union[ModelType, str]): Model for which a backend is
+            created, supported model can be found here:
+            https://novita.ai/models?utm_source=github_owl&utm_campaign=github_link
+        model_config_dict (Optional[Dict[str, Any]], optional): A dictionary
+            that will be fed into:obj:`openai.ChatCompletion.create()`. If
+            :obj:`None`, :obj:`NovitaConfig().as_dict()` will be used.
+            (default: :obj:`None`)
+        api_key (Optional[str], optional): The API key for authenticating with
+            the Novita service. (default: :obj:`None`)
+        url (Optional[str], optional): The url to the Novita service.
+            If not provided, "https://api.novita.ai/v3/openai" will be used.
+            (default: :obj:`None`)
+        token_counter (Optional[BaseTokenCounter], optional): Token counter to
+            use for the model. If not provided, :obj:`OpenAITokenCounter(
+            ModelType.GPT_4O_MINI)` will be used.
+        timeout (Optional[float], optional): The timeout value in seconds for
+            API calls. If not provided, will fall back to the MODEL_TIMEOUT
+            environment variable or default to 180 seconds.
+            (default: :obj:`None`)
+    """
+
+    @api_keys_required(
+        [
+            ("api_key", 'NOVITA_API_KEY'),
+        ]
+    )
+    def __init__(
+        self,
+        model_type: Union[ModelType, str],
+        model_config_dict: Optional[Dict[str, Any]] = None,
+        api_key: Optional[str] = None,
+        url: Optional[str] = None,
+        token_counter: Optional[BaseTokenCounter] = None,
+        timeout: Optional[float] = None,
+    ) -> None:
+        if model_config_dict is None:
+            model_config_dict = NovitaConfig().as_dict()
+        api_key = api_key or os.environ.get("NOVITA_API_KEY")
+        url = url or os.environ.get(
+            "NOVITA_API_BASE_URL", "https://api.novita.ai/v3/openai"
+        )
+        timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+        super().__init__(
+            model_type=model_type,
+            model_config_dict=model_config_dict,
+            api_key=api_key,
+            url=url,
+            token_counter=token_counter,
+            timeout=timeout,
+        )
+
+    def check_model_config(self):
+        r"""Check whether the model configuration contains any
+        unexpected arguments to Novita API.
+
+        Raises:
+            ValueError: If the model configuration dictionary contains any
+                unexpected arguments to Novita API.
+        """
+        for param in self.model_config_dict:
+            if param not in NOVITA_API_PARAMS:
+                raise ValueError(
+                    f"Unexpected argument `{param}` is "
+                    "input into Novita model backend."
+                )
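Construction mirrors the other OpenAI-compatible backends: the key and base URL fall back to `NOVITA_API_KEY` and `NOVITA_API_BASE_URL`. A minimal sketch; the model id is illustrative (see novita.ai/models for valid ids):

```python
# Sketch: the new Novita backend resolves credentials from the environment.
import os

from camel.models.novita_model import NovitaModel

os.environ.setdefault("NOVITA_API_KEY", "<your-key>")  # placeholder
model = NovitaModel(
    model_type="deepseek/deepseek-r1",  # illustrative model id
)
```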
camel/models/ollama_model.py
CHANGED
@@ -16,10 +16,13 @@ import subprocess
 from typing import Any, Dict, Optional, Union
 
 from camel.configs import OLLAMA_API_PARAMS, OllamaConfig
+from camel.logger import get_logger
 from camel.models.openai_compatible_model import OpenAICompatibleModel
 from camel.types import ModelType
 from camel.utils import BaseTokenCounter
 
+logger = get_logger(__name__)
+
 
 class OllamaModel(OpenAICompatibleModel):
     r"""Ollama service interface.
@@ -60,20 +63,22 @@ class OllamaModel(OpenAICompatibleModel):
     ) -> None:
         if model_config_dict is None:
             model_config_dict = OllamaConfig().as_dict()
-
+        self._url = url or os.environ.get("OLLAMA_BASE_URL")
         timeout = timeout or float(os.environ.get("MODEL_TIMEOUT", 180))
+        self._model_type = model_type
+
+        if not self._url:
+            self._start_server()
+
         super().__init__(
-            model_type=model_type,
+            model_type=self._model_type,
             model_config_dict=model_config_dict,
-            api_key="Set-but-ignored",
-            url=url,
+            api_key="Not_Used",
+            url=self._url,
             token_counter=token_counter,
             timeout=timeout,
         )
 
-        if not self._url:
-            self._start_server()
-
     def _start_server(self) -> None:
         r"""Starts the Ollama server in a subprocess."""
         try:
@@ -83,12 +88,12 @@ class OllamaModel(OpenAICompatibleModel):
                 stderr=subprocess.PIPE,
             )
             self._url = "http://localhost:11434/v1"
-            print(
+            logger.info(
                 f"Ollama server started on {self._url} "
-                f"for {self.model_type} model."
+                f"for {self._model_type} model."
             )
         except Exception as e:
-            print(f"Failed to start Ollama server: {e}.")
+            logger.error(f"Failed to start Ollama server: {e}.")
 
     def check_model_config(self):
         r"""Check whether the model configuration contains any