camel-ai 0.2.49__py3-none-any.whl → 0.2.50__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +159 -15
- camel/configs/__init__.py +6 -0
- camel/configs/modelscope_config.py +4 -1
- camel/configs/novita_config.py +102 -0
- camel/configs/qwen_config.py +0 -7
- camel/configs/watsonx_config.py +96 -0
- camel/models/__init__.py +4 -0
- camel/models/model_factory.py +29 -6
- camel/models/modelscope_model.py +175 -2
- camel/models/novita_model.py +95 -0
- camel/models/qwen_model.py +175 -2
- camel/models/watsonx_model.py +253 -0
- camel/societies/workforce/prompts.py +31 -4
- camel/societies/workforce/workforce.py +1 -1
- camel/toolkits/browser_toolkit.py +53 -55
- camel/types/enums.py +223 -1
- camel/types/unified_model_type.py +10 -0
- camel/utils/__init__.py +2 -0
- camel/utils/filename.py +80 -0
- camel/verifiers/__init__.py +2 -0
- camel/verifiers/physics_verifier.py +881 -0
- camel/verifiers/python_verifier.py +16 -31
- {camel_ai-0.2.49.dist-info → camel_ai-0.2.50.dist-info}/METADATA +4 -1
- {camel_ai-0.2.49.dist-info → camel_ai-0.2.50.dist-info}/RECORD +27 -21
- {camel_ai-0.2.49.dist-info → camel_ai-0.2.50.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.49.dist-info → camel_ai-0.2.50.dist-info}/licenses/LICENSE +0 -0
camel/__init__.py
CHANGED
camel/agents/chat_agent.py
CHANGED
@@ -28,6 +28,7 @@ from typing import (
     List,
     Optional,
     Set,
+    Tuple,
     Type,
     Union,
 )
@@ -78,7 +79,6 @@ from camel.utils import get_model_encoding
 if TYPE_CHECKING:
     from camel.terminators import ResponseTerminator
 
-
 logger = logging.getLogger(__name__)
 
 # AgentOps decorator setting
@@ -110,10 +110,17 @@ class ChatAgent(BaseAgent):
 
     Args:
         system_message (Union[BaseMessage, str], optional): The system message
-            for the chat agent.
-        model (BaseModelBackend,
-
-
+            for the chat agent. (default: :obj:`None`)
+        model (Union[BaseModelBackend, Tuple[str, str], str, ModelType,
+            Tuple[ModelPlatformType, ModelType], List[BaseModelBackend],
+            List[str], List[ModelType], List[Tuple[str, str]],
+            List[Tuple[ModelPlatformType, ModelType]]], optional):
+            The model backend(s) to use. Can be a single instance,
+            a specification (string, enum, tuple), or a list of instances
+            or specifications to be managed by `ModelManager`. If a list of
+            specifications (not `BaseModelBackend` instances) is provided,
+            they will be instantiated using `ModelFactory`. (default:
+            :obj:`ModelPlatformType.DEFAULT` with `ModelType.DEFAULT`)
         memory (AgentMemory, optional): The agent memory for managing chat
             messages. If `None`, a :obj:`ChatHistoryMemory` will be used.
             (default: :obj:`None`)
@@ -150,7 +157,18 @@
         self,
         system_message: Optional[Union[BaseMessage, str]] = None,
         model: Optional[
-            Union[
+            Union[
+                BaseModelBackend,
+                Tuple[str, str],
+                str,
+                ModelType,
+                Tuple[ModelPlatformType, ModelType],
+                List[BaseModelBackend],
+                List[str],
+                List[ModelType],
+                List[Tuple[str, str]],
+                List[Tuple[ModelPlatformType, ModelType]],
+            ]
         ] = None,
         memory: Optional[AgentMemory] = None,
         message_window_size: Optional[int] = None,
@@ -165,19 +183,14 @@
         single_iteration: bool = False,
         agent_id: Optional[str] = None,
     ) -> None:
-        #
+        # Resolve model backends and set up model manager
+        resolved_models = self._resolve_models(model)
         self.model_backend = ModelManager(
-
-            model
-            if model is not None
-            else ModelFactory.create(
-                model_platform=ModelPlatformType.DEFAULT,
-                model_type=ModelType.DEFAULT,
-            )
-            ),
+            resolved_models,
             scheduling_strategy=scheduling_strategy,
         )
         self.model_type = self.model_backend.model_type
+
         # Assign unique ID
         self.agent_id = agent_id if agent_id else str(uuid.uuid4())
 
@@ -247,6 +260,137 @@
         for terminator in self.response_terminators:
             terminator.reset()
 
+    def _resolve_models(
+        self,
+        model: Optional[
+            Union[
+                BaseModelBackend,
+                Tuple[str, str],
+                str,
+                ModelType,
+                Tuple[ModelPlatformType, ModelType],
+                List[BaseModelBackend],
+                List[str],
+                List[ModelType],
+                List[Tuple[str, str]],
+                List[Tuple[ModelPlatformType, ModelType]],
+            ]
+        ],
+    ) -> Union[BaseModelBackend, List[BaseModelBackend]]:
+        r"""Resolves model specifications into model backend instances.
+
+        This method handles various input formats for model specifications and
+        returns the appropriate model backend(s).
+
+        Args:
+            model: Model specification in various formats including single
+                model, list of models, or model type specifications.
+
+        Returns:
+            Union[BaseModelBackend, List[BaseModelBackend]]: Resolved model
+                backend(s).
+
+        Raises:
+            TypeError: If the model specification format is not supported.
+        """
+        if model is None:
+            # Default single model if none provided
+            return ModelFactory.create(
+                model_platform=ModelPlatformType.DEFAULT,
+                model_type=ModelType.DEFAULT,
+            )
+        elif isinstance(model, BaseModelBackend):
+            # Already a single pre-instantiated model
+            return model
+        elif isinstance(model, list):
+            return self._resolve_model_list(model)
+        elif isinstance(model, (ModelType, str)):
+            # Single string or ModelType -> use default platform
+            model_platform = ModelPlatformType.DEFAULT
+            model_type = model
+            logger.warning(
+                f"Model type '{model_type}' provided without a platform. "
+                f"Using platform '{model_platform}'. Note: platform "
+                "is not automatically inferred based on model type."
+            )
+            return ModelFactory.create(
+                model_platform=model_platform,
+                model_type=model_type,
+            )
+        elif isinstance(model, tuple) and len(model) == 2:
+            # Single tuple (platform, type)
+            model_platform, model_type = model  # type: ignore[assignment]
+            return ModelFactory.create(
+                model_platform=model_platform,
+                model_type=model_type,
+            )
+        else:
+            raise TypeError(
+                f"Unsupported type for model parameter: {type(model)}"
+            )
+
+    def _resolve_model_list(
+        self, model_list: list
+    ) -> Union[BaseModelBackend, List[BaseModelBackend]]:
+        r"""Resolves a list of model specifications into model backend
+        instances.
+
+        Args:
+            model_list (list): List of model specifications in various formats.
+
+        Returns:
+            Union[BaseModelBackend, List[BaseModelBackend]]: Resolved model
+                backend(s).
+
+        Raises:
+            TypeError: If the list elements format is not supported.
+        """
+        if not model_list:  # Handle empty list
+            logger.warning(
+                "Empty list provided for model, using default model."
+            )
+            return ModelFactory.create(
+                model_platform=ModelPlatformType.DEFAULT,
+                model_type=ModelType.DEFAULT,
+            )
+        elif isinstance(model_list[0], BaseModelBackend):
+            # List of pre-instantiated models
+            return model_list  # type: ignore[return-value]
+        elif isinstance(model_list[0], (str, ModelType)):
+            # List of strings or ModelTypes -> use default platform
+            model_platform = ModelPlatformType.DEFAULT
+            logger.warning(
+                f"List of model types {model_list} provided without "
+                f"platforms. Using platform '{model_platform}' for all. "
+                "Note: platform is not automatically inferred based on "
+                "model type."
+            )
+            resolved_models_list = []
+            for model_type_item in model_list:
+                resolved_models_list.append(
+                    ModelFactory.create(
+                        model_platform=model_platform,
+                        model_type=model_type_item,  # type: ignore[arg-type]
+                    )
+                )
+            return resolved_models_list
+        elif isinstance(model_list[0], tuple) and len(model_list[0]) == 2:
+            # List of tuples (platform, type)
+            resolved_models_list = []
+            for model_spec in model_list:
+                platform, type_ = model_spec[0], model_spec[1]  # type: ignore[index]
+                resolved_models_list.append(
+                    ModelFactory.create(
+                        model_platform=platform, model_type=type_
+                    )
+                )
+            return resolved_models_list
+        else:
+            raise TypeError(
+                "Unsupported type for list elements in model: "
+                f"{type(model_list[0])}"
+            )
+
     @property
     def system_message(self) -> Optional[BaseMessage]:
         r"""Returns the system message for the agent."""
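The hunks above widen what ChatAgent accepts for `model`: besides a pre-built `BaseModelBackend`, the constructor now resolves strings, `ModelType` values, `(platform, type)` tuples, and lists of any of these through the new `_resolve_models` helper. A minimal usage sketch based only on that signature; it sticks to the `DEFAULT` enum members, and any real model choice is up to the caller:

from camel.agents import ChatAgent
from camel.types import ModelPlatformType, ModelType

# A bare ModelType (or string) is resolved against ModelPlatformType.DEFAULT
# and a warning is logged, since the platform is not inferred from the type.
agent_single = ChatAgent("You are a helpful assistant.", model=ModelType.DEFAULT)

# A list of (platform, type) tuples: each entry is instantiated via
# ModelFactory and the resulting backends are handed to ModelManager.
agent_multi = ChatAgent(
    "You are a helpful assistant.",
    model=[
        (ModelPlatformType.DEFAULT, ModelType.DEFAULT),
        (ModelPlatformType.DEFAULT, ModelType.DEFAULT),
    ],
)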
camel/configs/__init__.py
CHANGED
@@ -26,6 +26,7 @@ from .mistral_config import MISTRAL_API_PARAMS, MistralConfig
 from .modelscope_config import MODELSCOPE_API_PARAMS, ModelScopeConfig
 from .moonshot_config import MOONSHOT_API_PARAMS, MoonshotConfig
 from .netmind_config import NETMIND_API_PARAMS, NetmindConfig
+from .novita_config import NOVITA_API_PARAMS, NovitaConfig
 from .nvidia_config import NVIDIA_API_PARAMS, NvidiaConfig
 from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
 from .openai_config import OPENAI_API_PARAMS, ChatGPTConfig
@@ -43,6 +44,7 @@ from .sglang_config import SGLANG_API_PARAMS, SGLangConfig
 from .siliconflow_config import SILICONFLOW_API_PARAMS, SiliconFlowConfig
 from .togetherai_config import TOGETHERAI_API_PARAMS, TogetherAIConfig
 from .vllm_config import VLLM_API_PARAMS, VLLMConfig
+from .watsonx_config import WATSONX_API_PARAMS, WatsonXConfig
 from .yi_config import YI_API_PARAMS, YiConfig
 from .zhipuai_config import ZHIPUAI_API_PARAMS, ZhipuAIConfig
 
@@ -98,6 +100,8 @@ __all__ = [
     "MOONSHOT_API_PARAMS",
     'ModelScopeConfig',
     'MODELSCOPE_API_PARAMS',
+    'NovitaConfig',
+    'NOVITA_API_PARAMS',
     'SiliconFlowConfig',
     'SILICONFLOW_API_PARAMS',
     'AIMLConfig',
@@ -106,4 +110,6 @@ __all__ = [
     'OPENROUTER_API_PARAMS',
     'LMSTUDIO_API_PARAMS',
     'LMStudioConfig',
+    'WatsonXConfig',
+    'WATSONX_API_PARAMS',
 ]
camel/configs/modelscope_config.py
CHANGED
@@ -13,7 +13,7 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 from __future__ import annotations
 
-from typing import Optional, Union
+from typing import Any, Dict, Optional, Union
 
 from camel.configs.base_config import BaseConfig
 
@@ -45,6 +45,8 @@ class ModelScopeConfig(BaseConfig):
             while higher values make it more diverse. (default: :obj:`0.3`)
         stream (bool, optional): If True, enables streaming output.
             (default: :obj:`None`)
+        extra_body (dict, optional): Extra body parameters to be passed to
+            the ModelScope API.
     """
 
     tool_choice: Optional[Union[dict[str, str], str]] = None
@@ -52,6 +54,7 @@ class ModelScopeConfig(BaseConfig):
     top_p: Optional[float] = None
     temperature: Optional[float] = None
     stream: Optional[bool] = None
+    extra_body: Optional[Dict[str, Any]] = None
 
 
 MODELSCOPE_API_PARAMS = {
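ModelScopeConfig gains an `extra_body` field that is passed through to the ModelScope API. A hedged sketch of using it; the `enable_thinking` key below is purely illustrative and not defined by this diff:

from camel.configs import ModelScopeConfig

# extra_body is an arbitrary dict forwarded with the request body.
config = ModelScopeConfig(
    temperature=0.3,
    extra_body={"enable_thinking": False},  # placeholder key, not from this release
)
print(config.model_dump(exclude_none=True))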
camel/configs/novita_config.py
ADDED
@@ -0,0 +1,102 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from __future__ import annotations
+
+from typing import Dict, Optional, Sequence, Type, Union
+
+from pydantic import BaseModel
+
+from camel.configs.base_config import BaseConfig
+
+
+class NovitaConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using the
+    OpenAI API.
+
+    Args:
+        temperature (float, optional): Sampling temperature to use, between
+            :obj:`0` and :obj:`2`. Higher values make the output more random,
+            while lower values make it more focused and deterministic.
+            (default: :obj:`None`)
+        top_p (float, optional): An alternative to sampling with temperature,
+            called nucleus sampling, where the model considers the results of
+            the tokens with top_p probability mass. So :obj:`0.1` means only
+            the tokens comprising the top 10% probability mass are considered.
+            (default: :obj:`None`)
+        n (int, optional): How many chat completion choices to generate for
+            each input message. (default: :obj:`None`)
+        response_format (object, optional): An object specifying the format
+            that the model must output. Compatible with GPT-4 Turbo and all
+            GPT-3.5 Turbo models newer than gpt-3.5-turbo-1106. Setting to
+            {"type": "json_object"} enables JSON mode, which guarantees the
+            message the model generates is valid JSON. Important: when using
+            JSON mode, you must also instruct the model to produce JSON
+            yourself via a system or user message. Without this, the model
+            may generate an unending stream of whitespace until the generation
+            reaches the token limit, resulting in a long-running and seemingly
+            "stuck" request. Also note that the message content may be
+            partially cut off if finish_reason="length", which indicates the
+            generation exceeded max_tokens or the conversation exceeded the
+            max context length.
+        stream (bool, optional): If True, partial message deltas will be sent
+            as data-only server-sent events as they become available.
+            (default: :obj:`None`)
+        stop (str or list, optional): Up to :obj:`4` sequences where the API
+            will stop generating further tokens. (default: :obj:`None`)
+        max_tokens (int, optional): The maximum number of tokens to generate
+            in the chat completion. The total length of input tokens and
+            generated tokens is limited by the model's context length.
+            (default: :obj:`None`)
+        presence_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on whether
+            they appear in the text so far, increasing the model's likelihood
+            to talk about new topics. See more information about frequency and
+            presence penalties. (default: :obj:`None`)
+        frequency_penalty (float, optional): Number between :obj:`-2.0` and
+            :obj:`2.0`. Positive values penalize new tokens based on their
+            existing frequency in the text so far, decreasing the model's
+            likelihood to repeat the same line verbatim. See more information
+            about frequency and presence penalties. (default: :obj:`None`)
+        logit_bias (dict, optional): Modify the likelihood of specified tokens
+            appearing in the completion. Accepts a json object that maps tokens
+            (specified by their token ID in the tokenizer) to an associated
+            bias value from :obj:`-100` to :obj:`100`. Mathematically, the bias
+            is added to the logits generated by the model prior to sampling.
+            The exact effect will vary per model, but values between:obj:` -1`
+            and :obj:`1` should decrease or increase likelihood of selection;
+            values like :obj:`-100` or :obj:`100` should result in a ban or
+            exclusive selection of the relevant token. (default: :obj:`None`)
+        user (str, optional): A unique identifier representing your end-user,
+            which can help OpenAI to monitor and detect abuse.
+            (default: :obj:`None`)
+        tools (list[FunctionTool], optional): A list of tools the model may
+            call. Currently, only functions are supported as a tool. Use this
+            to provide a list of functions the model may generate JSON inputs
+            for. A max of 128 functions are supported.
+    """
+
+    temperature: Optional[float] = None
+    top_p: Optional[float] = None
+    n: Optional[int] = None
+    stream: Optional[bool] = None
+    stop: Optional[Union[str, Sequence[str]]] = None
+    max_tokens: Optional[int] = None
+    presence_penalty: Optional[float] = None
+    response_format: Optional[Union[Type[BaseModel], Dict]] = None
+    frequency_penalty: Optional[float] = None
+    logit_bias: Optional[Dict] = None
+    user: Optional[str] = None
+
+
+NOVITA_API_PARAMS = {param for param in NovitaConfig.model_fields.keys()}
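Together with the new NovitaModel backend and the `is_novita` branch added to ModelFactory further down, this config is presumably meant to be used like the other OpenAI-compatible providers. A sketch under that assumption; the `ModelPlatformType.NOVITA` member and the model id string are guesses, not confirmed by this diff (the enum additions live in camel/types/enums.py):

from camel.configs import NovitaConfig
from camel.models import ModelFactory
from camel.types import ModelPlatformType

config = NovitaConfig(temperature=0.7, max_tokens=512)

# Assumed platform member and model id; check camel.types.enums in 0.2.50.
novita_model = ModelFactory.create(
    model_platform=ModelPlatformType.NOVITA,
    model_type="deepseek/deepseek-r1",
    model_config_dict=config.model_dump(exclude_none=True),
)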
camel/configs/qwen_config.py
CHANGED
@@ -80,12 +80,5 @@ class QwenConfig(BaseConfig):
     stop: Optional[Union[str, List]] = None
     extra_body: Optional[Dict[str, Any]] = None
 
-    def __init__(self, include_usage: bool = True, **kwargs):
-        super().__init__(**kwargs)
-        # Only set stream_options when stream is True
-        # Otherwise, it will raise error when calling the API
-        if self.stream:
-            self.stream_options = {"include_usage": include_usage}
-
 
 QWEN_API_PARAMS = {param for param in QwenConfig.model_fields.keys()}
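The removed __init__ means QwenConfig no longer injects stream_options={"include_usage": ...} when stream=True; it is now a plain pydantic config and only the fields you pass are serialized. A small before/after sketch, assuming the surrounding fields are unchanged:

from camel.configs import QwenConfig

# 0.2.49: QwenConfig(stream=True) silently added stream_options.
# 0.2.50: nothing is injected; callers who relied on usage reporting in
# streamed responses must now configure it themselves.
config = QwenConfig(stream=True)
print(config.model_dump(exclude_none=True))  # no stream_options key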
camel/configs/watsonx_config.py
ADDED
@@ -0,0 +1,96 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from __future__ import annotations
+
+from typing import Dict, List, Literal, Optional
+
+from camel.configs.base_config import BaseConfig
+
+
+class WatsonXConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using the
+    IBM WatsonX API.
+
+    See: https://ibm.github.io/watsonx-ai-python-sdk/fm_schema.html
+
+    Args:
+        frequency_penalty (float, optional): Number between -2.0 and 2.0.
+            Positive values penalize new tokens based on their existing
+            frequency in the text so far, decreasing the model's likelihood
+            to repeat the same line verbatim.
+            (default: :obj:`None`)
+        logprobs (bool, optional): Whether to return log probabilities of the
+            output tokens or not. If true, returns the log probabilities of
+            each output token returned in the content of message.
+            (default: :obj:`None`)
+        top_logprobs (int, optional): An integer between 0 and 20 specifying
+            the number of most likely tokens to return at each token position,
+            each with an associated log probability.
+            (default: :obj:`None`)
+        presence_penalty (float, optional): Number between -2.0 and 2.0.
+            Positive values penalize new tokens based on whether they appear
+            in the text so far, increasing the model's likelihood to talk
+            about new topics.
+            (default: :obj:`None`)
+        temperature (float, optional): What sampling temperature to use,
+            between 0 and 2. Higher values like 0.8 will make the output
+            more random, while lower values like 0.2 will make it more focused
+            and deterministic. We generally recommend altering this or top_p
+            but not both.
+            (default: :obj:`None`)
+        max_tokens (int, optional): The maximum number of tokens to generate
+            in the chat completion. The total length of input tokens and
+            generated tokens is limited by the model's context length.
+            (default: :obj:`None`)
+        time_limit (int, optional): The maximum amount of time in seconds
+            that the API will spend generating a response.
+            (default: :obj:`None`)
+        top_p (float, optional): Controls the randomness of the generated
+            results. Lower values lead to less randomness, while higher
+            values increase randomness. (default: :obj:`None`)
+        n (int, optional): How many chat completion choices to generate for
+            each input message. Note that you will be charged based on the
+            total number of tokens generated.
+            (default: :obj:`None`)
+        logit_bias (Optional[dict], optional): Modify probability
+            of specific tokens appearing in the completion.
+            (default: :obj:`None`)
+        seed (int, optional): If specified, the system will make a best effort
+            to sample deterministically, such that repeated requests with the
+            same seed and parameters should return the same result.
+            (default: :obj:`None`)
+        stop (List[str], optional): Up to 4 sequences where the API will stop
+            generating further tokens.
+            (default: :obj:`None`)
+        tool_choice_options (Literal["none", "auto"], optional): The options
+            for the tool choice.
+            (default: :obj:`"auto"`)
+    """
+
+    frequency_penalty: Optional[float] = None
+    logprobs: Optional[bool] = None
+    top_logprobs: Optional[int] = None
+    presence_penalty: Optional[float] = None
+    temperature: Optional[float] = None
+    max_tokens: Optional[int] = None
+    time_limit: Optional[int] = None
+    top_p: Optional[float] = None
+    n: Optional[int] = None
+    logit_bias: Optional[Dict] = None
+    seed: Optional[int] = None
+    stop: Optional[List[str]] = None
+    tool_choice_options: Literal["none", "auto"] = "auto"
+
+
+WATSONX_API_PARAMS = {param for param in WatsonXConfig.model_fields.keys()}
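WatsonXConfig mirrors the chat-schema parameters of the IBM watsonx.ai SDK linked in its docstring. A hedged sketch of wiring it up through the factory; note that model_factory.py (below) selects WatsonXModel via `model_type.is_watsonx`, and the platform member and model id used here are assumptions rather than something this diff shows:

from camel.configs import WatsonXConfig
from camel.models import ModelFactory
from camel.types import ModelPlatformType

config = WatsonXConfig(
    temperature=0.2,
    max_tokens=400,
    time_limit=30,  # seconds the API may spend on one response
)

# Assumed enum members and model id; see camel/types/enums.py in 0.2.50.
watsonx_model = ModelFactory.create(
    model_platform=ModelPlatformType.WATSONX,
    model_type="ibm/granite-3-8b-instruct",
    model_config_dict=config.model_dump(exclude_none=True),
)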
camel/models/__init__.py
CHANGED
@@ -32,6 +32,7 @@ from .modelscope_model import ModelScopeModel
 from .moonshot_model import MoonshotModel
 from .nemotron_model import NemotronModel
 from .netmind_model import NetmindModel
+from .novita_model import NovitaModel
 from .nvidia_model import NvidiaModel
 from .ollama_model import OllamaModel
 from .openai_audio_models import OpenAIAudioModels
@@ -48,6 +49,7 @@ from .stub_model import StubModel
 from .togetherai_model import TogetherAIModel
 from .vllm_model import VLLMModel
 from .volcano_model import VolcanoModel
+from .watsonx_model import WatsonXModel
 from .yi_model import YiModel
 from .zhipuai_model import ZhipuAIModel
 
@@ -68,6 +70,7 @@ __all__ = [
     'OpenAIAudioModels',
     'NetmindModel',
     'NemotronModel',
+    'NovitaModel',
     'NvidiaModel',
     'OllamaModel',
     'VLLMModel',
@@ -92,4 +95,5 @@ __all__ = [
     'SiliconFlowModel',
     'VolcanoModel',
     'LMStudioModel',
+    'WatsonXModel',
 ]
camel/models/model_factory.py
CHANGED
@@ -32,6 +32,7 @@ from camel.models.mistral_model import MistralModel
 from camel.models.modelscope_model import ModelScopeModel
 from camel.models.moonshot_model import MoonshotModel
 from camel.models.netmind_model import NetmindModel
+from camel.models.novita_model import NovitaModel
 from camel.models.nvidia_model import NvidiaModel
 from camel.models.ollama_model import OllamaModel
 from camel.models.openai_compatible_model import OpenAICompatibleModel
@@ -47,6 +48,7 @@ from camel.models.stub_model import StubModel
 from camel.models.togetherai_model import TogetherAIModel
 from camel.models.vllm_model import VLLMModel
 from camel.models.volcano_model import VolcanoModel
+from camel.models.watsonx_model import WatsonXModel
 from camel.models.yi_model import YiModel
 from camel.models.zhipuai_model import ZhipuAIModel
 from camel.types import ModelPlatformType, ModelType, UnifiedModelType
@@ -62,8 +64,8 @@ class ModelFactory:
 
     @staticmethod
     def create(
-        model_platform: ModelPlatformType,
-        model_type: Union[ModelType, str],
+        model_platform: Union[ModelPlatformType, str],
+        model_type: Union[ModelType, str, UnifiedModelType],
         model_config_dict: Optional[Dict] = None,
         token_counter: Optional[BaseTokenCounter] = None,
         api_key: Optional[str] = None,
@@ -73,10 +75,12 @@
        r"""Creates an instance of `BaseModelBackend` of the specified type.
 
        Args:
-            model_platform (ModelPlatformType): Platform from
-                originates.
-
-
+            model_platform (Union[ModelPlatformType, str]): Platform from
+                which the model originates. Can be a string or
+                ModelPlatformType enum.
+            model_type (Union[ModelType, str, UnifiedModelType]): Model for
+                which a backend is created. Can be a string, ModelType enum, or
+                UnifiedModelType.
            model_config_dict (Optional[Dict]): A dictionary that will be fed
                into the backend constructor. (default: :obj:`None`)
            token_counter (Optional[BaseTokenCounter], optional): Token
@@ -97,6 +101,21 @@
        Raises:
            ValueError: If there is no backend for the model.
        """
+        # Convert string to ModelPlatformType enum if needed
+        if isinstance(model_platform, str):
+            try:
+                model_platform = ModelPlatformType(model_platform)
+            except ValueError:
+                raise ValueError(f"Unknown model platform: {model_platform}")
+
+        # Convert string to ModelType enum or UnifiedModelType if needed
+        if isinstance(model_type, str):
+            try:
+                model_type = ModelType(model_type)
+            except ValueError:
+                # If not in ModelType, create a UnifiedModelType
+                model_type = UnifiedModelType(model_type)
+
        model_class: Optional[Type[BaseModelBackend]] = None
        model_type = UnifiedModelType(model_type)
 
@@ -163,8 +182,12 @@
            model_class = MoonshotModel
        elif model_platform.is_modelscope:
            model_class = ModelScopeModel
+        elif model_platform.is_novita:
+            model_class = NovitaModel
        elif model_type == ModelType.STUB:
            model_class = StubModel
+        elif model_type.is_watsonx:
+            model_class = WatsonXModel
 
        if model_class is None:
            raise ValueError(
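With the changes above, ModelFactory.create now coerces plain strings for both arguments: an unknown platform string raises ValueError, while a model string that is not a ModelType member falls back to UnifiedModelType. A short sketch; the "openai" and "gpt-4o-mini" values are assumptions about existing enum values, not part of this diff:

from camel.models import ModelFactory

# Strings are converted to ModelPlatformType / ModelType internally;
# model ids outside the ModelType enum become UnifiedModelType instances.
model = ModelFactory.create(
    model_platform="openai",
    model_type="gpt-4o-mini",
)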