camel-ai 0.2.5__py3-none-any.whl → 0.2.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +114 -23
- camel/configs/__init__.py +6 -4
- camel/configs/base_config.py +21 -0
- camel/configs/gemini_config.py +17 -9
- camel/configs/qwen_config.py +91 -0
- camel/configs/samba_config.py +1 -38
- camel/configs/yi_config.py +58 -0
- camel/generators.py +93 -0
- camel/interpreters/docker_interpreter.py +5 -0
- camel/interpreters/ipython_interpreter.py +2 -1
- camel/loaders/__init__.py +2 -0
- camel/loaders/apify_reader.py +223 -0
- camel/memories/agent_memories.py +24 -1
- camel/messages/base.py +38 -0
- camel/models/__init__.py +4 -0
- camel/models/model_factory.py +6 -0
- camel/models/qwen_model.py +139 -0
- camel/models/samba_model.py +1 -1
- camel/models/yi_model.py +138 -0
- camel/prompts/image_craft.py +8 -0
- camel/prompts/video_description_prompt.py +8 -0
- camel/retrievers/vector_retriever.py +5 -1
- camel/societies/role_playing.py +29 -18
- camel/societies/workforce/base.py +7 -1
- camel/societies/workforce/task_channel.py +10 -0
- camel/societies/workforce/utils.py +6 -0
- camel/societies/workforce/worker.py +2 -0
- camel/storages/vectordb_storages/qdrant.py +147 -24
- camel/tasks/task.py +15 -0
- camel/terminators/base.py +4 -0
- camel/terminators/response_terminator.py +1 -0
- camel/terminators/token_limit_terminator.py +1 -0
- camel/toolkits/__init__.py +4 -1
- camel/toolkits/base.py +9 -0
- camel/toolkits/data_commons_toolkit.py +360 -0
- camel/toolkits/function_tool.py +174 -7
- camel/toolkits/github_toolkit.py +175 -176
- camel/toolkits/google_scholar_toolkit.py +36 -7
- camel/toolkits/notion_toolkit.py +279 -0
- camel/toolkits/search_toolkit.py +164 -36
- camel/types/enums.py +88 -0
- camel/types/unified_model_type.py +10 -0
- camel/utils/commons.py +2 -1
- camel/utils/constants.py +2 -0
- {camel_ai-0.2.5.dist-info → camel_ai-0.2.7.dist-info}/METADATA +129 -79
- {camel_ai-0.2.5.dist-info → camel_ai-0.2.7.dist-info}/RECORD +49 -42
- {camel_ai-0.2.5.dist-info → camel_ai-0.2.7.dist-info}/LICENSE +0 -0
- {camel_ai-0.2.5.dist-info → camel_ai-0.2.7.dist-info}/WHEEL +0 -0
camel/__init__.py
CHANGED
camel/agents/chat_agent.py
CHANGED
@@ -106,6 +106,11 @@ class FunctionCallingRecord(BaseModel):
         )

     def as_dict(self) -> dict[str, Any]:
+        r"""Returns the function calling record as a dictionary.
+
+        Returns:
+            dict[str, Any]: The function calling record as a dictionary.
+        """
         return self.model_dump()


@@ -175,9 +180,6 @@ class ChatAgent(BaseAgent):
                 model_type=ModelType.DEFAULT,
             )
         )
-        self.output_language: Optional[str] = output_language
-        if self.output_language is not None:
-            self.set_output_language(self.output_language)

         self.model_type = self.model_backend.model_type

@@ -213,12 +215,27 @@ class ChatAgent(BaseAgent):
             context_creator, window_size=message_window_size
         )

+        self.output_language: Optional[str] = output_language
+        if self.output_language is not None:
+            self.set_output_language(self.output_language)
+
         self.terminated: bool = False
         self.response_terminators = response_terminators or []
         self.init_messages()

+        self.tool_prompt_added = False
+
     # ruff: noqa: E501
     def _generate_tool_prompt(self, tool_schema_list: List[Dict]) -> str:
+        r"""Generates a tool prompt based on the provided tool schema list.
+
+        Args:
+            tool_schema_list (List[Dict]): A list of dictionaries, each
+                containing a tool schema.
+
+        Returns:
+            str: A string representing the tool prompt.
+        """
         tool_prompts = []

         for tool in tool_schema_list:
@@ -239,21 +256,36 @@ class ChatAgent(BaseAgent):

 {tool_prompt_str}

-If you choose to call a function ONLY reply in the following format with no
+If you choose to call a function ONLY reply in the following format with no
+prefix or suffix:

-<function=example_function_name>{{"example_name": "example_value"}}
+<function=example_function_name>{{"example_name": "example_value"}}
+</function>

 Reminder:
-- Function calls MUST follow the specified format, start with <function=
+- Function calls MUST follow the specified format, start with <function=
+and end with </function>
 - Required parameters MUST be specified
 - Only call one function at a time
 - Put the entire function call reply on one line
-- If there is no function call available, answer the question like normal
+- If there is no function call available, answer the question like normal
+with your current knowledge and do not tell the user about function calls
 """
 '''
         return final_prompt

     def _parse_tool_response(self, response: str):
+        r"""Parses the tool response to extract the function name and
+        arguments.
+
+        Args:
+            response (str): The response from the model containing the
+                function call.
+
+        Returns:
+            Optional[Dict[str, Any]]: The parsed function name and arguments
+                if found, otherwise :obj:`None`.
+        """
         function_regex = r"<function=(\w+)>(.*?)</function>"
         match = re.search(function_regex, response)

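The `<function=...>` calling convention introduced above can be exercised in isolation. Below is a minimal, self-contained sketch of the same parsing logic that `_parse_tool_response` implements; the helper name and the example tool are illustrative, not part of the package API.

```python
import json
import re


def parse_tool_response(response: str):
    # Same pattern the hunk above introduces:
    # <function=NAME>{...JSON arguments...}</function>
    function_regex = r"<function=(\w+)>(.*?)</function>"
    match = re.search(function_regex, response)
    if not match:
        # No function call found; the model answered normally.
        return None
    function_name, args_string = match.groups()
    try:
        args = json.loads(args_string)
    except json.JSONDecodeError:
        # Malformed arguments are treated as "no call".
        return None
    return {"function": function_name, "arguments": args}


reply = '<function=get_weather>{"city": "Paris"}</function>'
print(parse_tool_response(reply))
# {'function': 'get_weather', 'arguments': {'city': 'Paris'}}
```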
@@ -268,12 +300,7 @@ class ChatAgent(BaseAgent):
         return None

     def reset(self):
-        r"""Resets the :obj:`ChatAgent` to its initial state
-        stored messages.
-
-        Returns:
-            List[BaseMessage]: The stored messages.
-        """
+        r"""Resets the :obj:`ChatAgent` to its initial state."""
         self.terminated = False
         self.init_messages()
         for terminator in self.response_terminators:
@@ -290,7 +317,7 @@ class ChatAgent(BaseAgent):
         return self._system_message

     @system_message.setter
-    def system_message(self, message: BaseMessage):
+    def system_message(self, message: BaseMessage) -> None:
         r"""The setter method for the property :obj:`system_message`.

         Args:
@@ -345,13 +372,19 @@ class ChatAgent(BaseAgent):
             self._system_message = self.orig_sys_message.create_new_instance(
                 content
             )
-            return self._system_message
         else:
             self._system_message = BaseMessage.make_assistant_message(
                 role_name="Assistant",
                 content=language_prompt,
             )
-
+
+        system_record = MemoryRecord(
+            message=self._system_message,
+            role_at_backend=OpenAIBackendRole.SYSTEM,
+        )
+        self.memory.clear()
+        self.memory.write_record(system_record)
+        return self._system_message

     def get_info(
         self,
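The `set_output_language` hunk above changes behavior: the updated system message is now written back into the agent's memory as a SYSTEM record (after clearing memory) instead of only replacing `self._system_message`. A hedged usage sketch; `make_assistant_message` and `set_output_language` appear elsewhere in this diff, while the exact constructor call is an assumption about the 0.2.x API:

```python
from camel.agents import ChatAgent
from camel.messages import BaseMessage

sys_msg = BaseMessage.make_assistant_message(
    role_name="Assistant",
    content="You are a helpful assistant.",
)
agent = ChatAgent(system_message=sys_msg)

# As of 0.2.7 this clears memory and re-writes the combined system message
# as a SYSTEM record, so the language instruction persists across steps.
agent.set_output_language("French")
```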
@@ -448,7 +481,10 @@ class ChatAgent(BaseAgent):
         )

         if "llama" in self.model_type.lower():
-            if self.model_backend.model_config_dict.get("tools", None):
+            if (
+                self.model_backend.model_config_dict.get("tools", None)
+                and not self.tool_prompt_added
+            ):
                 tool_prompt = self._generate_tool_prompt(self.tool_schema_list)

                 tool_sys_msg = BaseMessage.make_assistant_message(
@@ -457,6 +493,7 @@ class ChatAgent(BaseAgent):
                 )

                 self.update_memory(tool_sys_msg, OpenAIBackendRole.SYSTEM)
+                self.tool_prompt_added = True

         self.update_memory(input_message, OpenAIBackendRole.USER)

@@ -821,6 +858,17 @@ class ChatAgent(BaseAgent):
     ]:
         r"""Internal function of structuring the output of the agent based on
         the given output schema.
+
+        Args:
+            response_format (Type[BaseModel]): The output schema to use for
+                structuring the output.
+
+        Returns:
+            Tuple[List[BaseMessage], List[str], Dict[str, int], str,
+                FunctionCallingRecord, int]:
+                A tuple containing the output messages, finish reasons, usage
+                dictionary, response ID, function calling record, and number of
+                tokens.
         """
         from camel.toolkits import FunctionTool

@@ -913,9 +961,39 @@ class ChatAgent(BaseAgent):
         num_tokens: int,
         external_tool_request: Optional[ChatCompletionMessageToolCall] = None,
     ) -> Dict[str, Any]:
-
-
-
+        r"""Process the output of a chat step and gather information about the
+        step.
+
+        This method checks for termination conditions, updates the agent's
+        state, and collects information about the chat step, including tool
+        calls and termination reasons.
+
+        Args:
+            output_messages (List[BaseMessage]): The messages generated in
+                this step.
+            finish_reasons (List[str]): The reasons for finishing the
+                generation for each message.
+            usage_dict (Dict[str, int]): Dictionary containing token usage
+                information.
+            response_id (str): The ID of the response from the model.
+            tool_calls (List[FunctionCallingRecord]): Records of function calls
+                made during this step.
+            num_tokens (int): The number of tokens used in this step.
+            external_tool_request (Optional[ChatCompletionMessageToolCall]):
+                Any external tool request made during this step.
+                (default: :obj:`None`)
+
+        Returns:
+            Dict[str, Any]: A dictionary containing information about the chat
+                step, including termination status, reasons, and tool call
+                information.
+
+        Note:
+            This method iterates over all response terminators and checks if
+            any of them signal termination. If a terminator signals
+            termination, the agent's state is updated accordingly, and the
+            termination reason is recorded.
+        """
         termination = [
             terminator.is_terminated(output_messages)
             for terminator in self.response_terminators
@@ -946,7 +1024,8 @@ class ChatAgent(BaseAgent):
     def handle_batch_response(
         self, response: ChatCompletion
     ) -> Tuple[List[BaseMessage], List[str], Dict[str, int], str]:
-        r"""
+        r"""Process a batch response from the model and extract the necessary
+        information.

         Args:
             response (dict): Model response.
@@ -979,7 +1058,18 @@ class ChatAgent(BaseAgent):
             response.id,
         )

-    def _safe_model_dump(self, obj):
+    def _safe_model_dump(self, obj) -> dict:
+        r"""Safely dump a Pydantic model to a dictionary.
+
+        This method attempts to use the `model_dump` method if available,
+        otherwise it falls back to the `dict` method.
+
+        Args:
+            obj: The Pydantic model instance to be dumped.
+
+        Returns:
+            dict: A dictionary representation of the Pydantic model.
+        """
         # Check if the `model_dump` method exists (Pydantic v2)
         if hasattr(obj, 'model_dump'):
             return obj.model_dump()
@@ -994,7 +1084,8 @@ class ChatAgent(BaseAgent):
         response: Stream[ChatCompletionChunk],
         prompt_tokens: int,
     ) -> Tuple[List[BaseMessage], List[str], Dict[str, int], str]:
-        r"""
+        r"""Process a stream response from the model and extract the necessary
+        information.

         Args:
             response (dict): Model response.
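The `_safe_model_dump` helper documented above is a small Pydantic v1/v2 compatibility shim. A standalone sketch of the same pattern (the free-function name is illustrative):

```python
from pydantic import BaseModel


def safe_model_dump(obj) -> dict:
    # Pydantic v2 models expose model_dump(); v1 models only have dict().
    if hasattr(obj, "model_dump"):
        return obj.model_dump()
    if hasattr(obj, "dict"):
        return obj.dict()
    raise TypeError("Expected a Pydantic model instance")


class Point(BaseModel):
    x: int
    y: int


print(safe_model_dump(Point(x=1, y=2)))  # {'x': 1, 'y': 2}
```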
camel/configs/__init__.py
CHANGED
@@ -19,17 +19,17 @@ from .litellm_config import LITELLM_API_PARAMS, LiteLLMConfig
 from .mistral_config import MISTRAL_API_PARAMS, MistralConfig
 from .ollama_config import OLLAMA_API_PARAMS, OllamaConfig
 from .openai_config import OPENAI_API_PARAMS, ChatGPTConfig
+from .qwen_config import QWEN_API_PARAMS, QwenConfig
 from .reka_config import REKA_API_PARAMS, RekaConfig
 from .samba_config import (
     SAMBA_CLOUD_API_PARAMS,
-    SAMBA_FAST_API_PARAMS,
     SAMBA_VERSE_API_PARAMS,
     SambaCloudAPIConfig,
-    SambaFastAPIConfig,
     SambaVerseAPIConfig,
 )
 from .togetherai_config import TOGETHERAI_API_PARAMS, TogetherAIConfig
 from .vllm_config import VLLM_API_PARAMS, VLLMConfig
+from .yi_config import YI_API_PARAMS, YiConfig
 from .zhipuai_config import ZHIPUAI_API_PARAMS, ZhipuAIConfig

 __all__ = [
@@ -54,12 +54,14 @@ __all__ = [
     'MISTRAL_API_PARAMS',
     'RekaConfig',
     'REKA_API_PARAMS',
-    'SambaFastAPIConfig',
-    'SAMBA_FAST_API_PARAMS',
     'SambaVerseAPIConfig',
     'SAMBA_VERSE_API_PARAMS',
     'SambaCloudAPIConfig',
     'SAMBA_CLOUD_API_PARAMS',
     'TogetherAIConfig',
     'TOGETHERAI_API_PARAMS',
+    'YiConfig',
+    'YI_API_PARAMS',
+    'QwenConfig',
+    'QWEN_API_PARAMS',
 ]
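For downstream code, the net effect of this re-export change is:

```python
# New in 0.2.7: both configs are importable from the package root.
from camel.configs import QwenConfig, YiConfig

# Removed in 0.2.7: this import now fails with an ImportError.
# from camel.configs import SambaFastAPIConfig, SAMBA_FAST_API_PARAMS
```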
camel/configs/base_config.py
CHANGED
@@ -20,6 +20,12 @@ from pydantic import BaseModel, ConfigDict, field_validator


 class BaseConfig(ABC, BaseModel):
+    r"""Base configuration class for all models.
+
+    This class provides a common interface for all models, ensuring that all
+    models have a consistent set of attributes and methods.
+    """
+
     model_config = ConfigDict(
         arbitrary_types_allowed=True,
         extra="forbid",
@@ -38,6 +44,12 @@ class BaseConfig(ABC, BaseModel):
     @field_validator("tools", mode="before")
     @classmethod
     def fields_type_checking(cls, tools):
+        r"""Validate the type of tools in the configuration.
+
+        This method ensures that the tools provided in the configuration are
+        instances of `FunctionTool`. If any tool is not an instance of
+        `FunctionTool`, it raises a ValueError.
+        """
         if tools is not None:
             from camel.toolkits import FunctionTool

@@ -50,6 +62,15 @@ class BaseConfig(ABC, BaseModel):
         return tools

     def as_dict(self) -> dict[str, Any]:
+        r"""Convert the current configuration to a dictionary.
+
+        This method converts the current configuration object to a dictionary
+        representation, which can be used for serialization or other purposes.
+
+        Returns:
+            dict[str, Any]: A dictionary representation of the current
+                configuration.
+        """
         config_dict = self.model_dump()

         tools_schema = None
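The newly documented `as_dict` is inherited by every config subclass, e.g. `ChatGPTConfig`, which this diff shows re-exported from `camel.configs`. A short usage sketch:

```python
from camel.configs import ChatGPTConfig

config = ChatGPTConfig(temperature=0.2)
# A plain dict suitable for serialization or passing to a model backend;
# per the hunk above, it starts from model_dump() and handles tools separately.
print(config.as_dict())
```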
camel/configs/gemini_config.py
CHANGED
@@ -87,6 +87,12 @@ class GeminiConfig(BaseConfig):
     @model_validator(mode="before")
     @classmethod
     def model_type_checking(cls, data: Any):
+        r"""Validate the type of tools in the configuration.
+
+        This method ensures that the tools provided in the configuration are
+        instances of `FunctionTool`. If any tool is not an instance of
+        `FunctionTool`, it raises a ValueError.
+        """
         if isinstance(data, dict):
             response_schema = data.get("response_schema")
             safety_settings = data.get("safety_settings")
@@ -111,16 +117,17 @@ class GeminiConfig(BaseConfig):

         if response_schema and not isinstance(response_schema, Schema):
             raise ValueError(
-                "The response_schema should be "
-                "
+                "The response_schema should be an instance of "
+                "google.generativeai.protos.Schema."
             )

         if safety_settings and not isinstance(
             safety_settings, SafetySettingOptions
         ):
             raise ValueError(
-                "The
-                "
+                "The safety_settings should be an instance of "
+                "google.generativeai.types.safety_types."
+                "SafetySettingOptions."
             )

         if tools is not None:
@@ -128,19 +135,20 @@ class GeminiConfig(BaseConfig):
             if not isinstance(tool, FunctionLibraryType):
                 raise ValueError(
                     "The tool should be an instance of "
-                    "
+                    "google.generativeai.types.content_types."
+                    "FunctionLibraryType."
                 )
             if tool_config and not isinstance(tool_config, ToolConfigType):
                 raise ValueError(
-                    "The
-                    "
+                    "The tool_config should be an instance of "
+                    "google.generativeai.types.content_types.ToolConfigType."
                 )
             if request_options and not isinstance(
                 request_options, RequestOptionsType
             ):
                 raise ValueError(
-                    "The
-                    "
+                    "The request_options should be an instance of "
+                    "google.generativeai.types.helper_types.RequestOptionsType."
                 )
         return data

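With the reworked messages, a failed validation now names the expected type in full. A sketch of triggering one of these errors; it assumes the `google-generativeai` dependency that `gemini_config.py` imports is installed, and relies on Pydantic v2, where `ValidationError` subclasses `ValueError`:

```python
from camel.configs.gemini_config import GeminiConfig

try:
    GeminiConfig(response_schema="not a Schema")
except ValueError as err:
    print(err)  # message mentions google.generativeai.protos.Schema
```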
camel/configs/qwen_config.py
ADDED

@@ -0,0 +1,91 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the “License”);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an “AS IS” BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from __future__ import annotations
+
+from typing import ClassVar, Optional, Union
+
+from camel.configs.base_config import BaseConfig
+from camel.types import NOT_GIVEN, NotGiven
+
+
+class QwenConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using the
+    Qwen API. You can refer to the following link for more details:
+    https://help.aliyun.com/zh/model-studio/developer-reference/use-qwen-by-calling-api
+
+    Args:
+        stream (bool, optional): Whether to stream the response.
+            (default: :obj:`False`)
+        temperature (float, optional): Controls the diversity and focus of
+            the generated results. Lower values make the output more focused,
+            while higher values make it more diverse. (default: :obj:`0.3`)
+        top_p (float, optional): Controls the diversity and focus of the
+            generated results. Higher values make the output more diverse,
+            while lower values make it more focused. (default: :obj:`0.9`)
+        presence_penalty (float, optional): Controls the repetition of
+            content in the generated results. Positive values reduce the
+            repetition of content, while negative values increase it.
+            (default: :obj:`0.0`)
+        response_format (object, optional): Specifies the format of the
+            returned content. The available values are `{"type": "text"}` or
+            `{"type": "json_object"}`. Setting it to `{"type": "json_object"}`
+            will output a standard JSON string.
+            (default: :obj:`{"type": "text"}`)
+        max_tokens (Union[int, NotGiven], optional): Allows the model to
+            generate the maximum number of tokens.
+            (default: :obj:`NOT_GIVEN`)
+        seed (int, optional): Sets the seed parameter to make the text
+            generation process more deterministic, typically used to ensure
+            that the results are consistent across model runs. By passing the
+            same seed value (specified by you) in each model call while
+            keeping other parameters unchanged, the model is likely to return
+            the same result.
+            (default: :obj:`None`)
+        stop (str or list, optional): Using the stop parameter, the model will
+            automatically stop generating text when it is about to include the
+            specified string or token_id. You can use the stop parameter to
+            control the output of the model by passing sensitive words.
+            (default: :obj:`None`)
+        tools (list, optional): Specifies an array of tools that the model can
+            call. It can contain one or more tool objects. During a function
+            call process, the model will select one tool from the array.
+            (default: :obj:`None`)
+        extra_body (dict, optional): Additional parameters to be sent to the
+            Qwen API. If you want to enable internet search, you can set this
+            parameter to `{"enable_search": True}`.
+            (default: :obj:`{"enable_search": False}`)
+        include_usage (bool, optional): When streaming, specifies whether to
+            include usage information in `stream_options`. (default:
+            :obj:`True`)
+    """
+
+    stream: bool = False
+    temperature: float = 0.3
+    top_p: float = 0.9
+    presence_penalty: float = 0.0
+    response_format: ClassVar[dict] = {"type": "text"}
+    max_tokens: Union[int, NotGiven] = NOT_GIVEN
+    seed: Optional[int] = None
+    stop: Optional[Union[str, list]] = None
+    extra_body: ClassVar[dict] = {"enable_search": False}
+
+    def __init__(self, include_usage: bool = True, **kwargs):
+        super().__init__(**kwargs)
+        # Only set stream_options when stream is True
+        # Otherwise, it will raise error when calling the API
+        if self.stream:
+            self.stream_options = {"include_usage": include_usage}
+
+
+QWEN_API_PARAMS = {param for param in QwenConfig.model_fields.keys()}
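A minimal usage sketch for the new config. Note that the `__init__` above only attaches `stream_options` when `stream=True`:

```python
from camel.configs import QwenConfig

# include_usage defaults to True and only takes effect in streaming mode.
config = QwenConfig(temperature=0.7, stream=True)
print(config.as_dict())
```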
camel/configs/samba_config.py
CHANGED
@@ -13,7 +13,7 @@
 # =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
 from __future__ import annotations

-from typing import Any, Dict, Optional, Sequence, Union
+from typing import Any, Optional, Sequence, Union

 from pydantic import Field

@@ -21,43 +21,6 @@ from camel.configs.base_config import BaseConfig
 from camel.types import NOT_GIVEN, NotGiven


-class SambaFastAPIConfig(BaseConfig):
-    r"""Defines the parameters for generating chat completions using the
-    SambaNova Fast API.
-
-    Args:
-        max_tokens (Optional[int], optional): the maximum number of tokens to
-            generate, e.g. 100.
-            (default: :obj:`2048`)
-        stop (Optional[Union[str,list[str]]]): Stop generation if this token
-            is detected. Or if one of these tokens is detected when providing
-            a string list.
-            (default: :obj:`None`)
-        stream (Optional[bool]): If True, partial message deltas will be sent
-            as data-only server-sent events as they become available.
-            Currently SambaNova Fast API only support stream mode.
-            (default: :obj:`True`)
-        stream_options (Optional[Dict]): Additional options for streaming.
-            (default: :obj:`{"include_usage": True}`)
-    """
-
-    max_tokens: Optional[int] = 2048
-    stop: Optional[Union[str, list[str]]] = None
-    stream: Optional[bool] = True
-    stream_options: Optional[Dict] = {"include_usage": True}  # noqa: RUF012
-
-    def as_dict(self) -> dict[str, Any]:
-        config_dict = super().as_dict()
-        if "tools" in config_dict:
-            del config_dict["tools"]  # SambaNova does not support tool calling
-        return config_dict
-
-
-SAMBA_FAST_API_PARAMS = {
-    param for param in SambaFastAPIConfig().model_fields.keys()
-}
-
-
 class SambaVerseAPIConfig(BaseConfig):
     r"""Defines the parameters for generating chat completions using the
     SambaVerse API.
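Code that configured the SambaNova Fast API must migrate to one of the surviving configs. Which one is correct depends on the endpoint in use; `SambaCloudAPIConfig` below is an assumption, not a documented migration path:

```python
from camel.configs import SambaCloudAPIConfig

config = SambaCloudAPIConfig()  # SambaFastAPIConfig was removed in 0.2.7
```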
camel/configs/yi_config.py
ADDED

@@ -0,0 +1,58 @@
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+# Licensed under the Apache License, Version 2.0 (the “License”);
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an “AS IS” BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
+from __future__ import annotations
+
+from typing import Optional, Union
+
+from camel.configs.base_config import BaseConfig
+from camel.types import NOT_GIVEN, NotGiven
+
+
+class YiConfig(BaseConfig):
+    r"""Defines the parameters for generating chat completions using the
+    Yi API. You can refer to the following link for more details:
+    https://platform.lingyiwanwu.com/docs/api-reference
+
+    Args:
+        tool_choice (Union[dict[str, str], str], optional): Controls which (if
+            any) tool is called by the model. :obj:`"none"` means the model
+            will not call any tool and instead generates a message.
+            :obj:`"auto"` means the model can pick between generating a
+            message or calling one or more tools. :obj:`"required"` or
+            specifying a particular tool via
+            {"type": "function", "function": {"name": "some_function"}}
+            can be used to guide the model to use tools more strongly.
+            (default: :obj:`None`)
+        max_tokens (int, optional): Specifies the maximum number of tokens
+            the model can generate. This sets an upper limit, but does not
+            guarantee that this number will always be reached.
+            (default: :obj:`5000`)
+        top_p (float, optional): Controls the randomness of the generated
+            results. Lower values lead to less randomness, while higher
+            values increase randomness. (default: :obj:`0.9`)
+        temperature (float, optional): Controls the diversity and focus of
+            the generated results. Lower values make the output more focused,
+            while higher values make it more diverse. (default: :obj:`0.3`)
+        stream (bool, optional): If True, enables streaming output.
+            (default: :obj:`False`)
+    """
+
+    tool_choice: Optional[Union[dict[str, str], str]] = None
+    max_tokens: Union[int, NotGiven] = NOT_GIVEN
+    top_p: float = 0.9
+    temperature: float = 0.3
+    stream: bool = False
+
+
+YI_API_PARAMS = {param for param in YiConfig.model_fields.keys()}
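And a matching sketch for the new Yi config:

```python
from camel.configs import YiConfig

# tool_choice follows the OpenAI-style convention documented above.
config = YiConfig(temperature=0.3, tool_choice="auto")
print(config.as_dict())
```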