camel-ai 0.2.22__py3-none-any.whl → 0.2.23__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai has been flagged as potentially problematic; consult the registry's advisory page for this release for details.
- camel/__init__.py +1 -1
- camel/agents/_types.py +41 -0
- camel/agents/_utils.py +188 -0
- camel/agents/chat_agent.py +570 -965
- camel/agents/knowledge_graph_agent.py +7 -1
- camel/agents/multi_hop_generator_agent.py +1 -1
- camel/configs/base_config.py +10 -13
- camel/configs/deepseek_config.py +4 -30
- camel/configs/gemini_config.py +5 -31
- camel/configs/openai_config.py +14 -32
- camel/configs/qwen_config.py +36 -36
- camel/datagen/self_improving_cot.py +81 -3
- camel/datagen/self_instruct/filter/instruction_filter.py +19 -3
- camel/datagen/self_instruct/self_instruct.py +52 -3
- camel/datasets/__init__.py +28 -0
- camel/datasets/base.py +969 -0
- camel/environments/__init__.py +16 -0
- camel/environments/base.py +503 -0
- camel/extractors/__init__.py +16 -0
- camel/extractors/base.py +263 -0
- camel/memories/agent_memories.py +16 -1
- camel/memories/blocks/chat_history_block.py +10 -2
- camel/memories/blocks/vectordb_block.py +1 -0
- camel/memories/context_creators/score_based.py +20 -3
- camel/memories/records.py +10 -0
- camel/messages/base.py +8 -8
- camel/models/__init__.py +2 -0
- camel/models/_utils.py +57 -0
- camel/models/aiml_model.py +48 -17
- camel/models/anthropic_model.py +41 -3
- camel/models/azure_openai_model.py +39 -3
- camel/models/base_audio_model.py +92 -0
- camel/models/base_model.py +88 -13
- camel/models/cohere_model.py +88 -11
- camel/models/deepseek_model.py +107 -45
- camel/models/fish_audio_model.py +18 -8
- camel/models/gemini_model.py +133 -15
- camel/models/groq_model.py +72 -10
- camel/models/internlm_model.py +14 -3
- camel/models/litellm_model.py +9 -2
- camel/models/mistral_model.py +42 -5
- camel/models/model_manager.py +57 -3
- camel/models/moonshot_model.py +33 -4
- camel/models/nemotron_model.py +32 -3
- camel/models/nvidia_model.py +43 -3
- camel/models/ollama_model.py +139 -17
- camel/models/openai_audio_models.py +87 -2
- camel/models/openai_compatible_model.py +37 -3
- camel/models/openai_model.py +158 -46
- camel/models/qwen_model.py +61 -4
- camel/models/reka_model.py +53 -3
- camel/models/samba_model.py +209 -4
- camel/models/sglang_model.py +153 -14
- camel/models/siliconflow_model.py +16 -3
- camel/models/stub_model.py +46 -4
- camel/models/togetherai_model.py +38 -3
- camel/models/vllm_model.py +37 -3
- camel/models/yi_model.py +36 -3
- camel/models/zhipuai_model.py +38 -3
- camel/retrievers/__init__.py +3 -0
- camel/retrievers/hybrid_retrival.py +237 -0
- camel/toolkits/__init__.py +15 -1
- camel/toolkits/arxiv_toolkit.py +2 -1
- camel/toolkits/ask_news_toolkit.py +4 -2
- camel/toolkits/audio_analysis_toolkit.py +238 -0
- camel/toolkits/base.py +22 -3
- camel/toolkits/code_execution.py +2 -0
- camel/toolkits/dappier_toolkit.py +2 -1
- camel/toolkits/data_commons_toolkit.py +38 -12
- camel/toolkits/excel_toolkit.py +172 -0
- camel/toolkits/function_tool.py +13 -0
- camel/toolkits/github_toolkit.py +5 -1
- camel/toolkits/google_maps_toolkit.py +2 -1
- camel/toolkits/google_scholar_toolkit.py +2 -0
- camel/toolkits/human_toolkit.py +0 -3
- camel/toolkits/image_analysis_toolkit.py +202 -0
- camel/toolkits/linkedin_toolkit.py +3 -2
- camel/toolkits/meshy_toolkit.py +3 -2
- camel/toolkits/mineru_toolkit.py +2 -2
- camel/toolkits/networkx_toolkit.py +240 -0
- camel/toolkits/notion_toolkit.py +2 -0
- camel/toolkits/openbb_toolkit.py +3 -2
- camel/toolkits/page_script.js +376 -0
- camel/toolkits/reddit_toolkit.py +11 -3
- camel/toolkits/retrieval_toolkit.py +6 -1
- camel/toolkits/semantic_scholar_toolkit.py +2 -1
- camel/toolkits/stripe_toolkit.py +8 -2
- camel/toolkits/sympy_toolkit.py +6 -1
- camel/toolkits/video_analysis_toolkit.py +407 -0
- camel/toolkits/{video_toolkit.py → video_download_toolkit.py} +21 -25
- camel/toolkits/web_toolkit.py +1307 -0
- camel/toolkits/whatsapp_toolkit.py +3 -2
- camel/toolkits/zapier_toolkit.py +191 -0
- camel/types/__init__.py +2 -2
- camel/types/agents/__init__.py +16 -0
- camel/types/agents/tool_calling_record.py +52 -0
- camel/types/enums.py +3 -0
- camel/types/openai_types.py +16 -14
- camel/utils/__init__.py +2 -1
- camel/utils/async_func.py +2 -2
- camel/utils/commons.py +114 -1
- camel/verifiers/__init__.py +23 -0
- camel/verifiers/base.py +340 -0
- camel/verifiers/models.py +82 -0
- camel/verifiers/python_verifier.py +202 -0
- camel_ai-0.2.23.dist-info/METADATA +671 -0
- {camel_ai-0.2.22.dist-info → camel_ai-0.2.23.dist-info}/RECORD +122 -97
- {camel_ai-0.2.22.dist-info → camel_ai-0.2.23.dist-info}/WHEEL +1 -1
- camel_ai-0.2.22.dist-info/METADATA +0 -527
- {camel_ai-0.2.22.dist-info → camel_ai-0.2.23.dist-info/licenses}/LICENSE +0 -0
camel/__init__.py
CHANGED
camel/agents/_types.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
|
+
# you may not use this file except in compliance with the License.
|
|
4
|
+
# You may obtain a copy of the License at
|
|
5
|
+
#
|
|
6
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
+
#
|
|
8
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
+
# See the License for the specific language governing permissions and
|
|
12
|
+
# limitations under the License.
|
|
13
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
14
|
+
from typing import Any, Dict, List, Optional, Union
|
|
15
|
+
|
|
16
|
+
from openai import AsyncStream, Stream
|
|
17
|
+
from pydantic import BaseModel, ConfigDict
|
|
18
|
+
|
|
19
|
+
from camel.messages import BaseMessage
|
|
20
|
+
from camel.types import ChatCompletion
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class ToolCallRequest(BaseModel):
    r"""A request emitted by the model to invoke a specific tool.

    Attributes:
        tool_name (str): Name of the tool the model wants to call.
        args (Dict[str, Any]): Keyword arguments to pass to the tool.
        tool_call_id (str): Identifier correlating this call with its result.
    """

    tool_name: str
    args: Dict[str, Any]
    tool_call_id: str
|
|
29
|
+
|
|
30
|
+
|
|
31
|
+
class ModelResponse(BaseModel):
    r"""The response from the model.

    Bundles the raw backend response together with the pieces the agent
    layer consumes: parsed output messages, finish reasons, usage info,
    and an optional tool-call request.
    """

    # Stream/AsyncStream are not pydantic-validatable types, so arbitrary
    # types must be explicitly allowed on this model.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Raw backend object: a completed ChatCompletion or a (possibly async)
    # streaming handle.
    response: Union[ChatCompletion, Stream, AsyncStream]
    # Present when the model asked for a tool invocation, otherwise None.
    tool_call_request: Optional[ToolCallRequest]
    output_messages: List[BaseMessage]
    finish_reasons: List[str]
    usage_dict: Dict[str, Any]
    response_id: str
|
camel/agents/_utils.py
ADDED
|
@@ -0,0 +1,188 @@
|
|
|
1
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
2
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
3
|
+
# you may not use this file except in compliance with the License.
|
|
4
|
+
# You may obtain a copy of the License at
|
|
5
|
+
#
|
|
6
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
7
|
+
#
|
|
8
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
9
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
10
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
11
|
+
# See the License for the specific language governing permissions and
|
|
12
|
+
# limitations under the License.
|
|
13
|
+
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
|
|
14
|
+
import json
|
|
15
|
+
import logging
|
|
16
|
+
import re
|
|
17
|
+
import textwrap
|
|
18
|
+
from typing import Any, Callable, Dict, List, Optional, Union
|
|
19
|
+
|
|
20
|
+
from camel.agents._types import ToolCallRequest
|
|
21
|
+
from camel.toolkits import FunctionTool
|
|
22
|
+
from camel.types import Choice
|
|
23
|
+
from camel.types.agents import ToolCallingRecord
|
|
24
|
+
|
|
25
|
+
logger = logging.getLogger(__name__)
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def generate_tool_prompt(tool_schema_list: List[Dict[str, Any]]) -> str:
    r"""Generates a tool prompt based on the provided tool schema list.

    Args:
        tool_schema_list (List[Dict[str, Any]]): OpenAI-style tool schemas;
            each entry carries a ``"function"`` dict with at least ``name``
            and ``description`` keys.

    Returns:
        str: A string representing the tool prompt.
    """
    sections = []
    for schema in tool_schema_list:
        fn_info = schema["function"]
        # One section per tool: a short usage line followed by the full
        # JSON schema so the model knows the exact parameter shape.
        sections.append(
            f"Use the function '{fn_info['name']}' to "
            f"'{fn_info['description']}':\n"
            f"{json.dumps(fn_info, indent=4)}\n"
        )

    joined_tools = "\n".join(sections)

    # Doubled braces below render literal JSON braces in the f-string.
    return textwrap.dedent(
        f"""\
You have access to the following functions:

{joined_tools}

If you choose to call a function ONLY reply in the following format with no prefix or suffix:

<function=example_function_name>{{"example_name": "example_value"}}</function>

Reminder:
- Function calls MUST follow the specified format, start with <function= and end with </function>
- Required parameters MUST be specified
- Only call one function at a time
- Put the entire function call reply on one line
- If there is no function call available, answer the question like normal with your current knowledge and do not tell the user about function calls.
"""  # noqa: E501
    )
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
def extract_tool_call(
    content: str,
) -> Optional[Dict[str, Any]]:
    r"""Extract the tool call from the model response, if present.

    Args:
        content (str): The text content of the model's response, expected
            to contain at most one ``<function=name>{...}</function>``
            marker as produced by the tool prompt.

    Returns:
        Optional[Dict[str, Any]]: A dict with keys ``"function"`` (the tool
            name) and ``"arguments"`` (the parsed JSON args) if a tool call
            is present and its arguments parse as JSON, otherwise None.
    """
    # The model is instructed to put the whole call on one line, so a
    # non-DOTALL, non-greedy match is sufficient here.
    function_regex = r"<function=(\w+)>(.*?)</function>"
    match = re.search(function_regex, content)

    if not match:
        return None

    function_name, args_string = match.groups()
    try:
        args = json.loads(args_string)
        return {"function": function_name, "arguments": args}
    except json.JSONDecodeError as error:
        # Malformed JSON arguments: log and treat as "no valid tool call"
        # rather than propagating a parse error to the caller.
        logger.error(f"Error parsing function arguments: {error}")
        return None
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def safe_model_dump(obj) -> Dict[str, Any]:
    r"""Safely dump a Pydantic model to a dictionary.

    Prefers the Pydantic v2 ``model_dump`` method and falls back to the
    Pydantic v1 ``dict`` method when only that is available.

    Raises:
        TypeError: If *obj* exposes neither method.
    """
    # Probe the v2 API first, then the v1 API, mirroring pydantic's own
    # deprecation order.
    for method_name in ("model_dump", "dict"):
        if hasattr(obj, method_name):
            return getattr(obj, method_name)()
    raise TypeError("The object is not a Pydantic model")
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
def convert_to_function_tool(
    tool: Union[FunctionTool, Callable],
) -> FunctionTool:
    r"""Convert a tool to a FunctionTool from Callable."""
    # Already wrapped: pass through unchanged.
    if isinstance(tool, FunctionTool):
        return tool
    return FunctionTool(tool)
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
def convert_to_schema(
    tool: Union[FunctionTool, Callable, Dict[str, Any]],
) -> Dict[str, Any]:
    r"""Convert a tool to a schema from Callable or FunctionTool."""
    # Check FunctionTool before callable() — a FunctionTool instance may
    # itself be callable, and we want its own schema, not a re-wrap.
    if isinstance(tool, FunctionTool):
        return tool.get_openai_tool_schema()
    if callable(tool):
        return FunctionTool(tool).get_openai_tool_schema()
    # Anything else is assumed to already be a schema dict.
    return tool
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
def get_info_dict(
    session_id: Optional[str],
    usage: Optional[Dict[str, int]],
    termination_reasons: List[str],
    num_tokens: int,
    tool_calls: "List[ToolCallingRecord]",
    external_tool_call_request: "Optional[ToolCallRequest]" = None,
) -> Dict[str, Any]:
    r"""Returns a dictionary containing information about the chat session.

    Args:
        session_id (str, optional): The ID of the chat session.
        usage (Dict[str, int], optional): Information about the usage of
            the LLM.
        termination_reasons (List[str]): The reasons for the termination
            of the chat session.
        num_tokens (int): The number of tokens used in the chat session.
        tool_calls (List[ToolCallingRecord]): The list of function
            calling records, containing the information of called tools.
        external_tool_call_request (Optional[ToolCallRequest]): The
            request for external tool call.

    Returns:
        Dict[str, Any]: The chat session information.
    """
    # Pure packaging: the arguments are passed through untouched under
    # fixed keys (note the "id" key for session_id).
    return dict(
        id=session_id,
        usage=usage,
        termination_reasons=termination_reasons,
        num_tokens=num_tokens,
        tool_calls=tool_calls,
        external_tool_call_request=external_tool_call_request,
    )
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
def handle_logprobs(choice: "Choice") -> Optional[List[Dict[str, Any]]]:
    r"""Flatten the log-probability data attached to a chat choice.

    Returns None when the choice carries no logprob content; otherwise one
    dict per generated token holding the token text, its logprob, and its
    (token, logprob) alternatives.
    """
    logprobs = choice.logprobs
    if logprobs is None or logprobs.content is None:
        return None

    result: List[Dict[str, Any]] = []
    for entry in logprobs.content:
        # Collapse each alternative to a lightweight (token, logprob) pair.
        alternatives = [
            (alt.token, alt.logprob) for alt in entry.top_logprobs
        ]
        result.append(
            {
                "token": entry.token,
                "logprob": entry.logprob,
                "top_logprobs": alternatives,
            }
        )
    return result
|