camel-ai 0.2.46__py3-none-any.whl → 0.2.47__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/datasets/few_shot_generator.py +19 -3
- camel/datasets/models.py +1 -1
- camel/loaders/__init__.py +2 -0
- camel/loaders/scrapegraph_reader.py +96 -0
- camel/models/openai_model.py +3 -1
- camel/storages/__init__.py +2 -0
- camel/storages/vectordb_storages/__init__.py +2 -0
- camel/storages/vectordb_storages/oceanbase.py +458 -0
- camel/toolkits/__init__.py +2 -0
- camel/toolkits/browser_toolkit.py +4 -7
- camel/toolkits/jina_reranker_toolkit.py +231 -0
- camel/toolkits/search_toolkit.py +167 -0
- camel/types/enums.py +6 -0
- camel/utils/token_counting.py +7 -3
- {camel_ai-0.2.46.dist-info → camel_ai-0.2.47.dist-info}/METADATA +12 -1
- {camel_ai-0.2.46.dist-info → camel_ai-0.2.47.dist-info}/RECORD +19 -16
- {camel_ai-0.2.46.dist-info → camel_ai-0.2.47.dist-info}/WHEEL +0 -0
- {camel_ai-0.2.46.dist-info → camel_ai-0.2.47.dist-info}/licenses/LICENSE +0 -0
camel/toolkits/browser_toolkit.py
CHANGED

@@ -301,9 +301,9 @@ def _add_set_of_mark(
 
     Returns:
         Tuple[Image.Image, List[str], List[str], List[str]]: A tuple
-
-
-
+            containing the screenshot with marked ROIs, ROIs fully within the
+            images, ROIs located above the visible area, and ROIs located below
+            the visible area.
     """
     visible_rects: List[str] = list()
     rects_above: List[str] = list()  # Scroll up to see

@@ -1013,8 +1013,6 @@ class BrowserToolkit(BaseToolkit):
         self.browser = BaseBrowser(
             headless=headless, cache_dir=cache_dir, channel=channel
         )
-        # This needs to be called explicitly
-        self.browser.init()
 
         self.history_window = history_window
         self.web_agent_model = web_agent_model

@@ -1037,7 +1035,7 @@ class BrowserToolkit(BaseToolkit):
         if self.web_agent_model is None:
            web_agent_model = ModelFactory.create(
                 model_platform=ModelPlatformType.OPENAI,
-                model_type=ModelType.
+                model_type=ModelType.GPT_4_1,
                 model_config_dict={"temperature": 0, "top_p": 1},
             )
         else:

@@ -1499,7 +1497,6 @@ Your output should be in json format, including the following fields:
         else:
             simulation_result = self._get_final_answer(task_prompt)
 
-        self.browser.close()
         return simulation_result
 
     def get_tools(self) -> List[FunctionTool]:
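
The browser_toolkit changes above drop the explicit `self.browser.init()` / `self.browser.close()` calls and switch the default web-agent model to `ModelType.GPT_4_1`. As a rough, non-authoritative sketch of how the toolkit might be used after this release (only the constructor arguments visible in the diff context are assumed, and the browser lifecycle is assumed to be handled internally now):

# Hypothetical usage sketch; not taken from the camel-ai documentation.
from camel.toolkits import BrowserToolkit

# headless, cache_dir, channel, history_window and web_agent_model are the
# parameters visible in the diff context; leaving web_agent_model unset now
# falls back to an OpenAI GPT_4_1 backend created via ModelFactory.
toolkit = BrowserToolkit(headless=True)

# No explicit browser.init() or browser.close() calls appear here anymore.
tools = toolkit.get_tools()
print(f"BrowserToolkit exposes {len(tools)} function tool(s)")
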
camel/toolkits/jina_reranker_toolkit.py
ADDED

@@ -0,0 +1,231 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+from typing import List, Optional, Tuple
+
+from camel.toolkits import FunctionTool
+from camel.toolkits.base import BaseToolkit
+from camel.utils import MCPServer
+
+
+@MCPServer()
+class JinaRerankerToolkit(BaseToolkit):
+    r"""A class representing a toolkit for reranking documents
+    using Jina Reranker.
+
+    This class provides methods for reranking documents (text or images)
+    based on their relevance to a given query using the Jina Reranker model.
+    """
+
+    def __init__(
+        self,
+        timeout: Optional[float] = None,
+        device: Optional[str] = None,
+    ) -> None:
+        r"""Initializes a new instance of the JinaRerankerToolkit class.
+
+        Args:
+            timeout (Optional[float]): The timeout value for API requests
+                in seconds. If None, no timeout is applied.
+                (default: :obj:`None`)
+            device (Optional[str]): Device to load the model on. If None,
+                will use CUDA if available, otherwise CPU.
+                (default: :obj:`None`)
+        """
+        import torch
+        from transformers import AutoModel
+
+        super().__init__(timeout=timeout)
+
+        self.model = AutoModel.from_pretrained(
+            'jinaai/jina-reranker-m0',
+            torch_dtype="auto",
+            trust_remote_code=True,
+        )
+        DEVICE = (
+            device
+            if device is not None
+            else ("cuda" if torch.cuda.is_available() else "cpu")
+        )
+        self.model.to(DEVICE)
+        self.model.eval()
+
+    def _sort_documents(
+        self, documents: List[str], scores: List[float]
+    ) -> List[Tuple[str, float]]:
+        r"""Sort documents by their scores in descending order.
+
+        Args:
+            documents (List[str]): List of documents to sort.
+            scores (List[float]): Corresponding scores for each document.
+
+        Returns:
+            List[Tuple[str, float]]: Sorted list of (document, score) pairs.
+
+        Raises:
+            ValueError: If documents and scores have different lengths.
+        """
+        if len(documents) != len(scores):
+            raise ValueError("Number of documents must match number of scores")
+        doc_score_pairs = list(zip(documents, scores))
+        doc_score_pairs.sort(key=lambda x: x[1], reverse=True)
+
+        return doc_score_pairs
+
+    def rerank_text_documents(
+        self,
+        query: str,
+        documents: List[str],
+        max_length: int = 1024,
+    ) -> List[Tuple[str, float]]:
+        r"""Reranks text documents based on their relevance to a text query.
+
+        Args:
+            query (str): The text query for reranking.
+            documents (List[str]): List of text documents to be reranked.
+            max_length (int): Maximum token length for processing.
+                (default: :obj:`1024`)
+
+        Returns:
+            List[Tuple[str, float]]: A list of tuples containing
+                the reranked documents and their relevance scores.
+        """
+        import torch
+
+        if self.model is None:
+            raise ValueError(
+                "Model has not been initialized or failed to initialize."
+            )
+
+        with torch.inference_mode():
+            text_pairs = [[query, doc] for doc in documents]
+            scores = self.model.compute_score(
+                text_pairs, max_length=max_length, doc_type="text"
+            )
+
+        return self._sort_documents(documents, scores)
+
+    def rerank_image_documents(
+        self,
+        query: str,
+        documents: List[str],
+        max_length: int = 2048,
+    ) -> List[Tuple[str, float]]:
+        r"""Reranks image documents based on their relevance to a text query.
+
+        Args:
+            query (str): The text query for reranking.
+            documents (List[str]): List of image URLs or paths to be reranked.
+            max_length (int): Maximum token length for processing.
+                (default: :obj:`2048`)
+
+        Returns:
+            List[Tuple[str, float]]: A list of tuples containing
+                the reranked image URLs/paths and their relevance scores.
+        """
+        import torch
+
+        if self.model is None:
+            raise ValueError(
+                "Model has not been initialized or failed to initialize."
+            )
+
+        with torch.inference_mode():
+            image_pairs = [[query, doc] for doc in documents]
+            scores = self.model.compute_score(
+                image_pairs, max_length=max_length, doc_type="image"
+            )
+
+        return self._sort_documents(documents, scores)
+
+    def image_query_text_documents(
+        self,
+        image_query: str,
+        documents: List[str],
+        max_length: int = 2048,
+    ) -> List[Tuple[str, float]]:
+        r"""Reranks text documents based on their relevance to an image query.
+
+        Args:
+            image_query (str): The image URL or path used as query.
+            documents (List[str]): List of text documents to be reranked.
+            max_length (int): Maximum token length for processing.
+                (default: :obj:`2048`)
+
+        Returns:
+            List[Tuple[str, float]]: A list of tuples containing
+                the reranked documents and their relevance scores.
+        """
+        import torch
+
+        if self.model is None:
+            raise ValueError("Model has not been initialized.")
+        with torch.inference_mode():
+            image_pairs = [[image_query, doc] for doc in documents]
+            scores = self.model.compute_score(
+                image_pairs,
+                max_length=max_length,
+                query_type="image",
+                doc_type="text",
+            )
+
+        return self._sort_documents(documents, scores)
+
+    def image_query_image_documents(
+        self,
+        image_query: str,
+        documents: List[str],
+        max_length: int = 2048,
+    ) -> List[Tuple[str, float]]:
+        r"""Reranks image documents based on their relevance to an image query.
+
+        Args:
+            image_query (str): The image URL or path used as query.
+            documents (List[str]): List of image URLs or paths to be reranked.
+            max_length (int): Maximum token length for processing.
+                (default: :obj:`2048`)
+
+        Returns:
+            List[Tuple[str, float]]: A list of tuples containing
+                the reranked image URLs/paths and their relevance scores.
+        """
+        import torch
+
+        if self.model is None:
+            raise ValueError("Model has not been initialized.")
+
+        with torch.inference_mode():
+            image_pairs = [[image_query, doc] for doc in documents]
+            scores = self.model.compute_score(
+                image_pairs,
+                max_length=max_length,
+                query_type="image",
+                doc_type="image",
+            )
+
+        return self._sort_documents(documents, scores)
+
+    def get_tools(self) -> List[FunctionTool]:
+        r"""Returns a list of FunctionTool objects representing the
+        functions in the toolkit.
+
+        Returns:
+            List[FunctionTool]: A list of FunctionTool objects
+                representing the functions in the toolkit.
+        """
+        return [
+            FunctionTool(self.rerank_text_documents),
+            FunctionTool(self.rerank_image_documents),
+            FunctionTool(self.image_query_text_documents),
+            FunctionTool(self.image_query_image_documents),
+        ]
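
Since `JinaRerankerToolkit` is a new module in this release, a short usage sketch may be helpful. It relies only on the methods shown above; the query and documents are illustrative, and loading `jinaai/jina-reranker-m0` requires `torch` and `transformers` to be installed:

# Illustrative sketch for the new JinaRerankerToolkit; inputs are made up.
from camel.toolkits.jina_reranker_toolkit import JinaRerankerToolkit

reranker = JinaRerankerToolkit(device="cpu")  # None would auto-select CUDA/CPU

docs = [
    "CAMEL is a multi-agent framework.",
    "A reranker scores query-document pairs by relevance.",
    "Unrelated text about cooking pasta.",
]

# Returns (document, score) pairs sorted by descending relevance score.
ranked = reranker.rerank_text_documents(
    query="What does a reranker do?", documents=docs
)
for doc, score in ranked:
    print(f"{score:.4f}  {doc}")
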
camel/toolkits/search_toolkit.py
CHANGED

@@ -1064,6 +1064,172 @@ class SearchToolkit(BaseToolkit):
         except Exception as e:
             return {"error": f"Exa search failed: {e!s}"}
 
+    @api_keys_required([(None, 'TONGXIAO_API_KEY')])
+    def search_alibaba_tongxiao(
+        self,
+        query: str,
+        time_range: Literal[
+            "OneDay", "OneWeek", "OneMonth", "OneYear", "NoLimit"
+        ] = "NoLimit",
+        industry: Optional[
+            Literal[
+                "finance",
+                "law",
+                "medical",
+                "internet",
+                "tax",
+                "news_province",
+                "news_center",
+            ]
+        ] = None,
+        page: int = 1,
+        return_main_text: bool = False,
+        return_markdown_text: bool = True,
+        enable_rerank: bool = True,
+    ) -> Dict[str, Any]:
+        r"""Query the Alibaba Tongxiao search API and return search results.
+
+        A powerful search API optimized for Chinese language queries with
+        features:
+        - Enhanced Chinese language understanding
+        - Industry-specific filtering (finance, law, medical, etc.)
+        - Structured data with markdown formatting
+        - Result reranking for relevance
+        - Time-based filtering
+
+        Args:
+            query (str): The search query string (length >= 1 and <= 100).
+            time_range (Literal["OneDay", "OneWeek", "OneMonth", "OneYear",
+                "NoLimit"]): Time frame filter for search results.
+                (default: :obj:`"NoLimit"`)
+            industry (Optional[Literal["finance", "law", "medical",
+                "internet", "tax", "news_province", "news_center"]]):
+                Industry-specific search filter. When specified, only returns
+                results from sites in the specified industries. Multiple
+                industries can be comma-separated.
+                (default: :obj:`None`)
+            page (int): Page number for results pagination.
+                (default: :obj:`1`)
+            return_main_text (bool): Whether to include the main text of the
+                webpage in results. (default: :obj:`False`)
+            return_markdown_text (bool): Whether to include markdown formatted
+                content in results. (default: :obj:`True`)
+            enable_rerank (bool): Whether to enable result reranking. If
+                response time is critical, setting this to False can reduce
+                response time by approximately 140ms. (default: :obj:`True`)
+
+        Returns:
+            Dict[str, Any]: A dictionary containing either search results with
+                'requestId' and 'results' keys, or an 'error' key with error
+                message. Each result contains title, snippet, url and other
+                metadata.
+        """
+        TONGXIAO_API_KEY = os.getenv("TONGXIAO_API_KEY")
+
+        # Validate query length
+        if not query or len(query) > 100:
+            return {
+                "error": "Query length must be between 1 and 100 characters"
+            }
+
+        # API endpoint and parameters
+        base_url = "https://cloud-iqs.aliyuncs.com/search/genericSearch"
+        headers = {
+            "X-API-Key": TONGXIAO_API_KEY,
+        }
+
+        # Convert boolean parameters to string for compatibility with requests
+        params: Dict[str, Union[str, int]] = {
+            "query": query,
+            "timeRange": time_range,
+            "page": page,
+            "returnMainText": str(return_main_text).lower(),
+            "returnMarkdownText": str(return_markdown_text).lower(),
+            "enableRerank": str(enable_rerank).lower(),
+        }
+
+        # Only add industry parameter if specified
+        if industry is not None:
+            params["industry"] = industry
+
+        try:
+            # Send GET request with proper typing for params
+            response = requests.get(
+                base_url, headers=headers, params=params, timeout=10
+            )
+
+            # Check response status
+            if response.status_code != 200:
+                return {
+                    "error": (
+                        f"Alibaba Tongxiao API request failed with status "
+                        f"code {response.status_code}: {response.text}"
+                    )
+                }
+
+            # Parse JSON response
+            data = response.json()
+
+            # Extract and format pageItems
+            page_items = data.get("pageItems", [])
+            results = []
+            for idx, item in enumerate(page_items):
+                # Create a simplified result structure
+                result = {
+                    "result_id": idx + 1,
+                    "title": item.get("title", ""),
+                    "snippet": item.get("snippet", ""),
+                    "url": item.get("link", ""),
+                    "hostname": item.get("hostname", ""),
+                }
+
+                # Only include additional fields if they exist and are
+                # requested
+                if "summary" in item and item.get("summary"):
+                    result["summary"] = item["summary"]
+                elif (
+                    return_main_text
+                    and "mainText" in item
+                    and item.get("mainText")
+                ):
+                    result["summary"] = item["mainText"]
+
+                if (
+                    return_main_text
+                    and "mainText" in item
+                    and item.get("mainText")
+                ):
+                    result["main_text"] = item["mainText"]
+
+                if (
+                    return_markdown_text
+                    and "markdownText" in item
+                    and item.get("markdownText")
+                ):
+                    result["markdown_text"] = item["markdownText"]
+
+                if "score" in item:
+                    result["score"] = item["score"]
+
+                if "publishTime" in item:
+                    result["publish_time"] = item["publishTime"]
+
+                results.append(result)
+
+            # Return a simplified structure
+            return {
+                "request_id": data.get("requestId", ""),
+                "results": results,
+            }
+
+        except requests.exceptions.RequestException as e:
+            return {"error": f"Alibaba Tongxiao search request failed: {e!s}"}
+        except Exception as e:
+            return {
+                "error": f"Unexpected error during Alibaba Tongxiao "
+                f"search: {e!s}"
+            }
+
     def get_tools(self) -> List[FunctionTool]:
         r"""Returns a list of FunctionTool objects representing the
         functions in the toolkit.

@@ -1084,4 +1250,5 @@ class SearchToolkit(BaseToolkit):
             FunctionTool(self.search_baidu),
             FunctionTool(self.search_bing),
             FunctionTool(self.search_exa),
+            FunctionTool(self.search_alibaba_tongxiao),
         ]
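
A hedged sketch of calling the new `search_alibaba_tongxiao` method follows. The query and environment setup are illustrative; a valid `TONGXIAO_API_KEY` and network access are required, and the import of `SearchToolkit` from `camel.toolkits` is assumed from the existing package layout:

# Illustrative call of the new Alibaba Tongxiao search method; values are made up.
import os

from camel.toolkits import SearchToolkit

os.environ["TONGXIAO_API_KEY"] = "your-api-key-here"  # placeholder

toolkit = SearchToolkit()
response = toolkit.search_alibaba_tongxiao(
    query="多智能体框架",  # Chinese-optimized queries, 1-100 characters
    time_range="OneMonth",
    industry="internet",
    return_markdown_text=True,
)

if "error" in response:
    print(response["error"])
else:
    for item in response["results"]:
        print(item["title"], item["url"])
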
camel/types/enums.py
CHANGED

@@ -42,6 +42,8 @@ class ModelType(UnifiedModelType, Enum):
     GPT_4_1 = "gpt-4.1-2025-04-14"
     GPT_4_1_MINI = "gpt-4.1-mini-2025-04-14"
     GPT_4_1_NANO = "gpt-4.1-nano-2025-04-14"
+    O4_MINI = "o4-mini"
+    O3 = "o3"
 
     AWS_CLAUDE_3_7_SONNET = "anthropic.claude-3-7-sonnet-20250219-v1:0"
     AWS_CLAUDE_3_5_SONNET = "anthropic.claude-3-5-sonnet-20241022-v2:0"

@@ -351,6 +353,8 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.GPT_4_1,
             ModelType.GPT_4_1_MINI,
             ModelType.GPT_4_1_NANO,
+            ModelType.O4_MINI,
+            ModelType.O3,
         }
 
     @property

@@ -893,6 +897,8 @@ class ModelType(UnifiedModelType, Enum):
             ModelType.AWS_CLAUDE_3_HAIKU,
             ModelType.AWS_CLAUDE_3_SONNET,
             ModelType.AWS_CLAUDE_3_7_SONNET,
+            ModelType.O4_MINI,
+            ModelType.O3,
         }:
             return 200_000
         elif self in {
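
The new `O3` and `O4_MINI` members should be usable anywhere a `ModelType` is accepted. A minimal sketch reusing the `ModelFactory.create(...)` call pattern that appears verbatim in the browser_toolkit diff above (the import paths are assumed from the package layout, and no model config is passed because the diff does not specify any for these models):

# Minimal sketch: creating a backend for one of the newly added model types.
from camel.models import ModelFactory
from camel.types import ModelPlatformType, ModelType

o3_backend = ModelFactory.create(
    model_platform=ModelPlatformType.OPENAI,
    model_type=ModelType.O3,
)
print(ModelType.O3.value)       # "o3"
print(ModelType.O4_MINI.value)  # "o4-mini"
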
camel/utils/token_counting.py
CHANGED

@@ -136,7 +136,11 @@ class OpenAITokenCounter(BaseTokenCounter):
         elif ("gpt-3.5-turbo" in self.model) or ("gpt-4" in self.model):
             self.tokens_per_message = 3
             self.tokens_per_name = 1
-        elif (
+        elif (
+            ("o1" in self.model)
+            or ("o3" in self.model)
+            or ("o4" in self.model)
+        ):
             self.tokens_per_message = 2
             self.tokens_per_name = 1
         else:

@@ -144,8 +148,8 @@ class OpenAITokenCounter(BaseTokenCounter):
             raise NotImplementedError(
                 "Token counting for OpenAI Models is not presently "
                 f"implemented for model {model}. "
-                "See https://github.com/openai/openai-python/blob/main/chatml
-                "for information on how messages are converted to tokens. "
+                "See https://github.com/openai/openai-python/blob/main/chatml"
+                ".md for information on how messages are converted to tokens. "
                 "See https://platform.openai.com/docs/models/gpt-4"
                 "or https://platform.openai.com/docs/models/gpt-3-5"
                 "for information about openai chat models."
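
The token_counting change above extends the per-message overhead rule so that o1/o3/o4 style model names get 2 tokens per message instead of 3. A standalone sketch of just that branching (a mirror of the logic in the diff, not the `OpenAITokenCounter` class itself):

# Standalone sketch of the per-message overhead selection shown in the diff.
from typing import Tuple


def per_message_overhead(model: str) -> Tuple[int, int]:
    """Return (tokens_per_message, tokens_per_name) for a model name."""
    if "gpt-3.5-turbo" in model or "gpt-4" in model:
        return 3, 1
    if "o1" in model or "o3" in model or "o4" in model:
        return 2, 1
    raise NotImplementedError(f"Token counting not implemented for {model}")


assert per_message_overhead("gpt-4.1-2025-04-14") == (3, 1)
assert per_message_overhead("o4-mini") == (2, 1)
assert per_message_overhead("o3") == (2, 1)
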
{camel_ai-0.2.46.dist-info → camel_ai-0.2.47.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: camel-ai
-Version: 0.2.46
+Version: 0.2.47
 Summary: Communicative Agents for AI Society Study
 Project-URL: Homepage, https://www.camel-ai.org/
 Project-URL: Repository, https://github.com/camel-ai/camel

@@ -89,6 +89,7 @@ Requires-Dist: pydub<0.26,>=0.25.1; extra == 'all'
 Requires-Dist: pygithub<3,>=2.6.0; extra == 'all'
 Requires-Dist: pymilvus<3,>=2.4.0; extra == 'all'
 Requires-Dist: pymupdf<2,>=1.22.5; extra == 'all'
+Requires-Dist: pyobvector>=0.1.18; extra == 'all'
 Requires-Dist: pyowm<4,>=3.3.0; extra == 'all'
 Requires-Dist: pytelegrambotapi<5,>=4.18.0; extra == 'all'
 Requires-Dist: pytest-asyncio<0.24,>=0.23.0; extra == 'all'

@@ -103,6 +104,7 @@ Requires-Dist: requests-oauthlib<2,>=1.3.1; extra == 'all'
 Requires-Dist: rouge<2,>=1.0.1; extra == 'all'
 Requires-Dist: scenedetect>=0.6.5.2; extra == 'all'
 Requires-Dist: scholarly[tor]==1.7.11; extra == 'all'
+Requires-Dist: scrapegraph-py<2,>=1.12.0; extra == 'all'
 Requires-Dist: sentence-transformers<4,>=3.0.1; extra == 'all'
 Requires-Dist: sentencepiece<0.3,>=0.2; extra == 'all'
 Requires-Dist: slack-bolt<2,>=1.20.1; extra == 'all'

@@ -253,6 +255,7 @@ Requires-Dist: python-dotenv<2,>=1.0.0; extra == 'owl'
 Requires-Dist: requests-oauthlib<2,>=1.3.1; extra == 'owl'
 Requires-Dist: rouge<2,>=1.0.1; extra == 'owl'
 Requires-Dist: scenedetect>=0.6.5.2; extra == 'owl'
+Requires-Dist: scrapegraph-py<2,>=1.12.0; extra == 'owl'
 Requires-Dist: sentencepiece<0.3,>=0.2; extra == 'owl'
 Requires-Dist: soundfile<0.14,>=0.13; extra == 'owl'
 Requires-Dist: tabulate>=0.9.0; extra == 'owl'

@@ -272,6 +275,7 @@ Requires-Dist: neo4j<6,>=5.18.0; extra == 'rag'
 Requires-Dist: numpy~=1.26; extra == 'rag'
 Requires-Dist: pandasai<3,>=2.3.0; extra == 'rag'
 Requires-Dist: pymilvus<3,>=2.4.0; extra == 'rag'
+Requires-Dist: pyobvector>=0.1.18; extra == 'rag'
 Requires-Dist: pytidb-experimental==0.0.1.dev4; extra == 'rag'
 Requires-Dist: qdrant-client<2,>=1.9.0; extra == 'rag'
 Requires-Dist: rank-bm25<0.3,>=0.2.2; extra == 'rag'

@@ -289,6 +293,7 @@ Requires-Dist: mem0ai>=0.1.73; extra == 'storage'
 Requires-Dist: nebula3-python==3.8.2; extra == 'storage'
 Requires-Dist: neo4j<6,>=5.18.0; extra == 'storage'
 Requires-Dist: pymilvus<3,>=2.4.0; extra == 'storage'
+Requires-Dist: pyobvector>=0.1.18; extra == 'storage'
 Requires-Dist: pytidb-experimental==0.0.1.dev4; extra == 'storage'
 Requires-Dist: qdrant-client<2,>=1.9.0; extra == 'storage'
 Requires-Dist: redis<6,>=5.0.6; extra == 'storage'

@@ -314,6 +319,7 @@ Requires-Dist: newspaper3k<0.3,>=0.2.8; extra == 'web-tools'
 Requires-Dist: playwright>=1.50.0; extra == 'web-tools'
 Requires-Dist: pyowm<4,>=3.3.0; extra == 'web-tools'
 Requires-Dist: requests-oauthlib<2,>=1.3.1; extra == 'web-tools'
+Requires-Dist: scrapegraph-py<2,>=1.12.0; extra == 'web-tools'
 Requires-Dist: sympy<2,>=1.13.3; extra == 'web-tools'
 Requires-Dist: tavily-python<0.6,>=0.5.0; extra == 'web-tools'
 Requires-Dist: wikipedia<2,>=1; extra == 'web-tools'

@@ -338,6 +344,9 @@ Description-Content-Type: text/markdown
 [![Hugging Face][huggingface-image]][huggingface-url]
 [![Star][star-image]][star-url]
 [![Package License][package-license-image]][package-license-url]
+[![PyPI Download][package-download-image]][package-download-url]
+
+<a href="https://trendshift.io/repositories/649" target="_blank"><img src="https://trendshift.io/api/badge/repositories/649" alt="camel-ai/camel | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
 
 </div>
 

@@ -782,6 +791,7 @@ The source code is licensed under Apache 2.0.
 [star-url]: https://github.com/camel-ai/camel/stargazers
 [package-license-image]: https://img.shields.io/badge/License-Apache_2.0-blue.svg
 [package-license-url]: https://github.com/camel-ai/camel/blob/master/licenses/LICENSE
+[package-download-image]: https://img.shields.io/pypi/dm/camel-ai
 
 [colab-url]: https://colab.research.google.com/drive/1AzP33O8rnMW__7ocWJhVBXjKziJXPtim?usp=sharing
 [colab-image]: https://colab.research.google.com/assets/colab-badge.svg

@@ -797,3 +807,4 @@ The source code is licensed under Apache 2.0.
 [reddit-url]: https://www.reddit.com/r/CamelAI/
 [reddit-image]: https://img.shields.io/reddit/subreddit-subscribers/CamelAI?style=plastic&logo=reddit&label=r%2FCAMEL&labelColor=white
 [ambassador-url]: https://www.camel-ai.org/community
+[package-download-url]: https://pypi.org/project/camel-ai