camel-ai 0.2.18__py3-none-any.whl → 0.2.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (53)
  1. camel/__init__.py +1 -1
  2. camel/agents/chat_agent.py +29 -30
  3. camel/agents/knowledge_graph_agent.py +1 -5
  4. camel/agents/multi_hop_generator_agent.py +35 -3
  5. camel/agents/programmed_agent_instruction.py +73 -18
  6. camel/benchmarks/apibench.py +1 -5
  7. camel/benchmarks/nexus.py +1 -5
  8. camel/benchmarks/ragbench.py +2 -2
  9. camel/bots/telegram_bot.py +1 -5
  10. camel/configs/__init__.py +9 -0
  11. camel/configs/aiml_config.py +80 -0
  12. camel/configs/gemini_config.py +1 -1
  13. camel/configs/moonshot_config.py +63 -0
  14. camel/configs/sglang_config.py +4 -0
  15. camel/configs/siliconflow_config.py +91 -0
  16. camel/datagen/__init__.py +3 -1
  17. camel/datagen/self_improving_cot.py +821 -0
  18. camel/datagen/source2synth/__init__.py +31 -0
  19. camel/{synthetic_datagen → datagen}/source2synth/data_processor.py +194 -29
  20. camel/{synthetic_datagen → datagen}/source2synth/models.py +25 -0
  21. camel/{synthetic_datagen → datagen}/source2synth/user_data_processor_config.py +9 -8
  22. camel/datahubs/huggingface.py +3 -3
  23. camel/embeddings/__init__.py +2 -0
  24. camel/embeddings/jina_embedding.py +161 -0
  25. camel/messages/func_message.py +1 -1
  26. camel/models/__init__.py +4 -0
  27. camel/models/aiml_model.py +147 -0
  28. camel/models/deepseek_model.py +29 -11
  29. camel/models/groq_model.py +0 -2
  30. camel/models/model_factory.py +9 -0
  31. camel/models/moonshot_model.py +138 -0
  32. camel/models/openai_model.py +1 -9
  33. camel/models/siliconflow_model.py +142 -0
  34. camel/societies/workforce/role_playing_worker.py +2 -4
  35. camel/societies/workforce/single_agent_worker.py +1 -6
  36. camel/societies/workforce/workforce.py +3 -9
  37. camel/toolkits/__init__.py +4 -0
  38. camel/toolkits/reddit_toolkit.py +8 -38
  39. camel/toolkits/search_toolkit.py +17 -6
  40. camel/toolkits/semantic_scholar_toolkit.py +308 -0
  41. camel/toolkits/sympy_toolkit.py +778 -0
  42. camel/toolkits/whatsapp_toolkit.py +11 -32
  43. camel/types/enums.py +205 -16
  44. camel/types/unified_model_type.py +5 -0
  45. camel/utils/__init__.py +7 -2
  46. camel/utils/commons.py +198 -21
  47. camel/utils/deduplication.py +199 -0
  48. camel/utils/token_counting.py +1 -39
  49. {camel_ai-0.2.18.dist-info → camel_ai-0.2.20.dist-info}/METADATA +17 -12
  50. {camel_ai-0.2.18.dist-info → camel_ai-0.2.20.dist-info}/RECORD +53 -41
  51. camel/datagen/{cotdatagen.py → cot_datagen.py} +0 -0
  52. {camel_ai-0.2.18.dist-info → camel_ai-0.2.20.dist-info}/LICENSE +0 -0
  53. {camel_ai-0.2.18.dist-info → camel_ai-0.2.20.dist-info}/WHEEL +0 -0
camel/toolkits/__init__.py

@@ -45,6 +45,8 @@ from .human_toolkit import HumanToolkit
 from .stripe_toolkit import StripeToolkit
 from .video_toolkit import VideoDownloaderToolkit
 from .dappier_toolkit import DappierToolkit
+from .sympy_toolkit import SymPyToolkit
+from .semantic_scholar_toolkit import SemanticScholarToolkit

 __all__ = [
     'BaseToolkit',
@@ -77,4 +79,6 @@ __all__ = [
     'MeshyToolkit',
     'OpenBBToolkit',
     'DappierToolkit',
+    'SymPyToolkit',
+    'SemanticScholarToolkit',
 ]
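
Both toolkits are now re-exported from the `camel.toolkits` package root. A minimal import sketch (`SemanticScholarToolkit` takes no constructor arguments, per its new file below; that `SymPyToolkit` also constructs with defaults is an assumption):

```python
from camel.toolkits import SemanticScholarToolkit, SymPyToolkit

# Collect the FunctionTool wrappers each toolkit exposes, e.g. to hand
# to an agent. SymPyToolkit's no-arg construction is assumed here.
tools = SemanticScholarToolkit().get_tools() + SymPyToolkit().get_tools()
```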
camel/toolkits/reddit_toolkit.py

@@ -16,10 +16,9 @@ import os
 import time
 from typing import Any, Dict, List, Union

-from requests.exceptions import RequestException
-
 from camel.toolkits import FunctionTool
 from camel.toolkits.base import BaseToolkit
+from camel.utils import retry_on_error


 class RedditToolkit(BaseToolkit):
@@ -61,30 +60,7 @@ class RedditToolkit(BaseToolkit):
             request_timeout=30,  # Set a timeout to handle delays
         )

-    def _retry_request(self, func, *args, **kwargs):
-        r"""Retries a function in case of network-related errors.
-
-        Args:
-            func (callable): The function to be retried.
-            *args: Arguments to pass to the function.
-            **kwargs: Keyword arguments to pass to the function.
-
-        Returns:
-            Any: The result of the function call if successful.
-
-        Raises:
-            RequestException: If all retry attempts fail.
-        """
-        for attempt in range(self.retries):
-            try:
-                return func(*args, **kwargs)
-            except RequestException as e:
-                print(f"Attempt {attempt + 1}/{self.retries} failed: {e}")
-                if attempt < self.retries - 1:
-                    time.sleep(self.delay)
-                else:
-                    raise
-
+    @retry_on_error()
     def collect_top_posts(
         self,
         subreddit_name: str,
@@ -113,8 +89,8 @@ class RedditToolkit(BaseToolkit):
                 "Please set the environment variables."
             )

-        subreddit = self._retry_request(self.reddit.subreddit, subreddit_name)
-        top_posts = self._retry_request(subreddit.top, limit=post_limit)
+        subreddit = self.reddit.subreddit(subreddit_name)
+        top_posts = subreddit.top(limit=post_limit)
         data = []

         for post in top_posts:
@@ -122,9 +98,7 @@ class RedditToolkit(BaseToolkit):
                 "Post Title": post.title,
                 "Comments": [
                     {"Comment Body": comment.body, "Upvotes": comment.score}
-                    for comment in self._retry_request(
-                        lambda post=post: list(post.comments)
-                    )[:comment_limit]
+                    for comment in list(post.comments)[:comment_limit]
                 ],
             }
             data.append(post_data)
@@ -192,15 +166,11 @@ class RedditToolkit(BaseToolkit):
         data = []

         for subreddit_name in subreddits:
-            subreddit = self._retry_request(
-                self.reddit.subreddit, subreddit_name
-            )
-            top_posts = self._retry_request(subreddit.top, limit=post_limit)
+            subreddit = self.reddit.subreddit(subreddit_name)
+            top_posts = subreddit.top(limit=post_limit)

             for post in top_posts:
-                for comment in self._retry_request(
-                    lambda post=post: list(post.comments)
-                )[:comment_limit]:
+                for comment in list(post.comments)[:comment_limit]:
                     # Print comment body for debugging
                     if any(
                         keyword.lower() in comment.body.lower()
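
The hand-rolled `_retry_request` helper is gone; retries are now applied declaratively via the `retry_on_error` decorator exported from `camel.utils` (its implementation lands in `camel/utils/commons.py`, per the file list, and is not shown in this diff). A hypothetical sketch of such a decorator, with an assumed signature and backoff policy:

```python
import functools
import time


def retry_on_error(max_retries: int = 3, initial_delay: float = 1.0):
    """Hypothetical sketch only; the real decorator in camel/utils/commons.py
    may use a different signature and retry policy."""

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            delay = initial_delay
            for attempt in range(1, max_retries + 1):
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    if attempt == max_retries:
                        raise
                    print(f"Attempt {attempt}/{max_retries} failed: {e}")
                    time.sleep(delay)
                    delay *= 2  # exponential backoff between attempts

        return wrapper

    return decorator
```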
camel/toolkits/search_toolkit.py

@@ -13,10 +13,9 @@
 # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
 import os
 import xml.etree.ElementTree as ET
-from typing import Any, Dict, List, Literal, Optional, Type, TypeAlias, Union
+from typing import Any, Dict, List, Literal, Optional, TypeAlias, Union

 import requests
-from pydantic import BaseModel

 from camel.toolkits.base import BaseToolkit
 from camel.toolkits.function_tool import FunctionTool
@@ -77,7 +76,7 @@ class SearchToolkit(BaseToolkit):
         output_type: Literal[
             "searchResults", "sourcedAnswer", "structured"
         ] = "searchResults",
-        structured_output_schema: Union[Type[BaseModel], str, None] = None,
+        structured_output_schema: Optional[str] = None,
     ) -> Dict[str, Any]:
         r"""Search for a query in the Linkup API and return results in various
         formats.
@@ -92,9 +91,9 @@
                 - "searchResults" for raw search results,
                 - "sourcedAnswer" for an answer with supporting sources,
                 - "structured" for output based on a provided schema.
-            structured_output_schema (Union[Type[BaseModel], str, None]): If
-                `output_type` is "structured",specify the schema of the
-                output. Can be a Pydantic BaseModel or a JSON schema string.
+            structured_output_schema (Optional[str]): If `output_type` is
+                "structured", specify the schema of the output. Must be a
+                string representing a valid object JSON schema.

         Returns:
             Dict[str, Any]: A dictionary representing the search result. The
@@ -581,6 +580,18 @@
                 "image_url": image_url,
             }

+            # For Results pod, collect all plaintext values from subpods
+            if pod.get("@title") == "Results":
+                results_text = []
+                if isinstance(subpod_data, list):
+                    for subpod in subpod_data:
+                        if subpod.get("plaintext"):
+                            results_text.append(subpod["plaintext"])
+                else:
+                    if description:
+                        results_text.append(description)
+                pod_info["description"] = "\n".join(results_text)
+
             # Add to steps list
             output["pod_info"].append(pod_info)

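The new branch concatenates every subpod's `plaintext` into the pod description, so multi-part Wolfram Alpha results (e.g. several roots of an equation) are no longer reduced to a single value. A self-contained illustration of the same logic on made-up pod data:

```python
# Made-up data mirroring Wolfram Alpha's pod/subpod JSON layout.
pod = {
    "@title": "Results",
    "subpod": [
        {"plaintext": "x = 2"},
        {"plaintext": "x = -3"},
    ],
}

subpod_data = pod["subpod"]
results_text = []
if isinstance(subpod_data, list):
    # Multiple subpods: gather every non-empty plaintext value.
    for subpod in subpod_data:
        if subpod.get("plaintext"):
            results_text.append(subpod["plaintext"])

print("\n".join(results_text))  # prints "x = 2" then "x = -3"
```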
camel/toolkits/semantic_scholar_toolkit.py (new file)

@@ -0,0 +1,308 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+
+import json
+from typing import List, Optional
+
+import requests
+
+from camel.toolkits import FunctionTool
+from camel.toolkits.base import BaseToolkit
+
+
+class SemanticScholarToolkit(BaseToolkit):
+    r"""A toolkit for interacting with the Semantic Scholar
+    API to fetch paper and author data.
+    """
+
+    def __init__(self):
+        r"""Initializes the SemanticScholarToolkit."""
+        self.base_url = "https://api.semanticscholar.org/graph/v1"
+
+    def fetch_paper_data_title(
+        self,
+        paper_title: str,
+        fields: Optional[List[str]] = None,
+    ) -> dict:
+        r"""Fetches a SINGLE paper from the Semantic Scholar
+        API based on a paper title.
+
+        Args:
+            paper_title (str): The title of the paper to fetch.
+            fields (Optional[List[str]], optional): The fields to include in
+                the response (default: :obj:`None`). If not provided defaults
+                to ["title", "abstract", "authors", "year", "citationCount",
+                "publicationTypes", "publicationDate", "openAccessPdf"].
+
+        Returns:
+            dict: The response data from the API or error information if the
+                request fails.
+        """
+        if fields is None:
+            fields = [
+                "title",
+                "abstract",
+                "authors",
+                "year",
+                "citationCount",
+                "publicationTypes",
+                "publicationDate",
+                "openAccessPdf",
+            ]
+
+        url = f"{self.base_url}/paper/search"
+        query_params = {"query": paper_title, "fields": ",".join(fields)}
+        try:
+            response = requests.get(url, params=query_params)
+            response.raise_for_status()
+            return response.json()
+        except requests.exceptions.RequestException as e:
+            return {
+                "error": f"Request failed: {e!s}",
+                "message": str(e),
+            }
+        except ValueError:
+            return {
+                "error": "Response is not valid JSON",
+                "message": response.text,
+            }
+
+    def fetch_paper_data_id(
+        self,
+        paper_id: str,
+        fields: Optional[List[str]] = None,
+    ) -> dict:
+        r"""Fetches a SINGLE paper from the Semantic Scholar
+        API based on a paper ID.
+
+        Args:
+            paper_id (str): The ID of the paper to fetch.
+            fields (Optional[List[str]], optional): The fields to include in
+                the response (default: :obj:`None`). If not provided defaults
+                to ["title", "abstract", "authors", "year", "citationCount",
+                "publicationTypes", "publicationDate", "openAccessPdf"].
+
+        Returns:
+            dict: The response data from the API or error information
+                if the request fails.
+        """
+        if fields is None:
+            fields = [
+                "title",
+                "abstract",
+                "authors",
+                "year",
+                "citationCount",
+                "publicationTypes",
+                "publicationDate",
+                "openAccessPdf",
+            ]
+
+        url = f"{self.base_url}/paper/{paper_id}"
+        query_params = {"fields": ",".join(fields)}
+        try:
+            response = requests.get(url, params=query_params)
+            response.raise_for_status()
+            return response.json()
+        except requests.exceptions.RequestException as e:
+            return {
+                "error": f"Request failed: {e!s}",
+                "message": str(e),
+            }
+        except ValueError:
+            return {
+                "error": "Response is not valid JSON",
+                "message": response.text,
+            }
+
+    def fetch_bulk_paper_data(
+        self,
+        query: str,
+        year: str = "2023-",
+        fields: Optional[List[str]] = None,
+    ) -> dict:
+        r"""Fetches MULTIPLE papers at once from the Semantic Scholar
+        API based on a related topic.
+
+        Args:
+            query (str): The text query to match against the paper's title and
+                abstract. For example, you can use the following operators and
+                techniques to construct your query: Example 1: ((cloud
+                computing) | virtualization) +security -privacy This will
+                match papers whose title or abstract contains "cloud" and
+                "computing", or contains the word "virtualization". The papers
+                must also include the term "security" but exclude papers that
+                contain the word "privacy".
+            year (str, optional): The year filter for papers (default:
+                :obj:`"2023-"`).
+            fields (Optional[List[str]], optional): The fields to include in
+                the response (default: :obj:`None`). If not provided defaults
+                to ["title", "url", "publicationTypes", "publicationDate",
+                "openAccessPdf"].
+
+        Returns:
+            dict: The response data from the API or error information if the
+                request fails.
+        """
+        if fields is None:
+            fields = [
+                "title",
+                "url",
+                "publicationTypes",
+                "publicationDate",
+                "openAccessPdf",
+            ]
+
+        url = f"{self.base_url}/paper/search/bulk"
+        query_params = {
+            "query": query,
+            "fields": ",".join(fields),
+            "year": year,
+        }
+        try:
+            response = requests.get(url, params=query_params)
+            response.raise_for_status()
+            return response.json()
+        except requests.exceptions.RequestException as e:
+            return {
+                "error": f"Request failed: {e!s}",
+                "message": str(e),
+            }
+        except ValueError:
+            return {
+                "error": "Response is not valid JSON",
+                "message": response.text,
+            }
+
+    def fetch_recommended_papers(
+        self,
+        positive_paper_ids: List[str],
+        negative_paper_ids: List[str],
+        fields: Optional[List[str]] = None,
+        limit: int = 500,
+        save_to_file: bool = False,
+    ) -> dict:
+        r"""Fetches recommended papers from the Semantic Scholar
+        API based on the positive and negative paper IDs.
+
+        Args:
+            positive_paper_ids (list): A list of paper IDs (as strings)
+                that are positively correlated to the recommendation.
+            negative_paper_ids (list): A list of paper IDs (as strings)
+                that are negatively correlated to the recommendation.
+            fields (Optional[List[str]], optional): The fields to include in
+                the response (default: :obj:`None`). If not provided defaults
+                to ["title", "url", "citationCount", "authors",
+                "publicationTypes", "publicationDate", "openAccessPdf"].
+            limit (int, optional): The maximum number of recommended papers to
+                return (default: :obj:`500`).
+            save_to_file (bool, optional): If True, saves the response data to
+                a file (default: :obj:`False`).
+
+        Returns:
+            dict: A dictionary containing recommended papers sorted by
+                citation count.
+        """
+        if fields is None:
+            fields = [
+                "title",
+                "url",
+                "citationCount",
+                "authors",
+                "publicationTypes",
+                "publicationDate",
+                "openAccessPdf",
+            ]
+
+        url = "https://api.semanticscholar.org/recommendations/v1/papers"
+        query_params = {"fields": ",".join(fields), "limit": str(limit)}
+        data = {
+            "positive_paper_ids": positive_paper_ids,
+            "negative_paper_ids": negative_paper_ids,
+        }
+        try:
+            response = requests.post(url, params=query_params, json=data)
+            response.raise_for_status()
+            papers = response.json()
+            if save_to_file:
+                with open('recommended_papers.json', 'w') as output:
+                    json.dump(papers, output)
+            return papers
+        except requests.exceptions.RequestException as e:
+            return {"error": str(e)}
+        except ValueError:
+            return {
+                "error": "Response is not valid JSON",
+                "message": response.text,
+            }
+
+    def fetch_author_data(
+        self,
+        ids: List[str],
+        fields: Optional[List[str]] = None,
+        save_to_file: bool = False,
+    ) -> dict:
+        r"""Fetches author information from the Semantic Scholar
+        API based on author IDs.
+
+        Args:
+            ids (list): A list of author IDs (as strings) to fetch
+                data for.
+            fields (Optional[List[str]], optional): The fields to include in
+                the response (default: :obj:`None`). If not provided defaults
+                to ["name", "url", "paperCount", "hIndex", "papers"].
+            save_to_file (bool, optional): Whether to save the results to a
+                file (default: :obj:`False`).
+
+        Returns:
+            dict: The response data from the API or error information if
+                the request fails.
+        """
+        if fields is None:
+            fields = ["name", "url", "paperCount", "hIndex", "papers"]
+
+        url = f"{self.base_url}/author/batch"
+        query_params = {"fields": ",".join(fields)}
+        data = {"ids": ids}
+        try:
+            response = requests.post(url, params=query_params, json=data)
+            response.raise_for_status()
+            response_data = response.json()
+            if save_to_file:
+                with open('author_information.json', 'w') as output:
+                    json.dump(response_data, output)
+            return response_data
+        except requests.exceptions.RequestException as e:
+            return {"error": str(e)}
+        except ValueError:
+            return {
+                "error": "Response is not valid JSON",
+                "message": response.text,
+            }
+
+    def get_tools(self) -> List[FunctionTool]:
+        r"""Returns a list of FunctionTool objects representing the
+        functions in the toolkit.
+
+        Returns:
+            List[FunctionTool]: A list of FunctionTool objects
+                representing the functions in the toolkit.
+        """
+        return [
+            FunctionTool(self.fetch_paper_data_title),
+            FunctionTool(self.fetch_paper_data_id),
+            FunctionTool(self.fetch_bulk_paper_data),
+            FunctionTool(self.fetch_recommended_papers),
+            FunctionTool(self.fetch_author_data),
+        ]
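
A short usage sketch for the new toolkit, based on the methods defined above (network access required; the unauthenticated Semantic Scholar API is rate-limited). On success, the search endpoint wraps matches in a `data` list:

```python
toolkit = SemanticScholarToolkit()

# Title search returns parsed JSON on success, or a dict with an
# "error" key on failure (see fetch_paper_data_title above).
result = toolkit.fetch_paper_data_title("Attention Is All You Need")
if "error" in result:
    print(result["error"])
else:
    for paper in result.get("data", []):
        print(paper.get("title"), paper.get("year"))
```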