camel-ai 0.2.3a1__py3-none-any.whl → 0.2.3a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of camel-ai might be problematic.
- camel/__init__.py +1 -1
- camel/agents/chat_agent.py +93 -69
- camel/agents/knowledge_graph_agent.py +4 -6
- camel/bots/__init__.py +16 -2
- camel/bots/discord_app.py +138 -0
- camel/bots/slack/__init__.py +30 -0
- camel/bots/slack/models.py +158 -0
- camel/bots/slack/slack_app.py +255 -0
- camel/configs/__init__.py +1 -2
- camel/configs/anthropic_config.py +2 -5
- camel/configs/base_config.py +6 -6
- camel/configs/groq_config.py +2 -3
- camel/configs/ollama_config.py +1 -2
- camel/configs/openai_config.py +2 -23
- camel/configs/samba_config.py +2 -2
- camel/configs/togetherai_config.py +1 -1
- camel/configs/vllm_config.py +1 -1
- camel/configs/zhipuai_config.py +2 -3
- camel/embeddings/openai_embedding.py +2 -2
- camel/loaders/__init__.py +2 -0
- camel/loaders/chunkr_reader.py +163 -0
- camel/loaders/firecrawl_reader.py +3 -3
- camel/loaders/unstructured_io.py +35 -33
- camel/messages/__init__.py +1 -0
- camel/models/__init__.py +2 -4
- camel/models/anthropic_model.py +32 -26
- camel/models/azure_openai_model.py +39 -36
- camel/models/base_model.py +31 -20
- camel/models/gemini_model.py +37 -29
- camel/models/groq_model.py +29 -23
- camel/models/litellm_model.py +44 -61
- camel/models/mistral_model.py +32 -29
- camel/models/model_factory.py +66 -76
- camel/models/nemotron_model.py +33 -23
- camel/models/ollama_model.py +42 -47
- camel/models/{openai_compatibility_model.py → openai_compatible_model.py} +31 -49
- camel/models/openai_model.py +48 -29
- camel/models/reka_model.py +30 -28
- camel/models/samba_model.py +82 -177
- camel/models/stub_model.py +2 -2
- camel/models/togetherai_model.py +37 -43
- camel/models/vllm_model.py +43 -50
- camel/models/zhipuai_model.py +33 -27
- camel/retrievers/auto_retriever.py +28 -10
- camel/retrievers/vector_retriever.py +58 -47
- camel/societies/babyagi_playing.py +6 -3
- camel/societies/role_playing.py +5 -3
- camel/storages/graph_storages/graph_element.py +3 -5
- camel/storages/key_value_storages/json.py +6 -1
- camel/toolkits/__init__.py +20 -7
- camel/toolkits/arxiv_toolkit.py +155 -0
- camel/toolkits/ask_news_toolkit.py +653 -0
- camel/toolkits/base.py +2 -3
- camel/toolkits/code_execution.py +6 -7
- camel/toolkits/dalle_toolkit.py +6 -6
- camel/toolkits/{openai_function.py → function_tool.py} +34 -11
- camel/toolkits/github_toolkit.py +9 -10
- camel/toolkits/google_maps_toolkit.py +7 -7
- camel/toolkits/google_scholar_toolkit.py +146 -0
- camel/toolkits/linkedin_toolkit.py +7 -7
- camel/toolkits/math_toolkit.py +8 -8
- camel/toolkits/open_api_toolkit.py +5 -5
- camel/toolkits/reddit_toolkit.py +7 -7
- camel/toolkits/retrieval_toolkit.py +5 -5
- camel/toolkits/search_toolkit.py +9 -9
- camel/toolkits/slack_toolkit.py +11 -11
- camel/toolkits/twitter_toolkit.py +378 -452
- camel/toolkits/weather_toolkit.py +6 -6
- camel/toolkits/whatsapp_toolkit.py +177 -0
- camel/types/__init__.py +6 -1
- camel/types/enums.py +40 -85
- camel/types/openai_types.py +3 -0
- camel/types/unified_model_type.py +104 -0
- camel/utils/__init__.py +0 -2
- camel/utils/async_func.py +7 -7
- camel/utils/commons.py +32 -3
- camel/utils/token_counting.py +30 -212
- camel/workforce/role_playing_worker.py +1 -1
- camel/workforce/single_agent_worker.py +1 -1
- camel/workforce/task_channel.py +4 -3
- camel/workforce/workforce.py +4 -4
- camel_ai-0.2.3a2.dist-info/LICENSE +201 -0
- {camel_ai-0.2.3a1.dist-info → camel_ai-0.2.3a2.dist-info}/METADATA +27 -56
- {camel_ai-0.2.3a1.dist-info → camel_ai-0.2.3a2.dist-info}/RECORD +85 -76
- {camel_ai-0.2.3a1.dist-info → camel_ai-0.2.3a2.dist-info}/WHEEL +1 -1
- camel/bots/discord_bot.py +0 -206
- camel/models/open_source_model.py +0 -170
camel/toolkits/ask_news_toolkit.py
ADDED

@@ -0,0 +1,653 @@
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. ===========
import os
from datetime import datetime
from typing import List, Literal, Optional, Tuple, Union

from camel.toolkits import FunctionTool
from camel.toolkits.base import BaseToolkit


def _process_response(
    response, return_type: str
) -> Union[str, dict, Tuple[str, dict]]:
    r"""Process the response based on the specified return type.

    This helper method processes the API response and returns the content
    in the specified format, which could be a string, a dictionary, or
    both.

    Args:
        response: The response object returned by the API call.
        return_type (str): Specifies the format of the return value. It
            can be "string" to return the response as a string, "dicts" to
            return it as a dictionary, or "both" to return both formats as
            a tuple.

    Returns:
        Union[str, dict, Tuple[str, dict]]: The processed response,
            formatted according to the return_type argument. If "string",
            returns the response as a string. If "dicts", returns the
            response as a dictionary. If "both", returns a tuple
            containing both formats.

    Raises:
        ValueError: If the return_type provided is invalid.
    """
    if return_type == "string":
        return response.as_string
    elif return_type == "dicts":
        return response.as_dicts
    elif return_type == "both":
        return (response.as_string, response.as_dicts)
    else:
        raise ValueError(f"Invalid return_type: {return_type}")


class AskNewsToolkit(BaseToolkit):
    r"""A class representing a toolkit for interacting with the AskNews API.

    This class provides methods for fetching news, stories, and other content
    based on user queries using the AskNews API.
    """

    def __init__(self):
        r"""Initialize the AskNewsToolkit with API clients. The API keys and
        credentials are retrieved from environment variables.
        """
        from asknews_sdk import AskNewsSDK  # type: ignore[import]

        client_id = os.environ.get("ASKNEWS_CLIENT_ID")
        client_secret = os.environ.get("ASKNEWS_CLIENT_SECRET")

        if client_id and client_secret:
            self.asknews_client = AskNewsSDK(client_id, client_secret)
        else:
            self.asknews_client = None

    def get_news(
        self,
        query: str,
        n_articles: int = 10,
        return_type: Literal["string", "dicts", "both"] = "string",
        method: Literal["nl", "kw"] = "kw",
    ) -> Union[str, dict, Tuple[str, dict]]:
        r"""Fetch news or stories based on a user query.

        Args:
            query (str): The search query for fetching relevant news.
            n_articles (int): Number of articles to include in the response.
                (default: :obj:`10`)
            return_type (Literal["string", "dicts", "both"]): The format of
                the return value. (default: :obj:`"string"`)
            method (Literal["nl", "kw"]): The search method, either "nl" for
                natural language or "kw" for keyword search. (default:
                :obj:`"kw"`)

        Returns:
            Union[str, dict, Tuple[str, dict]]: A string, dictionary,
                or both containing the news or story content, or an error
                message if the process fails.
        """
        try:
            response = self.asknews_client.news.search_news(
                query=query,
                n_articles=n_articles,
                return_type=return_type,
                method=method,
            )

            return _process_response(response, return_type)

        except Exception as e:
            return f"Got error: {e}"

    def get_stories(
        self,
        query: str,
        categories: List[
            Literal[
                'Politics',
                'Economy',
                'Finance',
                'Science',
                'Technology',
                'Sports',
                'Climate',
                'Environment',
                'Culture',
                'Entertainment',
                'Business',
                'Health',
                'International',
            ]
        ],
        reddit: int = 3,
        expand_updates: bool = True,
        max_updates: int = 2,
        max_articles: int = 10,
    ) -> Union[dict, str]:
        r"""Fetch stories based on the provided parameters.

        Args:
            query (str): The search query for fetching relevant stories.
            categories (list): The categories to filter stories by.
            reddit (int): Number of Reddit threads to include.
                (default: :obj:`3`)
            expand_updates (bool): Whether to include detailed updates.
                (default: :obj:`True`)
            max_updates (int): Maximum number of recent updates per story.
                (default: :obj:`2`)
            max_articles (int): Maximum number of articles associated with
                each update. (default: :obj:`10`)

        Returns:
            Union[dict, str]: A dictionary containing the stories and their
                associated data, or an error message if the process fails.
        """
        try:
            response = self.asknews_client.stories.search_stories(
                query=query,
                categories=categories,
                reddit=reddit,
                expand_updates=expand_updates,
                max_updates=max_updates,
                max_articles=max_articles,
            )

            # Collect only the headline and story content from the updates
            stories_data = {
                "stories": [
                    {
                        "headline": story.updates[0].headline,
                        "updates": [
                            {
                                "headline": update.headline,
                                "story": update.story,
                            }
                            for update in story.updates[:max_updates]
                        ],
                    }
                    for story in response.stories
                ]
            }

            return stories_data

        except Exception as e:
            return f"Got error: {e}"

    def get_web_search(
        self,
        queries: List[str],
        return_type: Literal["string", "dicts", "both"] = "string",
    ) -> Union[str, dict, Tuple[str, dict]]:
        r"""Perform a live web search based on the given queries.

        Args:
            queries (List[str]): A list of search queries.
            return_type (Literal["string", "dicts", "both"]): The format of
                the return value. (default: :obj:`"string"`)

        Returns:
            Union[str, dict, Tuple[str, dict]]: A string, dictionary, or
                both containing the search results, or an error message if
                the process fails.
        """
        try:
            response = self.asknews_client.chat.live_web_search(
                queries=queries
            )

            return _process_response(response, return_type)

        except Exception as e:
            return f"Got error: {e}"

    def search_reddit(
        self,
        keywords: List[str],
        n_threads: int = 5,
        return_type: Literal["string", "dicts", "both"] = "string",
        method: Literal["nl", "kw"] = "kw",
    ) -> Union[str, dict, Tuple[str, dict]]:
        r"""Search Reddit based on the provided keywords.

        Args:
            keywords (List[str]): The keywords to search for on Reddit.
            n_threads (int): Number of Reddit threads to summarize and return.
                (default: :obj:`5`)
            return_type (Literal["string", "dicts", "both"]): The format of
                the return value. (default: :obj:`"string"`)
            method (Literal["nl", "kw"]): The search method, either "nl" for
                natural language or "kw" for keyword search.
                (default: :obj:`"kw"`)

        Returns:
            Union[str, dict, Tuple[str, dict]]: The Reddit search results as
                a string, dictionary, or both, or an error message if the
                process fails.
        """
        try:
            response = self.asknews_client.news.search_reddit(
                keywords=keywords, n_threads=n_threads, method=method
            )

            return _process_response(response, return_type)

        except Exception as e:
            return f"Got error: {e}"

    def query_finance(
        self,
        asset: Literal[
            'bitcoin',
            'ethereum',
            'cardano',
            'uniswap',
            'ripple',
            'solana',
            'polkadot',
            'polygon',
            'chainlink',
            'tether',
            'dogecoin',
            'monero',
            'tron',
            'binance',
            'aave',
            'tesla',
            'microsoft',
            'amazon',
        ],
        metric: Literal[
            'news_positive',
            'news_negative',
            'news_total',
            'news_positive_weighted',
            'news_negative_weighted',
            'news_total_weighted',
        ] = "news_positive",
        return_type: Literal["list", "string"] = "string",
        date_from: Optional[datetime] = None,
        date_to: Optional[datetime] = None,
    ) -> Union[list, str]:
        r"""Fetch asset sentiment data for a given asset, metric, and date
        range.

        Args:
            asset (Literal): The asset for which to fetch sentiment data.
            metric (Literal): The sentiment metric to analyze.
            return_type (Literal["list", "string"]): The format of the return
                value. (default: :obj:`"string"`)
            date_from (datetime, optional): The start date and time for the
                data in ISO 8601 format.
            date_to (datetime, optional): The end date and time for the data
                in ISO 8601 format.

        Returns:
            Union[list, str]: A list of dictionaries containing the datetime
                and value, or a string describing all datetime and value
                pairs, providing quantified time-series data for news
                sentiment on topics of interest, or an error message if the
                process fails.
        """
        try:
            response = self.asknews_client.analytics.get_asset_sentiment(
                asset=asset,
                metric=metric,
                date_from=date_from,
                date_to=date_to,
            )

            time_series_data = response.data.timeseries

            if return_type == "list":
                return time_series_data
            elif return_type == "string":
                header = (
                    f"This is the sentiment analysis for '{asset}' based "
                    + f"on the '{metric}' metric from {date_from} to {date_to}"
                    + ". The values reflect the aggregated sentiment from news"
                    + " sources for each given time period.\n"
                )
                descriptive_text = "\n".join(
                    [
                        f"On {entry.datetime}, the sentiment value was "
                        f"{entry.value}."
                        for entry in time_series_data
                    ]
                )
                return header + descriptive_text

        except Exception as e:
            return f"Got error: {e}"

    def get_tools(self) -> List[FunctionTool]:
        r"""Returns a list of FunctionTool objects representing the functions
        in the toolkit.

        Returns:
            List[FunctionTool]: A list of FunctionTool objects representing
                the functions in the toolkit.
        """
        return [
            FunctionTool(self.get_news),
            FunctionTool(self.get_stories),
            FunctionTool(self.get_web_search),
            FunctionTool(self.search_reddit),
            FunctionTool(self.query_finance),
        ]


class AsyncAskNewsToolkit(BaseToolkit):
    r"""A class representing a toolkit for interacting with the AskNews API
    asynchronously.

    This class provides methods for fetching news, stories, and other
    content based on user queries using the AskNews API.
    """

    def __init__(self):
        r"""Initialize the AsyncAskNewsToolkit with API clients. The API keys
        and credentials are retrieved from environment variables.
        """
        from asknews_sdk import AsyncAskNewsSDK  # type: ignore[import]

        client_id = os.environ.get("ASKNEWS_CLIENT_ID")
        client_secret = os.environ.get("ASKNEWS_CLIENT_SECRET")

        if client_id and client_secret:
            self.asknews_client = AsyncAskNewsSDK(client_id, client_secret)
        else:
            self.asknews_client = None

    async def get_news(
        self,
        query: str,
        n_articles: int = 10,
        return_type: Literal["string", "dicts", "both"] = "string",
        method: Literal["nl", "kw"] = "kw",
    ) -> Union[str, dict, Tuple[str, dict]]:
        r"""Fetch news or stories based on a user query.

        Args:
            query (str): The search query for fetching relevant news or
                stories.
            n_articles (int): Number of articles to include in the response.
                (default: :obj:`10`)
            return_type (Literal["string", "dicts", "both"]): The format of
                the return value. (default: :obj:`"string"`)
            method (Literal["nl", "kw"]): The search method, either "nl" for
                natural language or "kw" for keyword search. (default:
                :obj:`"kw"`)

        Returns:
            Union[str, dict, Tuple[str, dict]]: A string, dictionary, or
                both containing the news or story content, or an error
                message if the process fails.
        """
        try:
            response = await self.asknews_client.news.search_news(
                query=query,
                n_articles=n_articles,
                return_type=return_type,
                method=method,
            )

            return _process_response(response, return_type)

        except Exception as e:
            return f"Got error: {e}"

    async def get_stories(
        self,
        query: str,
        categories: List[
            Literal[
                'Politics',
                'Economy',
                'Finance',
                'Science',
                'Technology',
                'Sports',
                'Climate',
                'Environment',
                'Culture',
                'Entertainment',
                'Business',
                'Health',
                'International',
            ]
        ],
        reddit: int = 3,
        expand_updates: bool = True,
        max_updates: int = 2,
        max_articles: int = 10,
    ) -> Union[dict, str]:
        r"""Fetch stories based on the provided parameters.

        Args:
            query (str): The search query for fetching relevant stories.
            categories (list): The categories to filter stories by.
            reddit (int): Number of Reddit threads to include.
                (default: :obj:`3`)
            expand_updates (bool): Whether to include detailed updates.
                (default: :obj:`True`)
            max_updates (int): Maximum number of recent updates per story.
                (default: :obj:`2`)
            max_articles (int): Maximum number of articles associated with
                each update. (default: :obj:`10`)

        Returns:
            Union[dict, str]: A dictionary containing the stories and their
                associated data, or an error message if the process fails.
        """
        try:
            response = await self.asknews_client.stories.search_stories(
                query=query,
                categories=categories,
                reddit=reddit,
                expand_updates=expand_updates,
                max_updates=max_updates,
                max_articles=max_articles,
            )

            # Collect only the headline and story content from the updates
            stories_data = {
                "stories": [
                    {
                        "headline": story.updates[0].headline,
                        "updates": [
                            {
                                "headline": update.headline,
                                "story": update.story,
                            }
                            for update in story.updates[:max_updates]
                        ],
                    }
                    for story in response.stories
                ]
            }

            return stories_data

        except Exception as e:
            return f"Got error: {e}"

    async def get_web_search(
        self,
        queries: List[str],
        return_type: Literal["string", "dicts", "both"] = "string",
    ) -> Union[str, dict, Tuple[str, dict]]:
        r"""Perform a live web search based on the given queries.

        Args:
            queries (List[str]): A list of search queries.
            return_type (Literal["string", "dicts", "both"]): The format of
                the return value. (default: :obj:`"string"`)

        Returns:
            Union[str, dict, Tuple[str, dict]]: A string, dictionary, or
                both containing the search results, or an error message if
                the process fails.
        """
        try:
            response = await self.asknews_client.chat.live_web_search(
                queries=queries
            )

            return _process_response(response, return_type)

        except Exception as e:
            return f"Got error: {e}"

    async def search_reddit(
        self,
        keywords: List[str],
        n_threads: int = 5,
        return_type: Literal["string", "dicts", "both"] = "string",
        method: Literal["nl", "kw"] = "kw",
    ) -> Union[str, dict, Tuple[str, dict]]:
        r"""Search Reddit based on the provided keywords.

        Args:
            keywords (list): The keywords to search for on Reddit.
            n_threads (int): Number of Reddit threads to summarize and return.
                (default: :obj:`5`)
            return_type (Literal["string", "dicts", "both"]): The format of
                the return value. (default: :obj:`"string"`)
            method (Literal["nl", "kw"]): The search method, either "nl" for
                natural language or "kw" for keyword search.
                (default: :obj:`"kw"`)

        Returns:
            Union[str, dict, Tuple[str, dict]]: The Reddit search results as
                a string, dictionary, or both, or an error message if the
                process fails.
        """
        try:
            response = await self.asknews_client.news.search_reddit(
                keywords=keywords, n_threads=n_threads, method=method
            )

            return _process_response(response, return_type)

        except Exception as e:
            return f"Got error: {e}"

    async def query_finance(
        self,
        asset: Literal[
            'bitcoin',
            'ethereum',
            'cardano',
            'uniswap',
            'ripple',
            'solana',
            'polkadot',
            'polygon',
            'chainlink',
            'tether',
            'dogecoin',
            'monero',
            'tron',
            'binance',
            'aave',
            'tesla',
            'microsoft',
            'amazon',
        ],
        metric: Literal[
            'news_positive',
            'news_negative',
            'news_total',
            'news_positive_weighted',
            'news_negative_weighted',
            'news_total_weighted',
        ] = "news_positive",
        return_type: Literal["list", "string"] = "string",
        date_from: Optional[datetime] = None,
        date_to: Optional[datetime] = None,
    ) -> Union[list, str]:
        r"""Fetch asset sentiment data for a given asset, metric, and date
        range.

        Args:
            asset (Literal): The asset for which to fetch sentiment data.
            metric (Literal): The sentiment metric to analyze.
            return_type (Literal["list", "string"]): The format of the return
                value. (default: :obj:`"string"`)
            date_from (datetime, optional): The start date and time for the
                data in ISO 8601 format.
            date_to (datetime, optional): The end date and time for the data
                in ISO 8601 format.

        Returns:
            Union[list, str]: A list of dictionaries containing the datetime
                and value, or a string describing all datetime and value
                pairs, providing quantified time-series data for news
                sentiment on topics of interest, or an error message if the
                process fails.
        """
        try:
            response = await self.asknews_client.analytics.get_asset_sentiment(
                asset=asset,
                metric=metric,
                date_from=date_from,
                date_to=date_to,
            )

            time_series_data = response.data.timeseries

            if return_type == "list":
                return time_series_data
            elif return_type == "string":
                header = (
                    f"This is the sentiment analysis for '{asset}' based "
                    + f"on the '{metric}' metric from {date_from} to {date_to}"
                    + ". The values reflect the aggregated sentiment from news"
                    + " sources for each given time period.\n"
                )
                descriptive_text = "\n".join(
                    [
                        f"On {entry.datetime}, the sentiment value was "
                        f"{entry.value}."
                        for entry in time_series_data
                    ]
                )
                return header + descriptive_text

        except Exception as e:
            return f"Got error: {e}"

    def get_tools(self) -> List[FunctionTool]:
        r"""Returns a list of FunctionTool objects representing the functions
        in the toolkit.

        Returns:
            List[FunctionTool]: A list of FunctionTool objects representing
                the functions in the toolkit.
        """
        return [
            FunctionTool(self.get_news),
            FunctionTool(self.get_stories),
            FunctionTool(self.get_web_search),
            FunctionTool(self.search_reddit),
            FunctionTool(self.query_finance),
        ]


ASKNEWS_FUNCS: List[FunctionTool] = AskNewsToolkit().get_tools()
ASYNC_ASKNEWS_FUNCS: List[FunctionTool] = AsyncAskNewsToolkit().get_tools()
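The new module ships both a synchronous and an asynchronous toolkit, plus the two prebuilt tool lists at the bottom of the file. Below is a minimal usage sketch, not part of the diff itself: it assumes the asknews_sdk package is installed and that ASKNEWS_CLIENT_ID and ASKNEWS_CLIENT_SECRET are set in the environment (otherwise the client stays None and each call returns a "Got error" string); the query and article count are illustrative.

import asyncio

from camel.toolkits.ask_news_toolkit import AskNewsToolkit, AsyncAskNewsToolkit

# Synchronous variant: fetch up to five articles as a single string.
toolkit = AskNewsToolkit()
print(toolkit.get_news(query="climate policy", n_articles=5))


# Asynchronous variant: the same call awaited on AsyncAskNewsSDK.
async def main() -> None:
    async_toolkit = AsyncAskNewsToolkit()
    print(await async_toolkit.get_news(query="climate policy", n_articles=5))


asyncio.run(main())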
camel/toolkits/base.py
CHANGED

@@ -14,11 +14,10 @@
 
 from typing import List
 
+from camel.toolkits import FunctionTool
 from camel.utils import AgentOpsMeta
 
-from .openai_function import OpenAIFunction
-
 
 class BaseToolkit(metaclass=AgentOpsMeta):
-    def get_tools(self) -> List[OpenAIFunction]:
+    def get_tools(self) -> List[FunctionTool]:
         raise NotImplementedError("Subclasses must implement this method.")
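This change tightens the BaseToolkit contract from the old OpenAIFunction type to FunctionTool. A minimal sketch of a toolkit written against the new signature follows; the EchoToolkit class and its echo method are illustrative examples, not part of this release.

from typing import List

from camel.toolkits import FunctionTool
from camel.toolkits.base import BaseToolkit


class EchoToolkit(BaseToolkit):
    r"""Illustrative toolkit exposing a single function as a tool."""

    def echo(self, text: str) -> str:
        r"""Return the input text unchanged.

        Args:
            text (str): The text to echo back.

        Returns:
            str: The same text.
        """
        return text

    def get_tools(self) -> List[FunctionTool]:
        # New contract: wrap each exposed callable in FunctionTool.
        return [FunctionTool(self.echo)]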
camel/toolkits/code_execution.py
CHANGED

@@ -14,9 +14,8 @@
 from typing import List, Literal
 
 from camel.interpreters import InternalPythonInterpreter
-from camel.toolkits import OpenAIFunction
-
-from .base import BaseToolkit
+from camel.toolkits import FunctionTool
+from camel.toolkits.base import BaseToolkit
 
 
 class CodeExecutionToolkit(BaseToolkit):

@@ -58,12 +57,12 @@ class CodeExecutionToolkit(BaseToolkit):
         print(content)
         return content
 
-    def get_tools(self) -> List[OpenAIFunction]:
-        r"""Returns a list of OpenAIFunction objects representing the
+    def get_tools(self) -> List[FunctionTool]:
+        r"""Returns a list of FunctionTool objects representing the
         functions in the toolkit.
 
         Returns:
-            List[OpenAIFunction]: A list of OpenAIFunction objects
+            List[FunctionTool]: A list of FunctionTool objects
                 representing the functions in the toolkit.
         """
-        return [OpenAIFunction(self.execute_code)]
+        return [FunctionTool(self.execute_code)]