quantalogic 0.33.4__py3-none-any.whl → 0.40.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- quantalogic/__init__.py +0 -4
- quantalogic/agent.py +603 -362
- quantalogic/agent_config.py +260 -28
- quantalogic/agent_factory.py +43 -17
- quantalogic/coding_agent.py +20 -12
- quantalogic/config.py +7 -4
- quantalogic/console_print_events.py +4 -8
- quantalogic/console_print_token.py +2 -2
- quantalogic/docs_cli.py +15 -10
- quantalogic/event_emitter.py +258 -83
- quantalogic/flow/__init__.py +23 -0
- quantalogic/flow/flow.py +595 -0
- quantalogic/flow/flow_extractor.py +672 -0
- quantalogic/flow/flow_generator.py +89 -0
- quantalogic/flow/flow_manager.py +407 -0
- quantalogic/flow/flow_manager_schema.py +169 -0
- quantalogic/flow/flow_yaml.md +419 -0
- quantalogic/generative_model.py +109 -77
- quantalogic/get_model_info.py +6 -6
- quantalogic/interactive_text_editor.py +100 -73
- quantalogic/main.py +36 -23
- quantalogic/model_info_list.py +12 -0
- quantalogic/model_info_litellm.py +14 -14
- quantalogic/prompts.py +2 -1
- quantalogic/{llm.py → quantlitellm.py} +29 -39
- quantalogic/search_agent.py +4 -4
- quantalogic/server/models.py +4 -1
- quantalogic/task_file_reader.py +5 -5
- quantalogic/task_runner.py +21 -20
- quantalogic/tool_manager.py +10 -21
- quantalogic/tools/__init__.py +98 -68
- quantalogic/tools/composio/composio.py +416 -0
- quantalogic/tools/{generate_database_report_tool.py → database/generate_database_report_tool.py} +4 -9
- quantalogic/tools/database/sql_query_tool_advanced.py +261 -0
- quantalogic/tools/document_tools/markdown_to_docx_tool.py +620 -0
- quantalogic/tools/document_tools/markdown_to_epub_tool.py +438 -0
- quantalogic/tools/document_tools/markdown_to_html_tool.py +362 -0
- quantalogic/tools/document_tools/markdown_to_ipynb_tool.py +319 -0
- quantalogic/tools/document_tools/markdown_to_latex_tool.py +420 -0
- quantalogic/tools/document_tools/markdown_to_pdf_tool.py +623 -0
- quantalogic/tools/document_tools/markdown_to_pptx_tool.py +319 -0
- quantalogic/tools/duckduckgo_search_tool.py +2 -4
- quantalogic/tools/finance/alpha_vantage_tool.py +440 -0
- quantalogic/tools/finance/ccxt_tool.py +373 -0
- quantalogic/tools/finance/finance_llm_tool.py +387 -0
- quantalogic/tools/finance/google_finance.py +192 -0
- quantalogic/tools/finance/market_intelligence_tool.py +520 -0
- quantalogic/tools/finance/technical_analysis_tool.py +491 -0
- quantalogic/tools/finance/tradingview_tool.py +336 -0
- quantalogic/tools/finance/yahoo_finance.py +236 -0
- quantalogic/tools/git/bitbucket_clone_repo_tool.py +181 -0
- quantalogic/tools/git/bitbucket_operations_tool.py +326 -0
- quantalogic/tools/git/clone_repo_tool.py +189 -0
- quantalogic/tools/git/git_operations_tool.py +532 -0
- quantalogic/tools/google_packages/google_news_tool.py +480 -0
- quantalogic/tools/grep_app_tool.py +123 -186
- quantalogic/tools/{dalle_e.py → image_generation/dalle_e.py} +37 -27
- quantalogic/tools/jinja_tool.py +6 -10
- quantalogic/tools/language_handlers/__init__.py +22 -9
- quantalogic/tools/list_directory_tool.py +131 -42
- quantalogic/tools/llm_tool.py +45 -15
- quantalogic/tools/llm_vision_tool.py +59 -7
- quantalogic/tools/markitdown_tool.py +17 -5
- quantalogic/tools/nasa_packages/models.py +47 -0
- quantalogic/tools/nasa_packages/nasa_apod_tool.py +232 -0
- quantalogic/tools/nasa_packages/nasa_neows_tool.py +147 -0
- quantalogic/tools/nasa_packages/services.py +82 -0
- quantalogic/tools/presentation_tools/presentation_llm_tool.py +396 -0
- quantalogic/tools/product_hunt/product_hunt_tool.py +258 -0
- quantalogic/tools/product_hunt/services.py +63 -0
- quantalogic/tools/rag_tool/__init__.py +48 -0
- quantalogic/tools/rag_tool/document_metadata.py +15 -0
- quantalogic/tools/rag_tool/query_response.py +20 -0
- quantalogic/tools/rag_tool/rag_tool.py +566 -0
- quantalogic/tools/rag_tool/rag_tool_beta.py +264 -0
- quantalogic/tools/read_html_tool.py +24 -38
- quantalogic/tools/replace_in_file_tool.py +10 -10
- quantalogic/tools/safe_python_interpreter_tool.py +10 -24
- quantalogic/tools/search_definition_names.py +2 -2
- quantalogic/tools/sequence_tool.py +14 -23
- quantalogic/tools/sql_query_tool.py +17 -19
- quantalogic/tools/tool.py +39 -15
- quantalogic/tools/unified_diff_tool.py +1 -1
- quantalogic/tools/utilities/csv_processor_tool.py +234 -0
- quantalogic/tools/utilities/download_file_tool.py +179 -0
- quantalogic/tools/utilities/mermaid_validator_tool.py +661 -0
- quantalogic/tools/utils/__init__.py +1 -4
- quantalogic/tools/utils/create_sample_database.py +24 -38
- quantalogic/tools/utils/generate_database_report.py +74 -82
- quantalogic/tools/wikipedia_search_tool.py +17 -21
- quantalogic/utils/ask_user_validation.py +1 -1
- quantalogic/utils/async_utils.py +35 -0
- quantalogic/utils/check_version.py +3 -5
- quantalogic/utils/get_all_models.py +2 -1
- quantalogic/utils/git_ls.py +21 -7
- quantalogic/utils/lm_studio_model_info.py +9 -7
- quantalogic/utils/python_interpreter.py +113 -43
- quantalogic/utils/xml_utility.py +178 -0
- quantalogic/version_check.py +1 -1
- quantalogic/welcome_message.py +7 -7
- quantalogic/xml_parser.py +0 -1
- {quantalogic-0.33.4.dist-info → quantalogic-0.40.0.dist-info}/METADATA +44 -1
- quantalogic-0.40.0.dist-info/RECORD +148 -0
- quantalogic-0.33.4.dist-info/RECORD +0 -102
- {quantalogic-0.33.4.dist-info → quantalogic-0.40.0.dist-info}/LICENSE +0 -0
- {quantalogic-0.33.4.dist-info → quantalogic-0.40.0.dist-info}/WHEEL +0 -0
- {quantalogic-0.33.4.dist-info → quantalogic-0.40.0.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,480 @@
|
|
1
|
+
"""Advanced tool for retrieving and analyzing news articles from Google News.
|
2
|
+
|
3
|
+
This tool provides a sophisticated interface to fetch, analyze, and format news articles
|
4
|
+
from Google News using multiple sources and advanced filtering capabilities.
|
5
|
+
"""
|
6
|
+
|
7
|
+
import asyncio
|
8
|
+
from typing import Any, Dict, List
|
9
|
+
|
10
|
+
import aiohttp
|
11
|
+
import html2text
|
12
|
+
from bs4 import BeautifulSoup
|
13
|
+
from gnews import GNews
|
14
|
+
from loguru import logger
|
15
|
+
|
16
|
+
from quantalogic.event_emitter import EventEmitter
|
17
|
+
from quantalogic.tools.llm_tool import LLMTool
|
18
|
+
from quantalogic.tools.tool import Tool, ToolArgument
|
19
|
+
|
20
|
+
|
21
|
+
class NewsArticle:
    """Represents a news article with enhanced metadata and analysis.

    Holds the basic fields returned by Google News plus fields populated
    by :meth:`enrich` (full text, keywords, summary, sentiment).
    """

    def __init__(self, title: str, url: str, source: str, date: str):
        """Initialize the article with its basic Google News metadata.

        Args:
            title (str): Article headline.
            url (str): Link to the full article.
            source (str): Publisher name.
            date (str): Published date string as returned by the feed.
        """
        self.title = title
        self.url = url
        self.source = source
        self.date = date
        self.description = ""
        self.full_text = ""
        self.keywords = []
        self.sentiment = {}
        self.summary = ""

    async def enrich(self, session: "aiohttp.ClientSession"):
        """Enrich article with additional data and analysis.

        Downloads the article page, extracts its main text, derives the
        most frequent keywords and a short summary, and attaches a
        (currently stubbed) neutral sentiment score. On any failure the
        article falls back to metadata derived from the title alone.

        Args:
            session: Shared aiohttp session used for the HTTP request.
        """
        # Function-scope import keeps the module's import block untouched.
        from collections import Counter

        try:
            # Fetch article content
            async with session.get(self.url) as response:
                if response.status == 200:
                    html_content = await response.text()

                    # Convert HTML to text
                    h = html2text.HTML2Text()
                    h.ignore_links = True
                    h.ignore_images = True
                    self.full_text = h.handle(html_content)

                    # Extract main content using BeautifulSoup
                    soup = BeautifulSoup(html_content, 'html.parser')

                    # Remove unwanted elements (boilerplate/navigation)
                    for tag in soup(['script', 'style', 'nav', 'header', 'footer', 'aside']):
                        tag.decompose()

                    # Prefer semantic containers; fall back to <body>
                    main_content = soup.find('main') or soup.find('article') or soup.find('body')
                    if main_content:
                        self.full_text = main_content.get_text(strip=True)

            # Keyword extraction ranked by frequency so the result really is
            # the "top" 10 (the previous set-based version returned 10 words
            # in arbitrary hash order). Punctuation-only tokens are dropped.
            word_counts = Counter(
                word.lower().strip('.,!?;:"\'()')
                for word in self.title.split() + self.full_text.split()
                if len(word) > 3
            )
            word_counts.pop('', None)
            self.keywords = [word for word, _ in word_counts.most_common(10)]

            # Basic summary: first 2-3 sentences; fall back to the title
            # instead of a bare "." when no text could be extracted.
            sentences = [s.strip() for s in self.full_text.split('.') if s.strip()]
            self.summary = '. '.join(sentences[:3]) + '.' if sentences else self.title

            # Sentiment analysis is stubbed to neutral until a real analyzer
            # (e.g. NLTK's SentimentIntensityAnalyzer) is wired in.
            self.sentiment = {'pos': 0.0, 'neg': 0.0, 'neu': 1.0, 'compound': 0.0}

        except Exception as e:
            logger.warning(f"Error enriching article {self.url}: {str(e)}")
            # Set basic info even if enrichment fails
            self.summary = self.title
            self.keywords = [word.lower() for word in self.title.split() if len(word) > 3]
            self.sentiment = {'pos': 0.0, 'neg': 0.0, 'neu': 1.0, 'compound': 0.0}
class GoogleNewsTool(Tool):
    """Advanced tool for retrieving and analyzing news articles from Google News.

    Features:
    - Multi-source news aggregation
    - Sentiment analysis
    - Keyword extraction
    - Article summarization
    - Advanced filtering
    - Async processing for better performance
    """

    name: str = "google_news_tool"
    description: str = (
        "Advanced news retrieval and analysis tool with support for sentiment analysis, "
        "keyword extraction, and article summarization."
    )
    # NOTE(review): the defaults declared here ("1d", "25") differ from the
    # defaults of execute() ("1m", 30) — confirm which set is intended.
    arguments: List[ToolArgument] = [
        ToolArgument(
            name="query",
            arg_type="string",
            description="The news search query",
            required=True,
            example="artificial intelligence developments",
        ),
        ToolArgument(
            name="language",
            arg_type="string",
            description="Language code (e.g., 'en' for English)",
            required=False,
            default="en",
            example="en",
        ),
        ToolArgument(
            name="period",
            arg_type="string",
            description="Time period (1h, 1d, 7d, 1m)",
            required=False,
            default="1d",
            example="7d",
        ),
        ToolArgument(
            name="max_results",
            arg_type="int",
            description="Maximum number of results (1-100)",
            required=False,
            default="25",
            example="20",
        ),
        ToolArgument(
            name="country",
            arg_type="string",
            description="Country code for news sources",
            required=False,
            default="US",
            example="GB",
        ),
        ToolArgument(
            name="sort_by",
            arg_type="string",
            description="Sort by relevance or date",
            required=False,
            default="relevance",
            example="date",
        ),
        ToolArgument(
            name="analyze",
            arg_type="boolean",
            description="Perform detailed analysis of articles",
            required=False,
            default="True",
            example="True",
        ),
    ]

    def __init__(
        self,
        model_name: str | None = None,
        on_token: Any | None = None,
        event_emitter: EventEmitter | None = None,
    ):
        """Initialize the GoogleNewsTool.

        Args:
            model_name (str | None): Name of the LLM model to use for summarization
            on_token (Any | None): Token callback for streaming
            event_emitter (EventEmitter | None): Event emitter for the tool
        """
        super().__init__()
        self.model_name = model_name
        self.on_token = on_token
        self.event_emitter = event_emitter
        # The LLM summarizer only exists when a model is configured;
        # _summarize_article probes for it with hasattr.
        if model_name:
            self.llm_tool = LLMTool(
                model_name=model_name,
                on_token=on_token,
                event_emitter=event_emitter,
            )

    def _summarize_article(self, article: Dict[str, Any]) -> str:
        """Summarize a news article using LLM.

        Falls back to the article's raw description when no LLM is
        configured or the LLM call fails.

        Args:
            article (Dict[str, Any]): Article data including title and description

        Returns:
            str: Summarized article content
        """
        if not hasattr(self, 'llm_tool'):
            return article.get('description', '')

        prompt = f"""
        Summarize this news article concisely and professionally:

        Title: {article.get('title', '')}
        Description: {article.get('description', '')}

        Provide a 2-3 sentence summary that captures the key points.
        """

        try:
            # temperature is passed as a string to match the Tool argument
            # convention used across this package.
            summary = self.llm_tool.execute(
                system_prompt="You are a professional news summarizer. Create clear, accurate, and concise summaries.",
                prompt=prompt,
                temperature="0.3"
            )
            return summary
        except Exception as e:
            logger.error(f"Error summarizing article: {e}")
            return article.get('description', '')

    def _format_html_output(self, articles: List[Dict[str, Any]], query: str) -> str:
        """Format articles as HTML with a modern, clean design.

        Args:
            articles (List[Dict[str, Any]]): Processed article dicts
                (title/link/source/published_date/summary).
            query (str): The original search query, shown in the header.

        Returns:
            str: A complete standalone HTML document.
        """
        css_styles = """
            body {
                font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
                line-height: 1.6;
                max-width: 1200px;
                margin: 0 auto;
                padding: 20px;
                background-color: #f5f5f5;
            }
            .header {
                background-color: #2c3e50;
                color: white;
                padding: 20px;
                border-radius: 8px;
                margin-bottom: 20px;
            }
            .article {
                background-color: white;
                padding: 20px;
                margin-bottom: 20px;
                border-radius: 8px;
                box-shadow: 0 2px 4px rgba(0,0,0,0.1);
            }
            .article h2 {
                color: #2c3e50;
                margin-top: 0;
            }
            .article-meta {
                color: #666;
                font-size: 0.9em;
                margin-bottom: 10px;
            }
            .summary {
                border-left: 4px solid #2c3e50;
                padding-left: 15px;
                margin: 15px 0;
            }
            .source-link {
                display: inline-block;
                margin-top: 10px;
                color: #3498db;
                text-decoration: none;
            }
            .source-link:hover {
                text-decoration: underline;
            }
        """

        articles_html = []
        for article in articles:
            article_html = f"""
            <div class="article">
                <h2>{article.get('title', 'No Title')}</h2>
                <div class="article-meta">
                    <span>Source: {article.get('source', {}).get('title', 'Unknown')}</span>
                    <span> • </span>
                    <span>Published: {article.get('published_date', 'Unknown date')}</span>
                </div>
                <div class="summary">
                    {article.get('summary', 'No summary available')}
                </div>
                <a href="{article.get('link', '#')}" class="source-link" target="_blank">Read full article →</a>
            </div>
            """
            articles_html.append(article_html)

        html_content = f"""
        <!DOCTYPE html>
        <html>
        <head>
            <meta charset="UTF-8">
            <meta name="viewport" content="width=device-width, initial-scale=1.0">
            <style>
            {css_styles}
            </style>
        </head>
        <body>
            <div class="header">
                <h1>News Results for: {query}</h1>
                <p>Found {len(articles)} articles</p>
            </div>
            {''.join(articles_html)}
        </body>
        </html>
        """

        return html_content.strip()

    def _fetch_article_data(self, articles: List[NewsArticle]) -> List[NewsArticle]:
        """Fetch detailed data for multiple articles.

        Enriches every article concurrently over a single aiohttp session.

        Args:
            articles (List[NewsArticle]): Articles to enrich in place.

        Returns:
            List[NewsArticle]: The same articles, after enrichment.
        """

        async def fetch_all():
            # One shared session for all requests; gather runs the
            # enrichments concurrently.
            async with aiohttp.ClientSession() as session:
                await asyncio.gather(*(article.enrich(session) for article in articles))
            return articles

        # asyncio.run creates and tears down a fresh event loop, replacing
        # the deprecated get_event_loop()/new_event_loop() dance. Like the
        # loop.run_until_complete() it replaces, it raises RuntimeError if
        # invoked from inside an already-running event loop.
        return asyncio.run(fetch_all())

    def _format_results(self, articles: List[NewsArticle], analyze: bool) -> str:
        """Format news results with optional analysis data.

        NOTE(review): execute() currently returns HTML via
        _format_html_output; this plain-text formatter appears unused —
        kept for API compatibility.

        Args:
            articles (List[NewsArticle]): Articles to render.
            analyze (bool): Include summary/keywords/sentiment sections.

        Returns:
            str: Plain-text report.
        """
        results = ["=== Advanced Google News Results ===\n"]

        for i, article in enumerate(articles, 1):
            results.extend([
                f"{i}. {article.title}",
                f"   Source: {article.source} | Date: {article.date}",
                f"   URL: {article.url}",
                ""
            ])

            if analyze and article.summary:
                results.extend([
                    "   Summary:",
                    f"   {article.summary}",
                    "",
                    "   Key Topics:",
                    f"   {', '.join(article.keywords[:5])}",
                    "",
                    "   Sentiment Analysis:",
                    "   Overall tone: " + self._interpret_sentiment(article.sentiment),
                    f"   - Positive: {article.sentiment.get('pos', 0)*100:.1f}% ({self._get_sentiment_level(article.sentiment.get('pos', 0))})",
                    f"   - Negative: {article.sentiment.get('neg', 0)*100:.1f}% ({self._get_sentiment_level(article.sentiment.get('neg', 0))})",
                    f"   - Neutral: {article.sentiment.get('neu', 0)*100:.1f}% ({self._get_sentiment_level(article.sentiment.get('neu', 0))})",
                    ""
                ])

            results.append("")

        return "\n".join(results)

    def _get_sentiment_level(self, score: float) -> str:
        """Convert sentiment score to descriptive level.

        Thresholds: >=0.6 Very High, >=0.4 High, >=0.2 Moderate,
        >0.1 Low, otherwise Very Low.
        """
        if score >= 0.6:
            return "Very High"
        elif score >= 0.4:
            return "High"
        elif score >= 0.2:
            return "Moderate"
        elif score > 0.1:
            return "Low"
        else:
            return "Very Low"

    def _interpret_sentiment(self, sentiment: Dict[str, float]) -> str:
        """Interpret the overall sentiment of the text.

        Uses the VADER-style 'compound' score: >=0.5 Very Positive,
        >=0.1 Slightly Positive, <=-0.5 Very Negative,
        <=-0.1 Slightly Negative, otherwise Neutral.
        """
        compound = sentiment.get('compound', 0)
        if compound >= 0.5:
            return "Very Positive"
        elif compound >= 0.1:
            return "Slightly Positive"
        elif compound <= -0.5:
            return "Very Negative"
        elif compound <= -0.1:
            return "Slightly Negative"
        else:
            return "Neutral"

    def execute(
        self,
        query: str,
        language: str = "en",
        period: str = "1m",
        max_results: int = 30,
        country: str = "US",
        sort_by: str = "relevance",
        analyze: bool = True,
    ) -> str:
        """Execute the Google News search with summarization and HTML formatting.

        Args:
            query (str): Search query
            language (str, optional): Language code. Defaults to "en".
            period (str, optional): Time period. Defaults to "1m".
            max_results (int, optional): Maximum results. Defaults to 30.
            country (str, optional): Country code. Defaults to "US".
            sort_by (str, optional): Sort method. Defaults to "relevance".
            analyze (bool, optional): Whether to analyze results. Defaults to True.

        Returns:
            str: HTML formatted news results with summaries

        Raises:
            RuntimeError: If fetching or processing the news fails.
        """
        try:
            # Input validation
            if not query:
                raise ValueError("Query cannot be empty")

            # Configure GNews
            google_news = GNews(
                language=language,
                country=country,
                period=period,
                max_results=max_results,
            )

            # Fetch news
            logger.info(f"Fetching news for query: {query}")
            articles = []
            try:
                raw_articles = google_news.get_news(query)
                for article_data in raw_articles:
                    articles.append(
                        NewsArticle(
                            title=article_data.get("title", ""),
                            url=article_data.get("url", ""),
                            source=article_data.get("publisher", {}).get("title", ""),
                            date=article_data.get("published date", ""),
                        )
                    )
            except Exception as e:
                logger.error(f"Error fetching articles: {e}")
                raise RuntimeError(f"Failed to fetch articles: {str(e)}") from e

            # Enrich articles with additional data if requested
            if analyze:
                logger.info("Performing detailed analysis of articles...")
                articles = self._fetch_article_data(articles)

            # Sort results if needed.
            # NOTE(review): dates are feed-supplied strings, so this is a
            # lexicographic sort — confirm the feed format before relying
            # on strict chronological order.
            if sort_by == "date":
                articles.sort(key=lambda x: x.date if x.date else "", reverse=True)

            # Process and summarize each article
            processed_articles = []
            for article in articles:
                article_copy = {
                    'title': article.title,
                    'link': article.url,
                    'source': {'title': article.source},
                    'published_date': article.date,
                    'description': article.full_text if hasattr(article, 'full_text') else '',
                }
                article_copy['summary'] = self._summarize_article(article_copy)
                processed_articles.append(article_copy)

            # Format results as HTML
            html_output = self._format_html_output(processed_articles, query)
            return html_output

        except Exception as e:
            logger.error(f"Error in GoogleNewsTool: {e}")
            # RuntimeError is an Exception subclass, so existing callers
            # catching Exception keep working; `from e` preserves the cause.
            raise RuntimeError(f"Failed to fetch news: {str(e)}") from e
if __name__ == "__main__":
    # Demo: run an analyzed news search and print the resulting HTML.
    news_tool = GoogleNewsTool()
    search_kwargs = {
        "query": "XRP crypto coin",
        "language": "en",
        "period": "7d",
        "max_results": 25,
        "analyze": True,
    }
    try:
        print(news_tool.execute(**search_kwargs))
    except Exception as e:
        print(f"Error: {e}")