google-news-trends-mcp 0.2.0__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
--- a/google_news_trends_mcp/news.py
+++ b/google_news_trends_mcp/news.py
@@ -48,9 +48,6 @@ google_news = GNews(
     # exclude_websites=[],
 )
 
-playwright: Optional[Playwright] = None
-browser: Optional[Browser] = None
-
 ProgressCallback = Callable[[float, Optional[float]], Awaitable[None]]
 
 
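The two module-level handles removed here are not dropped outright: the next hunk assigns `cls.playwright` and `cls.browser`, so the state appears to move onto `BrowserManager` as class attributes. A minimal sketch of that layout, assuming the attribute declarations (they are not shown in this diff):

```python
# Sketch only: attribute declarations inferred from the cls.playwright /
# cls.browser assignments in the following hunk; not shown in this diff.
from typing import Optional

from playwright.async_api import Browser, Playwright


class BrowserManager:
    playwright: Optional[Playwright] = None
    browser: Optional[Browser] = None
```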
@@ -64,14 +61,15 @@ class BrowserManager(AsyncContextDecorator):
         if cls.browser is None:
             async with cls._lock:
                 if cls.browser is None:
-                    await cls._startup()
+                    logger.info("Starting browser...")
+                    try:
+                        cls.playwright = await async_playwright().start()
+                        cls.browser = await cls.playwright.chromium.launch(headless=True)
+                    except Exception as e:
+                        logger.critical("Browser startup failed", exc_info=e)
+                        raise SystemExit(1)
         return cast(Browser, cls.browser)
 
-    @classmethod
-    async def _startup(cls):
-        logger.info("Starting browser...")
-        cls.playwright = await async_playwright().start()
-        cls.browser = await cls.playwright.chromium.launch(headless=True)
 
     @classmethod
     async def _shutdown(cls):
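Inlining `_startup()` keeps the whole startup path inside the double-checked locking block: the outer `if cls.browser is None` is a lock-free fast path, and the re-check under `cls._lock` ensures only one task launches the browser. A stripped-down sketch of the pattern, with hypothetical names:

```python
import asyncio


class LazyResource:
    _instance = None
    _lock = asyncio.Lock()

    @classmethod
    async def get(cls):
        if cls._instance is None:          # fast path: no lock once initialized
            async with cls._lock:
                if cls._instance is None:  # re-check: another task may have won the race
                    cls._instance = object()  # stand-in for the real async startup
        return cls._instance
```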
@@ -89,11 +87,11 @@ class BrowserManager(AsyncContextDecorator):
         async def _browser_context_cm():
             browser_inst = await cls._get_browser()
             context = await browser_inst.new_context()
-            logging.debug("Created browser context...")
+            logger.debug("Created browser context...")
             try:
                 yield context
             finally:
-                logger.debug("Closing browser context...")
+                logger.debug("Closing browser context...")
                 await context.close()
 
         return _browser_context_cm()
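`_browser_context_cm` is an async generator that yields a context and closes it in `finally`, so callers presumably receive it wrapped as an async context manager. A hypothetical call site (the public accessor name is an assumption, not shown in this diff):

```python
# Hypothetical usage; the public accessor wrapping _browser_context_cm()
# is assumed here and is not part of this diff.
async def fetch_page_html(url: str) -> str:
    async with BrowserManager.browser_context() as context:
        page = await context.new_page()
        await page.goto(url)
        return await page.content()
```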
@@ -119,7 +117,7 @@ async def download_article_with_playwright(url) -> newspaper.Article | None:
             article = newspaper.article(url, input_html=content)
             return article
     except Exception as e:
-        logging.warning(f"Error downloading article with Playwright from {url}\n {e.args}")
+        logger.warning(f"Error downloading article with Playwright from {url}\n {e.args}")
         return None
 
 
@@ -128,18 +126,18 @@ def download_article_with_scraper(url) -> newspaper.Article | None:
     try:
         article = newspaper.article(url)
     except Exception as e:
-        logging.debug(f"Error downloading article with newspaper from {url}\n {e.args}")
+        logger.debug(f"Error downloading article with newspaper from {url}\n {e.args}")
         try:
             # Retry with cloudscraper
             response = scraper.get(url)
             if response.status_code < 400:
                 article = newspaper.article(url, input_html=response.text)
             else:
-                logging.debug(
+                logger.debug(
                     f"Failed to download article with cloudscraper from {url}, status code: {response.status_code}"
                 )
         except Exception as e:
-            logging.debug(f"Error downloading article with cloudscraper from {url}\n {e.args}")
+            logger.debug(f"Error downloading article with cloudscraper from {url}\n {e.args}")
     return article
 
 
@@ -150,10 +148,10 @@ def decode_url(url: str) -> str:
         if decoded_url.get("status"):
             url = decoded_url["decoded_url"]
         else:
-            logging.debug("Failed to decode Google News RSS link:")
+            logger.debug("Failed to decode Google News RSS link:")
             return ""
     except Exception as err:
-        logging.warning(f"Error while decoding url {url}\n {err.args}")
+        logger.warning(f"Error while decoding url {url}\n {err.args}")
     return url
 
 
@@ -184,7 +182,7 @@ async def process_gnews_articles(
     for idx, gnews_article in enumerate(gnews_articles):
         article = await download_article(gnews_article["url"])
         if article is None or not article.text:
-            logging.debug(f"Failed to download article from {gnews_article['url']}:\n{article}")
+            logger.debug(f"Failed to download article from {gnews_article['url']}:\n{article}")
             continue
         article.parse()
         if nlp:
@@ -209,7 +207,7 @@ async def get_news_by_keyword(
     google_news.max_results = max_results
     gnews_articles = google_news.get_news(keyword)
     if not gnews_articles:
-        logging.debug(f"No articles found for keyword '{keyword}' in the last {period} days.")
+        logger.debug(f"No articles found for keyword '{keyword}' in the last {period} days.")
         return []
     return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)
 
@@ -227,7 +225,7 @@ async def get_top_news(
     google_news.max_results = max_results
     gnews_articles = google_news.get_top_news()
     if not gnews_articles:
-        logging.debug("No top news articles found.")
+        logger.debug("No top news articles found.")
         return []
     return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)
 
@@ -244,7 +242,7 @@ async def get_news_by_location(
     google_news.max_results = max_results
     gnews_articles = google_news.get_news_by_location(location)
     if not gnews_articles:
-        logging.debug(f"No articles found for location '{location}' in the last {period} days.")
+        logger.debug(f"No articles found for location '{location}' in the last {period} days.")
         return []
     return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)
 
@@ -271,7 +269,7 @@ async def get_news_by_topic(
     google_news.max_results = max_results
     gnews_articles = google_news.get_news_by_topic(topic)
     if not gnews_articles:
-        logging.debug(f"No articles found for topic '{topic}' in the last {period} days.")
+        logger.debug(f"No articles found for topic '{topic}' in the last {period} days.")
         return []
     return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)
 
@@ -303,7 +301,7 @@ async def get_trending_terms(
             return [{"keyword": trend.keyword, "volume": trend.volume} for trend in trends]
         return trends
     except Exception as e:
-        logging.warning(f"Error fetching trending terms: {e}")
+        logger.warning(f"Error fetching trending terms: {e}")
         return []
 
 
@@ -351,4 +349,4 @@ def save_article_to_json(article: newspaper.Article, filename: str = "") -> None
         filename += ".json"
     with open(filename, "w") as f:
         json.dump(article_data, f, indent=4)
-    logging.debug(f"Article saved to {filename}")
+    logger.debug(f"Article saved to {filename}")
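All of the `logging.*` → `logger.*` changes above point these calls at a module-level logger instead of the root logger, so records carry the module's name and can be filtered or handled per-module. A sketch of the declaration the renames rely on (the exact line is not shown in this diff):

```python
import logging

# Module-level logger assumed by the logger.* calls above; records are
# emitted under this module's name rather than through the root logger.
logger = logging.getLogger(__name__)
```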
--- a/google_news_trends_mcp/server.py
+++ b/google_news_trends_mcp/server.py
@@ -154,6 +154,19 @@ async def summarize_article(article: Article, ctx: Context) -> None:
         article.summary = "No summary available."
 
 
+async def summarize_articles(articles: list[Article], ctx: Context) -> None:
+    total_articles = len(articles)
+    try:
+        for idx, article in enumerate(articles):
+            await summarize_article(article, ctx)
+            await ctx.report_progress(idx, total_articles)
+    except Exception as err:
+        await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
+        for idx, article in enumerate(articles):
+            article.nlp()
+            await ctx.report_progress(idx, total_articles)
+
+
 @mcp.tool(
     description=news.get_news_by_keyword.__doc__,
     tags={"news", "articles", "keyword"},
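The four tool handlers below (keyword, location, topic, top news) each replace a duplicated summarize-with-fallback block with a single call to this helper, so every handler ends the same way. A condensed sketch of that shared tail (the wrapper function here is hypothetical; `articles`, `ctx`, `summarize`, and `ArticleOut` are the names used in this diff):

```python
# Condensed tail of each tool handler after the refactor (from the hunks below).
async def _handler_tail(articles, ctx, summarize):
    if summarize:
        await summarize_articles(articles, ctx)
    await ctx.report_progress(progress=len(articles), total=len(articles))
    return [ArticleOut(**a.to_json(False)) for a in articles]
```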
@@ -185,16 +198,7 @@ async def get_news_by_keyword(
         report_progress=ctx.report_progress,
     )
     if summarize:
-        total_articles = len(articles)
-        try:
-            for idx, article in enumerate(articles):
-                await summarize_article(article, ctx)
-                await ctx.report_progress(idx, total_articles)
-        except Exception as err:
-            await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
-            for idx, article in enumerate(articles):
-                article.nlp()
-                await ctx.report_progress(idx, total_articles)
+        await summarize_articles(articles, ctx)
     await ctx.report_progress(progress=len(articles), total=len(articles))
     return [ArticleOut(**a.to_json(False)) for a in articles]
 
@@ -230,16 +234,7 @@ async def get_news_by_location(
         report_progress=ctx.report_progress,
     )
     if summarize:
-        total_articles = len(articles)
-        try:
-            for idx, article in enumerate(articles):
-                await summarize_article(article, ctx)
-                await ctx.report_progress(idx, total_articles)
-        except Exception as err:
-            await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
-            for idx, article in enumerate(articles):
-                article.nlp()
-                await ctx.report_progress(idx, total_articles)
+        await summarize_articles(articles, ctx)
     await ctx.report_progress(progress=len(articles), total=len(articles))
     return [ArticleOut(**a.to_json(False)) for a in articles]
 
@@ -272,17 +267,7 @@ async def get_news_by_topic(
         report_progress=ctx.report_progress,
     )
     if summarize:
-        total_articles = len(articles)
-        try:
-            for idx, article in enumerate(articles):
-                await summarize_article(article, ctx)
-                await ctx.report_progress(idx, total_articles)
-        except Exception as err:
-            await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
-            for idx, article in enumerate(articles):
-                article.nlp()
-                await ctx.report_progress(idx, total_articles)
-
+        await summarize_articles(articles, ctx)
     await ctx.report_progress(progress=len(articles), total=len(articles))
     return [ArticleOut(**a.to_json(False)) for a in articles]
 
@@ -313,17 +298,7 @@ async def get_top_news(
         report_progress=ctx.report_progress,
     )
     if summarize:
-        total_articles = len(articles)
-        try:
-            for idx, article in enumerate(articles):
-                await summarize_article(article, ctx)
-                await ctx.report_progress(idx, total_articles)
-        except Exception as err:
-            await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
-            for idx, article in enumerate(articles):
-                article.nlp()
-                await ctx.report_progress(idx, total_articles)
-
+        await summarize_articles(articles, ctx)
     await ctx.report_progress(progress=len(articles), total=len(articles))
     return [ArticleOut(**a.to_json(False)) for a in articles]
 
@@ -340,7 +315,6 @@ async def get_trending_terms(
     if not full_data:
         trends = await news.get_trending_terms(geo=geo, full_data=False, max_results=max_results)
         return [TrendingTermOut(keyword=str(tt["keyword"]), volume=tt["volume"]) for tt in trends]
-
     trends = await news.get_trending_terms(geo=geo, full_data=True, max_results=max_results)
     return [TrendingTermOut(**tt.__dict__) for tt in trends]
 
--- a/google_news_trends_mcp-0.2.0.dist-info/METADATA
+++ b/google_news_trends_mcp-0.2.1.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: google-news-trends-mcp
-Version: 0.2.0
+Version: 0.2.1
 Summary: An MCP server to access Google News and Google Trends.
 Author-email: Jesse Manek <jesse.manek@gmail.com>
 License-Expression: MIT
@@ -27,7 +27,7 @@ Dynamic: license-file
 
 # Google News Trends MCP
 
-An MCP server to access Google News and Google Trends. Does not rely on any paid APIs.
+An MCP server to access Google News and Google Trends.
 
 ## Features
 
--- /dev/null
+++ b/google_news_trends_mcp-0.2.1.dist-info/RECORD
@@ -0,0 +1,11 @@
+google_news_trends_mcp/__init__.py,sha256=nDWNd6_TSf4vDQuHVBoAf4QfZCB3ZUFQ0M7XvifNJ-g,78
+google_news_trends_mcp/__main__.py,sha256=ysiAk_xpnnW3lrLlzdIQQa71tuGBRT8WocbecBsY2Fs,87
+google_news_trends_mcp/cli.py,sha256=3Z916898HXTigmQYEfvb7ybfbuUE7bjMC6yjT5-l6u0,4558
+google_news_trends_mcp/news.py,sha256=EhTIqRmFOQKAhyV8rLySaPoILX3uyKbrzO6jxo89FhA,12528
+google_news_trends_mcp/server.py,sha256=ZFZvCxSVLma7QlanFsXyu6gpCHZSS0y5YfnZl7HnXnc,13780
+google_news_trends_mcp-0.2.1.dist-info/licenses/LICENSE,sha256=5dsv2ZI5EZIer0a9MktVmILVrlp5vqH_0tPIe3bRLgE,1067
+google_news_trends_mcp-0.2.1.dist-info/METADATA,sha256=AdnEqmflIhFNUV052z9VHIOum2x0_TzI_e3hK84vdfM,4419
+google_news_trends_mcp-0.2.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+google_news_trends_mcp-0.2.1.dist-info/entry_points.txt,sha256=eVT3xd6YJQgsWAUBwhnffuwhXNF7yyt_uco6fjBy-1o,130
+google_news_trends_mcp-0.2.1.dist-info/top_level.txt,sha256=RFheDbzhNnEV_Y3iFNm7jhRhY1P1wQgfiYqVpXCTD_U,23
+google_news_trends_mcp-0.2.1.dist-info/RECORD,,
--- a/google_news_trends_mcp-0.2.0.dist-info/RECORD
+++ /dev/null
@@ -1,11 +0,0 @@
-google_news_trends_mcp/__init__.py,sha256=nDWNd6_TSf4vDQuHVBoAf4QfZCB3ZUFQ0M7XvifNJ-g,78
-google_news_trends_mcp/__main__.py,sha256=ysiAk_xpnnW3lrLlzdIQQa71tuGBRT8WocbecBsY2Fs,87
-google_news_trends_mcp/cli.py,sha256=3Z916898HXTigmQYEfvb7ybfbuUE7bjMC6yjT5-l6u0,4558
-google_news_trends_mcp/news.py,sha256=MPNZlzI7KXkhQ2uj7233N2i9kFHGUgGMdRBCAbj-B44,12471
-google_news_trends_mcp/server.py,sha256=S-tlFY1wiFm9VPeb4NDnV0NGtczaQDmx20kIrZZQHto,15031
-google_news_trends_mcp-0.2.0.dist-info/licenses/LICENSE,sha256=5dsv2ZI5EZIer0a9MktVmILVrlp5vqH_0tPIe3bRLgE,1067
-google_news_trends_mcp-0.2.0.dist-info/METADATA,sha256=rok_3L-eDVXQJSuG6ze1Vuicnh-kpcWyJxaF2DbqZ1s,4454
-google_news_trends_mcp-0.2.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-google_news_trends_mcp-0.2.0.dist-info/entry_points.txt,sha256=eVT3xd6YJQgsWAUBwhnffuwhXNF7yyt_uco6fjBy-1o,130
-google_news_trends_mcp-0.2.0.dist-info/top_level.txt,sha256=RFheDbzhNnEV_Y3iFNm7jhRhY1P1wQgfiYqVpXCTD_U,23
-google_news_trends_mcp-0.2.0.dist-info/RECORD,,
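The `sha256=` digests in RECORD are the urlsafe base64 encoding of the file's SHA-256 hash with padding stripped, and the final field is the file size in bytes. A small sketch for recomputing an entry to compare against the lines above:

```python
import base64
import hashlib


def record_entry(path: str) -> tuple[str, int]:
    """Return the RECORD-style digest and size for a file, e.g.
    ('sha256=EhTIqRmFOQK...', 12528) for news.py in 0.2.1."""
    with open(path, "rb") as f:
        data = f.read()
    digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest())
    return "sha256=" + digest.rstrip(b"=").decode("ascii"), len(data)
```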