google-news-trends-mcp 0.1.3__py3-none-any.whl → 0.1.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- a/google_news_trends_mcp/__init__.py
+++ b/google_news_trends_mcp/__init__.py
@@ -0,0 +1,2 @@
+import logging
+logging.getLogger(__name__).addHandler(logging.NullHandler())
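
The new __init__.py attaches a NullHandler to the package's top-level logger, the standard convention for libraries: the package emits no output unless the consuming application configures logging itself. A minimal sketch of the opt-in from the consumer side (the configuration choices here are illustrative, not part of the package):

    import logging

    # Hypothetical consumer code: surface the package's log records.
    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger("google_news_trends_mcp").setLevel(logging.DEBUG)
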
--- a/google_news_trends_mcp/cli.py
+++ b/google_news_trends_mcp/cli.py
@@ -107,7 +107,10 @@ def trending(geo, full_data, max_results):
     if trending_terms:
         print("Trending terms:")
         for term in trending_terms:
-            print(f"- {term}")
+            if isinstance(term, dict):
+                print(f"{term['keyword']:<40} - {term['volume']}")
+            else:
+                print(term)
     else:
         print("No trending terms found.")
 
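
In the new branch each term arrives as a dict by default (see the news.py change below), with TrendKeyword objects only when full data is requested. The :<40 format spec left-aligns the keyword in a 40-character field so the volume column lines up across rows:

    # Illustrative values; the keyword is padded to 40 characters.
    term = {"keyword": "ai", "volume": 42}
    print(f"{term['keyword']:<40} - {term['volume']}")  # "ai", 38 pad spaces, " - 42"
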
--- a/google_news_trends_mcp/news.py
+++ b/google_news_trends_mcp/news.py
@@ -14,12 +14,19 @@ from gnews import GNews
 import newspaper  # newspaper4k
 from googlenewsdecoder import gnewsdecoder
 import cloudscraper
-from playwright.async_api import async_playwright, Browser
+from playwright.async_api import async_playwright, Browser, Playwright
 from trendspy import Trends, TrendKeyword
 import click
-from typing import Optional
+from typing import Optional, cast
 import atexit
 from contextlib import asynccontextmanager
+import logging
+
+logger = logging.getLogger(__name__)
+
+for logname in logging.root.manager.loggerDict:
+    if logname.startswith("newspaper"):
+        logging.getLogger(logname).setLevel(logging.ERROR)
 
 tr = Trends()
 
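
The loggerDict loop only sees loggers that are already registered, which is why it works here: newspaper has been imported above, so its sub-loggers exist. The same pattern generalizes to any chatty dependency; a hedged sketch (the helper name is hypothetical):

    import logging

    def silence_loggers(prefix: str, level: int = logging.ERROR) -> None:
        # Raise the level of every logger already registered under `prefix`.
        # Loggers the dependency creates after this call are unaffected.
        for name in logging.root.manager.loggerDict:
            if name.startswith(prefix):
                logging.getLogger(name).setLevel(level)

    silence_loggers("newspaper")
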
@@ -43,8 +50,8 @@ google_news = GNews(
     # exclude_websites=[],
 )
 
-playwright = None
-browser: Browser = None
+playwright: Optional[Playwright] = None
+browser: Optional[Browser] = None
 
 
 async def startup_browser():
@@ -64,7 +71,7 @@ def shutdown_browser():
 async def get_browser() -> Browser:
     if browser is None:
         await startup_browser()
-    return browser
+    return cast(Browser, browser)
 
 
 @asynccontextmanager
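
The cast is purely for the type checker: the globals are now Optional, and after startup_browser() runs, browser is known (to the author, not to the checker) to be non-None. At runtime cast does nothing:

    from typing import cast

    # cast() returns its second argument unchanged; no runtime check occurs.
    value = cast(int, "still a str")
    assert value == "still a str"
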
@@ -73,7 +80,7 @@ async def browser_context():
     try:
         yield context
     finally:
-        print("Closing browser context...")
+        logging.debug("Closing browser context...")
         await context.close()
 
 
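
A hypothetical call site for the context manager, showing what the finally block guarantees: the Playwright context is closed even if scraping raises (the function and page calls below are illustrative, not from the package):

    async def fetch_html(url: str) -> str:
        async with browser_context() as context:
            page = await context.new_page()
            await page.goto(url)
            return await page.content()
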
@@ -90,7 +97,7 @@ async def download_article_with_playwright(url) -> newspaper.Article | None:
         article = newspaper.article(url, input_html=content, language="en")
         return article
     except Exception as e:
-        print(f"Error downloading article with Playwright from {url}\n {e.args}")
+        logging.warning(f"Error downloading article with Playwright from {url}\n {e.args}")
         return None
 
 
@@ -105,36 +112,37 @@ async def download_article(url: str, nlp: bool = True) -> newspaper.Article | None:
             if decoded_url.get("status"):
                 url = decoded_url["decoded_url"]
             else:
-                print("Failed to decode Google News RSS link:")
+                logging.debug("Failed to decode Google News RSS link:")
                 return None
         except Exception as err:
-            print(f"Error while decoding url {url}\n {err.args}")
+            logging.warning(f"Error while decoding url {url}\n {err.args}")
     try:
         article = newspaper.article(url)
     except Exception as e:
-        print(f"Error downloading article with newspaper from {url}\n {e.args}")
+        logging.debug(f"Error downloading article with newspaper from {url}\n {e.args}")
         try:
             # Retry with cloudscraper
             response = scraper.get(url)
             if response.status_code < 400:
                 article = newspaper.article(url, input_html=response.text)
             else:
-                print(
+                logging.debug(
                     f"Failed to download article with cloudscraper from {url}, status code: {response.status_code}"
                 )
         except Exception as e:
-            print(f"Error downloading article with cloudscraper from {url}\n {e.args}")
+            logging.debug(f"Error downloading article with cloudscraper from {url}\n {e.args}")
 
     try:
         if article is None or not article.text:
             # If newspaper failed, try downloading with Playwright
-            print(f"Retrying with Playwright for {url}")
+            logging.debug(f"Retrying with Playwright for {url}")
             article = await download_article_with_playwright(url)
+        article = cast(newspaper.Article, article)
         article.parse()
         if nlp:
             article.nlp()
     except Exception as e:
-        print(f"Error parsing article from {url}\n {e.args}")
+        logging.warning(f"Error parsing article from {url}\n {e.args}")
         return None
     return article
 
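
One caveat: cast(newspaper.Article, article) informs only the type checker. If the Playwright retry also fails, article is still None at runtime, so article.parse() raises AttributeError, which the enclosing except reports as a parse error. An explicit guard (illustrative, not what 0.1.5 ships) would be:

    if article is None:
        logging.warning(f"All download attempts failed for {url}")
        return None
    article.parse()
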
@@ -149,7 +157,7 @@ async def process_gnews_articles(
     for gnews_article in gnews_articles:
         article = await download_article(gnews_article["url"], nlp=nlp)
         if article is None or not article.text:
-            print(f"Failed to download article from {gnews_article['url']}:\n{article}")
+            logging.debug(f"Failed to download article from {gnews_article['url']}:\n{article}")
            continue
         articles.append(article)
     return articles
@@ -164,14 +172,12 @@ async def get_news_by_keyword(
     period: is the number of days to look back for articles.
     max_results: is the maximum number of results to return.
     nlp: If True, will perform NLP on the articles to extract keywords and summary.
-    Returns:
-        list[newspaper.Article]: A list of newspaper.Article objects containing the articles.
     """
     google_news.period = f"{period}d"
     google_news.max_results = max_results
     gnews_articles = google_news.get_news(keyword)
     if not gnews_articles:
-        print(f"No articles found for keyword '{keyword}' in the last {period} days.")
+        logging.debug(f"No articles found for keyword '{keyword}' in the last {period} days.")
         return []
     return await process_gnews_articles(gnews_articles, nlp=nlp)
 
@@ -184,14 +190,12 @@ async def get_top_news(
     period: is the number of days to look back for top articles.
     max_results: is the maximum number of results to return.
     nlp: If True, will perform NLP on the articles to extract keywords and summary.
-    Returns:
-        list[newspaper.Article]: A list of newspaper.Article objects containing the top news articles.
     """
     google_news.period = f"{period}d"
     google_news.max_results = max_results
     gnews_articles = google_news.get_top_news()
     if not gnews_articles:
-        print("No top news articles found.")
+        logging.debug("No top news articles found.")
         return []
     return await process_gnews_articles(gnews_articles, nlp=nlp)
 
@@ -204,14 +208,12 @@ async def get_news_by_location(
     period: is the number of days to look back for articles.
     max_results: is the maximum number of results to return.
     nlp: If True, will perform NLP on the articles to extract keywords and summary.
-    Returns:
-        list[newspaper.Article]: A list of newspaper.Article objects containing the articles for the specified location
     """
     google_news.period = f"{period}d"
     google_news.max_results = max_results
     gnews_articles = google_news.get_news_by_location(location)
     if not gnews_articles:
-        print(f"No articles found for location '{location}' in the last {period} days.")
+        logging.debug(f"No articles found for location '{location}' in the last {period} days.")
         return []
     return await process_gnews_articles(gnews_articles, nlp=nlp)
 
@@ -232,30 +234,25 @@ async def get_news_by_topic(
     period: is the number of days to look back for articles.
     max_results: is the maximum number of results to return.
     nlp: If True, will perform NLP on the articles to extract keywords and summary.
-    Returns:
-        list[newspaper.Article]: A list of newspaper.Article objects containing the articles for the specified topic
     """
     google_news.period = f"{period}d"
     google_news.max_results = max_results
     gnews_articles = google_news.get_news_by_topic(topic)
     if not gnews_articles:
-        print(f"No articles found for topic '{topic}' in the last {period} days.")
+        logging.debug(f"No articles found for topic '{topic}' in the last {period} days.")
         return []
     return await process_gnews_articles(gnews_articles, nlp=nlp)
 
 
 async def get_trending_terms(
     geo: str = "US", full_data: bool = False, max_results: int = 100
-) -> list[tuple[str, int]] | list[TrendKeyword]:
+) -> list[dict[str, int]] | list[TrendKeyword]:
     """
     Returns google trends for a specific geo location.
     Default is US.
     geo: is the country code, e.g. 'US', 'GB', 'IN', etc.
     full_data: if True, returns full data for each trend, otherwise returns only the trend and volume.
     max_results: is the maximum number of results to return, default is 100.
-    Returns:
-        list[tuple[str, int]]: A list of tuples containing the trend keyword and its volume.
-        If full_data is True, each tuple will also contain additional data such as related queries and trend type.
     """
     try:
         trends = list(tr.trending_now(geo=geo))
@@ -263,10 +260,10 @@ async def get_trending_terms(
             :max_results
         ]
         if not full_data:
-            return [(trend.keyword, trend.volume) for trend in trends]
+            return [{"keyword": trend.keyword, "volume": trend.volume} for trend in trends]
         return trends
     except Exception as e:
-        print(f"Error fetching trending terms: {e}")
+        logging.warning(f"Error fetching trending terms: {e}")
         return []
 
 
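
With full_data=False callers now receive dicts rather than (keyword, volume) tuples, so they index by key instead of position. A hedged consumer sketch:

    import asyncio

    async def main() -> None:
        terms = await get_trending_terms(geo="US", max_results=5)
        for term in terms:
            # Dict access replaces the old tuple unpacking.
            print(term["keyword"], term["volume"])

    asyncio.run(main())
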
@@ -314,4 +311,4 @@ def save_article_to_json(article: newspaper.Article, filename: str = "") -> None:
         filename += ".json"
     with open(filename, "w") as f:
         json.dump(article_data, f, indent=4)
-    print(f"Article saved to {filename}")
+    logging.debug(f"Article saved to {filename}")
--- a/google_news_trends_mcp/server.py
+++ b/google_news_trends_mcp/server.py
@@ -223,8 +223,6 @@ async def get_top_news(
         max_results=max_results,
         nlp=nlp,
     )
-    ctx = get_context()
-    await ctx.debug(f"Top Articles:\n{articles[0].to_json(False)}")
     return [ArticleOut(**a.to_json(False)) for a in articles]
 
 
@@ -248,11 +246,6 @@ async def get_trending_terms(
     trends = await news.get_trending_terms(
         geo=geo, full_data=full_data, max_results=max_results
     )
-    ctx = get_context()
-    await ctx.debug(f"Found {(trends)} trending terms")
-    if not full_data:
-        # return [TrendingTermOut(keyword=tt[0], volume=tt[1]) for tt in trends]
-        return [TrendingTermOut(keyword=tt[0], volume=tt[1]) for tt in trends]
     return [TrendingTermOut(**tt.__dict__) for tt in trends]
 
 
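
Note the interaction with the news.py change above: when full_data is false, trends is a list of plain dicts, and a dict has no __dict__ attribute, so TrendingTermOut(**tt.__dict__) would raise AttributeError on that path. A dict-aware construction (illustrative, not part of 0.1.5) might read:

    return [
        TrendingTermOut(**(tt if isinstance(tt, dict) else tt.__dict__))
        for tt in trends
    ]
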
--- a/google_news_trends_mcp-0.1.3.dist-info/METADATA
+++ b/google_news_trends_mcp-0.1.5.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: google-news-trends-mcp
-Version: 0.1.3
+Version: 0.1.5
 Summary: An MCP server to access Google News and Google Trends.
 Author-email: Jesse Manek <jesse.manek@gmail.com>
 License-Expression: MIT
--- /dev/null
+++ b/google_news_trends_mcp-0.1.5.dist-info/RECORD
@@ -0,0 +1,11 @@
+google_news_trends_mcp/__init__.py,sha256=J9O5WNvC9cNDaxecveSUvzLGOXOYO-pCHbiGopfYoIc,76
+google_news_trends_mcp/__main__.py,sha256=ysiAk_xpnnW3lrLlzdIQQa71tuGBRT8WocbecBsY2Fs,87
+google_news_trends_mcp/cli.py,sha256=XJNnRVpDXX2MCb8dPfDcQJWYYA4CxTuxbhvpJGeVQgs,4133
+google_news_trends_mcp/news.py,sha256=FYz1guxLZThMmh_9uN3VcdHBjLHZF5brhk7Bw7QxeDo,11780
+google_news_trends_mcp/server.py,sha256=4l1cslLvQ2gdldRK6DTPmlaO37iLublBT9Y5_9nBj2o,9114
+google_news_trends_mcp-0.1.5.dist-info/licenses/LICENSE,sha256=5dsv2ZI5EZIer0a9MktVmILVrlp5vqH_0tPIe3bRLgE,1067
+google_news_trends_mcp-0.1.5.dist-info/METADATA,sha256=yKcgFJVdy-3o_Ox-F8r0xEbCTZ9ToQJXTlD53JkYtyo,4580
+google_news_trends_mcp-0.1.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+google_news_trends_mcp-0.1.5.dist-info/entry_points.txt,sha256=eVT3xd6YJQgsWAUBwhnffuwhXNF7yyt_uco6fjBy-1o,130
+google_news_trends_mcp-0.1.5.dist-info/top_level.txt,sha256=RFheDbzhNnEV_Y3iFNm7jhRhY1P1wQgfiYqVpXCTD_U,23
+google_news_trends_mcp-0.1.5.dist-info/RECORD,,
--- a/google_news_trends_mcp-0.1.3.dist-info/RECORD
+++ /dev/null
@@ -1,11 +0,0 @@
-google_news_trends_mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-google_news_trends_mcp/__main__.py,sha256=ysiAk_xpnnW3lrLlzdIQQa71tuGBRT8WocbecBsY2Fs,87
-google_news_trends_mcp/cli.py,sha256=fi0qocr-nc3UbGKOR5GLrmfsEjhU_M6ZJ7UAyLoC8ds,4012
-google_news_trends_mcp/news.py,sha256=TH8y4lHHyjGRJ_CGHsiU425sQuFEefR_cg27br9fKr8,12014
-google_news_trends_mcp/server.py,sha256=qwQ_9UKnOLybUGCmUH4sJWxKsmJHZCg7PKimFXgr58c,9468
-google_news_trends_mcp-0.1.3.dist-info/licenses/LICENSE,sha256=5dsv2ZI5EZIer0a9MktVmILVrlp5vqH_0tPIe3bRLgE,1067
-google_news_trends_mcp-0.1.3.dist-info/METADATA,sha256=5ixq1RxbZOw6R4KWbpOd14Qp3zQVJ815hP_yCf9rl-s,4580
-google_news_trends_mcp-0.1.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-google_news_trends_mcp-0.1.3.dist-info/entry_points.txt,sha256=eVT3xd6YJQgsWAUBwhnffuwhXNF7yyt_uco6fjBy-1o,130
-google_news_trends_mcp-0.1.3.dist-info/top_level.txt,sha256=RFheDbzhNnEV_Y3iFNm7jhRhY1P1wQgfiYqVpXCTD_U,23
-google_news_trends_mcp-0.1.3.dist-info/RECORD,,