google-news-trends-mcp 0.1.4__py3-none-any.whl → 0.1.6__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, exactly as they appear in their public registry. It is provided for informational purposes only.
--- a/google_news_trends_mcp/__init__.py
+++ b/google_news_trends_mcp/__init__.py
@@ -0,0 +1,2 @@
+import logging
+logging.getLogger(__name__).addHandler(logging.NullHandler())
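The two lines added to `__init__.py` follow the standard convention for library logging: attaching a `NullHandler` to the package logger keeps the library silent unless the consuming application configures logging itself. A minimal sketch of how a consumer would opt in (the level choice is illustrative, not something the package mandates):

```python
import logging

# Surface the library's log output from the consuming application;
# without this, the NullHandler swallows everything.
logging.basicConfig()
logging.getLogger("google_news_trends_mcp").setLevel(logging.DEBUG)
```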
--- a/google_news_trends_mcp/cli.py
+++ b/google_news_trends_mcp/cli.py
@@ -107,7 +107,10 @@ def trending(geo, full_data, max_results):
     if trending_terms:
         print("Trending terms:")
         for term in trending_terms:
-            print(f"- {term}")
+            if isinstance(term, dict):
+                print(f"{term['keyword']:<40} - {term['volume']}")
+            else:
+                print(term)
     else:
         print("No trending terms found.")
 
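The CLI now branches on the term's shape because `get_trending_terms` (changed later in this diff) returns dicts by default but raw `TrendKeyword` objects when full data is requested. The `:<40` format spec left-aligns each keyword in a 40-character column so the volumes line up; a quick illustration with made-up values:

```python
# Made-up sample terms in the new dict shape.
for term in [{"keyword": "solar eclipse", "volume": 200000},
             {"keyword": "transfer news", "volume": 50000}]:
    print(f"{term['keyword']:<40} - {term['volume']}")
# solar eclipse                            - 200000
# transfer news                            - 50000
```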
--- a/google_news_trends_mcp/news.py
+++ b/google_news_trends_mcp/news.py
@@ -14,12 +14,19 @@ from gnews import GNews
 import newspaper  # newspaper4k
 from googlenewsdecoder import gnewsdecoder
 import cloudscraper
-from playwright.async_api import async_playwright, Browser
+from playwright.async_api import async_playwright, Browser, Playwright
 from trendspy import Trends, TrendKeyword
 import click
-from typing import Optional
+from typing import Optional, cast
 import atexit
 from contextlib import asynccontextmanager
+import logging
+
+logger = logging.getLogger(__name__)
+
+for logname in logging.root.manager.loggerDict:
+    if logname.startswith("newspaper"):
+        logging.getLogger(logname).setLevel(logging.ERROR)
 
 tr = Trends()
 
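The loop over `logging.root.manager.loggerDict` lowers the verbosity of every logger the `newspaper` package has registered so far; it works here because `newspaper` is imported above, but a logger created later would be missed. A simpler alternative (not what this diff does) relies on level inheritance from the parent logger, which usually suffices unless the package sets explicit levels on its child loggers:

```python
import logging

# Child loggers such as "newspaper.article" inherit their effective
# level from the "newspaper" parent when their own level is NOTSET.
logging.getLogger("newspaper").setLevel(logging.ERROR)
```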
@@ -43,8 +50,8 @@ google_news = GNews(
     # exclude_websites=[],
 )
 
-playwright = None
-browser: Browser = None
+playwright: Optional[Playwright] = None
+browser: Optional[Browser] = None
 
 
 async def startup_browser():
@@ -64,7 +71,7 @@ def shutdown_browser():
 async def get_browser() -> Browser:
     if browser is None:
         await startup_browser()
-    return browser
+    return cast(Browser, browser)
 
 
 @asynccontextmanager
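The `Optional[...]` annotations above and this `cast` make the lazily initialized browser singleton type-check cleanly: the global starts as `None`, `startup_browser()` fills it in, and the `cast` asserts the post-condition that `get_browser()` never returns `None`. A condensed sketch of the pattern (synchronous and simplified, not the module's exact code):

```python
from typing import Optional, cast

class Browser:  # stand-in for playwright's Browser type
    pass

browser: Optional[Browser] = None

def get_browser() -> Browser:
    global browser
    if browser is None:
        browser = Browser()  # one-time lazy initialization
    # cast() is a runtime no-op; it only narrows the type for the checker
    return cast(Browser, browser)
```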
@@ -73,7 +80,7 @@ async def browser_context():
     try:
         yield context
     finally:
-        print("Closing browser context...")
+        logging.debug("Closing browser context...")
         await context.close()
 
 
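Note that although the import hunk above creates `logger = logging.getLogger(__name__)`, these converted call sites use the module-level functions (`logging.debug`, `logging.warning`), which route through the root logger rather than the package logger guarded by the `NullHandler` in `__init__.py`. Using the named logger would keep the library silent by default, e.g.:

```python
# Sketch: log via the module's named logger so output respects the
# NullHandler set up in google_news_trends_mcp/__init__.py.
logger.debug("Closing browser context...")
```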
@@ -90,7 +97,7 @@ async def download_article_with_playwright(url) -> newspaper.Article | None:
         article = newspaper.article(url, input_html=content, language="en")
         return article
     except Exception as e:
-        print(f"Error downloading article with Playwright from {url}\n {e.args}")
+        logging.warning(f"Error downloading article with Playwright from {url}\n {e.args}")
         return None
 
 
@@ -105,36 +112,37 @@ async def download_article(url: str, nlp: bool = True) -> newspaper.Article | None:
         if decoded_url.get("status"):
             url = decoded_url["decoded_url"]
         else:
-            print("Failed to decode Google News RSS link:")
+            logging.debug("Failed to decode Google News RSS link:")
             return None
     except Exception as err:
-        print(f"Error while decoding url {url}\n {err.args}")
+        logging.warning(f"Error while decoding url {url}\n {err.args}")
     try:
         article = newspaper.article(url)
     except Exception as e:
-        print(f"Error downloading article with newspaper from {url}\n {e.args}")
+        logging.debug(f"Error downloading article with newspaper from {url}\n {e.args}")
         try:
             # Retry with cloudscraper
             response = scraper.get(url)
             if response.status_code < 400:
                 article = newspaper.article(url, input_html=response.text)
             else:
-                print(
+                logging.debug(
                     f"Failed to download article with cloudscraper from {url}, status code: {response.status_code}"
                 )
         except Exception as e:
-            print(f"Error downloading article with cloudscraper from {url}\n {e.args}")
+            logging.debug(f"Error downloading article with cloudscraper from {url}\n {e.args}")
 
     try:
         if article is None or not article.text:
             # If newspaper failed, try downloading with Playwright
-            print(f"Retrying with Playwright for {url}")
+            logging.debug(f"Retrying with Playwright for {url}")
             article = await download_article_with_playwright(url)
+            article = cast(newspaper.Article, article)
         article.parse()
         if nlp:
             article.nlp()
     except Exception as e:
-        print(f"Error parsing article from {url}\n {e.args}")
+        logging.warning(f"Error parsing article from {url}\n {e.args}")
         return None
     return article
 
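`download_article` now tries three strategies in order (plain `newspaper`, then `cloudscraper` for pages behind bot protection, then a headless Playwright fetch), with each failure logged at debug level. The added `cast` after the Playwright retry only silences the type checker; `article` can still be `None` at runtime, in which case `article.parse()` raises and the surrounding `except` returns `None`. A generic sketch of the fallback idea, with hypothetical fetcher callables (the real module inlines each strategy rather than looping):

```python
import logging

async def fetch_with_fallbacks(url: str, fetchers) -> str | None:
    # Try each fetch strategy in order until one yields content.
    for fetch in fetchers:
        try:
            html = await fetch(url)
            if html:
                return html
        except Exception as e:
            logging.debug(f"{fetch.__name__} failed for {url}: {e.args}")
    return None
```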
@@ -149,7 +157,7 @@ async def process_gnews_articles(
     for gnews_article in gnews_articles:
         article = await download_article(gnews_article["url"], nlp=nlp)
         if article is None or not article.text:
-            print(f"Failed to download article from {gnews_article['url']}:\n{article}")
+            logging.debug(f"Failed to download article from {gnews_article['url']}:\n{article}")
             continue
         articles.append(article)
     return articles
@@ -169,7 +177,7 @@ async def get_news_by_keyword(
     google_news.max_results = max_results
     gnews_articles = google_news.get_news(keyword)
     if not gnews_articles:
-        print(f"No articles found for keyword '{keyword}' in the last {period} days.")
+        logging.debug(f"No articles found for keyword '{keyword}' in the last {period} days.")
         return []
     return await process_gnews_articles(gnews_articles, nlp=nlp)
 
@@ -187,7 +195,7 @@ async def get_top_news(
     google_news.max_results = max_results
     gnews_articles = google_news.get_top_news()
     if not gnews_articles:
-        print("No top news articles found.")
+        logging.debug("No top news articles found.")
         return []
     return await process_gnews_articles(gnews_articles, nlp=nlp)
 
@@ -205,7 +213,7 @@ async def get_news_by_location(
     google_news.max_results = max_results
     gnews_articles = google_news.get_news_by_location(location)
     if not gnews_articles:
-        print(f"No articles found for location '{location}' in the last {period} days.")
+        logging.debug(f"No articles found for location '{location}' in the last {period} days.")
         return []
     return await process_gnews_articles(gnews_articles, nlp=nlp)
 
@@ -231,14 +239,14 @@ async def get_news_by_topic(
     google_news.max_results = max_results
     gnews_articles = google_news.get_news_by_topic(topic)
     if not gnews_articles:
-        print(f"No articles found for topic '{topic}' in the last {period} days.")
+        logging.debug(f"No articles found for topic '{topic}' in the last {period} days.")
         return []
     return await process_gnews_articles(gnews_articles, nlp=nlp)
 
 
 async def get_trending_terms(
     geo: str = "US", full_data: bool = False, max_results: int = 100
-) -> list[tuple[str, int]] | list[TrendKeyword]:
+) -> list[dict[str, int]] | list[TrendKeyword]:
     """
     Returns google trends for a specific geo location.
     Default is US.
@@ -252,10 +260,10 @@ async def get_trending_terms(
             :max_results
         ]
         if not full_data:
-            return [(trend.keyword, trend.volume) for trend in trends]
+            return [{"keyword": trend.keyword, "volume": trend.volume} for trend in trends]
         return trends
     except Exception as e:
-        print(f"Error fetching trending terms: {e}")
+        logging.warning(f"Error fetching trending terms: {e}")
         return []
 
 
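Returning `{"keyword": ..., "volume": ...}` dicts instead of `(keyword, volume)` tuples makes the abbreviated payload self-describing, which is why the CLI and server consumers are updated in the same release. (Strictly, the declared `list[dict[str, int]]` is a little loose, since the `"keyword"` value is a `str`.) Consuming the new shape looks like this, with invented values:

```python
# Invented sample data in the new dict shape; not real API output.
trends = [
    {"keyword": "solar eclipse", "volume": 200000},
    {"keyword": "transfer news", "volume": 50000},
]
top = max(trends, key=lambda t: t["volume"])
print(f"{top['keyword']}: {top['volume']}")  # solar eclipse: 200000
```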
@@ -303,4 +311,4 @@ def save_article_to_json(article: newspaper.Article, filename: str = "") -> None
         filename += ".json"
     with open(filename, "w") as f:
         json.dump(article_data, f, indent=4)
-    print(f"Article saved to {filename}")
+    logging.debug(f"Article saved to {filename}")
--- a/google_news_trends_mcp/server.py
+++ b/google_news_trends_mcp/server.py
@@ -223,8 +223,6 @@ async def get_top_news(
         max_results=max_results,
         nlp=nlp,
     )
-    ctx = get_context()
-    await ctx.debug(f"Top Articles:\n{articles[0].to_json(False)}")
     return [ArticleOut(**a.to_json(False)) for a in articles]
 
 
@@ -248,13 +246,14 @@ async def get_trending_terms(
     trends = await news.get_trending_terms(
         geo=geo, full_data=full_data, max_results=max_results
     )
-    ctx = get_context()
-    await ctx.debug(f"Found {(trends)} trending terms")
     if not full_data:
-        # return [TrendingTermOut(keyword=tt[0], volume=tt[1]) for tt in trends]
-        return [TrendingTermOut(keyword=tt[0], volume=tt[1]) for tt in trends]
-    return [TrendingTermOut(**tt.__dict__) for tt in trends]
-
+        # Only return keyword and volume fields
+        return [
+            TrendingTermOut(keyword=tt["keyword"], volume=tt["volume"]) for tt in trends
+        ]
+    else:
+        # Assume each tt is a TrendingTerm object
+        return [TrendingTermOut(**tt.__dict__) for tt in trends]
 
 def main():
     mcp.run()
--- a/google_news_trends_mcp-0.1.4.dist-info/METADATA
+++ b/google_news_trends_mcp-0.1.6.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: google-news-trends-mcp
-Version: 0.1.4
+Version: 0.1.6
 Summary: An MCP server to access Google News and Google Trends.
 Author-email: Jesse Manek <jesse.manek@gmail.com>
 License-Expression: MIT
--- /dev/null
+++ b/google_news_trends_mcp-0.1.6.dist-info/RECORD
@@ -0,0 +1,11 @@
+google_news_trends_mcp/__init__.py,sha256=J9O5WNvC9cNDaxecveSUvzLGOXOYO-pCHbiGopfYoIc,76
+google_news_trends_mcp/__main__.py,sha256=ysiAk_xpnnW3lrLlzdIQQa71tuGBRT8WocbecBsY2Fs,87
+google_news_trends_mcp/cli.py,sha256=XJNnRVpDXX2MCb8dPfDcQJWYYA4CxTuxbhvpJGeVQgs,4133
+google_news_trends_mcp/news.py,sha256=FYz1guxLZThMmh_9uN3VcdHBjLHZF5brhk7Bw7QxeDo,11780
+google_news_trends_mcp/server.py,sha256=MdEWk9QVark4z00UlTIckdAM3hPW7eRQgZRZ2h8WUPk,9363
+google_news_trends_mcp-0.1.6.dist-info/licenses/LICENSE,sha256=5dsv2ZI5EZIer0a9MktVmILVrlp5vqH_0tPIe3bRLgE,1067
+google_news_trends_mcp-0.1.6.dist-info/METADATA,sha256=HewouWHDlGkCPzEM_Nq7_s2KE66FVvtLLdHYToz9WgE,4580
+google_news_trends_mcp-0.1.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+google_news_trends_mcp-0.1.6.dist-info/entry_points.txt,sha256=eVT3xd6YJQgsWAUBwhnffuwhXNF7yyt_uco6fjBy-1o,130
+google_news_trends_mcp-0.1.6.dist-info/top_level.txt,sha256=RFheDbzhNnEV_Y3iFNm7jhRhY1P1wQgfiYqVpXCTD_U,23
+google_news_trends_mcp-0.1.6.dist-info/RECORD,,
--- a/google_news_trends_mcp-0.1.4.dist-info/RECORD
+++ /dev/null
@@ -1,11 +0,0 @@
-google_news_trends_mcp/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-google_news_trends_mcp/__main__.py,sha256=ysiAk_xpnnW3lrLlzdIQQa71tuGBRT8WocbecBsY2Fs,87
-google_news_trends_mcp/cli.py,sha256=fi0qocr-nc3UbGKOR5GLrmfsEjhU_M6ZJ7UAyLoC8ds,4012
-google_news_trends_mcp/news.py,sha256=UF1r5vcew2QKGJH1K6wy5_xlmRBsKLyqrhObbNz3C8Y,11307
-google_news_trends_mcp/server.py,sha256=qwQ_9UKnOLybUGCmUH4sJWxKsmJHZCg7PKimFXgr58c,9468
-google_news_trends_mcp-0.1.4.dist-info/licenses/LICENSE,sha256=5dsv2ZI5EZIer0a9MktVmILVrlp5vqH_0tPIe3bRLgE,1067
-google_news_trends_mcp-0.1.4.dist-info/METADATA,sha256=yJ9SrQxh94LAK9Mvh5IQ2daSlFWpYH-ZdLdY1ZF3kOk,4580
-google_news_trends_mcp-0.1.4.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-google_news_trends_mcp-0.1.4.dist-info/entry_points.txt,sha256=eVT3xd6YJQgsWAUBwhnffuwhXNF7yyt_uco6fjBy-1o,130
-google_news_trends_mcp-0.1.4.dist-info/top_level.txt,sha256=RFheDbzhNnEV_Y3iFNm7jhRhY1P1wQgfiYqVpXCTD_U,23
-google_news_trends_mcp-0.1.4.dist-info/RECORD,,