google-news-trends-mcp 0.1.10__py3-none-any.whl → 0.2.1__py3-none-any.whl

This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
google_news_trends_mcp/__init__.py

@@ -1,2 +1,3 @@
 import logging
+
 logging.getLogger(__name__).addHandler(logging.NullHandler())
google_news_trends_mcp/cli.py

@@ -7,6 +7,7 @@ from google_news_trends_mcp.news import (
     get_trending_terms,
     get_top_news,
     save_article_to_json,
+    BrowserManager,
 )
 
 
@@ -27,9 +28,13 @@ def cli():
 )
 @click.option("--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles.")
 def keyword(keyword, period, max_results, no_nlp):
-    articles = asyncio.run(get_news_by_keyword(keyword, period=period, max_results=max_results, nlp=not no_nlp))
-    # asyncio.run(articles)  # Ensure the articles are fetched asynchronously
-    print_articles(articles)
+    @BrowserManager()
+    async def _keyword():
+        articles = await get_news_by_keyword(keyword, period=period, max_results=max_results, nlp=not no_nlp)
+        print_articles(articles)
+        print(f"Found {len(articles)} articles for keyword '{keyword}'.")
+
+    asyncio.run(_keyword())
 
 
 @cli.command(help=get_news_by_location.__doc__)
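The CLI commands now get the shared Playwright browser torn down via the decorator: BrowserManager subclasses contextlib.AsyncContextDecorator (Python 3.10+), so `@BrowserManager()` is equivalent to running the wrapped coroutine inside `async with BrowserManager():`. A minimal sketch of that pattern using only the standard library (class and function names here are illustrative, not from the package):

```python
import asyncio
from contextlib import AsyncContextDecorator

class Managed(AsyncContextDecorator):
    async def __aenter__(self):
        print("enter")     # setup hook (BrowserManager's version just returns self)
        return self

    async def __aexit__(self, *exc):
        print("exit")      # teardown hook (BrowserManager shuts the browser down here)
        return False       # don't swallow exceptions

@Managed()                 # the same object works as a decorator...
async def job():
    print("work")

async def main():
    await job()            # prints: enter, work, exit
    async with Managed():  # ...and as an async context manager
        print("work again")

asyncio.run(main())
```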
@@ -44,8 +49,13 @@ def keyword(keyword, period, max_results, no_nlp):
 )
 @click.option("--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles.")
 def location(location, period, max_results, no_nlp):
-    articles = asyncio.run(get_news_by_location(location, period=period, max_results=max_results, nlp=not no_nlp))
-    print_articles(articles)
+    @BrowserManager()
+    async def _location():
+        articles = await get_news_by_location(location, period=period, max_results=max_results, nlp=not no_nlp)
+        print_articles(articles)
+        print(f"Found {len(articles)} articles for location '{location}'.")
+
+    asyncio.run(_location())
 
 
 @cli.command(help=get_news_by_topic.__doc__)
@@ -60,8 +70,13 @@ def location(location, period, max_results, no_nlp):
 )
 @click.option("--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles.")
 def topic(topic, period, max_results, no_nlp):
-    articles = asyncio.run(get_news_by_topic(topic, period=period, max_results=max_results, nlp=not no_nlp))
-    print_articles(articles)
+    @BrowserManager()
+    async def _topic():
+        articles = await get_news_by_topic(topic, period=period, max_results=max_results, nlp=not no_nlp)
+        print_articles(articles)
+        print(f"Found {len(articles)} articles for topic '{topic}'.")
+
+    asyncio.run(_topic())
 
 
 @cli.command(help=get_trending_terms.__doc__)
@@ -75,16 +90,20 @@ def topic(topic, period, max_results, no_nlp):
     help="Maximum number of results to return.",
 )
 def trending(geo, full_data, max_results):
-    trending_terms = asyncio.run(get_trending_terms(geo=geo, full_data=full_data, max_results=max_results))
-    if trending_terms:
-        print("Trending terms:")
-        for term in trending_terms:
-            if isinstance(term, dict):
-                print(f"{term['keyword']:<40} - {term['volume']}")
-            else:
-                print(term)
-    else:
-        print("No trending terms found.")
+    # Browser not used for Google Trends
+    async def _trending():
+        trending_terms = await get_trending_terms(geo=geo, full_data=full_data, max_results=max_results)
+        if trending_terms:
+            print("Trending terms:")
+            for term in trending_terms:
+                if isinstance(term, dict):
+                    print(f"{term['keyword']:<40} - {term['volume']}")
+                else:
+                    print(term)
+        else:
+            print("No trending terms found.")
+
+    asyncio.run(_trending())
 
 
 @cli.command(help=get_top_news.__doc__)
@@ -98,9 +117,13 @@ def trending(geo, full_data, max_results):
 )
 @click.option("--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles.")
 def top(period, max_results, no_nlp):
-    articles = asyncio.run(get_top_news(max_results=max_results, period=period, nlp=not no_nlp))
-    print_articles(articles)
-    print(f"Found {len(articles)} top articles.")
+    @BrowserManager()
+    async def _top():
+        articles = await get_top_news(max_results=max_results, period=period, nlp=not no_nlp)
+        print_articles(articles)
+        print(f"Found {len(articles)} top articles.")
+
+    asyncio.run(_top())
 
 
 def print_articles(articles):
google_news_trends_mcp/news.py

@@ -16,8 +16,7 @@ import cloudscraper
 from playwright.async_api import async_playwright, Browser, Playwright
 from trendspy import Trends, TrendKeyword
 from typing import Optional, cast, overload, Literal, Awaitable
-import atexit
-from contextlib import asynccontextmanager
+from contextlib import asynccontextmanager, AsyncContextDecorator
 import logging
 from collections.abc import Callable
 
@@ -49,40 +48,60 @@ google_news = GNews(
     # exclude_websites=[],
 )
 
-playwright: Optional[Playwright] = None
-browser: Optional[Browser] = None
-
 ProgressCallback = Callable[[float, Optional[float]], Awaitable[None]]
 
 
-async def startup_browser():
-    global playwright, browser
-    playwright = await async_playwright().start()
-    browser = await playwright.chromium.launch(headless=True)
-
-
-@atexit.register
-def shutdown_browser():
-    if browser:
-        asyncio.run(browser.close())
-    if playwright:
-        asyncio.run(playwright.stop())
-
-
-async def get_browser() -> Browser:
-    if browser is None:
-        await startup_browser()
-    return cast(Browser, browser)
-
-
-@asynccontextmanager
-async def browser_context():
-    context = await (await get_browser()).new_context()
-    try:
-        yield context
-    finally:
-        logging.debug("Closing browser context...")
-        await context.close()
+class BrowserManager(AsyncContextDecorator):
+    playwright: Optional[Playwright] = None
+    browser: Optional[Browser] = None
+    _lock = asyncio.Lock()
+
+    @classmethod
+    async def _get_browser(cls) -> Browser:
+        if cls.browser is None:
+            async with cls._lock:
+                if cls.browser is None:
+                    logger.info("Starting browser...")
+                    try:
+                        cls.playwright = await async_playwright().start()
+                        cls.browser = await cls.playwright.chromium.launch(headless=True)
+                    except Exception as e:
+                        logger.critical("Browser startup failed", exc_info=e)
+                        raise SystemExit(1)
+        return cast(Browser, cls.browser)
+
+
+    @classmethod
+    async def _shutdown(cls):
+        logger.info("Shutting down browser...")
+        if cls.browser:
+            await cls.browser.close()
+            cls.browser = None
+        if cls.playwright:
+            await cls.playwright.stop()
+            cls.playwright = None
+
+    @classmethod
+    def browser_context(cls):
+        @asynccontextmanager
+        async def _browser_context_cm():
+            browser_inst = await cls._get_browser()
+            context = await browser_inst.new_context()
+            logger.debug("Created browser context...")
+            try:
+                yield context
+            finally:
+                logger.debug("Closing browser context...")
+                await context.close()
+
+        return _browser_context_cm()
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, *exc):
+        await self._shutdown()
+        return False
 
 
 async def download_article_with_playwright(url) -> newspaper.Article | None:
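The `_get_browser` classmethod above uses double-checked locking: the unlocked `is None` test keeps the common path cheap, while the re-check inside `asyncio.Lock` guarantees that only one coroutine launches Playwright even when several tasks race at startup. A stripped-down sketch of the idiom (names hypothetical, standard library only):

```python
import asyncio
from typing import Optional

class LazySingleton:
    _instance: Optional[object] = None
    _lock = asyncio.Lock()

    @classmethod
    async def get(cls) -> object:
        if cls._instance is None:             # fast path: no lock once initialized
            async with cls._lock:             # serialize would-be initializers
                if cls._instance is None:     # re-check: a peer may have won the race
                    await asyncio.sleep(0.1)  # stand-in for expensive startup
                    cls._instance = object()
        return cls._instance

async def main():
    # Five concurrent callers share one instance; startup runs exactly once.
    results = await asyncio.gather(*(LazySingleton.get() for _ in range(5)))
    assert len({id(r) for r in results}) == 1

asyncio.run(main())
```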
@@ -90,7 +109,7 @@ async def download_article_with_playwright(url) -> newspaper.Article | None:
     Download an article using Playwright to handle complex websites (async).
     """
     try:
-        async with browser_context() as context:
+        async with BrowserManager.browser_context() as context:
             page = await context.new_page()
             await page.goto(url, wait_until="domcontentloaded")
             await asyncio.sleep(2)  # Wait for the page to load completely
@@ -98,7 +117,7 @@ async def download_article_with_playwright(url) -> newspaper.Article | None:
             article = newspaper.article(url, input_html=content)
             return article
     except Exception as e:
-        logging.warning(f"Error downloading article with Playwright from {url}\n {e.args}")
+        logger.warning(f"Error downloading article with Playwright from {url}\n {e.args}")
         return None
 
 
@@ -107,18 +126,18 @@ def download_article_with_scraper(url) -> newspaper.Article | None:
     try:
         article = newspaper.article(url)
     except Exception as e:
-        logging.debug(f"Error downloading article with newspaper from {url}\n {e.args}")
+        logger.debug(f"Error downloading article with newspaper from {url}\n {e.args}")
         try:
             # Retry with cloudscraper
             response = scraper.get(url)
             if response.status_code < 400:
                 article = newspaper.article(url, input_html=response.text)
             else:
-                logging.debug(
+                logger.debug(
                     f"Failed to download article with cloudscraper from {url}, status code: {response.status_code}"
                 )
         except Exception as e:
-            logging.debug(f"Error downloading article with cloudscraper from {url}\n {e.args}")
+            logger.debug(f"Error downloading article with cloudscraper from {url}\n {e.args}")
     return article
 
 
@@ -129,10 +148,10 @@ def decode_url(url: str) -> str:
         if decoded_url.get("status"):
             url = decoded_url["decoded_url"]
         else:
-            logging.debug("Failed to decode Google News RSS link:")
+            logger.debug("Failed to decode Google News RSS link:")
             return ""
     except Exception as err:
-        logging.warning(f"Error while decoding url {url}\n {err.args}")
+        logger.warning(f"Error while decoding url {url}\n {err.args}")
     return url
 
 
@@ -144,6 +163,7 @@ async def download_article(url: str) -> newspaper.Article | None:
         return None
     article = download_article_with_scraper(url)
     if article is None or not article.text:
+        logger.debug("Attempting to download article with playwright")
         article = await download_article_with_playwright(url)
     return article
 
@@ -162,7 +182,7 @@ async def process_gnews_articles(
     for idx, gnews_article in enumerate(gnews_articles):
        article = await download_article(gnews_article["url"])
        if article is None or not article.text:
-            logging.debug(f"Failed to download article from {gnews_article['url']}:\n{article}")
+            logger.debug(f"Failed to download article from {gnews_article['url']}:\n{article}")
            continue
        article.parse()
        if nlp:
@@ -187,7 +207,7 @@ async def get_news_by_keyword(
     google_news.max_results = max_results
     gnews_articles = google_news.get_news(keyword)
     if not gnews_articles:
-        logging.debug(f"No articles found for keyword '{keyword}' in the last {period} days.")
+        logger.debug(f"No articles found for keyword '{keyword}' in the last {period} days.")
         return []
     return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)
 
@@ -205,7 +225,7 @@ async def get_top_news(
     google_news.max_results = max_results
     gnews_articles = google_news.get_top_news()
     if not gnews_articles:
-        logging.debug("No top news articles found.")
+        logger.debug("No top news articles found.")
         return []
     return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)
 
@@ -217,13 +237,12 @@ async def get_news_by_location(
     nlp: bool = True,
     report_progress: Optional[ProgressCallback] = None,
 ) -> list[newspaper.Article]:
-    """Find articles by location using Google News.
-    """
+    """Find articles by location using Google News."""
     google_news.period = f"{period}d"
     google_news.max_results = max_results
     gnews_articles = google_news.get_news_by_location(location)
     if not gnews_articles:
-        logging.debug(f"No articles found for location '{location}' in the last {period} days.")
+        logger.debug(f"No articles found for location '{location}' in the last {period} days.")
         return []
     return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)
 
@@ -250,7 +269,7 @@ async def get_news_by_topic(
     google_news.max_results = max_results
     gnews_articles = google_news.get_news_by_topic(topic)
     if not gnews_articles:
-        logging.debug(f"No articles found for topic '{topic}' in the last {period} days.")
+        logger.debug(f"No articles found for topic '{topic}' in the last {period} days.")
         return []
     return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)
 
@@ -282,7 +301,7 @@ async def get_trending_terms(
             return [{"keyword": trend.keyword, "volume": trend.volume} for trend in trends]
         return trends
     except Exception as e:
-        logging.warning(f"Error fetching trending terms: {e}")
+        logger.warning(f"Error fetching trending terms: {e}")
         return []
 
 
@@ -330,4 +349,4 @@ def save_article_to_json(article: newspaper.Article, filename: str = "") -> None
         filename += ".json"
     with open(filename, "w") as f:
         json.dump(article_data, f, indent=4)
-    logging.debug(f"Article saved to {filename}")
+    logger.debug(f"Article saved to {filename}")
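One assumption behind the news.py hunks: the wholesale switch from `logging.debug(...)`/`logging.warning(...)` to `logger.*` implies a module-level logger defined somewhere outside the hunks shown, conventionally:

```python
import logging

# Presumed module-level logger backing the logger.* calls above (not visible in this diff).
logger = logging.getLogger(__name__)
```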
google_news_trends_mcp/server.py

@@ -7,8 +7,10 @@ from fastmcp.server.middleware.error_handling import ErrorHandlingMiddleware
 from mcp.types import TextContent
 from pydantic import BaseModel, Field, model_serializer
 from google_news_trends_mcp import news
+from google_news_trends_mcp.news import BrowserManager
 from newspaper import settings as newspaper_settings
 from newspaper.article import Article
+from contextlib import asynccontextmanager
 
 
 class BaseModelClean(BaseModel):
@@ -82,9 +84,16 @@ class TrendingTermOut(BaseModelClean):
     normalized_keyword: Annotated[Optional[str], Field(description="Normalized form of the keyword.")] = None
 
 
+@asynccontextmanager
+async def lifespan(app: FastMCP):
+    async with BrowserManager():
+        yield
+
+
 mcp = FastMCP(
     name="google-news-trends",
     instructions="This server provides tools to search, analyze, and summarize Google News articles and Google Trends",
+    lifespan=lifespan,
     on_duplicate_tools="replace",
 )
 
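Registering the lifespan ties browser teardown to server shutdown: the server enters the context manager before serving and exits it when it stops, at which point `BrowserManager.__aexit__` closes Playwright. A generic sketch of the lifespan shape (the `app` parameter and print calls are illustrative only):

```python
from contextlib import asynccontextmanager

@asynccontextmanager
async def lifespan(app):
    print("startup")       # runs once before the server accepts requests
    try:
        yield              # server handles requests while suspended here
    finally:
        print("shutdown")  # runs once when the server stops
```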
@@ -145,6 +154,19 @@ async def summarize_article(article: Article, ctx: Context) -> None:
         article.summary = "No summary available."
 
 
+async def summarize_articles(articles: list[Article], ctx: Context) -> None:
+    total_articles = len(articles)
+    try:
+        for idx, article in enumerate(articles):
+            await summarize_article(article, ctx)
+            await ctx.report_progress(idx, total_articles)
+    except Exception as err:
+        await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
+        for idx, article in enumerate(articles):
+            article.nlp()
+            await ctx.report_progress(idx, total_articles)
+
+
 @mcp.tool(
     description=news.get_news_by_keyword.__doc__,
     tags={"news", "articles", "keyword"},
@@ -176,16 +198,7 @@ async def get_news_by_keyword(
         report_progress=ctx.report_progress,
     )
     if summarize:
-        total_articles = len(articles)
-        try:
-            for idx, article in enumerate(articles):
-                await summarize_article(article, ctx)
-                await ctx.report_progress(idx, total_articles)
-        except Exception as err:
-            await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
-            for idx, article in enumerate(articles):
-                article.nlp()
-                await ctx.report_progress(idx, total_articles)
+        await summarize_articles(articles, ctx)
     await ctx.report_progress(progress=len(articles), total=len(articles))
     return [ArticleOut(**a.to_json(False)) for a in articles]
 
@@ -221,16 +234,7 @@ async def get_news_by_location(
         report_progress=ctx.report_progress,
     )
     if summarize:
-        total_articles = len(articles)
-        try:
-            for idx, article in enumerate(articles):
-                await summarize_article(article, ctx)
-                await ctx.report_progress(idx, total_articles)
-        except Exception as err:
-            await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
-            for idx, article in enumerate(articles):
-                article.nlp()
-                await ctx.report_progress(idx, total_articles)
+        await summarize_articles(articles, ctx)
     await ctx.report_progress(progress=len(articles), total=len(articles))
     return [ArticleOut(**a.to_json(False)) for a in articles]
 
@@ -263,17 +267,7 @@ async def get_news_by_topic(
         report_progress=ctx.report_progress,
     )
     if summarize:
-        total_articles = len(articles)
-        try:
-            for idx, article in enumerate(articles):
-                await summarize_article(article, ctx)
-                await ctx.report_progress(idx, total_articles)
-        except Exception as err:
-            await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
-            for idx, article in enumerate(articles):
-                article.nlp()
-                await ctx.report_progress(idx, total_articles)
-
+        await summarize_articles(articles, ctx)
     await ctx.report_progress(progress=len(articles), total=len(articles))
     return [ArticleOut(**a.to_json(False)) for a in articles]
 
@@ -304,17 +298,7 @@ async def get_top_news(
         report_progress=ctx.report_progress,
     )
     if summarize:
-        total_articles = len(articles)
-        try:
-            for idx, article in enumerate(articles):
-                await summarize_article(article, ctx)
-                await ctx.report_progress(idx, total_articles)
-        except Exception as err:
-            await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
-            for idx, article in enumerate(articles):
-                article.nlp()
-                await ctx.report_progress(idx, total_articles)
-
+        await summarize_articles(articles, ctx)
     await ctx.report_progress(progress=len(articles), total=len(articles))
     return [ArticleOut(**a.to_json(False)) for a in articles]
 
@@ -328,11 +312,9 @@ async def get_trending_terms(
     ] = False,
     max_results: Annotated[int, Field(description="Maximum number of results to return.", ge=1)] = 100,
 ) -> list[TrendingTermOut]:
-
     if not full_data:
         trends = await news.get_trending_terms(geo=geo, full_data=False, max_results=max_results)
         return [TrendingTermOut(keyword=str(tt["keyword"]), volume=tt["volume"]) for tt in trends]
-
     trends = await news.get_trending_terms(geo=geo, full_data=True, max_results=max_results)
     return [TrendingTermOut(**tt.__dict__) for tt in trends]
 
google_news_trends_mcp-0.2.1.dist-info/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: google-news-trends-mcp
-Version: 0.1.10
+Version: 0.2.1
 Summary: An MCP server to access Google News and Google Trends.
 Author-email: Jesse Manek <jesse.manek@gmail.com>
 License-Expression: MIT
@@ -22,14 +22,12 @@ Requires-Dist: newspaper4k>=0.9.3.1
 Requires-Dist: nltk>=3.9.1
 Requires-Dist: playwright>=1.53.0
 Requires-Dist: pydantic>=2.11.7
-Requires-Dist: pytest>=8.4.1
-Requires-Dist: pytest-asyncio>=1.0.0
 Requires-Dist: trendspy>=0.1.6
 Dynamic: license-file
 
 # Google News Trends MCP
 
-An MCP server to access Google News and Google Trends. Does not rely on any paid APIs.
+An MCP server to access Google News and Google Trends.
 
 ## Features
 
google_news_trends_mcp-0.2.1.dist-info/RECORD (added)

@@ -0,0 +1,11 @@
+google_news_trends_mcp/__init__.py,sha256=nDWNd6_TSf4vDQuHVBoAf4QfZCB3ZUFQ0M7XvifNJ-g,78
+google_news_trends_mcp/__main__.py,sha256=ysiAk_xpnnW3lrLlzdIQQa71tuGBRT8WocbecBsY2Fs,87
+google_news_trends_mcp/cli.py,sha256=3Z916898HXTigmQYEfvb7ybfbuUE7bjMC6yjT5-l6u0,4558
+google_news_trends_mcp/news.py,sha256=EhTIqRmFOQKAhyV8rLySaPoILX3uyKbrzO6jxo89FhA,12528
+google_news_trends_mcp/server.py,sha256=ZFZvCxSVLma7QlanFsXyu6gpCHZSS0y5YfnZl7HnXnc,13780
+google_news_trends_mcp-0.2.1.dist-info/licenses/LICENSE,sha256=5dsv2ZI5EZIer0a9MktVmILVrlp5vqH_0tPIe3bRLgE,1067
+google_news_trends_mcp-0.2.1.dist-info/METADATA,sha256=AdnEqmflIhFNUV052z9VHIOum2x0_TzI_e3hK84vdfM,4419
+google_news_trends_mcp-0.2.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+google_news_trends_mcp-0.2.1.dist-info/entry_points.txt,sha256=eVT3xd6YJQgsWAUBwhnffuwhXNF7yyt_uco6fjBy-1o,130
+google_news_trends_mcp-0.2.1.dist-info/top_level.txt,sha256=RFheDbzhNnEV_Y3iFNm7jhRhY1P1wQgfiYqVpXCTD_U,23
+google_news_trends_mcp-0.2.1.dist-info/RECORD,,
google_news_trends_mcp-0.1.10.dist-info/RECORD (removed)

@@ -1,11 +0,0 @@
-google_news_trends_mcp/__init__.py,sha256=NkmudPEEuKk8Geah4EtzeEHQ-ChqR66lZEO5VrMwXNo,77
-google_news_trends_mcp/__main__.py,sha256=ysiAk_xpnnW3lrLlzdIQQa71tuGBRT8WocbecBsY2Fs,87
-google_news_trends_mcp/cli.py,sha256=-Cith02x6-9o91rXpgMM0lrhArPDMB9d3h8AAE1rimw,3959
-google_news_trends_mcp/news.py,sha256=wmFzikDEx_HNVO0vm84gnkgV-LZBOAuW7mNr2uhurEE,11524
-google_news_trends_mcp/server.py,sha256=h8GP_XUPqiPw4vFu1jy9MFv0i384rBARePvm15YOZJo,14807
-google_news_trends_mcp-0.1.10.dist-info/licenses/LICENSE,sha256=5dsv2ZI5EZIer0a9MktVmILVrlp5vqH_0tPIe3bRLgE,1067
-google_news_trends_mcp-0.1.10.dist-info/METADATA,sha256=rbYqd15smnZA0sgOU4Fk6iqvsr8h638U69Ki4VRMddI,4521
-google_news_trends_mcp-0.1.10.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-google_news_trends_mcp-0.1.10.dist-info/entry_points.txt,sha256=eVT3xd6YJQgsWAUBwhnffuwhXNF7yyt_uco6fjBy-1o,130
-google_news_trends_mcp-0.1.10.dist-info/top_level.txt,sha256=RFheDbzhNnEV_Y3iFNm7jhRhY1P1wQgfiYqVpXCTD_U,23
-google_news_trends_mcp-0.1.10.dist-info/RECORD,,
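For reference, each RECORD line has the form `path,sha256=<digest>,<size>`, where the digest is the urlsafe-base64 SHA-256 of the file with padding stripped. A sketch of verifying one 0.2.1 entry against an unpacked wheel (run from the unpack root; path, digest, and size copied from the listing above):

```python
import base64
import hashlib

# Check one RECORD entry: recompute the digest and size of the installed file.
path = "google_news_trends_mcp/cli.py"
data = open(path, "rb").read()
digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
assert len(data) == 4558
assert digest == "3Z916898HXTigmQYEfvb7ybfbuUE7bjMC6yjT5-l6u0"
```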