google-news-trends-mcp 0.2.0__py3-none-any.whl → 0.2.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- google_news_trends_mcp/cli.py +2 -9
- google_news_trends_mcp/news.py +30 -32
- google_news_trends_mcp/server.py +31 -61
- {google_news_trends_mcp-0.2.0.dist-info → google_news_trends_mcp-0.2.2.dist-info}/METADATA +4 -4
- google_news_trends_mcp-0.2.2.dist-info/RECORD +11 -0
- google_news_trends_mcp-0.2.0.dist-info/RECORD +0 -11
- {google_news_trends_mcp-0.2.0.dist-info → google_news_trends_mcp-0.2.2.dist-info}/WHEEL +0 -0
- {google_news_trends_mcp-0.2.0.dist-info → google_news_trends_mcp-0.2.2.dist-info}/entry_points.txt +0 -0
- {google_news_trends_mcp-0.2.0.dist-info → google_news_trends_mcp-0.2.2.dist-info}/licenses/LICENSE +0 -0
- {google_news_trends_mcp-0.2.0.dist-info → google_news_trends_mcp-0.2.2.dist-info}/top_level.txt +0 -0
google_news_trends_mcp/cli.py
CHANGED
@@ -82,17 +82,10 @@ def topic(topic, period, max_results, no_nlp):
 @cli.command(help=get_trending_terms.__doc__)
 @click.option("--geo", type=str, default="US", help="Country code, e.g. 'US', 'GB', 'IN', etc.")
 @click.option("--full-data", is_flag=True, default=False, help="Return full data for each trend.")
-@click.option(
-    "--max-results",
-    "max_results",
-    type=int,
-    default=100,
-    help="Maximum number of results to return.",
-)
-def trending(geo, full_data, max_results):
+def trending(geo, full_data):
     # Browser not used for Google Trends
     async def _trending():
-        trending_terms = await get_trending_terms(geo=geo, full_data=full_data, max_results=max_results)
+        trending_terms = await get_trending_terms(geo=geo, full_data=full_data)
         if trending_terms:
             print("Trending terms:")
             for term in trending_terms:
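Note: the `--max-results` option is gone from the CLI, and `get_trending_terms()` no longer accepts `max_results` (see the news.py hunks below). A caller that still wants to cap output can slice the result — a minimal sketch against the new signature (the helper name `show_top_trends` is illustrative):

```python
import asyncio

from google_news_trends_mcp.news import get_trending_terms


async def show_top_trends(geo: str = "US", limit: int = 10) -> None:
    # max_results is no longer a parameter; trim client-side instead.
    terms = await get_trending_terms(geo=geo, full_data=False)
    for term in terms[:limit]:
        print(term["keyword"], term["volume"])


asyncio.run(show_top_trends())
```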
google_news_trends_mcp/news.py
CHANGED
@@ -14,7 +14,7 @@ import newspaper # newspaper4k
 from googlenewsdecoder import gnewsdecoder
 import cloudscraper
 from playwright.async_api import async_playwright, Browser, Playwright
-from trendspy import Trends, TrendKeyword
+from trendspy import Trends, TrendKeywordLite
 from typing import Optional, cast, overload, Literal, Awaitable
 from contextlib import asynccontextmanager, AsyncContextDecorator
 import logging
@@ -48,9 +48,6 @@ google_news = GNews(
     # exclude_websites=[],
 )
 
-playwright: Optional[Playwright] = None
-browser: Optional[Browser] = None
-
 ProgressCallback = Callable[[float, Optional[float]], Awaitable[None]]
 
 
@@ -64,14 +61,15 @@ class BrowserManager(AsyncContextDecorator):
         if cls.browser is None:
             async with cls._lock:
                 if cls.browser is None:
-                    await cls._startup()
+                    logger.info("Starting browser...")
+                    try:
+                        cls.playwright = await async_playwright().start()
+                        cls.browser = await cls.playwright.chromium.launch(headless=True)
+                    except Exception as e:
+                        logger.critical("Browser startup failed", exc_info=e)
+                        raise SystemExit(1)
         return cast(Browser, cls.browser)
 
-    @classmethod
-    async def _startup(cls):
-        logger.info("Starting browser...")
-        cls.playwright = await async_playwright().start()
-        cls.browser = await cls.playwright.chromium.launch(headless=True)
 
     @classmethod
     async def _shutdown(cls):
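The inlined startup keeps the double-checked locking shape: the unlocked `cls.browser is None` check keeps the hot path lock-free, and the re-check inside `cls._lock` ensures only one coroutine ever launches Playwright. A self-contained sketch of the same pattern (names here are illustrative, not from the package):

```python
import asyncio
from typing import Optional


class LazyResource:
    _instance: Optional[str] = None
    _lock: asyncio.Lock = asyncio.Lock()

    @classmethod
    async def get(cls) -> str:
        if cls._instance is None:  # fast path: no lock once initialized
            async with cls._lock:
                if cls._instance is None:  # re-check: another task may have won the race
                    await asyncio.sleep(0.1)  # stand-in for expensive startup
                    cls._instance = "ready"
        return cls._instance


async def main() -> None:
    # Five concurrent callers; the resource is initialized exactly once.
    print(await asyncio.gather(*[LazyResource.get() for _ in range(5)]))


asyncio.run(main())
```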
@@ -89,11 +87,11 @@ class BrowserManager(AsyncContextDecorator):
         async def _browser_context_cm():
             browser_inst = await cls._get_browser()
             context = await browser_inst.new_context()
-            print("Created browser context...")
+            logger.debug("Created browser context...")
             try:
                 yield context
             finally:
-                print("Closing browser context...")
+                logger.debug("Closing browser context...")
                 await context.close()
 
         return _browser_context_cm()
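The hunks in this file route diagnostics through a module-level logger. Its definition is unchanged context that the diff does not show, so the conventional setup is assumed here:

```python
import logging

# Assumed module-level logger (its definition sits outside the changed hunks).
logger = logging.getLogger(__name__)

# Consumer-side configuration to surface the package's debug output.
logging.basicConfig(level=logging.DEBUG, format="%(name)s %(levelname)s: %(message)s")
logger.debug("logging configured")
```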
@@ -119,7 +117,7 @@ async def download_article_with_playwright(url) -> newspaper.Article | None:
         article = newspaper.article(url, input_html=content)
         return article
     except Exception as e:
-        print(f"Error downloading article with Playwright from {url}\n {e.args}")
+        logger.warning(f"Error downloading article with Playwright from {url}\n {e.args}")
         return None
 
 
@@ -128,18 +126,18 @@ def download_article_with_scraper(url) -> newspaper.Article | None:
     try:
         article = newspaper.article(url)
     except Exception as e:
-        print(f"Error downloading article with newspaper from {url}\n {e.args}")
+        logger.debug(f"Error downloading article with newspaper from {url}\n {e.args}")
         try:
             # Retry with cloudscraper
             response = scraper.get(url)
             if response.status_code < 400:
                 article = newspaper.article(url, input_html=response.text)
             else:
-                print(
+                logger.debug(
                     f"Failed to download article with cloudscraper from {url}, status code: {response.status_code}"
                 )
         except Exception as e:
-            print(f"Error downloading article with cloudscraper from {url}\n {e.args}")
+            logger.debug(f"Error downloading article with cloudscraper from {url}\n {e.args}")
     return article
 
 
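The unchanged context above shows the download fallback chain this module uses: try newspaper4k's own fetcher first, then retry through cloudscraper and feed the raw HTML back to newspaper. A condensed sketch of that chain (simplified, not the package's exact code):

```python
import cloudscraper
import newspaper  # newspaper4k

scraper = cloudscraper.create_scraper()


def fetch_article(url: str) -> newspaper.Article | None:
    try:
        return newspaper.article(url)  # direct fetch first
    except Exception:
        response = scraper.get(url)  # retry via cloudscraper
        if response.status_code < 400:
            return newspaper.article(url, input_html=response.text)
        return None
```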
@@ -150,10 +148,10 @@ def decode_url(url: str) -> str:
         if decoded_url.get("status"):
             url = decoded_url["decoded_url"]
         else:
-            print("Failed to decode Google News RSS link:")
+            logger.debug("Failed to decode Google News RSS link:")
             return ""
     except Exception as err:
-        print(f"Error while decoding url {url}\n {err.args}")
+        logger.warning(f"Error while decoding url {url}\n {err.args}")
     return url
 
 
@@ -184,7 +182,7 @@ async def process_gnews_articles(
     for idx, gnews_article in enumerate(gnews_articles):
         article = await download_article(gnews_article["url"])
         if article is None or not article.text:
-            print(f"Failed to download article from {gnews_article['url']}:\n{article}")
+            logger.debug(f"Failed to download article from {gnews_article['url']}:\n{article}")
             continue
         article.parse()
         if nlp:
@@ -209,7 +207,7 @@ async def get_news_by_keyword(
     google_news.max_results = max_results
     gnews_articles = google_news.get_news(keyword)
     if not gnews_articles:
-        print(f"No articles found for keyword '{keyword}' in the last {period} days.")
+        logger.debug(f"No articles found for keyword '{keyword}' in the last {period} days.")
         return []
     return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)
 
@@ -227,7 +225,7 @@ async def get_top_news(
     google_news.max_results = max_results
     gnews_articles = google_news.get_top_news()
     if not gnews_articles:
-        print("No top news articles found.")
+        logger.debug("No top news articles found.")
         return []
     return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)
 
@@ -244,7 +242,7 @@ async def get_news_by_location(
     google_news.max_results = max_results
     gnews_articles = google_news.get_news_by_location(location)
     if not gnews_articles:
-        print(f"No articles found for location '{location}' in the last {period} days.")
+        logger.debug(f"No articles found for location '{location}' in the last {period} days.")
         return []
     return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)
 
@@ -271,39 +269,39 @@ async def get_news_by_topic(
     google_news.max_results = max_results
     gnews_articles = google_news.get_news_by_topic(topic)
     if not gnews_articles:
-        print(f"No articles found for topic '{topic}' in the last {period} days.")
+        logger.debug(f"No articles found for topic '{topic}' in the last {period} days.")
         return []
     return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)
 
 
 @overload
 async def get_trending_terms(
-    geo: str = "US", full_data: Literal[False] = False, max_results: int = 100
+    geo: str = "US", full_data: Literal[False] = False
 ) -> list[dict[str, int]]:
     pass
 
 
 @overload
 async def get_trending_terms(
-    geo: str = "US", full_data: Literal[True] = True, max_results: int = 100
-) -> list[TrendKeyword]:
+    geo: str = "US", full_data: Literal[True] = True
+) -> list[TrendKeywordLite]:
     pass
 
 
 async def get_trending_terms(
-    geo: str = "US", full_data: bool = False, max_results: int = 100
-) -> list[dict[str, int]] | list[TrendKeyword]:
+    geo: str = "US", full_data: bool = False
+) -> list[dict[str, int]] | list[TrendKeywordLite]:
     """
     Returns google trends for a specific geo location.
     """
     try:
-        trends = list(tr.trending_now(geo=geo))
-        trends = trends[:max_results]
+        trends = cast(list[TrendKeywordLite], tr.trending_now_by_rss(geo=geo))
+        trends = sorted(trends, key=lambda tt: int(tt.volume[:-1]), reverse=True)
         if not full_data:
             return [{"keyword": trend.keyword, "volume": trend.volume} for trend in trends]
         return trends
     except Exception as e:
-        print(f"Error fetching trending terms: {e}")
+        logger.warning(f"Error fetching trending terms: {e}")
         return []
 
 
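The `@overload` pair above uses `Literal` flag values so type checkers can narrow the return type from the `full_data` argument alone, while a single runtime implementation serves both. A minimal self-contained version of the pattern (illustrative names):

```python
from typing import Literal, overload


@overload
def get_items(full: Literal[False] = False) -> list[str]: ...
@overload
def get_items(full: Literal[True]) -> list[dict]: ...
def get_items(full: bool = False) -> list[str] | list[dict]:
    # One runtime body; the overloads above only narrow the static return type.
    data = [{"name": "a"}, {"name": "b"}]
    return data if full else [d["name"] for d in data]


full_rows = get_items(full=True)  # checkers see list[dict]
names = get_items()               # checkers see list[str]
print(names, full_rows)
```

The new sort key, `int(tt.volume[:-1])`, assumes the RSS feed reports volume as a string with a single trailing marker (e.g. "500+" → 500); a differently formatted value would raise ValueError and land in the except branch, returning an empty list.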
@@ -351,4 +349,4 @@ def save_article_to_json(article: newspaper.Article, filename: str = "") -> None:
     filename += ".json"
     with open(filename, "w") as f:
         json.dump(article_data, f, indent=4)
-    print(f"Article saved to {filename}")
+    logger.debug(f"Article saved to {filename}")
google_news_trends_mcp/server.py
CHANGED
@@ -63,25 +63,16 @@ class TrendingTermArticleOut(BaseModelClean):
 
 class TrendingTermOut(BaseModelClean):
     keyword: Annotated[str, Field(description="Trending keyword.")]
-    volume: Annotated[Optional[int], Field(description="Search volume.")] = None
-    geo: Annotated[Optional[str], Field(description="Geographic location code.")] = None
-    started_timestamp: Annotated[
-        Optional[list],
-        Field(description="When the trend started (year, month, day, hour, minute, second)."),
-    ] = None
-    ended_timestamp: Annotated[
-        Optional[list],
-        Field(description="When the trend ended (year, month, day, hour, minute, second)."),
-    ] = None
-    volume_growth_pct: Annotated[Optional[float], Field(description="Percentage growth in search volume.")] = None
+    volume: Annotated[Optional[str], Field(description="Search volume.")] = None
     trend_keywords: Annotated[Optional[list[str]], Field(description="Related keywords.")] = None
-    topics: Annotated[Optional[list], Field(description="Related topics.")] = None
+    link: Annotated[Optional[str], Field(description="URL to more information.")] = None
+    started: Annotated[Optional[int], Field(description="Unix timestamp when the trend started.")] = None
+    picture: Annotated[Optional[str], Field(description="URL to related image.")] = None
+    picture_source: Annotated[Optional[str], Field(description="Source of the picture.")] = None
     news: Annotated[
         Optional[list[TrendingTermArticleOut]],
         Field(description="Related news articles."),
     ] = None
-    news_tokens: Annotated[Optional[list], Field(description="Associated news tokens.")] = None
-    normalized_keyword: Annotated[Optional[str], Field(description="Normalized form of the keyword.")] = None
 
 
 @asynccontextmanager
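The rebuilt model mirrors the fields of trendspy's RSS-based trend objects: volume becomes a string (e.g. "500+"), and link, started, picture, and picture_source replace the richer realtime-API fields. Every field follows the same `Annotated[Optional[...], Field(...)]` pattern; a minimal stand-alone version (plain pydantic `BaseModel` here, since `BaseModelClean` itself is outside the diff):

```python
from typing import Annotated, Optional

from pydantic import BaseModel, Field


class TrendOut(BaseModel):  # stand-in for BaseModelClean, which this diff does not show
    keyword: Annotated[str, Field(description="Trending keyword.")]
    volume: Annotated[Optional[str], Field(description="Search volume.")] = None
    link: Annotated[Optional[str], Field(description="URL to more information.")] = None


trend = TrendOut(keyword="solar eclipse", volume="500+")
print(trend.model_dump(exclude_none=True))
# {'keyword': 'solar eclipse', 'volume': '500+'}
```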
@@ -154,6 +145,19 @@ async def summarize_article(article: Article, ctx: Context) -> None:
         article.summary = "No summary available."
 
 
+async def summarize_articles(articles: list[Article], ctx: Context) -> None:
+    total_articles = len(articles)
+    try:
+        for idx, article in enumerate(articles):
+            await summarize_article(article, ctx)
+            await ctx.report_progress(idx, total_articles)
+    except Exception as err:
+        await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
+        for idx, article in enumerate(articles):
+            article.nlp()
+            await ctx.report_progress(idx, total_articles)
+
+
 @mcp.tool(
     description=news.get_news_by_keyword.__doc__,
     tags={"news", "articles", "keyword"},
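`summarize_articles` hoists a block that was duplicated verbatim in four tools (the next four hunks shrink each call site to one line). Its failure handling is batch-level: if LLM sampling raises partway through, the entire batch is re-run with newspaper's local `article.nlp()`. A stand-alone sketch of that try-the-batch-then-fall-back shape (names are illustrative):

```python
import asyncio


async def llm_summary(text: str) -> str:
    raise RuntimeError("sampling unavailable")  # simulate an LLM failure


def local_summary(text: str) -> str:
    return text[:40]  # crude local stand-in


async def summarize_batch(items: list[str]) -> list[str]:
    # Try the preferred path for the whole batch; on any error,
    # reprocess the entire batch with the local fallback.
    try:
        return [await llm_summary(item) for item in items]
    except Exception:
        return [local_summary(item) for item in items]


print(asyncio.run(summarize_batch(["a long article body", "another article"])))
```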
@@ -185,16 +189,7 @@ async def get_news_by_keyword(
         report_progress=ctx.report_progress,
     )
     if summarize:
-        total_articles = len(articles)
-        try:
-            for idx, article in enumerate(articles):
-                await summarize_article(article, ctx)
-                await ctx.report_progress(idx, total_articles)
-        except Exception as err:
-            await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
-            for idx, article in enumerate(articles):
-                article.nlp()
-                await ctx.report_progress(idx, total_articles)
+        await summarize_articles(articles, ctx)
     await ctx.report_progress(progress=len(articles), total=len(articles))
     return [ArticleOut(**a.to_json(False)) for a in articles]
 
@@ -230,16 +225,7 @@ async def get_news_by_location(
         report_progress=ctx.report_progress,
     )
     if summarize:
-        total_articles = len(articles)
-        try:
-            for idx, article in enumerate(articles):
-                await summarize_article(article, ctx)
-                await ctx.report_progress(idx, total_articles)
-        except Exception as err:
-            await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
-            for idx, article in enumerate(articles):
-                article.nlp()
-                await ctx.report_progress(idx, total_articles)
+        await summarize_articles(articles, ctx)
     await ctx.report_progress(progress=len(articles), total=len(articles))
     return [ArticleOut(**a.to_json(False)) for a in articles]
 
@@ -272,17 +258,7 @@ async def get_news_by_topic(
         report_progress=ctx.report_progress,
     )
     if summarize:
-        total_articles = len(articles)
-        try:
-            for idx, article in enumerate(articles):
-                await summarize_article(article, ctx)
-                await ctx.report_progress(idx, total_articles)
-        except Exception as err:
-            await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
-            for idx, article in enumerate(articles):
-                article.nlp()
-                await ctx.report_progress(idx, total_articles)
-
+        await summarize_articles(articles, ctx)
     await ctx.report_progress(progress=len(articles), total=len(articles))
     return [ArticleOut(**a.to_json(False)) for a in articles]
 
@@ -313,17 +289,7 @@ async def get_top_news(
         report_progress=ctx.report_progress,
     )
     if summarize:
-        total_articles = len(articles)
-        try:
-            for idx, article in enumerate(articles):
-                await summarize_article(article, ctx)
-                await ctx.report_progress(idx, total_articles)
-        except Exception as err:
-            await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
-            for idx, article in enumerate(articles):
-                article.nlp()
-                await ctx.report_progress(idx, total_articles)
-
+        await summarize_articles(articles, ctx)
     await ctx.report_progress(progress=len(articles), total=len(articles))
     return [ArticleOut(**a.to_json(False)) for a in articles]
 
@@ -335,14 +301,18 @@ async def get_trending_terms(
         bool,
         Field(description="Return full data for each trend. Should be False for most use cases."),
     ] = False,
-    max_results: Annotated[int, Field(description="Maximum number of results to return.", ge=1)] = 100,
 ) -> list[TrendingTermOut]:
     if not full_data:
-        trends = await news.get_trending_terms(geo=geo, full_data=False, max_results=max_results)
+        trends = await news.get_trending_terms(geo=geo, full_data=False)
         return [TrendingTermOut(keyword=str(tt["keyword"]), volume=tt["volume"]) for tt in trends]
-    trends = await news.get_trending_terms(geo=geo, full_data=True, max_results=max_results)
-    trends = [tt.__dict__ for tt in trends]
-    return [TrendingTermOut(**tt) for tt in trends]
+    trends = await news.get_trending_terms(geo=geo, full_data=True)
+    trends_out = []
+    for trend in trends:
+        trend = trend.__dict__
+        if 'news' in trend:
+            trend["news"] = [TrendingTermArticleOut(**article.__dict__) for article in trend["news"]]
+        trends_out.append(TrendingTermOut(**trend))
+    return trends_out
 
 
 def main():
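The full-data branch now converts each trendspy object through `__dict__`, but converts nested news items into `TrendingTermArticleOut` first, since `TrendingTermOut(**trend)` would otherwise receive raw non-pydantic objects in the `news` field. The same conversion in miniature, with stand-in classes (illustrative, not the package's types):

```python
from typing import Optional

from pydantic import BaseModel


class FakeArticle:  # stand-in for a trendspy news item
    def __init__(self, title: str, url: str):
        self.title, self.url = title, url


class FakeTrend:  # stand-in for TrendKeywordLite
    def __init__(self, keyword: str, volume: str, news: list):
        self.keyword, self.volume, self.news = keyword, volume, news


class ArticleOut(BaseModel):
    title: str
    url: str


class TrendOut(BaseModel):
    keyword: str
    volume: Optional[str] = None
    news: Optional[list[ArticleOut]] = None


trend = FakeTrend("eclipse", "500+", [FakeArticle("Eclipse today", "https://example.com")])
data = trend.__dict__
# Nested objects are not pydantic models, so convert them before **-unpacking.
data["news"] = [ArticleOut(**a.__dict__) for a in data["news"]]
print(TrendOut(**data))
```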
{google_news_trends_mcp-0.2.0.dist-info → google_news_trends_mcp-0.2.2.dist-info}/METADATA
CHANGED
@@ -1,7 +1,7 @@
 Metadata-Version: 2.4
 Name: google-news-trends-mcp
-Version: 0.2.0
-Summary: An MCP server to access Google News and Google Trends
+Version: 0.2.2
+Summary: An MCP server to access RSS feeds of Google News and Google Trends
 Author-email: Jesse Manek <jesse.manek@gmail.com>
 License-Expression: MIT
 Project-URL: Homepage, https://github.com/jmanek/google-news-trends-mcp
@@ -27,11 +27,11 @@ Dynamic: license-file
 
 # Google News Trends MCP
 
-An MCP server to access Google News and Google Trends.
+An MCP server to access RSS feeds of Google News and Google Trends.
 
 ## Features
 
-- Search Google News articles based on keyword, location, topic
+- Search Google News RSS feeds for articles based on keyword, location, topic
 - Get top news stories from Google News
 - Google Trends keywords based on location
 - Optional LLM Sampling and NLP to summarize articles and extract keywords
google_news_trends_mcp-0.2.2.dist-info/RECORD
ADDED
@@ -0,0 +1,11 @@
+google_news_trends_mcp/__init__.py,sha256=nDWNd6_TSf4vDQuHVBoAf4QfZCB3ZUFQ0M7XvifNJ-g,78
+google_news_trends_mcp/__main__.py,sha256=ysiAk_xpnnW3lrLlzdIQQa71tuGBRT8WocbecBsY2Fs,87
+google_news_trends_mcp/cli.py,sha256=IZ4UdAQ-tBnfutLcd3lGwsukpkYbdaJyuXZf7vddfLs,4383
+google_news_trends_mcp/news.py,sha256=pTAUTrM8Rkp8GaTPOLOo7BkFx9mePdQeCON7C6Q32aA,12489
+google_news_trends_mcp/server.py,sha256=TYVOnUVFQk2RQTGRVyHqoOMrADlHvFmfkN-0TmsuEO8,13394
+google_news_trends_mcp-0.2.2.dist-info/licenses/LICENSE,sha256=5dsv2ZI5EZIer0a9MktVmILVrlp5vqH_0tPIe3bRLgE,1067
+google_news_trends_mcp-0.2.2.dist-info/METADATA,sha256=W8baSQmZHv8jyLfp3ysVzZuG7zCfK4WU7NlBqGUCRxs,4458
+google_news_trends_mcp-0.2.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+google_news_trends_mcp-0.2.2.dist-info/entry_points.txt,sha256=eVT3xd6YJQgsWAUBwhnffuwhXNF7yyt_uco6fjBy-1o,130
+google_news_trends_mcp-0.2.2.dist-info/top_level.txt,sha256=RFheDbzhNnEV_Y3iFNm7jhRhY1P1wQgfiYqVpXCTD_U,23
+google_news_trends_mcp-0.2.2.dist-info/RECORD,,
google_news_trends_mcp-0.2.0.dist-info/RECORD
DELETED
@@ -1,11 +0,0 @@
-google_news_trends_mcp/__init__.py,sha256=nDWNd6_TSf4vDQuHVBoAf4QfZCB3ZUFQ0M7XvifNJ-g,78
-google_news_trends_mcp/__main__.py,sha256=ysiAk_xpnnW3lrLlzdIQQa71tuGBRT8WocbecBsY2Fs,87
-google_news_trends_mcp/cli.py,sha256=3Z916898HXTigmQYEfvb7ybfbuUE7bjMC6yjT5-l6u0,4558
-google_news_trends_mcp/news.py,sha256=MPNZlzI7KXkhQ2uj7233N2i9kFHGUgGMdRBCAbj-B44,12471
-google_news_trends_mcp/server.py,sha256=S-tlFY1wiFm9VPeb4NDnV0NGtczaQDmx20kIrZZQHto,15031
-google_news_trends_mcp-0.2.0.dist-info/licenses/LICENSE,sha256=5dsv2ZI5EZIer0a9MktVmILVrlp5vqH_0tPIe3bRLgE,1067
-google_news_trends_mcp-0.2.0.dist-info/METADATA,sha256=rok_3L-eDVXQJSuG6ze1Vuicnh-kpcWyJxaF2DbqZ1s,4454
-google_news_trends_mcp-0.2.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-google_news_trends_mcp-0.2.0.dist-info/entry_points.txt,sha256=eVT3xd6YJQgsWAUBwhnffuwhXNF7yyt_uco6fjBy-1o,130
-google_news_trends_mcp-0.2.0.dist-info/top_level.txt,sha256=RFheDbzhNnEV_Y3iFNm7jhRhY1P1wQgfiYqVpXCTD_U,23
-google_news_trends_mcp-0.2.0.dist-info/RECORD,,
{google_news_trends_mcp-0.2.0.dist-info → google_news_trends_mcp-0.2.2.dist-info}/WHEEL
RENAMED
File without changes
{google_news_trends_mcp-0.2.0.dist-info → google_news_trends_mcp-0.2.2.dist-info}/entry_points.txt
RENAMED
File without changes
{google_news_trends_mcp-0.2.0.dist-info → google_news_trends_mcp-0.2.2.dist-info}/licenses/LICENSE
RENAMED
File without changes
{google_news_trends_mcp-0.2.0.dist-info → google_news_trends_mcp-0.2.2.dist-info}/top_level.txt
RENAMED
File without changes