google-news-trends-mcp 0.1.7__tar.gz → 0.1.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (18):
  1. {google_news_trends_mcp-0.1.7/src/google_news_trends_mcp.egg-info → google_news_trends_mcp-0.1.8}/PKG-INFO +4 -4
  2. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.8}/README.md +3 -3
  3. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.8}/pyproject.toml +5 -2
  4. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.8}/src/google_news_trends_mcp/cli.py +15 -49
  5. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.8}/src/google_news_trends_mcp/news.py +12 -36
  6. google_news_trends_mcp-0.1.8/src/google_news_trends_mcp/server.py +344 -0
  7. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.8/src/google_news_trends_mcp.egg-info}/PKG-INFO +4 -4
  8. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.8}/tests/test_server.py +5 -7
  9. google_news_trends_mcp-0.1.7/src/google_news_trends_mcp/server.py +0 -329
  10. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.8}/LICENSE +0 -0
  11. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.8}/setup.cfg +0 -0
  12. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.8}/src/google_news_trends_mcp/__init__.py +0 -0
  13. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.8}/src/google_news_trends_mcp/__main__.py +0 -0
  14. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.8}/src/google_news_trends_mcp.egg-info/SOURCES.txt +0 -0
  15. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.8}/src/google_news_trends_mcp.egg-info/dependency_links.txt +0 -0
  16. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.8}/src/google_news_trends_mcp.egg-info/entry_points.txt +0 -0
  17. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.8}/src/google_news_trends_mcp.egg-info/requires.txt +0 -0
  18. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.8}/src/google_news_trends_mcp.egg-info/top_level.txt +0 -0

{google_news_trends_mcp-0.1.7/src/google_news_trends_mcp.egg-info → google_news_trends_mcp-0.1.8}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: google-news-trends-mcp
-Version: 0.1.7
+Version: 0.1.8
 Summary: An MCP server to access Google News and Google Trends.
 Author-email: Jesse Manek <jesse.manek@gmail.com>
 License-Expression: MIT
@@ -36,7 +36,7 @@ An MCP server to access Google News and Google Trends. Does not rely on any pai
 - Search Google News articles based on keyword, location, topic
 - Get top news stories from Google News
 - Google Trends keywords based on location
-- Optional NLP to summarize articles and extract keywords
+- Optional LLM Sampling and NLP to summarize articles and extract keywords
 
 ## Installation
 
@@ -70,7 +70,7 @@ Add to your Claude settings:
   "mcpServers": {
     "google-news-trends": {
       "command": "uvx",
-      "args": ["google-news-trends-mcp"]
+      "args": ["google-news-trends-mcp@latest"]
     }
   }
 }
@@ -103,7 +103,7 @@ Add to your Claude settings:
   "servers": {
     "google-news-trends": {
      "command": "uvx",
-      "args": ["google-news-trends-mcp"]
+      "args": ["google-news-trends-mcp@latest"]
     }
   }
 }

{google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.8}/README.md

@@ -7,7 +7,7 @@ An MCP server to access Google News and Google Trends. Does not rely on any pai
 - Search Google News articles based on keyword, location, topic
 - Get top news stories from Google News
 - Google Trends keywords based on location
-- Optional NLP to summarize articles and extract keywords
+- Optional LLM Sampling and NLP to summarize articles and extract keywords
 
 ## Installation
 
@@ -41,7 +41,7 @@ Add to your Claude settings:
   "mcpServers": {
     "google-news-trends": {
       "command": "uvx",
-      "args": ["google-news-trends-mcp"]
+      "args": ["google-news-trends-mcp@latest"]
     }
   }
 }
@@ -74,7 +74,7 @@ Add to your Claude settings:
   "servers": {
     "google-news-trends": {
       "command": "uvx",
-      "args": ["google-news-trends-mcp"]
+      "args": ["google-news-trends-mcp@latest"]
     }
   }
 }

{google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.8}/pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "google-news-trends-mcp"
-version = "0.1.7"
+version = "0.1.8"
 description = "An MCP server to access Google News and Google Trends."
 readme = "README.md"
 requires-python = ">=3.10.18"
@@ -63,4 +63,7 @@ pythonpath = "src"
 [project.urls]
 Homepage = "https://github.com/jmanek/google-news-trends-mcp"
 Repository = "https://github.com/jmanek/google-news-trends-mcp"
-Issues = "https://github.com/jmanek/google-news-trends-mcp/issues"
+Issues = "https://github.com/jmanek/google-news-trends-mcp/issues"
+
+[tool.black]
+line-length = 120

{google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.8}/src/google_news_trends_mcp/cli.py

@@ -17,9 +17,7 @@ def cli():
 
 @cli.command(help=get_news_by_keyword.__doc__)
 @click.argument("keyword")
-@click.option(
-    "--period", type=int, default=7, help="Period in days to search for articles."
-)
+@click.option("--period", type=int, default=7, help="Period in days to search for articles.")
 @click.option(
     "--max-results",
     "max_results",
@@ -27,24 +25,16 @@ def cli():
     default=10,
     help="Maximum number of results to return.",
 )
-@click.option(
-    "--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles."
-)
+@click.option("--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles.")
 def keyword(keyword, period, max_results, no_nlp):
-    articles = asyncio.run(
-        get_news_by_keyword(
-            keyword, period=period, max_results=max_results, nlp=not no_nlp
-        )
-    )
+    articles = asyncio.run(get_news_by_keyword(keyword, period=period, max_results=max_results, nlp=not no_nlp))
     # asyncio.run(articles) # Ensure the articles are fetched asynchronously
     print_articles(articles)
 
 
 @cli.command(help=get_news_by_location.__doc__)
 @click.argument("location")
-@click.option(
-    "--period", type=int, default=7, help="Period in days to search for articles."
-)
+@click.option("--period", type=int, default=7, help="Period in days to search for articles.")
 @click.option(
     "--max-results",
     "max_results",
@@ -52,23 +42,15 @@ def keyword(keyword, period, max_results, no_nlp):
     default=10,
     help="Maximum number of results to return.",
 )
-@click.option(
-    "--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles."
-)
+@click.option("--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles.")
 def location(location, period, max_results, no_nlp):
-    articles = asyncio.run(
-        get_news_by_location(
-            location, period=period, max_results=max_results, nlp=not no_nlp
-        )
-    )
+    articles = asyncio.run(get_news_by_location(location, period=period, max_results=max_results, nlp=not no_nlp))
     print_articles(articles)
 
 
 @cli.command(help=get_news_by_topic.__doc__)
 @click.argument("topic")
-@click.option(
-    "--period", type=int, default=7, help="Period in days to search for articles."
-)
+@click.option("--period", type=int, default=7, help="Period in days to search for articles.")
 @click.option(
     "--max-results",
     "max_results",
@@ -76,23 +58,15 @@ def location(location, period, max_results, no_nlp):
     default=10,
     help="Maximum number of results to return.",
 )
-@click.option(
-    "--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles."
-)
+@click.option("--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles.")
 def topic(topic, period, max_results, no_nlp):
-    articles = asyncio.run(
-        get_news_by_topic(topic, period=period, max_results=max_results, nlp=not no_nlp)
-    )
+    articles = asyncio.run(get_news_by_topic(topic, period=period, max_results=max_results, nlp=not no_nlp))
     print_articles(articles)
 
 
 @cli.command(help=get_trending_terms.__doc__)
-@click.option(
-    "--geo", type=str, default="US", help="Country code, e.g. 'US', 'GB', 'IN', etc."
-)
-@click.option(
-    "--full-data", is_flag=True, default=False, help="Return full data for each trend."
-)
+@click.option("--geo", type=str, default="US", help="Country code, e.g. 'US', 'GB', 'IN', etc.")
+@click.option("--full-data", is_flag=True, default=False, help="Return full data for each trend.")
 @click.option(
     "--max-results",
     "max_results",
@@ -101,9 +75,7 @@ def topic(topic, period, max_results, no_nlp):
     help="Maximum number of results to return.",
 )
 def trending(geo, full_data, max_results):
-    trending_terms = asyncio.run(
-        get_trending_terms(geo=geo, full_data=full_data, max_results=max_results)
-    )
+    trending_terms = asyncio.run(get_trending_terms(geo=geo, full_data=full_data, max_results=max_results))
     if trending_terms:
         print("Trending terms:")
         for term in trending_terms:
@@ -116,9 +88,7 @@ def trending(geo, full_data, max_results):
 
 
 @cli.command(help=get_top_news.__doc__)
-@click.option(
-    "--period", type=int, default=3, help="Period in days to search for top articles."
-)
+@click.option("--period", type=int, default=3, help="Period in days to search for top articles.")
 @click.option(
     "--max-results",
     "max_results",
@@ -126,13 +96,9 @@ def trending(geo, full_data, max_results):
     default=10,
     help="Maximum number of results to return.",
 )
-@click.option(
-    "--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles."
-)
+@click.option("--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles.")
 def top(period, max_results, no_nlp):
-    articles = asyncio.run(
-        get_top_news(max_results=max_results, period=period, nlp=not no_nlp)
-    )
+    articles = asyncio.run(get_top_news(max_results=max_results, period=period, nlp=not no_nlp))
     print_articles(articles)
     print(f"Found {len(articles)} top articles.")
 
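
The cli.py hunks above are formatting-only: multi-line calls are collapsed to fit the 120-character line length configured in the new [tool.black] section of pyproject.toml, and the command surface, including --no-nlp, is unchanged. As a minimal sketch, the click group can still be driven programmatically (module and command names as shown in the hunks above; the concrete invocation is illustrative):

    from click.testing import CliRunner

    from google_news_trends_mcp.cli import cli

    # Exercise the unchanged "keyword" command without spawning a shell.
    runner = CliRunner()
    result = runner.invoke(cli, ["keyword", "AI", "--period", "3", "--max-results", "2", "--no-nlp"])
    print(result.output)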

{google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.8}/src/google_news_trends_mcp/news.py

@@ -100,9 +100,7 @@ async def download_article_with_playwright(url) -> newspaper.Article | None:
         article = newspaper.article(url, input_html=content, language="en")
         return article
     except Exception as e:
-        logging.warning(
-            f"Error downloading article with Playwright from {url}\n {e.args}"
-        )
+        logging.warning(f"Error downloading article with Playwright from {url}\n {e.args}")
         return None
 
 
@@ -135,9 +133,7 @@ async def download_article(url: str, nlp: bool = True) -> newspaper.Article | No
                 f"Failed to download article with cloudscraper from {url}, status code: {response.status_code}"
             )
     except Exception as e:
-        logging.debug(
-            f"Error downloading article with cloudscraper from {url}\n {e.args}"
-        )
+        logging.debug(f"Error downloading article with cloudscraper from {url}\n {e.args}")
 
     try:
         if article is None or not article.text:
@@ -168,9 +164,7 @@ async def process_gnews_articles(
     for idx, gnews_article in enumerate(gnews_articles):
         article = await download_article(gnews_article["url"], nlp=nlp)
         if article is None or not article.text:
-            logging.debug(
-                f"Failed to download article from {gnews_article['url']}:\n{article}"
-            )
+            logging.debug(f"Failed to download article from {gnews_article['url']}:\n{article}")
             continue
         articles.append(article)
         if report_progress:
@@ -196,13 +190,9 @@ async def get_news_by_keyword(
     google_news.max_results = max_results
     gnews_articles = google_news.get_news(keyword)
     if not gnews_articles:
-        logging.debug(
-            f"No articles found for keyword '{keyword}' in the last {period} days."
-        )
+        logging.debug(f"No articles found for keyword '{keyword}' in the last {period} days.")
         return []
-    return await process_gnews_articles(
-        gnews_articles, nlp=nlp, report_progress=report_progress
-    )
+    return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)
 
 
 async def get_top_news(
@@ -223,9 +213,7 @@ async def get_top_news(
     if not gnews_articles:
         logging.debug("No top news articles found.")
         return []
-    return await process_gnews_articles(
-        gnews_articles, nlp=nlp, report_progress=report_progress
-    )
+    return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)
 
 
 async def get_news_by_location(
@@ -245,13 +233,9 @@ async def get_news_by_location(
     google_news.max_results = max_results
     gnews_articles = google_news.get_news_by_location(location)
     if not gnews_articles:
-        logging.debug(
-            f"No articles found for location '{location}' in the last {period} days."
-        )
+        logging.debug(f"No articles found for location '{location}' in the last {period} days.")
         return []
-    return await process_gnews_articles(
-        gnews_articles, nlp=nlp, report_progress=report_progress
-    )
+    return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)
 
 
 async def get_news_by_topic(
@@ -279,13 +263,9 @@ async def get_news_by_topic(
     google_news.max_results = max_results
     gnews_articles = google_news.get_news_by_topic(topic)
     if not gnews_articles:
-        logging.debug(
-            f"No articles found for topic '{topic}' in the last {period} days."
-        )
+        logging.debug(f"No articles found for topic '{topic}' in the last {period} days.")
         return []
-    return await process_gnews_articles(
-        gnews_articles, nlp=nlp, report_progress=report_progress
-    )
+    return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)
 
 
 @overload
@@ -314,13 +294,9 @@ async def get_trending_terms(
     """
     try:
         trends = list(tr.trending_now(geo=geo))
-        trends = list(sorted(trends, key=lambda tt: tt.volume, reverse=True))[
-            :max_results
-        ]
+        trends = list(sorted(trends, key=lambda tt: tt.volume, reverse=True))[:max_results]
         if not full_data:
-            return [
-                {"keyword": trend.keyword, "volume": trend.volume} for trend in trends
-            ]
+            return [{"keyword": trend.keyword, "volume": trend.volume} for trend in trends]
         return trends
     except Exception as e:
         logging.warning(f"Error fetching trending terms: {e}")

google_news_trends_mcp-0.1.8/src/google_news_trends_mcp/server.py

@@ -0,0 +1,344 @@
+from typing import Annotated, cast, Optional, Any, Literal, TYPE_CHECKING
+from fastmcp import FastMCP, Context
+from fastmcp.exceptions import ToolError
+from fastmcp.server.dependencies import get_context
+from fastmcp.server.middleware.timing import TimingMiddleware
+from fastmcp.server.middleware.logging import LoggingMiddleware
+from fastmcp.server.middleware.rate_limiting import RateLimitingMiddleware
+from fastmcp.server.middleware.error_handling import ErrorHandlingMiddleware
+from mcp.types import TextContent
+from pydantic import BaseModel, Field, model_serializer
+from google_news_trends_mcp import news
+from newspaper import settings as newspaper_settings
+from newspaper.article import Article
+
+
+class BaseModelClean(BaseModel):
+    @model_serializer
+    def serializer(self, **kwargs) -> dict[str, Any]:
+        return {
+            field: self.__getattribute__(field)
+            for field in self.model_fields_set
+            if self.__getattribute__(field) is not None
+        }
+
+    if TYPE_CHECKING:
+
+        def model_dump(self, **kwargs) -> dict[str, Any]: ...
+
+
+class ArticleOut(BaseModelClean):
+    title: Annotated[str, Field(description="Title of the article.")]
+    url: Annotated[str, Field(description="Original article URL.")]
+    read_more_link: Annotated[Optional[str], Field(description="Link to read more about the article.")] = None
+    language: Annotated[Optional[str], Field(description="Language code of the article.")] = None
+    meta_img: Annotated[Optional[str], Field(description="Meta image URL.")] = None
+    movies: Annotated[Optional[list[str]], Field(description="List of movie URLs or IDs.")] = None
+    meta_favicon: Annotated[Optional[str], Field(description="Favicon URL from meta data.")] = None
+    meta_site_name: Annotated[Optional[str], Field(description="Site name from meta data.")] = None
+    authors: Annotated[Optional[list[str]], Field(description="list of authors.")] = None
+    publish_date: Annotated[Optional[str], Field(description="Publish date in ISO format.")] = None
+    top_image: Annotated[Optional[str], Field(description="URL of the top image.")] = None
+    images: Annotated[Optional[list[str]], Field(description="list of image URLs.")] = None
+    text: Annotated[Optional[str], Field(description="Full text of the article.")] = None
+    summary: Annotated[Optional[str], Field(description="Summary of the article.")] = None
+    keywords: Annotated[Optional[list[str]], Field(description="Extracted keywords.")] = None
+    tags: Annotated[Optional[list[str]], Field(description="Tags for the article.")] = None
+    meta_keywords: Annotated[Optional[list[str]], Field(description="Meta keywords from the article.")] = None
+    meta_description: Annotated[Optional[str], Field(description="Meta description from the article.")] = None
+    canonical_link: Annotated[Optional[str], Field(description="Canonical link for the article.")] = None
+    meta_data: Annotated[Optional[dict[str, str | int]], Field(description="Meta data dictionary.")] = None
+    meta_lang: Annotated[Optional[str], Field(description="Language of the article.")] = None
+    source_url: Annotated[Optional[str], Field(description="Source URL if different from original.")] = None
+
+
+class TrendingTermArticleOut(BaseModelClean):
+    title: Annotated[str, Field(description="Article title.")] = ""
+    url: Annotated[str, Field(description="Article URL.")] = ""
+    source: Annotated[Optional[str], Field(description="News source name.")] = None
+    picture: Annotated[Optional[str], Field(description="URL to article image.")] = None
+    time: Annotated[Optional[str | int], Field(description="Publication time or timestamp.")] = None
+    snippet: Annotated[Optional[str], Field(description="Article preview text.")] = None
+
+
+class TrendingTermOut(BaseModelClean):
+    keyword: Annotated[str, Field(description="Trending keyword.")]
+    volume: Annotated[Optional[int], Field(description="Search volume.")] = None
+    geo: Annotated[Optional[str], Field(description="Geographic location code.")] = None
+    started_timestamp: Annotated[
+        Optional[list],
+        Field(description="When the trend started (year, month, day, hour, minute, second)."),
+    ] = None
+    ended_timestamp: Annotated[
+        Optional[list],
+        Field(description="When the trend ended (year, month, day, hour, minute, second)."),
+    ] = None
+    volume_growth_pct: Annotated[Optional[float], Field(description="Percentage growth in search volume.")] = None
+    trend_keywords: Annotated[Optional[list[str]], Field(description="Related keywords.")] = None
+    topics: Annotated[Optional[list[str | int]], Field(description="Related topics.")] = None
+    news: Annotated[
+        Optional[list[TrendingTermArticleOut]],
+        Field(description="Related news articles."),
+    ] = None
+    news_tokens: Annotated[Optional[list], Field(description="Associated news tokens.")] = None
+    normalized_keyword: Annotated[Optional[str], Field(description="Normalized form of the keyword.")] = None
+
+
+mcp = FastMCP(
+    name="google-news-trends",
+    instructions="This server provides tools to search, analyze, and summarize Google News articles and Google Trends",
+    on_duplicate_tools="replace",
+)
+
+mcp.add_middleware(ErrorHandlingMiddleware())  # Handle errors first
+mcp.add_middleware(RateLimitingMiddleware(max_requests_per_second=50))
+mcp.add_middleware(TimingMiddleware())  # Time actual execution
+mcp.add_middleware(LoggingMiddleware())  # Log everything
+
+
+def set_newspaper_article_fields(full_data: bool = False):
+    if full_data:
+        newspaper_settings.article_json_fields = [
+            "url",
+            "read_more_link",
+            "language",
+            "title",
+            "top_image",
+            "meta_img",
+            "images",
+            "movies",
+            "keywords",
+            "keyword_scores",
+            "meta_keywords",
+            "tags",
+            "authors",
+            "publish_date",
+            "summary",
+            "meta_description",
+            "meta_lang",
+            "meta_favicon",
+            "meta_site_name",
+            "canonical_link",
+            "text",
+        ]
+    else:
+        newspaper_settings.article_json_fields = [
+            "url",
+            "title",
+            "publish_date",
+            "summary",
+        ]
+
+
+async def summarize_article(article: Article, ctx: Context) -> None:
+    if article.text:
+        prompt = f"Please provide a concise summary of the following news article:\n\n{article.text}"
+        response = await ctx.sample(prompt)
+        # response = cast(TextContent, response)
+        if isinstance(response, TextContent):
+            if not response.text:
+                await ctx.warning("NLP response is empty. Unable to summarize article.")
+                article.summary = "No summary available."
+            else:
+                article.summary = response.text
+        else:
+            await ctx.warning("NLP response is not a TextContent object. Unable to summarize article.")
+            article.summary = "No summary available."
+    else:
+        article.summary = "No summary available."
+
+
+@mcp.tool(
+    description=news.get_news_by_keyword.__doc__,
+    tags={"news", "articles", "keyword"},
+)
+async def get_news_by_keyword(
+    ctx: Context,
+    keyword: Annotated[str, Field(description="Search term to find articles.")],
+    period: Annotated[int, Field(description="Number of days to look back for articles.", ge=1)] = 7,
+    max_results: Annotated[int, Field(description="Maximum number of results to return.", ge=1)] = 10,
+    full_data: Annotated[
+        bool,
+        Field(
+            description="Return full data for each article. If False a summary should be created by setting the summarize flag"
+        ),
+    ] = False,
+    summarize: Annotated[
+        bool,
+        Field(
+            description="Generate a summary of the article, will first try LLM Sampling but if unavailable will use nlp"
+        ),
+    ] = True,
+) -> list[ArticleOut]:
+    set_newspaper_article_fields(full_data)
+    articles = await news.get_news_by_keyword(
+        keyword=keyword,
+        period=period,
+        max_results=max_results,
+        nlp=False,
+        report_progress=ctx.report_progress,
+    )
+    if summarize:
+        total_articles = len(articles)
+        try:
+            for idx, article in enumerate(articles):
+                await summarize_article(article, ctx)
+                await ctx.report_progress(idx, total_articles)
+        except Exception as err:
+            await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
+            for idx, article in enumerate(articles):
+                article.nlp()
+                await ctx.report_progress(idx, total_articles)
+    await ctx.report_progress(progress=len(articles), total=len(articles))
+    return [ArticleOut(**a.to_json(False)) for a in articles]
+
+
+@mcp.tool(
+    description=news.get_news_by_location.__doc__,
+    tags={"news", "articles", "location"},
+)
+async def get_news_by_location(
+    ctx: Context,
+    location: Annotated[str, Field(description="Name of city/state/country.")],
+    period: Annotated[int, Field(description="Number of days to look back for articles.", ge=1)] = 7,
+    max_results: Annotated[int, Field(description="Maximum number of results to return.", ge=1)] = 10,
+    full_data: Annotated[
+        bool,
+        Field(
+            description="Return full data for each article. If False a summary should be created by setting the summarize flag"
+        ),
+    ] = False,
+    summarize: Annotated[
+        bool,
+        Field(
+            description="Generate a summary of the article, will first try LLM Sampling but if unavailable will use nlp"
+        ),
+    ] = True,
+) -> list[ArticleOut]:
+    set_newspaper_article_fields(full_data)
+    articles = await news.get_news_by_location(
+        location=location,
+        period=period,
+        max_results=max_results,
+        nlp=False,
+        report_progress=ctx.report_progress,
+    )
+    if summarize:
+        total_articles = len(articles)
+        try:
+            for idx, article in enumerate(articles):
+                await summarize_article(article, ctx)
+                await ctx.report_progress(idx, total_articles)
+        except Exception as err:
+            await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
+            for idx, article in enumerate(articles):
+                article.nlp()
+                await ctx.report_progress(idx, total_articles)
+    await ctx.report_progress(progress=len(articles), total=len(articles))
+    return [ArticleOut(**a.to_json(False)) for a in articles]
+
+
+@mcp.tool(description=news.get_news_by_topic.__doc__, tags={"news", "articles", "topic"})
+async def get_news_by_topic(
+    ctx: Context,
+    topic: Annotated[str, Field(description="Topic to search for articles.")],
+    period: Annotated[int, Field(description="Number of days to look back for articles.", ge=1)] = 7,
+    max_results: Annotated[int, Field(description="Maximum number of results to return.", ge=1)] = 10,
+    full_data: Annotated[
+        bool,
+        Field(
+            description="Return full data for each article. If False a summary should be created by setting the summarize flag"
+        ),
+    ] = False,
+    summarize: Annotated[
+        bool,
+        Field(
+            description="Generate a summary of the article, will first try LLM Sampling but if unavailable will use nlp"
+        ),
+    ] = True,
+) -> list[ArticleOut]:
+    set_newspaper_article_fields(full_data)
+    articles = await news.get_news_by_topic(
+        topic=topic,
+        period=period,
+        max_results=max_results,
+        nlp=False,
+        report_progress=ctx.report_progress,
+    )
+    if summarize:
+        total_articles = len(articles)
+        try:
+            for idx, article in enumerate(articles):
+                await summarize_article(article, ctx)
+                await ctx.report_progress(idx, total_articles)
+        except Exception as err:
+            await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
+            for idx, article in enumerate(articles):
+                article.nlp()
+                await ctx.report_progress(idx, total_articles)
+
+    await ctx.report_progress(progress=len(articles), total=len(articles))
+    return [ArticleOut(**a.to_json(False)) for a in articles]
+
+
+@mcp.tool(description=news.get_top_news.__doc__, tags={"news", "articles", "top"})
+async def get_top_news(
+    ctx: Context,
+    period: Annotated[int, Field(description="Number of days to look back for top articles.", ge=1)] = 3,
+    max_results: Annotated[int, Field(description="Maximum number of results to return.", ge=1)] = 10,
+    full_data: Annotated[
+        bool,
+        Field(
+            description="Return full data for each article. If False a summary should be created by setting the summarize flag"
+        ),
+    ] = False,
+    summarize: Annotated[
+        bool,
+        Field(
+            description="Generate a summary of the article, will first try LLM Sampling but if unavailable will use nlp"
+        ),
+    ] = True,
+) -> list[ArticleOut]:
+    set_newspaper_article_fields(full_data)
+    articles = await news.get_top_news(
+        period=period,
+        max_results=max_results,
+        nlp=False,
+        report_progress=ctx.report_progress,
+    )
+    if summarize:
+        total_articles = len(articles)
+        try:
+            for idx, article in enumerate(articles):
+                await summarize_article(article, ctx)
+                await ctx.report_progress(idx, total_articles)
+        except Exception as err:
+            await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
+            for idx, article in enumerate(articles):
+                article.nlp()
+                await ctx.report_progress(idx, total_articles)
+
+    await ctx.report_progress(progress=len(articles), total=len(articles))
+    return [ArticleOut(**a.to_json(False)) for a in articles]
+
+
+@mcp.tool(description=news.get_trending_terms.__doc__, tags={"trends", "google", "trending"})
+async def get_trending_terms(
+    geo: Annotated[str, Field(description="Country code, e.g. 'US', 'GB', 'IN', etc.")] = "US",
+    full_data: Annotated[
+        bool,
+        Field(description="Return full data for each trend. Should be False for most use cases."),
+    ] = False,
+    max_results: Annotated[int, Field(description="Maximum number of results to return.", ge=1)] = 100,
+) -> list[TrendingTermOut]:
+
+    if not full_data:
+        trends = await news.get_trending_terms(geo=geo, full_data=False, max_results=max_results)
+        return [TrendingTermOut(keyword=str(tt["keyword"]), volume=tt["volume"]) for tt in trends]
+
+    trends = await news.get_trending_terms(geo=geo, full_data=True, max_results=max_results)
+    return [TrendingTermOut(**tt.__dict__) for tt in trends]
+
+
+def main():
+    mcp.run()
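
The functional change in 0.1.8 is in this new file: the per-tool nlp flag is gone, and a summarize flag (default True) first tries MCP LLM sampling via ctx.sample(), falling back to newspaper's local article.nlp() if sampling fails. A minimal sketch of calling the new tool signature in-process, modeled on the updated tests/test_server.py further below (the JSON result shape is an assumption carried over from those tests):

    import asyncio
    import json

    from fastmcp import Client

    from google_news_trends_mcp.server import mcp


    async def main() -> None:
        # In-process client against the FastMCP server object, as in tests/test_server.py.
        async with Client(mcp) as client:
            # 0.1.8 signature: "summarize" and "full_data" replace the old "nlp" flag.
            params = {"keyword": "AI", "period": 3, "max_results": 2, "summarize": False}
            result = await client.call_tool("get_news_by_keyword", params)
            for content in result:
                # The tests parse each content block as a JSON string of articles.
                article = json.loads(content.text)[0]
                print(article["title"], article["url"])


    asyncio.run(main())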

{google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.8/src/google_news_trends_mcp.egg-info}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: google-news-trends-mcp
-Version: 0.1.7
+Version: 0.1.8
 Summary: An MCP server to access Google News and Google Trends.
 Author-email: Jesse Manek <jesse.manek@gmail.com>
 License-Expression: MIT
@@ -36,7 +36,7 @@ An MCP server to access Google News and Google Trends. Does not rely on any pai
 - Search Google News articles based on keyword, location, topic
 - Get top news stories from Google News
 - Google Trends keywords based on location
-- Optional NLP to summarize articles and extract keywords
+- Optional LLM Sampling and NLP to summarize articles and extract keywords
 
 ## Installation
 
@@ -70,7 +70,7 @@ Add to your Claude settings:
   "mcpServers": {
     "google-news-trends": {
      "command": "uvx",
-      "args": ["google-news-trends-mcp"]
+      "args": ["google-news-trends-mcp@latest"]
     }
   }
 }
@@ -103,7 +103,7 @@ Add to your Claude settings:
   "servers": {
     "google-news-trends": {
      "command": "uvx",
-      "args": ["google-news-trends-mcp"]
+      "args": ["google-news-trends-mcp@latest"]
     }
   }
 }

{google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.8}/tests/test_server.py

@@ -11,21 +11,19 @@ def mcp_server():
 
 async def test_get_news_by_keyword(mcp_server):
     async with Client(mcp_server) as client:
-        params = {"keyword": "AI", "period": 3, "max_results": 2, "nlp": True}
+        params = {"keyword": "AI", "period": 3, "max_results": 2}
         result = await client.call_tool("get_news_by_keyword", params)
         assert isinstance(result, list)
         assert len(result) <= 2
         for article in result:
-            article = json.loads(article.text)[
-                0
-            ]  # Assuming articles are returned as JSON strings
+            article = json.loads(article.text)[0]  # Assuming articles are returned as JSON strings
             assert "title" in article
             assert "url" in article
 
 
 async def test_get_news_by_location(mcp_server):
     async with Client(mcp_server) as client:
-        params = {"location": "California", "period": 3, "max_results": 2, "nlp": False}
+        params = {"location": "California", "period": 3, "max_results": 2}
         result = await client.call_tool("get_news_by_location", params)
         assert isinstance(result, list)
         assert len(result) <= 2
@@ -37,7 +35,7 @@ async def test_get_news_by_location(mcp_server):
 
 async def test_get_news_by_topic(mcp_server):
     async with Client(mcp_server) as client:
-        params = {"topic": "TECHNOLOGY", "period": 3, "max_results": 2, "nlp": True}
+        params = {"topic": "TECHNOLOGY", "period": 3, "max_results": 2}
         result = await client.call_tool("get_news_by_topic", params)
         assert isinstance(result, list)
         assert len(result) <= 2
@@ -49,7 +47,7 @@ async def test_get_news_by_topic(mcp_server):
 
 async def test_get_top_news(mcp_server):
     async with Client(mcp_server) as client:
-        params = {"period": 2, "max_results": 2, "nlp": False}
+        params = {"period": 2, "max_results": 2}
         result = await client.call_tool("get_top_news", params)
         assert isinstance(result, list)
         assert len(result) <= 2

google_news_trends_mcp-0.1.7/src/google_news_trends_mcp/server.py

@@ -1,329 +0,0 @@
-from fastmcp import FastMCP, Context
-from fastmcp.exceptions import ToolError
-from fastmcp.server.dependencies import get_context
-from pydantic import BaseModel, Field
-from typing import Optional
-from google_news_trends_mcp import news
-from typing import Annotated
-from newspaper import settings as newspaper_settings
-from fastmcp.server.middleware.timing import TimingMiddleware
-from fastmcp.server.middleware.logging import LoggingMiddleware
-from fastmcp.server.middleware.rate_limiting import RateLimitingMiddleware
-from fastmcp.server.middleware.error_handling import ErrorHandlingMiddleware
-
-
-class ArticleOut(BaseModel):
-    read_more_link: Annotated[
-        Optional[str], Field(description="Link to read more about the article.")
-    ] = None
-    language: Annotated[
-        Optional[str], Field(description="Language code of the article.")
-    ] = None
-    meta_img: Annotated[Optional[str], Field(description="Meta image URL.")] = None
-    movies: Annotated[
-        Optional[list[str]], Field(description="List of movie URLs or IDs.")
-    ] = None
-    meta_favicon: Annotated[
-        Optional[str], Field(description="Favicon URL from meta data.")
-    ] = None
-    meta_site_name: Annotated[
-        Optional[str], Field(description="Site name from meta data.")
-    ] = None
-    title: Annotated[str, Field(description="Title of the article.")]
-    authors: Annotated[Optional[list[str]], Field(description="list of authors.")] = (
-        None
-    )
-    publish_date: Annotated[
-        Optional[str], Field(description="Publish date in ISO format.")
-    ] = None
-    top_image: Annotated[Optional[str], Field(description="URL of the top image.")] = (
-        None
-    )
-    images: Annotated[Optional[list[str]], Field(description="list of image URLs.")] = (
-        None
-    )
-    text: Annotated[str, Field(description="Full text of the article.")]
-    url: Annotated[str, Field(description="Original article URL.")]
-    summary: Annotated[Optional[str], Field(description="Summary of the article.")] = (
-        None
-    )
-    keywords: Annotated[
-        Optional[list[str]], Field(description="Extracted keywords.")
-    ] = None
-    tags: Annotated[Optional[list[str]], Field(description="Tags for the article.")] = (
-        None
-    )
-    meta_keywords: Annotated[
-        Optional[list[str]], Field(description="Meta keywords from the article.")
-    ] = None
-    meta_description: Annotated[
-        Optional[str], Field(description="Meta description from the article.")
-    ] = None
-    canonical_link: Annotated[
-        Optional[str], Field(description="Canonical link for the article.")
-    ] = None
-    meta_data: Annotated[
-        Optional[dict[str, str | int]], Field(description="Meta data dictionary.")
-    ] = None
-    meta_lang: Annotated[
-        Optional[str], Field(description="Language of the article.")
-    ] = None
-    source_url: Annotated[
-        Optional[str], Field(description="Source URL if different from original.")
-    ] = None
-
-
-class TrendingTermArticleOut(BaseModel):
-    title: Annotated[str, Field(description="Article title.")] = ""
-    url: Annotated[str, Field(description="Article URL.")] = ""
-    source: Annotated[Optional[str], Field(description="News source name.")] = None
-    picture: Annotated[Optional[str], Field(description="URL to article image.")] = None
-    time: Annotated[
-        Optional[str | int], Field(description="Publication time or timestamp.")
-    ] = None
-    snippet: Annotated[Optional[str], Field(description="Article preview text.")] = None
-
-
-class TrendingTermOut(BaseModel):
-    keyword: Annotated[str, Field(description="Trending keyword.")]
-    volume: Annotated[Optional[int], Field(description="Search volume.")] = None
-    geo: Annotated[Optional[str], Field(description="Geographic location code.")] = None
-    started_timestamp: Annotated[
-        Optional[list],
-        Field(
-            description="When the trend started (year, month, day, hour, minute, second)."
-        ),
-    ] = None
-    ended_timestamp: Annotated[
-        Optional[tuple[int, int]],
-        Field(
-            description="When the trend ended (year, month, day, hour, minute, second)."
-        ),
-    ] = None
-    volume_growth_pct: Annotated[
-        Optional[float], Field(description="Percentage growth in search volume.")
-    ] = None
-    trend_keywords: Annotated[
-        Optional[list[str]], Field(description="Related keywords.")
-    ] = None
-    topics: Annotated[
-        Optional[list[str | int]], Field(description="Related topics.")
-    ] = None
-    news: Annotated[
-        Optional[list[TrendingTermArticleOut]],
-        Field(description="Related news articles."),
-    ] = None
-    news_tokens: Annotated[
-        Optional[list], Field(description="Associated news tokens.")
-    ] = None
-    normalized_keyword: Annotated[
-        Optional[str], Field(description="Normalized form of the keyword.")
-    ] = None
-
-
-mcp = FastMCP(
-    name="google-news-trends",
-    instructions="This server provides tools to search, analyze, and summarize Google News articles and Google Trends",
-    on_duplicate_tools="replace",
-)
-
-mcp.add_middleware(ErrorHandlingMiddleware())  # Handle errors first
-mcp.add_middleware(RateLimitingMiddleware(max_requests_per_second=50))
-mcp.add_middleware(TimingMiddleware())  # Time actual execution
-mcp.add_middleware(LoggingMiddleware())  # Log everything
-
-
-# Configure newspaper settings for article extraction
-def set_newspaper_article_fields(full_data: bool = False):
-    if full_data:
-        newspaper_settings.article_json_fields = [
-            "url",
-            "read_more_link",
-            "language",
-            "title",
-            "top_image",
-            "meta_img",
-            "images",
-            "movies",
-            "keywords",
-            "keyword_scores",
-            "meta_keywords",
-            "tags",
-            "authors",
-            "publish_date",
-            "summary",
-            "meta_description",
-            "meta_lang",
-            "meta_favicon",
-            "meta_site_name",
-            "canonical_link",
-            "text",
-        ]
-    else:
-        newspaper_settings.article_json_fields = [
-            "url",
-            "title",
-            "text",
-            "publish_date",
-            "summary",
-            "keywords",
-        ]
-
-
-@mcp.tool(
-    description=news.get_news_by_keyword.__doc__,
-    tags={"news", "articles", "keyword"},
-)
-async def get_news_by_keyword(
-    ctx: Context,
-    keyword: Annotated[str, Field(description="Search term to find articles.")],
-    period: Annotated[
-        int, Field(description="Number of days to look back for articles.", ge=1)
-    ] = 7,
-    max_results: Annotated[
-        int, Field(description="Maximum number of results to return.", ge=1)
-    ] = 10,
-    nlp: Annotated[
-        bool, Field(description="Whether to perform NLP on the articles.")
-    ] = False,
-    full_data: Annotated[
-        bool, Field(description="Return full data for each article.")
-    ] = False,
-) -> list[ArticleOut]:
-    set_newspaper_article_fields(full_data)
-    articles = await news.get_news_by_keyword(
-        keyword=keyword,
-        period=period,
-        max_results=max_results,
-        nlp=nlp,
-        report_progress=ctx.report_progress,
-    )
-    await ctx.report_progress(progress=len(articles), total=len(articles))
-    return [ArticleOut(**a.to_json(False)) for a in articles]
-
-
-@mcp.tool(
-    description=news.get_news_by_location.__doc__,
-    tags={"news", "articles", "location"},
-)
-async def get_news_by_location(
-    ctx: Context,
-    location: Annotated[str, Field(description="Name of city/state/country.")],
-    period: Annotated[
-        int, Field(description="Number of days to look back for articles.", ge=1)
-    ] = 7,
-    max_results: Annotated[
-        int, Field(description="Maximum number of results to return.", ge=1)
-    ] = 10,
-    nlp: Annotated[
-        bool, Field(description="Whether to perform NLP on the articles.")
-    ] = False,
-    full_data: Annotated[
-        bool, Field(description="Return full data for each article.")
-    ] = False,
-) -> list[ArticleOut]:
-    set_newspaper_article_fields(full_data)
-    articles = await news.get_news_by_location(
-        location=location,
-        period=period,
-        max_results=max_results,
-        nlp=nlp,
-        report_progress=ctx.report_progress,
-    )
-    await ctx.report_progress(progress=len(articles), total=len(articles))
-    return [ArticleOut(**a.to_json(False)) for a in articles]
-
-
-@mcp.tool(
-    description=news.get_news_by_topic.__doc__, tags={"news", "articles", "topic"}
-)
-async def get_news_by_topic(
-    ctx: Context,
-    topic: Annotated[str, Field(description="Topic to search for articles.")],
-    period: Annotated[
-        int, Field(description="Number of days to look back for articles.", ge=1)
-    ] = 7,
-    max_results: Annotated[
-        int, Field(description="Maximum number of results to return.", ge=1)
-    ] = 10,
-    nlp: Annotated[
-        bool, Field(description="Whether to perform NLP on the articles.")
-    ] = False,
-    full_data: Annotated[
-        bool, Field(description="Return full data for each article.")
-    ] = False,
-) -> list[ArticleOut]:
-    set_newspaper_article_fields(full_data)
-    articles = await news.get_news_by_topic(
-        topic=topic,
-        period=period,
-        max_results=max_results,
-        nlp=nlp,
-        report_progress=ctx.report_progress,
-    )
-    await ctx.report_progress(progress=len(articles), total=len(articles))
-    return [ArticleOut(**a.to_json(False)) for a in articles]
-
-
-@mcp.tool(description=news.get_top_news.__doc__, tags={"news", "articles", "top"})
-async def get_top_news(
-    ctx: Context,
-    period: Annotated[
-        int, Field(description="Number of days to look back for top articles.", ge=1)
-    ] = 3,
-    max_results: Annotated[
-        int, Field(description="Maximum number of results to return.", ge=1)
-    ] = 10,
-    nlp: Annotated[
-        bool, Field(description="Whether to perform NLP on the articles.")
-    ] = False,
-    full_data: Annotated[
-        bool, Field(description="Return full data for each article.")
-    ] = False,
-) -> list[ArticleOut]:
-    set_newspaper_article_fields(full_data)
-    articles = await news.get_top_news(
-        period=period,
-        max_results=max_results,
-        nlp=nlp,
-        report_progress=ctx.report_progress,
-    )
-    await ctx.report_progress(progress=len(articles), total=len(articles))
-    return [ArticleOut(**a.to_json(False)) for a in articles]
-
-
-@mcp.tool(
-    description=news.get_trending_terms.__doc__, tags={"trends", "google", "trending"}
-)
-async def get_trending_terms(
-    geo: Annotated[
-        str, Field(description="Country code, e.g. 'US', 'GB', 'IN', etc.")
-    ] = "US",
-    full_data: Annotated[
-        bool,
-        Field(
-            description="Return full data for each trend. Should be False for most use cases."
-        ),
-    ] = False,
-    max_results: Annotated[
-        int, Field(description="Maximum number of results to return.", ge=1)
-    ] = 100,
-) -> list[TrendingTermOut]:
-
-    if not full_data:
-        trends = await news.get_trending_terms(
-            geo=geo, full_data=False, max_results=max_results
-        )
-        return [
-            TrendingTermOut(keyword=str(tt["keyword"]), volume=tt["volume"])
-            for tt in trends
-        ]
-
-    trends = await news.get_trending_terms(
-        geo=geo, full_data=True, max_results=max_results
-    )
-    return [TrendingTermOut(**tt.__dict__) for tt in trends]
-
-
-def main():
-    mcp.run()
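
Outside MCP, the news helpers touched in the news.py hunks keep the same call shape in both versions; only their formatting changed. A hedged sketch of direct use, with signatures read off those hunks (the concrete argument values here are illustrative):

    import asyncio

    from google_news_trends_mcp.news import get_news_by_keyword, get_trending_terms


    async def main() -> None:
        # nlp=False mirrors how server.py 0.1.8 now calls the helpers,
        # deferring summarization to LLM sampling.
        articles = await get_news_by_keyword("AI", period=7, max_results=5, nlp=False)
        for article in articles:
            print(article.title, article.url)

        # With full_data=False, get_trending_terms returns plain
        # {"keyword": ..., "volume": ...} dicts, per the hunk above.
        for term in await get_trending_terms(geo="US", full_data=False, max_results=5):
            print(term["keyword"], term["volume"])


    asyncio.run(main())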