google-news-trends-mcp 0.1.7__tar.gz → 0.1.9__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (19)
  1. {google_news_trends_mcp-0.1.7/src/google_news_trends_mcp.egg-info → google_news_trends_mcp-0.1.9}/PKG-INFO +6 -6
  2. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.9}/README.md +6 -6
  3. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.9}/pyproject.toml +5 -2
  4. google_news_trends_mcp-0.1.9/src/google_news_trends_mcp/__init__.py +2 -0
  5. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.9}/src/google_news_trends_mcp/cli.py +15 -49
  6. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.9}/src/google_news_trends_mcp/news.py +42 -65
  7. google_news_trends_mcp-0.1.9/src/google_news_trends_mcp/server.py +341 -0
  8. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.9/src/google_news_trends_mcp.egg-info}/PKG-INFO +6 -6
  9. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.9}/tests/test_server.py +16 -10
  10. google_news_trends_mcp-0.1.7/src/google_news_trends_mcp/__init__.py +0 -2
  11. google_news_trends_mcp-0.1.7/src/google_news_trends_mcp/server.py +0 -329
  12. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.9}/LICENSE +0 -0
  13. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.9}/setup.cfg +0 -0
  14. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.9}/src/google_news_trends_mcp/__main__.py +0 -0
  15. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.9}/src/google_news_trends_mcp.egg-info/SOURCES.txt +0 -0
  16. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.9}/src/google_news_trends_mcp.egg-info/dependency_links.txt +0 -0
  17. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.9}/src/google_news_trends_mcp.egg-info/entry_points.txt +0 -0
  18. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.9}/src/google_news_trends_mcp.egg-info/requires.txt +0 -0
  19. {google_news_trends_mcp-0.1.7 → google_news_trends_mcp-0.1.9}/src/google_news_trends_mcp.egg-info/top_level.txt +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: google-news-trends-mcp
- Version: 0.1.7
+ Version: 0.1.9
  Summary: An MCP server to access Google News and Google Trends.
  Author-email: Jesse Manek <jesse.manek@gmail.com>
  License-Expression: MIT
@@ -35,8 +35,8 @@ An MCP server to access Google News and Google Trends. Does not rely on any pai

  - Search Google News articles based on keyword, location, topic
  - Get top news stories from Google News
- - Google Trends keywords base on location
- - Optional NLP to summarize articles and extract keywords
+ - Google Trends keywords based on location
+ - Optional LLM Sampling and NLP to summarize articles and extract keywords

  ## Installation

@@ -70,7 +70,7 @@ Add to your Claude settings:
    "mcpServers": {
      "google-news-trends": {
        "command": "uvx",
-       "args": ["google-news-trends-mcp"]
+       "args": ["google-news-trends-mcp@latest"]
      }
    }
  }
@@ -103,7 +103,7 @@ Add to your Claude settings:
    "servers": {
      "google-news-trends": {
        "command": "uvx",
-       "args": ["google-news-trends-mcp"]
+       "args": ["google-news-trends-mcp@latest"]
      }
    }
  }
@@ -141,7 +141,7 @@ The following MCP tools are available:
  | **get_top_news** | Fetch the top news stories from Google News. |
  | **get_trending_keywords**| Return trending keywords from Google Trends for a specified location.|

- All of the news related tools have an option to summarize the text of the article using NLP (nltk)
+ All of the news related tools have an option to summarize the text of the article using LLM Sampling (if supported) or NLP


  ## CLI
@@ -6,8 +6,8 @@ An MCP server to access Google News and Google Trends. Does not rely on any pai

  - Search Google News articles based on keyword, location, topic
  - Get top news stories from Google News
- - Google Trends keywords base on location
- - Optional NLP to summarize articles and extract keywords
+ - Google Trends keywords based on location
+ - Optional LLM Sampling and NLP to summarize articles and extract keywords

  ## Installation

@@ -41,7 +41,7 @@ Add to your Claude settings:
    "mcpServers": {
      "google-news-trends": {
        "command": "uvx",
-       "args": ["google-news-trends-mcp"]
+       "args": ["google-news-trends-mcp@latest"]
      }
    }
  }
@@ -74,7 +74,7 @@ Add to your Claude settings:
    "servers": {
      "google-news-trends": {
        "command": "uvx",
-       "args": ["google-news-trends-mcp"]
+       "args": ["google-news-trends-mcp@latest"]
      }
    }
  }
@@ -112,7 +112,7 @@ The following MCP tools are available:
  | **get_top_news** | Fetch the top news stories from Google News. |
  | **get_trending_keywords**| Return trending keywords from Google Trends for a specified location.|

- All of the news related tools have an option to summarize the text of the article using NLP (nltk)
+ All of the news related tools have an option to summarize the text of the article using LLM Sampling (if supported) or NLP


  ## CLI
@@ -153,4 +153,4 @@ npx @modelcontextprotocol/inspector uv run google-news-trends-mcp
  ```bash
  cd path/to/google/news/tends/mcp
  python -m pytest
- ```
+ ```
@@ -1,6 +1,6 @@
  [project]
  name = "google-news-trends-mcp"
- version = "0.1.7"
+ version = "0.1.9"
  description = "An MCP server to access Google News and Google Trends."
  readme = "README.md"
  requires-python = ">=3.10.18"
@@ -63,4 +63,7 @@ pythonpath = "src"
  [project.urls]
  Homepage = "https://github.com/jmanek/google-news-trends-mcp"
  Repository = "https://github.com/jmanek/google-news-trends-mcp"
- Issues = "https://github.com/jmanek/google-news-trends-mcp/issues"
+ Issues = "https://github.com/jmanek/google-news-trends-mcp/issues"
+
+ [tool.black]
+ line-length = 120
@@ -0,0 +1,2 @@
+ import logging
+ logging.getLogger(__name__).addHandler(logging.NullHandler())
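
The new `__init__.py` adopts the standard library-logging convention: attaching a `NullHandler` keeps the package's `logging` calls silent unless the host application configures logging itself. A minimal sketch of a consuming script (hypothetical, not part of the package):

```python
import logging

# Application-level opt-in: without this, the package's NullHandler
# swallows its logging.debug()/logging.warning() output.
logging.basicConfig(level=logging.DEBUG)

from google_news_trends_mcp import news  # package log records now reach the root handler
```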
@@ -17,9 +17,7 @@ def cli():

  @cli.command(help=get_news_by_keyword.__doc__)
  @click.argument("keyword")
- @click.option(
-     "--period", type=int, default=7, help="Period in days to search for articles."
- )
+ @click.option("--period", type=int, default=7, help="Period in days to search for articles.")
  @click.option(
      "--max-results",
      "max_results",
@@ -27,24 +25,16 @@ def cli():
      default=10,
      help="Maximum number of results to return.",
  )
- @click.option(
-     "--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles."
- )
+ @click.option("--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles.")
  def keyword(keyword, period, max_results, no_nlp):
-     articles = asyncio.run(
-         get_news_by_keyword(
-             keyword, period=period, max_results=max_results, nlp=not no_nlp
-         )
-     )
+     articles = asyncio.run(get_news_by_keyword(keyword, period=period, max_results=max_results, nlp=not no_nlp))
      # asyncio.run(articles) # Ensure the articles are fetched asynchronously
      print_articles(articles)


  @cli.command(help=get_news_by_location.__doc__)
  @click.argument("location")
- @click.option(
-     "--period", type=int, default=7, help="Period in days to search for articles."
- )
+ @click.option("--period", type=int, default=7, help="Period in days to search for articles.")
  @click.option(
      "--max-results",
      "max_results",
@@ -52,23 +42,15 @@ def keyword(keyword, period, max_results, no_nlp):
      default=10,
      help="Maximum number of results to return.",
  )
- @click.option(
-     "--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles."
- )
+ @click.option("--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles.")
  def location(location, period, max_results, no_nlp):
-     articles = asyncio.run(
-         get_news_by_location(
-             location, period=period, max_results=max_results, nlp=not no_nlp
-         )
-     )
+     articles = asyncio.run(get_news_by_location(location, period=period, max_results=max_results, nlp=not no_nlp))
      print_articles(articles)


  @cli.command(help=get_news_by_topic.__doc__)
  @click.argument("topic")
- @click.option(
-     "--period", type=int, default=7, help="Period in days to search for articles."
- )
+ @click.option("--period", type=int, default=7, help="Period in days to search for articles.")
  @click.option(
      "--max-results",
      "max_results",
@@ -76,23 +58,15 @@ def location(location, period, max_results, no_nlp):
      default=10,
      help="Maximum number of results to return.",
  )
- @click.option(
-     "--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles."
- )
+ @click.option("--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles.")
  def topic(topic, period, max_results, no_nlp):
-     articles = asyncio.run(
-         get_news_by_topic(topic, period=period, max_results=max_results, nlp=not no_nlp)
-     )
+     articles = asyncio.run(get_news_by_topic(topic, period=period, max_results=max_results, nlp=not no_nlp))
      print_articles(articles)


  @cli.command(help=get_trending_terms.__doc__)
- @click.option(
-     "--geo", type=str, default="US", help="Country code, e.g. 'US', 'GB', 'IN', etc."
- )
- @click.option(
-     "--full-data", is_flag=True, default=False, help="Return full data for each trend."
- )
+ @click.option("--geo", type=str, default="US", help="Country code, e.g. 'US', 'GB', 'IN', etc.")
+ @click.option("--full-data", is_flag=True, default=False, help="Return full data for each trend.")
  @click.option(
      "--max-results",
      "max_results",
@@ -101,9 +75,7 @@ def topic(topic, period, max_results, no_nlp):
      help="Maximum number of results to return.",
  )
  def trending(geo, full_data, max_results):
-     trending_terms = asyncio.run(
-         get_trending_terms(geo=geo, full_data=full_data, max_results=max_results)
-     )
+     trending_terms = asyncio.run(get_trending_terms(geo=geo, full_data=full_data, max_results=max_results))
      if trending_terms:
          print("Trending terms:")
          for term in trending_terms:
@@ -116,9 +88,7 @@ def trending(geo, full_data, max_results):


  @cli.command(help=get_top_news.__doc__)
- @click.option(
-     "--period", type=int, default=3, help="Period in days to search for top articles."
- )
+ @click.option("--period", type=int, default=3, help="Period in days to search for top articles.")
  @click.option(
      "--max-results",
      "max_results",
@@ -126,13 +96,9 @@ def trending(geo, full_data, max_results):
      default=10,
      help="Maximum number of results to return.",
  )
- @click.option(
-     "--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles."
- )
+ @click.option("--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles.")
  def top(period, max_results, no_nlp):
-     articles = asyncio.run(
-         get_top_news(max_results=max_results, period=period, nlp=not no_nlp)
-     )
+     articles = asyncio.run(get_top_news(max_results=max_results, period=period, nlp=not no_nlp))
      print_articles(articles)
      print(f"Found {len(articles)} top articles.")

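Every change to cli.py above is cosmetic: Black reformatting under the new 120-character line length from pyproject.toml, with no behavioral difference. A quick way to sanity-check that, as a sketch using Click's built-in test runner (`cli` and the `keyword` command are the group and command defined in this file):

```python
from click.testing import CliRunner

from google_news_trends_mcp.cli import cli

runner = CliRunner()
# Equivalent to: google-news-trends keyword "AI" --period 3 --max-results 2 --no-nlp
result = runner.invoke(cli, ["keyword", "AI", "--period", "3", "--max-results", "2", "--no-nlp"])
print(result.exit_code, result.output)
```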
@@ -8,7 +8,6 @@ It will fallback to using Playwright for websites that are difficult to scrape w

  import re
  import json
- import time
  import asyncio
  from gnews import GNews
  import newspaper  # newspaper4k
@@ -16,7 +15,6 @@ from googlenewsdecoder import gnewsdecoder
  import cloudscraper
  from playwright.async_api import async_playwright, Browser, Playwright
  from trendspy import Trends, TrendKeyword
- import click
  from typing import Optional, cast, overload, Literal, Awaitable
  import atexit
  from contextlib import asynccontextmanager
@@ -97,30 +95,15 @@ async def download_article_with_playwright(url) -> newspaper.Article | None:
          await page.goto(url, wait_until="domcontentloaded")
          await asyncio.sleep(2)  # Wait for the page to load completely
          content = await page.content()
-         article = newspaper.article(url, input_html=content, language="en")
+         article = newspaper.article(url, input_html=content)
          return article
      except Exception as e:
-         logging.warning(
-             f"Error downloading article with Playwright from {url}\n {e.args}"
-         )
+         logging.warning(f"Error downloading article with Playwright from {url}\n {e.args}")
          return None


- async def download_article(url: str, nlp: bool = True) -> newspaper.Article | None:
-     """
-     Download an article from a given URL using newspaper4k and cloudscraper (async).
-     """
+ def download_article_with_scraper(url) -> newspaper.Article | None:
      article = None
-     if url.startswith("https://news.google.com/rss/"):
-         try:
-             decoded_url = gnewsdecoder(url)
-             if decoded_url.get("status"):
-                 url = decoded_url["decoded_url"]
-             else:
-                 logging.debug("Failed to decode Google News RSS link:")
-                 return None
-         except Exception as err:
-             logging.warning(f"Error while decoding url {url}\n {err.args}")
      try:
          article = newspaper.article(url)
      except Exception as e:
@@ -135,22 +118,33 @@ async def download_article(url: str, nlp: bool = True) -> newspaper.Article | No
                  f"Failed to download article with cloudscraper from {url}, status code: {response.status_code}"
              )
      except Exception as e:
-         logging.debug(
-             f"Error downloading article with cloudscraper from {url}\n {e.args}"
-         )
+         logging.debug(f"Error downloading article with cloudscraper from {url}\n {e.args}")
+     return article

-     try:
-         if article is None or not article.text:
-             # If newspaper failed, try downloading with Playwright
-             logging.debug(f"Retrying with Playwright for {url}")
-             article = await download_article_with_playwright(url)
-             article = cast(newspaper.Article, article)
-         article.parse()
-         if nlp:
-             article.nlp()
-     except Exception as e:
-         logging.warning(f"Error parsing article from {url}\n {e.args}")
+
+ def decode_url(url: str) -> str:
+     if url.startswith("https://news.google.com/rss/"):
+         try:
+             decoded_url = gnewsdecoder(url)
+             if decoded_url.get("status"):
+                 url = decoded_url["decoded_url"]
+             else:
+                 logging.debug("Failed to decode Google News RSS link:")
+                 return ""
+         except Exception as err:
+             logging.warning(f"Error while decoding url {url}\n {err.args}")
+     return url
+
+
+ async def download_article(url: str) -> newspaper.Article | None:
+     """
+     Download an article from a given URL using newspaper4k and cloudscraper (async).
+     """
+     if not (url := decode_url(url)):
          return None
+     article = download_article_with_scraper(url)
+     if article is None or not article.text:
+         article = await download_article_with_playwright(url)
      return article


@@ -166,12 +160,13 @@ async def process_gnews_articles(
      articles = []
      total = len(gnews_articles)
      for idx, gnews_article in enumerate(gnews_articles):
-         article = await download_article(gnews_article["url"], nlp=nlp)
+         article = await download_article(gnews_article["url"])
          if article is None or not article.text:
-             logging.debug(
-                 f"Failed to download article from {gnews_article['url']}:\n{article}"
-             )
+             logging.debug(f"Failed to download article from {gnews_article['url']}:\n{article}")
              continue
+         article.parse()
+         if nlp:
+             article.nlp()
          articles.append(article)
          if report_progress:
              await report_progress(idx, total)
@@ -196,13 +191,9 @@ async def get_news_by_keyword(
      google_news.max_results = max_results
      gnews_articles = google_news.get_news(keyword)
      if not gnews_articles:
-         logging.debug(
-             f"No articles found for keyword '{keyword}' in the last {period} days."
-         )
+         logging.debug(f"No articles found for keyword '{keyword}' in the last {period} days.")
          return []
-     return await process_gnews_articles(
-         gnews_articles, nlp=nlp, report_progress=report_progress
-     )
+     return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)


  async def get_top_news(
@@ -223,9 +214,7 @@ async def get_top_news(
      if not gnews_articles:
          logging.debug("No top news articles found.")
          return []
-     return await process_gnews_articles(
-         gnews_articles, nlp=nlp, report_progress=report_progress
-     )
+     return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)


  async def get_news_by_location(
@@ -245,13 +234,9 @@ async def get_news_by_location(
      google_news.max_results = max_results
      gnews_articles = google_news.get_news_by_location(location)
      if not gnews_articles:
-         logging.debug(
-             f"No articles found for location '{location}' in the last {period} days."
-         )
+         logging.debug(f"No articles found for location '{location}' in the last {period} days.")
          return []
-     return await process_gnews_articles(
-         gnews_articles, nlp=nlp, report_progress=report_progress
-     )
+     return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)


  async def get_news_by_topic(
@@ -279,13 +264,9 @@ async def get_news_by_topic(
      google_news.max_results = max_results
      gnews_articles = google_news.get_news_by_topic(topic)
      if not gnews_articles:
-         logging.debug(
-             f"No articles found for topic '{topic}' in the last {period} days."
-         )
+         logging.debug(f"No articles found for topic '{topic}' in the last {period} days.")
          return []
-     return await process_gnews_articles(
-         gnews_articles, nlp=nlp, report_progress=report_progress
-     )
+     return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)


  @overload
@@ -314,13 +295,9 @@ async def get_trending_terms(
      """
      try:
          trends = list(tr.trending_now(geo=geo))
-         trends = list(sorted(trends, key=lambda tt: tt.volume, reverse=True))[
-             :max_results
-         ]
+         trends = list(sorted(trends, key=lambda tt: tt.volume, reverse=True))[:max_results]
          if not full_data:
-             return [
-                 {"keyword": trend.keyword, "volume": trend.volume} for trend in trends
-             ]
+             return [{"keyword": trend.keyword, "volume": trend.volume} for trend in trends]
          return trends
      except Exception as e:
          logging.warning(f"Error fetching trending terms: {e}")
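
The substantive change in news.py is splitting the old monolithic `download_article` into stages: `decode_url` unwraps Google News RSS redirect links, `download_article_with_scraper` tries newspaper4k and then cloudscraper, and Playwright runs only as a last resort when no article text was extracted. Parsing and optional NLP also moved out of the download step into `process_gnews_articles`, so direct callers must now invoke them. A minimal sketch of driving the refactored pipeline by hand (hypothetical standalone script; the URL is a placeholder):

```python
import asyncio

from google_news_trends_mcp.news import download_article

async def fetch(url: str) -> None:
    article = await download_article(url)  # scraper chain first, Playwright fallback
    if article is None or not article.text:
        print("download failed")
        return
    article.parse()  # parsing is now the caller's responsibility
    article.nlp()    # optional nltk-based keywords/summary
    print(article.title, article.keywords)

asyncio.run(fetch("https://news.google.com/rss/articles/<placeholder>"))
```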
@@ -0,0 +1,341 @@
+ from typing import Annotated, Optional, Any, TYPE_CHECKING
+ from fastmcp import FastMCP, Context
+ from fastmcp.server.middleware.timing import TimingMiddleware
+ from fastmcp.server.middleware.logging import LoggingMiddleware
+ from fastmcp.server.middleware.rate_limiting import RateLimitingMiddleware
+ from fastmcp.server.middleware.error_handling import ErrorHandlingMiddleware
+ from mcp.types import TextContent
+ from pydantic import BaseModel, Field, model_serializer
+ from google_news_trends_mcp import news
+ from newspaper import settings as newspaper_settings
+ from newspaper.article import Article
+
+
+ class BaseModelClean(BaseModel):
+     @model_serializer
+     def serializer(self, **kwargs) -> dict[str, Any]:
+         return {
+             field: self.__getattribute__(field)
+             for field in self.model_fields_set
+             if self.__getattribute__(field) is not None
+         }
+
+     if TYPE_CHECKING:
+
+         def model_dump(self, **kwargs) -> dict[str, Any]: ...
+
+
+ class ArticleOut(BaseModelClean):
29
+ title: Annotated[str, Field(description="Title of the article.")]
30
+ url: Annotated[str, Field(description="Original article URL.")]
31
+ read_more_link: Annotated[Optional[str], Field(description="Link to read more about the article.")] = None
32
+ language: Annotated[Optional[str], Field(description="Language code of the article.")] = None
33
+ meta_img: Annotated[Optional[str], Field(description="Meta image URL.")] = None
34
+ movies: Annotated[Optional[list[str]], Field(description="List of movie URLs or IDs.")] = None
35
+ meta_favicon: Annotated[Optional[str], Field(description="Favicon URL from meta data.")] = None
36
+ meta_site_name: Annotated[Optional[str], Field(description="Site name from meta data.")] = None
37
+ authors: Annotated[Optional[list[str]], Field(description="list of authors.")] = None
38
+ publish_date: Annotated[Optional[str], Field(description="Publish date in ISO format.")] = None
39
+ top_image: Annotated[Optional[str], Field(description="URL of the top image.")] = None
40
+ images: Annotated[Optional[list[str]], Field(description="list of image URLs.")] = None
41
+ text: Annotated[Optional[str], Field(description="Full text of the article.")] = None
42
+ summary: Annotated[Optional[str], Field(description="Summary of the article.")] = None
43
+ keywords: Annotated[Optional[list[str]], Field(description="Extracted keywords.")] = None
44
+ tags: Annotated[Optional[list[str]], Field(description="Tags for the article.")] = None
45
+ meta_keywords: Annotated[Optional[list[str]], Field(description="Meta keywords from the article.")] = None
46
+ meta_description: Annotated[Optional[str], Field(description="Meta description from the article.")] = None
47
+ canonical_link: Annotated[Optional[str], Field(description="Canonical link for the article.")] = None
48
+ meta_data: Annotated[Optional[dict[str, str | int]], Field(description="Meta data dictionary.")] = None
49
+ meta_lang: Annotated[Optional[str], Field(description="Language of the article.")] = None
50
+ source_url: Annotated[Optional[str], Field(description="Source URL if different from original.")] = None
51
+
52
+
53
+ class TrendingTermArticleOut(BaseModelClean):
54
+ title: Annotated[str, Field(description="Article title.")] = ""
55
+ url: Annotated[str, Field(description="Article URL.")] = ""
56
+ source: Annotated[Optional[str], Field(description="News source name.")] = None
57
+ picture: Annotated[Optional[str], Field(description="URL to article image.")] = None
58
+ time: Annotated[Optional[str | int], Field(description="Publication time or timestamp.")] = None
59
+ snippet: Annotated[Optional[str], Field(description="Article preview text.")] = None
60
+
61
+
62
+ class TrendingTermOut(BaseModelClean):
63
+ keyword: Annotated[str, Field(description="Trending keyword.")]
64
+ volume: Annotated[Optional[int], Field(description="Search volume.")] = None
65
+ geo: Annotated[Optional[str], Field(description="Geographic location code.")] = None
66
+ started_timestamp: Annotated[
67
+ Optional[list],
68
+ Field(description="When the trend started (year, month, day, hour, minute, second)."),
69
+ ] = None
70
+ ended_timestamp: Annotated[
71
+ Optional[list],
72
+ Field(description="When the trend ended (year, month, day, hour, minute, second)."),
73
+ ] = None
74
+ volume_growth_pct: Annotated[Optional[float], Field(description="Percentage growth in search volume.")] = None
75
+ trend_keywords: Annotated[Optional[list[str]], Field(description="Related keywords.")] = None
76
+ topics: Annotated[Optional[list[str | int]], Field(description="Related topics.")] = None
77
+ news: Annotated[
78
+ Optional[list[TrendingTermArticleOut]],
79
+ Field(description="Related news articles."),
80
+ ] = None
81
+ news_tokens: Annotated[Optional[list], Field(description="Associated news tokens.")] = None
82
+ normalized_keyword: Annotated[Optional[str], Field(description="Normalized form of the keyword.")] = None
83
+
84
+
85
+ mcp = FastMCP(
86
+ name="google-news-trends",
87
+ instructions="This server provides tools to search, analyze, and summarize Google News articles and Google Trends",
88
+ on_duplicate_tools="replace",
89
+ )
90
+
91
+ mcp.add_middleware(ErrorHandlingMiddleware()) # Handle errors first
92
+ mcp.add_middleware(RateLimitingMiddleware(max_requests_per_second=50))
93
+ mcp.add_middleware(TimingMiddleware()) # Time actual execution
94
+ mcp.add_middleware(LoggingMiddleware()) # Log everything
95
+
96
+
97
+ def set_newspaper_article_fields(full_data: bool = False):
98
+ if full_data:
99
+ newspaper_settings.article_json_fields = [
100
+ "url",
101
+ "read_more_link",
102
+ "language",
103
+ "title",
104
+ "top_image",
105
+ "meta_img",
106
+ "images",
107
+ "movies",
108
+ "keywords",
109
+ "keyword_scores",
110
+ "meta_keywords",
111
+ "tags",
112
+ "authors",
113
+ "publish_date",
114
+ "summary",
115
+ "meta_description",
116
+ "meta_lang",
117
+ "meta_favicon",
118
+ "meta_site_name",
119
+ "canonical_link",
120
+ "text",
121
+ ]
122
+ else:
123
+ newspaper_settings.article_json_fields = [
124
+ "url",
125
+ "title",
126
+ "publish_date",
127
+ "summary",
128
+ ]
129
+
130
+
131
+ async def summarize_article(article: Article, ctx: Context) -> None:
132
+ if article.text:
133
+ prompt = f"Please provide a concise summary of the following news article:\n\n{article.text}"
134
+ response = await ctx.sample(prompt)
135
+ if isinstance(response, TextContent):
136
+ if not response.text:
137
+ await ctx.warning("LLM Sampling response is empty. Unable to summarize article.")
138
+ article.summary = "No summary available."
139
+ else:
140
+ article.summary = response.text
141
+ else:
142
+ await ctx.warning("LLM Sampling response is not a TextContent object. Unable to summarize article.")
143
+ article.summary = "No summary available."
144
+ else:
145
+ article.summary = "No summary available."
146
+
147
+
148
+ @mcp.tool(
149
+ description=news.get_news_by_keyword.__doc__,
150
+ tags={"news", "articles", "keyword"},
151
+ )
152
+ async def get_news_by_keyword(
153
+ ctx: Context,
154
+ keyword: Annotated[str, Field(description="Search term to find articles.")],
155
+ period: Annotated[int, Field(description="Number of days to look back for articles.", ge=1)] = 7,
156
+ max_results: Annotated[int, Field(description="Maximum number of results to return.", ge=1)] = 10,
157
+ full_data: Annotated[
158
+ bool,
159
+ Field(
160
+ description="Return full data for each article. If False a summary should be created by setting the summarize flag"
161
+ ),
162
+ ] = False,
163
+ summarize: Annotated[
164
+ bool,
165
+ Field(
166
+ description="Generate a summary of the article, will first try LLM Sampling but if unavailable will use nlp"
167
+ ),
168
+ ] = True,
169
+ ) -> list[ArticleOut]:
170
+ set_newspaper_article_fields(full_data)
171
+ articles = await news.get_news_by_keyword(
172
+ keyword=keyword,
173
+ period=period,
174
+ max_results=max_results,
175
+ nlp=False,
176
+ report_progress=ctx.report_progress,
177
+ )
178
+ if summarize:
179
+ total_articles = len(articles)
180
+ try:
181
+ for idx, article in enumerate(articles):
182
+ await summarize_article(article, ctx)
183
+ await ctx.report_progress(idx, total_articles)
184
+ except Exception as err:
185
+ await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
186
+ for idx, article in enumerate(articles):
187
+ article.nlp()
188
+ await ctx.report_progress(idx, total_articles)
189
+ await ctx.report_progress(progress=len(articles), total=len(articles))
190
+ return [ArticleOut(**a.to_json(False)) for a in articles]
191
+
192
+
193
+ @mcp.tool(
194
+ description=news.get_news_by_location.__doc__,
195
+ tags={"news", "articles", "location"},
196
+ )
197
+ async def get_news_by_location(
198
+ ctx: Context,
199
+ location: Annotated[str, Field(description="Name of city/state/country.")],
200
+ period: Annotated[int, Field(description="Number of days to look back for articles.", ge=1)] = 7,
201
+ max_results: Annotated[int, Field(description="Maximum number of results to return.", ge=1)] = 10,
202
+ full_data: Annotated[
203
+ bool,
204
+ Field(
205
+ description="Return full data for each article. If False a summary should be created by setting the summarize flag"
206
+ ),
207
+ ] = False,
208
+ summarize: Annotated[
209
+ bool,
210
+ Field(
211
+ description="Generate a summary of the article, will first try LLM Sampling but if unavailable will use nlp"
212
+ ),
213
+ ] = True,
214
+ ) -> list[ArticleOut]:
215
+ set_newspaper_article_fields(full_data)
216
+ articles = await news.get_news_by_location(
217
+ location=location,
218
+ period=period,
219
+ max_results=max_results,
220
+ nlp=False,
221
+ report_progress=ctx.report_progress,
222
+ )
223
+ if summarize:
224
+ total_articles = len(articles)
225
+ try:
226
+ for idx, article in enumerate(articles):
227
+ await summarize_article(article, ctx)
228
+ await ctx.report_progress(idx, total_articles)
229
+ except Exception as err:
230
+ await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
231
+ for idx, article in enumerate(articles):
232
+ article.nlp()
233
+ await ctx.report_progress(idx, total_articles)
234
+ await ctx.report_progress(progress=len(articles), total=len(articles))
235
+ return [ArticleOut(**a.to_json(False)) for a in articles]
236
+
237
+
238
+ @mcp.tool(description=news.get_news_by_topic.__doc__, tags={"news", "articles", "topic"})
239
+ async def get_news_by_topic(
240
+ ctx: Context,
241
+ topic: Annotated[str, Field(description="Topic to search for articles.")],
242
+ period: Annotated[int, Field(description="Number of days to look back for articles.", ge=1)] = 7,
243
+ max_results: Annotated[int, Field(description="Maximum number of results to return.", ge=1)] = 10,
244
+ full_data: Annotated[
245
+ bool,
246
+ Field(
247
+ description="Return full data for each article. If False a summary should be created by setting the summarize flag"
248
+ ),
249
+ ] = False,
250
+ summarize: Annotated[
251
+ bool,
252
+ Field(
253
+ description="Generate a summary of the article, will first try LLM Sampling but if unavailable will use nlp"
254
+ ),
255
+ ] = True,
256
+ ) -> list[ArticleOut]:
257
+ set_newspaper_article_fields(full_data)
258
+ articles = await news.get_news_by_topic(
259
+ topic=topic,
260
+ period=period,
261
+ max_results=max_results,
262
+ nlp=False,
263
+ report_progress=ctx.report_progress,
264
+ )
265
+ if summarize:
266
+ total_articles = len(articles)
267
+ try:
268
+ for idx, article in enumerate(articles):
269
+ await summarize_article(article, ctx)
270
+ await ctx.report_progress(idx, total_articles)
271
+ except Exception as err:
272
+ await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
273
+ for idx, article in enumerate(articles):
274
+ article.nlp()
275
+ await ctx.report_progress(idx, total_articles)
276
+
277
+ await ctx.report_progress(progress=len(articles), total=len(articles))
278
+ return [ArticleOut(**a.to_json(False)) for a in articles]
279
+
280
+
281
+ @mcp.tool(description=news.get_top_news.__doc__, tags={"news", "articles", "top"})
282
+ async def get_top_news(
283
+ ctx: Context,
284
+ period: Annotated[int, Field(description="Number of days to look back for top articles.", ge=1)] = 3,
285
+ max_results: Annotated[int, Field(description="Maximum number of results to return.", ge=1)] = 10,
286
+ full_data: Annotated[
287
+ bool,
288
+ Field(
289
+ description="Return full data for each article. If False a summary should be created by setting the summarize flag"
290
+ ),
291
+ ] = False,
292
+ summarize: Annotated[
293
+ bool,
294
+ Field(
295
+ description="Generate a summary of the article, will first try LLM Sampling but if unavailable will use nlp"
296
+ ),
297
+ ] = True,
298
+ ) -> list[ArticleOut]:
299
+ set_newspaper_article_fields(full_data)
300
+ articles = await news.get_top_news(
301
+ period=period,
302
+ max_results=max_results,
303
+ nlp=False,
304
+ report_progress=ctx.report_progress,
305
+ )
306
+ if summarize:
307
+ total_articles = len(articles)
308
+ try:
309
+ for idx, article in enumerate(articles):
310
+ await summarize_article(article, ctx)
311
+ await ctx.report_progress(idx, total_articles)
312
+ except Exception as err:
313
+ await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
314
+ for idx, article in enumerate(articles):
315
+ article.nlp()
316
+ await ctx.report_progress(idx, total_articles)
317
+
318
+ await ctx.report_progress(progress=len(articles), total=len(articles))
319
+ return [ArticleOut(**a.to_json(False)) for a in articles]
320
+
321
+
322
+ @mcp.tool(description=news.get_trending_terms.__doc__, tags={"trends", "google", "trending"})
323
+ async def get_trending_terms(
324
+ geo: Annotated[str, Field(description="Country code, e.g. 'US', 'GB', 'IN', etc.")] = "US",
325
+ full_data: Annotated[
326
+ bool,
327
+ Field(description="Return full data for each trend. Should be False for most use cases."),
328
+ ] = False,
329
+ max_results: Annotated[int, Field(description="Maximum number of results to return.", ge=1)] = 100,
330
+ ) -> list[TrendingTermOut]:
331
+
332
+ if not full_data:
333
+ trends = await news.get_trending_terms(geo=geo, full_data=False, max_results=max_results)
334
+ return [TrendingTermOut(keyword=str(tt["keyword"]), volume=tt["volume"]) for tt in trends]
335
+
336
+ trends = await news.get_trending_terms(geo=geo, full_data=True, max_results=max_results)
337
+ return [TrendingTermOut(**tt.__dict__) for tt in trends]
338
+
339
+
340
+ def main():
341
+ mcp.run()
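
Each news tool above repeats the same summarize-then-fallback loop: try LLM sampling through `ctx.sample()` per article, and if the client does not support sampling, fall back to newspaper4k's nltk-based `article.nlp()`. A sketch of how that duplicated block could be factored into one helper (hypothetical refactor, not part of the package; `summarize_article` is the function defined in this file):

```python
from fastmcp import Context
from newspaper.article import Article

async def summarize_articles(articles: list[Article], ctx: Context) -> None:
    total = len(articles)
    try:
        for idx, article in enumerate(articles):
            await summarize_article(article, ctx)  # LLM sampling via ctx.sample()
            await ctx.report_progress(idx, total)
    except Exception as err:
        await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
        for idx, article in enumerate(articles):
            article.nlp()  # NLP fallback when sampling is unavailable
            await ctx.report_progress(idx, total)
```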
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: google-news-trends-mcp
- Version: 0.1.7
+ Version: 0.1.9
  Summary: An MCP server to access Google News and Google Trends.
  Author-email: Jesse Manek <jesse.manek@gmail.com>
  License-Expression: MIT
@@ -35,8 +35,8 @@ An MCP server to access Google News and Google Trends. Does not rely on any pai

  - Search Google News articles based on keyword, location, topic
  - Get top news stories from Google News
- - Google Trends keywords base on location
- - Optional NLP to summarize articles and extract keywords
+ - Google Trends keywords based on location
+ - Optional LLM Sampling and NLP to summarize articles and extract keywords

  ## Installation

@@ -70,7 +70,7 @@ Add to your Claude settings:
    "mcpServers": {
      "google-news-trends": {
        "command": "uvx",
-       "args": ["google-news-trends-mcp"]
+       "args": ["google-news-trends-mcp@latest"]
      }
    }
  }
@@ -103,7 +103,7 @@ Add to your Claude settings:
    "servers": {
      "google-news-trends": {
        "command": "uvx",
-       "args": ["google-news-trends-mcp"]
+       "args": ["google-news-trends-mcp@latest"]
      }
    }
  }
@@ -141,7 +141,7 @@ The following MCP tools are available:
  | **get_top_news** | Fetch the top news stories from Google News. |
  | **get_trending_keywords**| Return trending keywords from Google Trends for a specified location.|

- All of the news related tools have an option to summarize the text of the article using NLP (nltk)
+ All of the news related tools have an option to summarize the text of the article using LLM Sampling (if supported) or NLP


  ## CLI
@@ -11,50 +11,56 @@ def mcp_server():

  async def test_get_news_by_keyword(mcp_server):
      async with Client(mcp_server) as client:
-         params = {"keyword": "AI", "period": 3, "max_results": 2, "nlp": True}
+         params = {"keyword": "AI", "period": 3, "max_results": 2}
          result = await client.call_tool("get_news_by_keyword", params)
          assert isinstance(result, list)
          assert len(result) <= 2
          for article in result:
-             article = json.loads(article.text)[
-                 0
-             ]  # Assuming articles are returned as JSON strings
+             article = json.loads(article.text)
+             if isinstance(article, list):
+                 article = article[0]  # Assuming articles are returned as JSON strings
              assert "title" in article
              assert "url" in article


  async def test_get_news_by_location(mcp_server):
      async with Client(mcp_server) as client:
-         params = {"location": "California", "period": 3, "max_results": 2, "nlp": False}
+         params = {"location": "California", "period": 3, "max_results": 2}
          result = await client.call_tool("get_news_by_location", params)
          assert isinstance(result, list)
          assert len(result) <= 2
          for article in result:
-             article = json.loads(article.text)[0]
+             article = json.loads(article.text)
+             if isinstance(article, list):
+                 article = article[0]
              assert "title" in article
              assert "url" in article


  async def test_get_news_by_topic(mcp_server):
      async with Client(mcp_server) as client:
-         params = {"topic": "TECHNOLOGY", "period": 3, "max_results": 2, "nlp": True}
+         params = {"topic": "TECHNOLOGY", "period": 3, "max_results": 2}
          result = await client.call_tool("get_news_by_topic", params)
          assert isinstance(result, list)
          assert len(result) <= 2
          for article in result:
-             article = json.loads(article.text)[0]
+             article = json.loads(article.text)
+             if isinstance(article, list):
+                 article = article[0]
              assert "title" in article
              assert "url" in article


  async def test_get_top_news(mcp_server):
      async with Client(mcp_server) as client:
-         params = {"period": 2, "max_results": 2, "nlp": False}
+         params = {"period": 2, "max_results": 2}
          result = await client.call_tool("get_top_news", params)
          assert isinstance(result, list)
          assert len(result) <= 2
          for article in result:
-             article = json.loads(article.text)[0]
+             article = json.loads(article.text)
+             if isinstance(article, list):
+                 article = article[0]
              assert "title" in article
              assert "url" in article

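The test updates drop the removed `nlp` parameter and tolerate the tools returning either one JSON object or a list per content block. That normalization could be pulled into a small helper (a hypothetical sketch, not in the test file):

```python
import json

def first_article(payload_text: str) -> dict:
    """Return the first article whether the payload is one object or a list."""
    data = json.loads(payload_text)
    return data[0] if isinstance(data, list) else data
```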
@@ -1,2 +0,0 @@
- import logging
- logging.getLogger(__name__).addHandler(logging.NullHandler())
@@ -1,329 +0,0 @@
- from fastmcp import FastMCP, Context
- from fastmcp.exceptions import ToolError
- from fastmcp.server.dependencies import get_context
- from pydantic import BaseModel, Field
- from typing import Optional
- from google_news_trends_mcp import news
- from typing import Annotated
- from newspaper import settings as newspaper_settings
- from fastmcp.server.middleware.timing import TimingMiddleware
- from fastmcp.server.middleware.logging import LoggingMiddleware
- from fastmcp.server.middleware.rate_limiting import RateLimitingMiddleware
- from fastmcp.server.middleware.error_handling import ErrorHandlingMiddleware
-
-
- class ArticleOut(BaseModel):
-     read_more_link: Annotated[
-         Optional[str], Field(description="Link to read more about the article.")
-     ] = None
-     language: Annotated[
-         Optional[str], Field(description="Language code of the article.")
-     ] = None
-     meta_img: Annotated[Optional[str], Field(description="Meta image URL.")] = None
-     movies: Annotated[
-         Optional[list[str]], Field(description="List of movie URLs or IDs.")
-     ] = None
-     meta_favicon: Annotated[
-         Optional[str], Field(description="Favicon URL from meta data.")
-     ] = None
-     meta_site_name: Annotated[
-         Optional[str], Field(description="Site name from meta data.")
-     ] = None
-     title: Annotated[str, Field(description="Title of the article.")]
-     authors: Annotated[Optional[list[str]], Field(description="list of authors.")] = (
-         None
-     )
-     publish_date: Annotated[
-         Optional[str], Field(description="Publish date in ISO format.")
-     ] = None
-     top_image: Annotated[Optional[str], Field(description="URL of the top image.")] = (
-         None
-     )
-     images: Annotated[Optional[list[str]], Field(description="list of image URLs.")] = (
-         None
-     )
-     text: Annotated[str, Field(description="Full text of the article.")]
-     url: Annotated[str, Field(description="Original article URL.")]
-     summary: Annotated[Optional[str], Field(description="Summary of the article.")] = (
-         None
-     )
-     keywords: Annotated[
-         Optional[list[str]], Field(description="Extracted keywords.")
-     ] = None
-     tags: Annotated[Optional[list[str]], Field(description="Tags for the article.")] = (
-         None
-     )
-     meta_keywords: Annotated[
-         Optional[list[str]], Field(description="Meta keywords from the article.")
-     ] = None
-     meta_description: Annotated[
-         Optional[str], Field(description="Meta description from the article.")
-     ] = None
-     canonical_link: Annotated[
-         Optional[str], Field(description="Canonical link for the article.")
-     ] = None
-     meta_data: Annotated[
-         Optional[dict[str, str | int]], Field(description="Meta data dictionary.")
-     ] = None
-     meta_lang: Annotated[
-         Optional[str], Field(description="Language of the article.")
-     ] = None
-     source_url: Annotated[
-         Optional[str], Field(description="Source URL if different from original.")
-     ] = None
-
-
- class TrendingTermArticleOut(BaseModel):
-     title: Annotated[str, Field(description="Article title.")] = ""
-     url: Annotated[str, Field(description="Article URL.")] = ""
-     source: Annotated[Optional[str], Field(description="News source name.")] = None
-     picture: Annotated[Optional[str], Field(description="URL to article image.")] = None
-     time: Annotated[
-         Optional[str | int], Field(description="Publication time or timestamp.")
-     ] = None
-     snippet: Annotated[Optional[str], Field(description="Article preview text.")] = None
-
-
- class TrendingTermOut(BaseModel):
-     keyword: Annotated[str, Field(description="Trending keyword.")]
-     volume: Annotated[Optional[int], Field(description="Search volume.")] = None
-     geo: Annotated[Optional[str], Field(description="Geographic location code.")] = None
-     started_timestamp: Annotated[
-         Optional[list],
-         Field(
-             description="When the trend started (year, month, day, hour, minute, second)."
-         ),
-     ] = None
-     ended_timestamp: Annotated[
-         Optional[tuple[int, int]],
-         Field(
-             description="When the trend ended (year, month, day, hour, minute, second)."
-         ),
-     ] = None
-     volume_growth_pct: Annotated[
-         Optional[float], Field(description="Percentage growth in search volume.")
-     ] = None
-     trend_keywords: Annotated[
-         Optional[list[str]], Field(description="Related keywords.")
-     ] = None
-     topics: Annotated[
-         Optional[list[str | int]], Field(description="Related topics.")
-     ] = None
-     news: Annotated[
-         Optional[list[TrendingTermArticleOut]],
-         Field(description="Related news articles."),
-     ] = None
-     news_tokens: Annotated[
-         Optional[list], Field(description="Associated news tokens.")
-     ] = None
-     normalized_keyword: Annotated[
-         Optional[str], Field(description="Normalized form of the keyword.")
-     ] = None
-
-
- mcp = FastMCP(
-     name="google-news-trends",
-     instructions="This server provides tools to search, analyze, and summarize Google News articles and Google Trends",
-     on_duplicate_tools="replace",
- )
-
- mcp.add_middleware(ErrorHandlingMiddleware())  # Handle errors first
- mcp.add_middleware(RateLimitingMiddleware(max_requests_per_second=50))
- mcp.add_middleware(TimingMiddleware())  # Time actual execution
- mcp.add_middleware(LoggingMiddleware())  # Log everything
-
-
- # Configure newspaper settings for article extraction
- def set_newspaper_article_fields(full_data: bool = False):
-     if full_data:
-         newspaper_settings.article_json_fields = [
-             "url",
-             "read_more_link",
-             "language",
-             "title",
-             "top_image",
-             "meta_img",
-             "images",
-             "movies",
-             "keywords",
-             "keyword_scores",
-             "meta_keywords",
-             "tags",
-             "authors",
-             "publish_date",
-             "summary",
-             "meta_description",
-             "meta_lang",
-             "meta_favicon",
-             "meta_site_name",
-             "canonical_link",
-             "text",
-         ]
-     else:
-         newspaper_settings.article_json_fields = [
-             "url",
-             "title",
-             "text",
-             "publish_date",
-             "summary",
-             "keywords",
-         ]
-
-
- @mcp.tool(
-     description=news.get_news_by_keyword.__doc__,
-     tags={"news", "articles", "keyword"},
- )
- async def get_news_by_keyword(
-     ctx: Context,
-     keyword: Annotated[str, Field(description="Search term to find articles.")],
-     period: Annotated[
-         int, Field(description="Number of days to look back for articles.", ge=1)
-     ] = 7,
-     max_results: Annotated[
-         int, Field(description="Maximum number of results to return.", ge=1)
-     ] = 10,
-     nlp: Annotated[
-         bool, Field(description="Whether to perform NLP on the articles.")
-     ] = False,
-     full_data: Annotated[
-         bool, Field(description="Return full data for each article.")
-     ] = False,
- ) -> list[ArticleOut]:
-     set_newspaper_article_fields(full_data)
-     articles = await news.get_news_by_keyword(
-         keyword=keyword,
-         period=period,
-         max_results=max_results,
-         nlp=nlp,
-         report_progress=ctx.report_progress,
-     )
-     await ctx.report_progress(progress=len(articles), total=len(articles))
-     return [ArticleOut(**a.to_json(False)) for a in articles]
-
-
- @mcp.tool(
-     description=news.get_news_by_location.__doc__,
-     tags={"news", "articles", "location"},
- )
- async def get_news_by_location(
-     ctx: Context,
-     location: Annotated[str, Field(description="Name of city/state/country.")],
-     period: Annotated[
-         int, Field(description="Number of days to look back for articles.", ge=1)
-     ] = 7,
-     max_results: Annotated[
-         int, Field(description="Maximum number of results to return.", ge=1)
-     ] = 10,
-     nlp: Annotated[
-         bool, Field(description="Whether to perform NLP on the articles.")
-     ] = False,
-     full_data: Annotated[
-         bool, Field(description="Return full data for each article.")
-     ] = False,
- ) -> list[ArticleOut]:
-     set_newspaper_article_fields(full_data)
-     articles = await news.get_news_by_location(
-         location=location,
-         period=period,
-         max_results=max_results,
-         nlp=nlp,
-         report_progress=ctx.report_progress,
-     )
-     await ctx.report_progress(progress=len(articles), total=len(articles))
-     return [ArticleOut(**a.to_json(False)) for a in articles]
-
-
- @mcp.tool(
-     description=news.get_news_by_topic.__doc__, tags={"news", "articles", "topic"}
- )
- async def get_news_by_topic(
-     ctx: Context,
-     topic: Annotated[str, Field(description="Topic to search for articles.")],
-     period: Annotated[
-         int, Field(description="Number of days to look back for articles.", ge=1)
-     ] = 7,
-     max_results: Annotated[
-         int, Field(description="Maximum number of results to return.", ge=1)
-     ] = 10,
-     nlp: Annotated[
-         bool, Field(description="Whether to perform NLP on the articles.")
-     ] = False,
-     full_data: Annotated[
-         bool, Field(description="Return full data for each article.")
-     ] = False,
- ) -> list[ArticleOut]:
-     set_newspaper_article_fields(full_data)
-     articles = await news.get_news_by_topic(
-         topic=topic,
-         period=period,
-         max_results=max_results,
-         nlp=nlp,
-         report_progress=ctx.report_progress,
-     )
-     await ctx.report_progress(progress=len(articles), total=len(articles))
-     return [ArticleOut(**a.to_json(False)) for a in articles]
-
-
- @mcp.tool(description=news.get_top_news.__doc__, tags={"news", "articles", "top"})
- async def get_top_news(
-     ctx: Context,
-     period: Annotated[
-         int, Field(description="Number of days to look back for top articles.", ge=1)
-     ] = 3,
-     max_results: Annotated[
-         int, Field(description="Maximum number of results to return.", ge=1)
-     ] = 10,
-     nlp: Annotated[
-         bool, Field(description="Whether to perform NLP on the articles.")
-     ] = False,
-     full_data: Annotated[
-         bool, Field(description="Return full data for each article.")
-     ] = False,
- ) -> list[ArticleOut]:
-     set_newspaper_article_fields(full_data)
-     articles = await news.get_top_news(
-         period=period,
-         max_results=max_results,
-         nlp=nlp,
-         report_progress=ctx.report_progress,
-     )
-     await ctx.report_progress(progress=len(articles), total=len(articles))
-     return [ArticleOut(**a.to_json(False)) for a in articles]
-
-
- @mcp.tool(
-     description=news.get_trending_terms.__doc__, tags={"trends", "google", "trending"}
- )
- async def get_trending_terms(
-     geo: Annotated[
-         str, Field(description="Country code, e.g. 'US', 'GB', 'IN', etc.")
-     ] = "US",
-     full_data: Annotated[
-         bool,
-         Field(
-             description="Return full data for each trend. Should be False for most use cases."
-         ),
-     ] = False,
-     max_results: Annotated[
-         int, Field(description="Maximum number of results to return.", ge=1)
-     ] = 100,
- ) -> list[TrendingTermOut]:
-
-     if not full_data:
-         trends = await news.get_trending_terms(
-             geo=geo, full_data=False, max_results=max_results
-         )
-         return [
-             TrendingTermOut(keyword=str(tt["keyword"]), volume=tt["volume"])
-             for tt in trends
-         ]
-
-     trends = await news.get_trending_terms(
-         geo=geo, full_data=True, max_results=max_results
-     )
-     return [TrendingTermOut(**tt.__dict__) for tt in trends]
-
-
- def main():
-     mcp.run()