google-news-trends-mcp 0.1.7__py3-none-any.whl → 0.1.8__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, exactly as they appear in their public registry. It is provided for informational purposes only.
google_news_trends_mcp/cli.py
@@ -17,9 +17,7 @@ def cli():
 
 @cli.command(help=get_news_by_keyword.__doc__)
 @click.argument("keyword")
-@click.option(
-    "--period", type=int, default=7, help="Period in days to search for articles."
-)
+@click.option("--period", type=int, default=7, help="Period in days to search for articles.")
 @click.option(
     "--max-results",
     "max_results",
@@ -27,24 +25,16 @@ def cli():
     default=10,
     help="Maximum number of results to return.",
 )
-@click.option(
-    "--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles."
-)
+@click.option("--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles.")
 def keyword(keyword, period, max_results, no_nlp):
-    articles = asyncio.run(
-        get_news_by_keyword(
-            keyword, period=period, max_results=max_results, nlp=not no_nlp
-        )
-    )
+    articles = asyncio.run(get_news_by_keyword(keyword, period=period, max_results=max_results, nlp=not no_nlp))
     # asyncio.run(articles) # Ensure the articles are fetched asynchronously
     print_articles(articles)
 
 
 @cli.command(help=get_news_by_location.__doc__)
 @click.argument("location")
-@click.option(
-    "--period", type=int, default=7, help="Period in days to search for articles."
-)
+@click.option("--period", type=int, default=7, help="Period in days to search for articles.")
 @click.option(
     "--max-results",
     "max_results",
@@ -52,23 +42,15 @@ def keyword(keyword, period, max_results, no_nlp):
     default=10,
     help="Maximum number of results to return.",
 )
-@click.option(
-    "--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles."
-)
+@click.option("--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles.")
 def location(location, period, max_results, no_nlp):
-    articles = asyncio.run(
-        get_news_by_location(
-            location, period=period, max_results=max_results, nlp=not no_nlp
-        )
-    )
+    articles = asyncio.run(get_news_by_location(location, period=period, max_results=max_results, nlp=not no_nlp))
     print_articles(articles)
 
 
 @cli.command(help=get_news_by_topic.__doc__)
 @click.argument("topic")
-@click.option(
-    "--period", type=int, default=7, help="Period in days to search for articles."
-)
+@click.option("--period", type=int, default=7, help="Period in days to search for articles.")
 @click.option(
     "--max-results",
     "max_results",
@@ -76,23 +58,15 @@ def location(location, period, max_results, no_nlp):
     default=10,
     help="Maximum number of results to return.",
 )
-@click.option(
-    "--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles."
-)
+@click.option("--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles.")
 def topic(topic, period, max_results, no_nlp):
-    articles = asyncio.run(
-        get_news_by_topic(topic, period=period, max_results=max_results, nlp=not no_nlp)
-    )
+    articles = asyncio.run(get_news_by_topic(topic, period=period, max_results=max_results, nlp=not no_nlp))
     print_articles(articles)
 
 
 @cli.command(help=get_trending_terms.__doc__)
-@click.option(
-    "--geo", type=str, default="US", help="Country code, e.g. 'US', 'GB', 'IN', etc."
-)
-@click.option(
-    "--full-data", is_flag=True, default=False, help="Return full data for each trend."
-)
+@click.option("--geo", type=str, default="US", help="Country code, e.g. 'US', 'GB', 'IN', etc.")
+@click.option("--full-data", is_flag=True, default=False, help="Return full data for each trend.")
 @click.option(
     "--max-results",
     "max_results",
@@ -101,9 +75,7 @@ def topic(topic, period, max_results, no_nlp):
     help="Maximum number of results to return.",
 )
 def trending(geo, full_data, max_results):
-    trending_terms = asyncio.run(
-        get_trending_terms(geo=geo, full_data=full_data, max_results=max_results)
-    )
+    trending_terms = asyncio.run(get_trending_terms(geo=geo, full_data=full_data, max_results=max_results))
     if trending_terms:
         print("Trending terms:")
         for term in trending_terms:
@@ -116,9 +88,7 @@ def trending(geo, full_data, max_results):
 
 
 @cli.command(help=get_top_news.__doc__)
-@click.option(
-    "--period", type=int, default=3, help="Period in days to search for top articles."
-)
+@click.option("--period", type=int, default=3, help="Period in days to search for top articles.")
 @click.option(
     "--max-results",
     "max_results",
@@ -126,13 +96,9 @@ def trending(geo, full_data, max_results):
     default=10,
     help="Maximum number of results to return.",
 )
-@click.option(
-    "--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles."
-)
+@click.option("--no-nlp", is_flag=True, default=False, help="Disable NLP processing for articles.")
 def top(period, max_results, no_nlp):
-    articles = asyncio.run(
-        get_top_news(max_results=max_results, period=period, nlp=not no_nlp)
-    )
+    articles = asyncio.run(get_top_news(max_results=max_results, period=period, nlp=not no_nlp))
     print_articles(articles)
     print(f"Found {len(articles)} top articles.")
 
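All of the cli.py hunks above are formatting-only: multi-line @click.option decorators and asyncio.run calls are collapsed onto single lines, with no behavioral change. A quick smoke test along these lines (hypothetical, not part of the package) would confirm the options still parse; note that it fetches over the network when run for real:

# Hypothetical smoke test for the reformatted CLI (not included in the wheel).
# Assumes the `cli` group and `keyword` command shown in the diff above.
from click.testing import CliRunner

from google_news_trends_mcp.cli import cli

runner = CliRunner()
result = runner.invoke(cli, ["keyword", "python", "--period", "3", "--max-results", "2", "--no-nlp"])
print(result.exit_code)  # 0 if option parsing and the fetch succeeded
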
google_news_trends_mcp/news.py
@@ -100,9 +100,7 @@ async def download_article_with_playwright(url) -> newspaper.Article | None:
         article = newspaper.article(url, input_html=content, language="en")
         return article
     except Exception as e:
-        logging.warning(
-            f"Error downloading article with Playwright from {url}\n {e.args}"
-        )
+        logging.warning(f"Error downloading article with Playwright from {url}\n {e.args}")
         return None
 
 
@@ -135,9 +133,7 @@ async def download_article(url: str, nlp: bool = True) -> newspaper.Article | No
                 f"Failed to download article with cloudscraper from {url}, status code: {response.status_code}"
             )
     except Exception as e:
-        logging.debug(
-            f"Error downloading article with cloudscraper from {url}\n {e.args}"
-        )
+        logging.debug(f"Error downloading article with cloudscraper from {url}\n {e.args}")
 
     try:
         if article is None or not article.text:
@@ -168,9 +164,7 @@ async def process_gnews_articles(
     for idx, gnews_article in enumerate(gnews_articles):
         article = await download_article(gnews_article["url"], nlp=nlp)
         if article is None or not article.text:
-            logging.debug(
-                f"Failed to download article from {gnews_article['url']}:\n{article}"
-            )
+            logging.debug(f"Failed to download article from {gnews_article['url']}:\n{article}")
             continue
         articles.append(article)
         if report_progress:
@@ -196,13 +190,9 @@ async def get_news_by_keyword(
     google_news.max_results = max_results
     gnews_articles = google_news.get_news(keyword)
     if not gnews_articles:
-        logging.debug(
-            f"No articles found for keyword '{keyword}' in the last {period} days."
-        )
+        logging.debug(f"No articles found for keyword '{keyword}' in the last {period} days.")
         return []
-    return await process_gnews_articles(
-        gnews_articles, nlp=nlp, report_progress=report_progress
-    )
+    return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)
 
 
 async def get_top_news(
@@ -223,9 +213,7 @@ async def get_top_news(
     if not gnews_articles:
         logging.debug("No top news articles found.")
         return []
-    return await process_gnews_articles(
-        gnews_articles, nlp=nlp, report_progress=report_progress
-    )
+    return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)
 
 
 async def get_news_by_location(
@@ -245,13 +233,9 @@ async def get_news_by_location(
     google_news.max_results = max_results
     gnews_articles = google_news.get_news_by_location(location)
     if not gnews_articles:
-        logging.debug(
-            f"No articles found for location '{location}' in the last {period} days."
-        )
+        logging.debug(f"No articles found for location '{location}' in the last {period} days.")
         return []
-    return await process_gnews_articles(
-        gnews_articles, nlp=nlp, report_progress=report_progress
-    )
+    return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)
 
 
 async def get_news_by_topic(
@@ -279,13 +263,9 @@ async def get_news_by_topic(
     google_news.max_results = max_results
     gnews_articles = google_news.get_news_by_topic(topic)
     if not gnews_articles:
-        logging.debug(
-            f"No articles found for topic '{topic}' in the last {period} days."
-        )
+        logging.debug(f"No articles found for topic '{topic}' in the last {period} days.")
         return []
-    return await process_gnews_articles(
-        gnews_articles, nlp=nlp, report_progress=report_progress
-    )
+    return await process_gnews_articles(gnews_articles, nlp=nlp, report_progress=report_progress)
 
 
 @overload
@@ -314,13 +294,9 @@ async def get_trending_terms(
     """
     try:
         trends = list(tr.trending_now(geo=geo))
-        trends = list(sorted(trends, key=lambda tt: tt.volume, reverse=True))[
-            :max_results
-        ]
+        trends = list(sorted(trends, key=lambda tt: tt.volume, reverse=True))[:max_results]
         if not full_data:
-            return [
-                {"keyword": trend.keyword, "volume": trend.volume} for trend in trends
-            ]
+            return [{"keyword": trend.keyword, "volume": trend.volume} for trend in trends]
         return trends
     except Exception as e:
         logging.warning(f"Error fetching trending terms: {e}")
google_news_trends_mcp/server.py
@@ -1,124 +1,87 @@
+from typing import Annotated, cast, Optional, Any, Literal, TYPE_CHECKING
 from fastmcp import FastMCP, Context
 from fastmcp.exceptions import ToolError
 from fastmcp.server.dependencies import get_context
-from pydantic import BaseModel, Field
-from typing import Optional
-from google_news_trends_mcp import news
-from typing import Annotated
-from newspaper import settings as newspaper_settings
 from fastmcp.server.middleware.timing import TimingMiddleware
 from fastmcp.server.middleware.logging import LoggingMiddleware
 from fastmcp.server.middleware.rate_limiting import RateLimitingMiddleware
 from fastmcp.server.middleware.error_handling import ErrorHandlingMiddleware
+from mcp.types import TextContent
+from pydantic import BaseModel, Field, model_serializer
+from google_news_trends_mcp import news
+from newspaper import settings as newspaper_settings
+from newspaper.article import Article
 
 
-class ArticleOut(BaseModel):
-    read_more_link: Annotated[
-        Optional[str], Field(description="Link to read more about the article.")
-    ] = None
-    language: Annotated[
-        Optional[str], Field(description="Language code of the article.")
-    ] = None
-    meta_img: Annotated[Optional[str], Field(description="Meta image URL.")] = None
-    movies: Annotated[
-        Optional[list[str]], Field(description="List of movie URLs or IDs.")
-    ] = None
-    meta_favicon: Annotated[
-        Optional[str], Field(description="Favicon URL from meta data.")
-    ] = None
-    meta_site_name: Annotated[
-        Optional[str], Field(description="Site name from meta data.")
-    ] = None
+class BaseModelClean(BaseModel):
+    @model_serializer
+    def serializer(self, **kwargs) -> dict[str, Any]:
+        return {
+            field: self.__getattribute__(field)
+            for field in self.model_fields_set
+            if self.__getattribute__(field) is not None
+        }
+
+    if TYPE_CHECKING:
+
+        def model_dump(self, **kwargs) -> dict[str, Any]: ...
+
+
+class ArticleOut(BaseModelClean):
     title: Annotated[str, Field(description="Title of the article.")]
-    authors: Annotated[Optional[list[str]], Field(description="list of authors.")] = (
-        None
-    )
-    publish_date: Annotated[
-        Optional[str], Field(description="Publish date in ISO format.")
-    ] = None
-    top_image: Annotated[Optional[str], Field(description="URL of the top image.")] = (
-        None
-    )
-    images: Annotated[Optional[list[str]], Field(description="list of image URLs.")] = (
-        None
-    )
-    text: Annotated[str, Field(description="Full text of the article.")]
     url: Annotated[str, Field(description="Original article URL.")]
-    summary: Annotated[Optional[str], Field(description="Summary of the article.")] = (
-        None
-    )
-    keywords: Annotated[
-        Optional[list[str]], Field(description="Extracted keywords.")
-    ] = None
-    tags: Annotated[Optional[list[str]], Field(description="Tags for the article.")] = (
-        None
-    )
-    meta_keywords: Annotated[
-        Optional[list[str]], Field(description="Meta keywords from the article.")
-    ] = None
-    meta_description: Annotated[
-        Optional[str], Field(description="Meta description from the article.")
-    ] = None
-    canonical_link: Annotated[
-        Optional[str], Field(description="Canonical link for the article.")
-    ] = None
-    meta_data: Annotated[
-        Optional[dict[str, str | int]], Field(description="Meta data dictionary.")
-    ] = None
-    meta_lang: Annotated[
-        Optional[str], Field(description="Language of the article.")
-    ] = None
-    source_url: Annotated[
-        Optional[str], Field(description="Source URL if different from original.")
-    ] = None
+    read_more_link: Annotated[Optional[str], Field(description="Link to read more about the article.")] = None
+    language: Annotated[Optional[str], Field(description="Language code of the article.")] = None
+    meta_img: Annotated[Optional[str], Field(description="Meta image URL.")] = None
+    movies: Annotated[Optional[list[str]], Field(description="List of movie URLs or IDs.")] = None
+    meta_favicon: Annotated[Optional[str], Field(description="Favicon URL from meta data.")] = None
+    meta_site_name: Annotated[Optional[str], Field(description="Site name from meta data.")] = None
+    authors: Annotated[Optional[list[str]], Field(description="list of authors.")] = None
+    publish_date: Annotated[Optional[str], Field(description="Publish date in ISO format.")] = None
+    top_image: Annotated[Optional[str], Field(description="URL of the top image.")] = None
+    images: Annotated[Optional[list[str]], Field(description="list of image URLs.")] = None
+    text: Annotated[Optional[str], Field(description="Full text of the article.")] = None
+    summary: Annotated[Optional[str], Field(description="Summary of the article.")] = None
+    keywords: Annotated[Optional[list[str]], Field(description="Extracted keywords.")] = None
+    tags: Annotated[Optional[list[str]], Field(description="Tags for the article.")] = None
+    meta_keywords: Annotated[Optional[list[str]], Field(description="Meta keywords from the article.")] = None
+    meta_description: Annotated[Optional[str], Field(description="Meta description from the article.")] = None
+    canonical_link: Annotated[Optional[str], Field(description="Canonical link for the article.")] = None
+    meta_data: Annotated[Optional[dict[str, str | int]], Field(description="Meta data dictionary.")] = None
+    meta_lang: Annotated[Optional[str], Field(description="Language of the article.")] = None
+    source_url: Annotated[Optional[str], Field(description="Source URL if different from original.")] = None
 
 
-class TrendingTermArticleOut(BaseModel):
+class TrendingTermArticleOut(BaseModelClean):
     title: Annotated[str, Field(description="Article title.")] = ""
     url: Annotated[str, Field(description="Article URL.")] = ""
     source: Annotated[Optional[str], Field(description="News source name.")] = None
     picture: Annotated[Optional[str], Field(description="URL to article image.")] = None
-    time: Annotated[
-        Optional[str | int], Field(description="Publication time or timestamp.")
-    ] = None
+    time: Annotated[Optional[str | int], Field(description="Publication time or timestamp.")] = None
     snippet: Annotated[Optional[str], Field(description="Article preview text.")] = None
 
 
-class TrendingTermOut(BaseModel):
+class TrendingTermOut(BaseModelClean):
     keyword: Annotated[str, Field(description="Trending keyword.")]
     volume: Annotated[Optional[int], Field(description="Search volume.")] = None
     geo: Annotated[Optional[str], Field(description="Geographic location code.")] = None
     started_timestamp: Annotated[
         Optional[list],
-        Field(
-            description="When the trend started (year, month, day, hour, minute, second)."
-        ),
+        Field(description="When the trend started (year, month, day, hour, minute, second)."),
     ] = None
     ended_timestamp: Annotated[
-        Optional[tuple[int, int]],
-        Field(
-            description="When the trend ended (year, month, day, hour, minute, second)."
-        ),
-    ] = None
-    volume_growth_pct: Annotated[
-        Optional[float], Field(description="Percentage growth in search volume.")
-    ] = None
-    trend_keywords: Annotated[
-        Optional[list[str]], Field(description="Related keywords.")
-    ] = None
-    topics: Annotated[
-        Optional[list[str | int]], Field(description="Related topics.")
+        Optional[list],
+        Field(description="When the trend ended (year, month, day, hour, minute, second)."),
     ] = None
+    volume_growth_pct: Annotated[Optional[float], Field(description="Percentage growth in search volume.")] = None
+    trend_keywords: Annotated[Optional[list[str]], Field(description="Related keywords.")] = None
+    topics: Annotated[Optional[list[str | int]], Field(description="Related topics.")] = None
     news: Annotated[
         Optional[list[TrendingTermArticleOut]],
         Field(description="Related news articles."),
     ] = None
-    news_tokens: Annotated[
-        Optional[list], Field(description="Associated news tokens.")
-    ] = None
-    normalized_keyword: Annotated[
-        Optional[str], Field(description="Normalized form of the keyword.")
-    ] = None
+    news_tokens: Annotated[Optional[list], Field(description="Associated news tokens.")] = None
+    normalized_keyword: Annotated[Optional[str], Field(description="Normalized form of the keyword.")] = None
 
 
 mcp = FastMCP(
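The substantive change in server.py is the new BaseModelClean base class: its @model_serializer drops any field that was never set or is None, so tool responses no longer carry null-valued keys. A minimal standalone sketch of the effect, using only the code shown above:

# Standalone sketch of BaseModelClean's serializer, mirroring the diff above.
from typing import Any, Optional

from pydantic import BaseModel, model_serializer


class BaseModelClean(BaseModel):
    @model_serializer
    def serializer(self, **kwargs) -> dict[str, Any]:
        # Keep only fields that were explicitly set and are not None.
        return {
            field: self.__getattribute__(field)
            for field in self.model_fields_set
            if self.__getattribute__(field) is not None
        }


class Demo(BaseModelClean):
    title: str
    summary: Optional[str] = None


print(Demo(title="hello").model_dump())  # {'title': 'hello'} (no 'summary' key)
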
@@ -133,7 +96,6 @@ mcp.add_middleware(TimingMiddleware()) # Time actual execution
 mcp.add_middleware(LoggingMiddleware())  # Log everything
 
 
-# Configure newspaper settings for article extraction
 def set_newspaper_article_fields(full_data: bool = False):
     if full_data:
         newspaper_settings.article_json_fields = [
@@ -163,13 +125,29 @@ def set_newspaper_article_fields(full_data: bool = False):
         newspaper_settings.article_json_fields = [
             "url",
             "title",
-            "text",
             "publish_date",
             "summary",
-            "keywords",
         ]
 
 
+async def summarize_article(article: Article, ctx: Context) -> None:
+    if article.text:
+        prompt = f"Please provide a concise summary of the following news article:\n\n{article.text}"
+        response = await ctx.sample(prompt)
+        # response = cast(TextContent, response)
+        if isinstance(response, TextContent):
+            if not response.text:
+                await ctx.warning("NLP response is empty. Unable to summarize article.")
+                article.summary = "No summary available."
+            else:
+                article.summary = response.text
+        else:
+            await ctx.warning("NLP response is not a TextContent object. Unable to summarize article.")
+            article.summary = "No summary available."
+    else:
+        article.summary = "No summary available."
+
+
 @mcp.tool(
     description=news.get_news_by_keyword.__doc__,
     tags={"news", "articles", "keyword"},
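The new summarize_article helper prefers client-side LLM sampling (ctx.sample) and the tools below fall back to newspaper's local article.nlp() when sampling raises. Since the same loop is repeated verbatim in all four news tools, a factored-out helper could look like this hypothetical sketch (names taken from the diff):

# Hypothetical helper consolidating the per-tool summarize/fallback loop below.
async def summarize_articles(articles, ctx) -> None:
    total_articles = len(articles)
    try:
        for idx, article in enumerate(articles):
            await summarize_article(article, ctx)  # LLM sampling path
            await ctx.report_progress(idx, total_articles)
    except Exception as err:
        await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
        for idx, article in enumerate(articles):
            article.nlp()  # local newspaper NLP fallback
            await ctx.report_progress(idx, total_articles)
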
@@ -177,27 +155,40 @@ def set_newspaper_article_fields(full_data: bool = False):
 async def get_news_by_keyword(
     ctx: Context,
     keyword: Annotated[str, Field(description="Search term to find articles.")],
-    period: Annotated[
-        int, Field(description="Number of days to look back for articles.", ge=1)
-    ] = 7,
-    max_results: Annotated[
-        int, Field(description="Maximum number of results to return.", ge=1)
-    ] = 10,
-    nlp: Annotated[
-        bool, Field(description="Whether to perform NLP on the articles.")
-    ] = False,
+    period: Annotated[int, Field(description="Number of days to look back for articles.", ge=1)] = 7,
+    max_results: Annotated[int, Field(description="Maximum number of results to return.", ge=1)] = 10,
     full_data: Annotated[
-        bool, Field(description="Return full data for each article.")
+        bool,
+        Field(
+            description="Return full data for each article. If False a summary should be created by setting the summarize flag"
+        ),
     ] = False,
+    summarize: Annotated[
+        bool,
+        Field(
+            description="Generate a summary of the article, will first try LLM Sampling but if unavailable will use nlp"
+        ),
+    ] = True,
 ) -> list[ArticleOut]:
     set_newspaper_article_fields(full_data)
     articles = await news.get_news_by_keyword(
         keyword=keyword,
         period=period,
         max_results=max_results,
-        nlp=nlp,
+        nlp=False,
         report_progress=ctx.report_progress,
     )
+    if summarize:
+        total_articles = len(articles)
+        try:
+            for idx, article in enumerate(articles):
+                await summarize_article(article, ctx)
+                await ctx.report_progress(idx, total_articles)
+        except Exception as err:
+            await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
+            for idx, article in enumerate(articles):
+                article.nlp()
+                await ctx.report_progress(idx, total_articles)
     await ctx.report_progress(progress=len(articles), total=len(articles))
     return [ArticleOut(**a.to_json(False)) for a in articles]
 
@@ -209,58 +200,83 @@ async def get_news_by_keyword(
 async def get_news_by_location(
     ctx: Context,
     location: Annotated[str, Field(description="Name of city/state/country.")],
-    period: Annotated[
-        int, Field(description="Number of days to look back for articles.", ge=1)
-    ] = 7,
-    max_results: Annotated[
-        int, Field(description="Maximum number of results to return.", ge=1)
-    ] = 10,
-    nlp: Annotated[
-        bool, Field(description="Whether to perform NLP on the articles.")
-    ] = False,
+    period: Annotated[int, Field(description="Number of days to look back for articles.", ge=1)] = 7,
+    max_results: Annotated[int, Field(description="Maximum number of results to return.", ge=1)] = 10,
     full_data: Annotated[
-        bool, Field(description="Return full data for each article.")
+        bool,
+        Field(
+            description="Return full data for each article. If False a summary should be created by setting the summarize flag"
+        ),
     ] = False,
+    summarize: Annotated[
+        bool,
+        Field(
+            description="Generate a summary of the article, will first try LLM Sampling but if unavailable will use nlp"
+        ),
+    ] = True,
 ) -> list[ArticleOut]:
     set_newspaper_article_fields(full_data)
     articles = await news.get_news_by_location(
         location=location,
         period=period,
         max_results=max_results,
-        nlp=nlp,
+        nlp=False,
         report_progress=ctx.report_progress,
     )
+    if summarize:
+        total_articles = len(articles)
+        try:
+            for idx, article in enumerate(articles):
+                await summarize_article(article, ctx)
+                await ctx.report_progress(idx, total_articles)
+        except Exception as err:
+            await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
+            for idx, article in enumerate(articles):
+                article.nlp()
+                await ctx.report_progress(idx, total_articles)
     await ctx.report_progress(progress=len(articles), total=len(articles))
     return [ArticleOut(**a.to_json(False)) for a in articles]
 
 
-@mcp.tool(
-    description=news.get_news_by_topic.__doc__, tags={"news", "articles", "topic"}
-)
+@mcp.tool(description=news.get_news_by_topic.__doc__, tags={"news", "articles", "topic"})
 async def get_news_by_topic(
     ctx: Context,
     topic: Annotated[str, Field(description="Topic to search for articles.")],
-    period: Annotated[
-        int, Field(description="Number of days to look back for articles.", ge=1)
-    ] = 7,
-    max_results: Annotated[
-        int, Field(description="Maximum number of results to return.", ge=1)
-    ] = 10,
-    nlp: Annotated[
-        bool, Field(description="Whether to perform NLP on the articles.")
-    ] = False,
+    period: Annotated[int, Field(description="Number of days to look back for articles.", ge=1)] = 7,
+    max_results: Annotated[int, Field(description="Maximum number of results to return.", ge=1)] = 10,
     full_data: Annotated[
-        bool, Field(description="Return full data for each article.")
+        bool,
+        Field(
+            description="Return full data for each article. If False a summary should be created by setting the summarize flag"
+        ),
     ] = False,
+    summarize: Annotated[
+        bool,
+        Field(
+            description="Generate a summary of the article, will first try LLM Sampling but if unavailable will use nlp"
+        ),
+    ] = True,
 ) -> list[ArticleOut]:
     set_newspaper_article_fields(full_data)
     articles = await news.get_news_by_topic(
         topic=topic,
         period=period,
         max_results=max_results,
-        nlp=nlp,
+        nlp=False,
         report_progress=ctx.report_progress,
     )
+    if summarize:
+        total_articles = len(articles)
+        try:
+            for idx, article in enumerate(articles):
+                await summarize_article(article, ctx)
+                await ctx.report_progress(idx, total_articles)
+        except Exception as err:
+            await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
+            for idx, article in enumerate(articles):
+                article.nlp()
+                await ctx.report_progress(idx, total_articles)
+
     await ctx.report_progress(progress=len(articles), total=len(articles))
     return [ArticleOut(**a.to_json(False)) for a in articles]
 
@@ -268,60 +284,59 @@ async def get_news_by_topic(
 @mcp.tool(description=news.get_top_news.__doc__, tags={"news", "articles", "top"})
 async def get_top_news(
     ctx: Context,
-    period: Annotated[
-        int, Field(description="Number of days to look back for top articles.", ge=1)
-    ] = 3,
-    max_results: Annotated[
-        int, Field(description="Maximum number of results to return.", ge=1)
-    ] = 10,
-    nlp: Annotated[
-        bool, Field(description="Whether to perform NLP on the articles.")
-    ] = False,
+    period: Annotated[int, Field(description="Number of days to look back for top articles.", ge=1)] = 3,
+    max_results: Annotated[int, Field(description="Maximum number of results to return.", ge=1)] = 10,
     full_data: Annotated[
-        bool, Field(description="Return full data for each article.")
+        bool,
+        Field(
+            description="Return full data for each article. If False a summary should be created by setting the summarize flag"
+        ),
     ] = False,
+    summarize: Annotated[
+        bool,
+        Field(
+            description="Generate a summary of the article, will first try LLM Sampling but if unavailable will use nlp"
+        ),
+    ] = True,
 ) -> list[ArticleOut]:
     set_newspaper_article_fields(full_data)
     articles = await news.get_top_news(
         period=period,
         max_results=max_results,
-        nlp=nlp,
+        nlp=False,
         report_progress=ctx.report_progress,
     )
+    if summarize:
+        total_articles = len(articles)
+        try:
+            for idx, article in enumerate(articles):
+                await summarize_article(article, ctx)
+                await ctx.report_progress(idx, total_articles)
+        except Exception as err:
+            await ctx.debug(f"Failed to use LLM sampling for article summary:\n{err.args}")
+            for idx, article in enumerate(articles):
+                article.nlp()
+                await ctx.report_progress(idx, total_articles)
+
     await ctx.report_progress(progress=len(articles), total=len(articles))
     return [ArticleOut(**a.to_json(False)) for a in articles]
 
 
-@mcp.tool(
-    description=news.get_trending_terms.__doc__, tags={"trends", "google", "trending"}
-)
+@mcp.tool(description=news.get_trending_terms.__doc__, tags={"trends", "google", "trending"})
 async def get_trending_terms(
-    geo: Annotated[
-        str, Field(description="Country code, e.g. 'US', 'GB', 'IN', etc.")
-    ] = "US",
+    geo: Annotated[str, Field(description="Country code, e.g. 'US', 'GB', 'IN', etc.")] = "US",
     full_data: Annotated[
         bool,
-        Field(
-            description="Return full data for each trend. Should be False for most use cases."
-        ),
+        Field(description="Return full data for each trend. Should be False for most use cases."),
     ] = False,
-    max_results: Annotated[
-        int, Field(description="Maximum number of results to return.", ge=1)
-    ] = 100,
+    max_results: Annotated[int, Field(description="Maximum number of results to return.", ge=1)] = 100,
 ) -> list[TrendingTermOut]:
 
     if not full_data:
-        trends = await news.get_trending_terms(
-            geo=geo, full_data=False, max_results=max_results
-        )
-        return [
-            TrendingTermOut(keyword=str(tt["keyword"]), volume=tt["volume"])
-            for tt in trends
-        ]
+        trends = await news.get_trending_terms(geo=geo, full_data=False, max_results=max_results)
+        return [TrendingTermOut(keyword=str(tt["keyword"]), volume=tt["volume"]) for tt in trends]
 
-    trends = await news.get_trending_terms(
-        geo=geo, full_data=True, max_results=max_results
-    )
+    trends = await news.get_trending_terms(geo=geo, full_data=True, max_results=max_results)
     return [TrendingTermOut(**tt.__dict__) for tt in trends]
 
 
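With the nlp parameter removed from the tool signatures and summarize added in its place, a quick end-to-end check could use FastMCP's in-memory client. This is a hypothetical sketch; the exact client API depends on your fastmcp version:

# Hypothetical in-memory exercise of the reworked tools (assumes fastmcp's
# in-memory transport; not part of the package).
import asyncio

from fastmcp import Client

from google_news_trends_mcp.server import mcp


async def main():
    async with Client(mcp) as client:
        result = await client.call_tool("get_trending_terms", {"geo": "US", "max_results": 5})
        print(result)  # None-valued fields are omitted thanks to BaseModelClean


asyncio.run(main())
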
google_news_trends_mcp-0.1.8.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: google-news-trends-mcp
-Version: 0.1.7
+Version: 0.1.8
 Summary: An MCP server to access Google News and Google Trends.
 Author-email: Jesse Manek <jesse.manek@gmail.com>
 License-Expression: MIT
@@ -36,7 +36,7 @@ An MCP server to access Google News and Google Trends. Does not rely on any pai
 - Search Google News articles based on keyword, location, topic
 - Get top news stories from Google News
 - Google Trends keywords base on location
-- Optional NLP to summarize articles and extract keywords
+- Optional LLM Sampling and NLP to summarize articles and extract keywords
 
 ## Installation
 
@@ -70,7 +70,7 @@ Add to your Claude settings:
   "mcpServers": {
     "google-news-trends": {
       "command": "uvx",
-      "args": ["google-news-trends-mcp"]
+      "args": ["google-news-trends-mcp@latest"]
     }
   }
 }
@@ -103,7 +103,7 @@ Add to your Claude settings:
   "servers": {
     "google-news-trends": {
       "command": "uvx",
-      "args": ["google-news-trends-mcp"]
+      "args": ["google-news-trends-mcp@latest"]
     }
   }
 }
google_news_trends_mcp-0.1.8.dist-info/RECORD
@@ -0,0 +1,11 @@
+google_news_trends_mcp/__init__.py,sha256=J9O5WNvC9cNDaxecveSUvzLGOXOYO-pCHbiGopfYoIc,76
+google_news_trends_mcp/__main__.py,sha256=ysiAk_xpnnW3lrLlzdIQQa71tuGBRT8WocbecBsY2Fs,87
+google_news_trends_mcp/cli.py,sha256=-Cith02x6-9o91rXpgMM0lrhArPDMB9d3h8AAE1rimw,3959
+google_news_trends_mcp/news.py,sha256=Anxs65Fxq1Qz_tkmVyTDY3Fn-I0dv0xR3ipDrLBc6gw,12851
+google_news_trends_mcp/server.py,sha256=promIVXRcd1ZUSgFClZ73l2scXlsS-joRHv1AZs73SE,14946
+google_news_trends_mcp-0.1.8.dist-info/licenses/LICENSE,sha256=5dsv2ZI5EZIer0a9MktVmILVrlp5vqH_0tPIe3bRLgE,1067
+google_news_trends_mcp-0.1.8.dist-info/METADATA,sha256=j0yuLLp3OaCYS07o-Q5vNVs3xCk5HHAMcGVUn7kT2TI,4495
+google_news_trends_mcp-0.1.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+google_news_trends_mcp-0.1.8.dist-info/entry_points.txt,sha256=eVT3xd6YJQgsWAUBwhnffuwhXNF7yyt_uco6fjBy-1o,130
+google_news_trends_mcp-0.1.8.dist-info/top_level.txt,sha256=RFheDbzhNnEV_Y3iFNm7jhRhY1P1wQgfiYqVpXCTD_U,23
+google_news_trends_mcp-0.1.8.dist-info/RECORD,,
google_news_trends_mcp-0.1.7.dist-info/RECORD
@@ -1,11 +0,0 @@
-google_news_trends_mcp/__init__.py,sha256=J9O5WNvC9cNDaxecveSUvzLGOXOYO-pCHbiGopfYoIc,76
-google_news_trends_mcp/__main__.py,sha256=ysiAk_xpnnW3lrLlzdIQQa71tuGBRT8WocbecBsY2Fs,87
-google_news_trends_mcp/cli.py,sha256=XJNnRVpDXX2MCb8dPfDcQJWYYA4CxTuxbhvpJGeVQgs,4133
-google_news_trends_mcp/news.py,sha256=2xmlwe4txaqiB8MljbhbBLmpb6tM35autGJVQ144k0s,13107
-google_news_trends_mcp/server.py,sha256=7hau48vQr_a2YbLgz4MqkwsTHMuSIU8jYEkjInID4gY,11553
-google_news_trends_mcp-0.1.7.dist-info/licenses/LICENSE,sha256=5dsv2ZI5EZIer0a9MktVmILVrlp5vqH_0tPIe3bRLgE,1067
-google_news_trends_mcp-0.1.7.dist-info/METADATA,sha256=qkJh_vuB7gIH2Pp0TorRH_9J6ZvkKmU4JOvjsZqwtoY,4464
-google_news_trends_mcp-0.1.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-google_news_trends_mcp-0.1.7.dist-info/entry_points.txt,sha256=eVT3xd6YJQgsWAUBwhnffuwhXNF7yyt_uco6fjBy-1o,130
-google_news_trends_mcp-0.1.7.dist-info/top_level.txt,sha256=RFheDbzhNnEV_Y3iFNm7jhRhY1P1wQgfiYqVpXCTD_U,23
-google_news_trends_mcp-0.1.7.dist-info/RECORD,,