finance-crawler-mcp 0.1.0 (tar.gz)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- finance_crawler_mcp-0.1.0/PKG-INFO +10 -0
- finance_crawler_mcp-0.1.0/finance_crawler_mcp.egg-info/PKG-INFO +10 -0
- finance_crawler_mcp-0.1.0/finance_crawler_mcp.egg-info/SOURCES.txt +8 -0
- finance_crawler_mcp-0.1.0/finance_crawler_mcp.egg-info/dependency_links.txt +1 -0
- finance_crawler_mcp-0.1.0/finance_crawler_mcp.egg-info/entry_points.txt +2 -0
- finance_crawler_mcp-0.1.0/finance_crawler_mcp.egg-info/requires.txt +5 -0
- finance_crawler_mcp-0.1.0/finance_crawler_mcp.egg-info/top_level.txt +1 -0
- finance_crawler_mcp-0.1.0/pyproject.toml +15 -0
- finance_crawler_mcp-0.1.0/server.py +378 -0
- finance_crawler_mcp-0.1.0/setup.cfg +4 -0
finance_crawler_mcp-0.1.0/finance_crawler_mcp.egg-info/SOURCES.txt
@@ -0,0 +1,8 @@
+pyproject.toml
+server.py
+finance_crawler_mcp.egg-info/PKG-INFO
+finance_crawler_mcp.egg-info/SOURCES.txt
+finance_crawler_mcp.egg-info/dependency_links.txt
+finance_crawler_mcp.egg-info/entry_points.txt
+finance_crawler_mcp.egg-info/requires.txt
+finance_crawler_mcp.egg-info/top_level.txt

finance_crawler_mcp-0.1.0/finance_crawler_mcp.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+

finance_crawler_mcp-0.1.0/finance_crawler_mcp.egg-info/top_level.txt
@@ -0,0 +1 @@
+server

finance_crawler_mcp-0.1.0/pyproject.toml
@@ -0,0 +1,15 @@
+[project]
+name = "finance-crawler-mcp"
+version = "0.1.0"
+description = "Multi-source financial news crawler MCP service"
+requires-python = ">=3.10"
+dependencies = [
+    "mcp",
+    "fastmcp",
+    "requests",
+    "beautifulsoup4",
+    "stock-stil",
+]
+
+[project.scripts]
+finance-crawler-mcp = "server:main"
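The `[project.scripts]` table wires a `finance-crawler-mcp` console command to `main()` in the flat top-level `server` module (matching `top_level.txt` above), so installing the package puts the server on PATH. A minimal sketch of what that entry point resolves to, assuming the package is installed so that `server` is importable:

    # Equivalent of running the `finance-crawler-mcp` console script.
    from server import main

    main()  # calls mcp.run(), which starts FastMCP's server loop (stdio transport by default)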

finance_crawler_mcp-0.1.0/server.py
@@ -0,0 +1,378 @@
+#!/usr/bin/env python3
+"""
+MCP service: multi-source financial news collection (with email attachment support)
+"""
+import sys
+import json
+import time
+import csv
+import io
+import ssl
+import smtplib
+import re
+import requests
+from datetime import datetime, timezone, timedelta
+from email.mime.text import MIMEText
+from email.mime.multipart import MIMEMultipart
+from email.mime.base import MIMEBase
+from email import encoders
+from bs4 import BeautifulSoup
+from collections import Counter, defaultdict
+from typing import List, Optional
+
+from fastmcp import FastMCP
+
+# Try to import the Guba-specific library; skip it if not installed
+try:
+    from stock_stil import comments
+    STOCK_STIL_AVAILABLE = True
+except ImportError:
+    STOCK_STIL_AVAILABLE = False
+
+mcp = FastMCP("FinanceCrawler")
+
+# ===================== General configuration =====================
+TZ_CN = timezone(timedelta(hours=8))  # China Standard Time (UTC+8)
+HEADERS_10JQKA = {
+    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
+                  "(KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
+    "Referer": "https://stock.10jqka.com.cn/",
+}
+
+# ===================== Email configuration (kept as-is) =====================
+SMTP_SERVER = "smtp.163.com"
+SMTP_PORT = 465
+SENDER_EMAIL = "m13956155221_1@163.com"
+AUTH_CODE = "JEYLKpUU3pthEddH"
+RECEIVER_EMAILS = [
+    "kellybian89@163.com",
+    "1239601342@qq.com"
+]
+
+# ===================== Utility functions =====================
+def clean_text(text: str) -> str:
+    if not text:
+        return ""
+    text = re.sub(r'<[^>]+>', '', text)  # strip HTML tags
+    text = text.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ')
+    text = re.sub(r'\s+', ' ', text).strip()  # collapse whitespace
+    return text
+
+# ===================== Data source 1: Guba comments =====================
+def fetch_guba_posts(stock_code: str = "002455", target: int = 50) -> List[dict]:
+    if not STOCK_STIL_AVAILABLE:
+        print("stock_stil unavailable; skipping Guba crawl", file=sys.stderr)
+        return []
+    try:
+        post_list = comments.getEastMoneyPostList(stock_code=stock_code)
+    except Exception as e:
+        print(f"Failed to fetch Guba post list: {e}", file=sys.stderr)
+        return []
+    results = []
+    for post in post_list[:target]:
+        pid = post.post_id
+        title = getattr(post, "post_title", "")
+        try:
+            detail = comments.getEstMoneyPostDetail(stock_code=stock_code, post_id=pid)
+            content = getattr(detail, "post_content", "")
+        except Exception:
+            content = ""
+        results.append({
+            "type": "Guba comments",
+            "title": clean_text(title),
+            "content": clean_text(content),
+            "post_id": pid,
+            "publish_time": getattr(post, "post_publish_time", ""),
+            "source": "Eastmoney Guba"
+        })
+        time.sleep(1.0)
+    print(f"Fetched {len(results)} Guba comments", file=sys.stderr)
+    return results
+
+# ===================== Data source 2: stock focus (ggjj) =====================
+def _extract_body(url: str) -> str:
+    try:
+        resp = requests.get(url, headers=HEADERS_10JQKA, timeout=10)
+        resp.encoding = resp.apparent_encoding or "utf-8"
+        soup = BeautifulSoup(resp.text, "html.parser")
+        # Try known article-body containers first
+        for sel in ["div.main-text", "div.article-content", "div.article_con", "div#content",
+                    "div.detail-content", "div.newsContent", "div.art_main", "div.article-body", "div.body"]:
+            container = soup.select_one(sel)
+            if container:
+                text = container.get_text(separator=' ', strip=True)
+                text = re.sub(r'\s+', ' ', text)
+                if len(text) > 100:
+                    return text
+        # Fall back to joining long <p> elements
+        pars = [p.get_text(strip=True) for p in soup.find_all("p") if len(p.get_text(strip=True)) > 20]
+        if pars:
+            text = " ".join(pars)
+            if len(text) > 100:
+                return text
+        # Last resort: whole <body> minus scripts, styles, nav, footer
+        body = soup.find("body")
+        if body:
+            for tag in body(["script", "style", "nav", "footer"]):
+                tag.decompose()
+            text = body.get_text(separator=' ', strip=True)
+            text = re.sub(r'\s+', ' ', text)
+            if len(text) > 100:
+                return text
+    except Exception:
+        pass
+    return ""
+
+def _fetch_news_contents(news_items: list, source_type: str) -> list:
+    results = []
+    for item in news_items[:20]:
+        title = item["title"]
+        url = item["url"]
+        content = _extract_body(url)
+        results.append({
+            "type": source_type,
+            "title": title,
+            "content": clean_text(content),
+            "url": url,
+            "source": "10jqka"
+        })
+        time.sleep(1.5)
+    return results
+
+def fetch_ggjj_news() -> list:
+    items = []
+    api_url = "https://stock.10jqka.com.cn/interface/getArticleList.php?class=ggjj&page=1&num=20"
+    try:
+        resp = requests.get(api_url, headers=HEADERS_10JQKA, timeout=10)
+        data = resp.json()
+        articles = data.get("list") or data.get("article_list") or []
+        for art in articles:
+            title = art.get("title", "").strip()
+            url = art.get("art_url") or art.get("url", "")
+            if not title or not url:
+                continue
+            full_url = url if url.startswith("http") else "https:" + url
+            items.append({"title": title, "url": full_url})
+        if items:
+            print(f"Stock focus API returned {len(items)} items", file=sys.stderr)
+            return _fetch_news_contents(items, "Stock focus")
+    except Exception as e:
+        print(f"Stock focus API failed, falling back to HTML parsing: {e}", file=sys.stderr)
+
+    list_url = "https://stock.10jqka.com.cn/ggjj_list/"
+    try:
+        resp = requests.get(list_url, headers=HEADERS_10JQKA, timeout=10)
+        resp.encoding = resp.apparent_encoding or "gbk"
+        soup = BeautifulSoup(resp.text, "html.parser")
+        for a in soup.find_all("a", href=True):
+            href = a['href']
+            title = a.get_text(strip=True)
+            if re.search(r"/\d{8}/c\d+\.shtml", href) and len(title) > 5:
+                full_url = href if href.startswith("http") else "https:" + href
+                items.append({"title": title, "url": full_url})
+                if len(items) >= 20:
+                    break
+    except Exception as e:
+        print(f"Stock focus HTML fetch failed: {e}", file=sys.stderr)
+    return _fetch_news_contents(items, "Stock focus")
+
+# ===================== Data source 3: announcement express (ggsd) =====================
+def fetch_ggsd_today() -> list:
+    base_url = "https://data.10jqka.com.cn/market/ggsd/"
+    today_str = datetime.now().strftime("%m-%d")
+    all_titles = []
+    page = 1
+    while page <= 3:
+        url = base_url if page == 1 else f"{base_url}?page={page}"
+        try:
+            resp = requests.get(url, headers=HEADERS_10JQKA, timeout=10)
+            resp.encoding = resp.apparent_encoding or "gbk"
+            soup = BeautifulSoup(resp.text, "html.parser")
+            rows = soup.select("table.m-table tr") or soup.find_all("tr")
+            for row in rows:
+                cols = row.find_all("td")
+                if len(cols) < 3:
+                    continue
+                date_text = cols[0].get_text(strip=True)
+                if today_str not in date_text:
+                    continue
+                # Take the longest cell text as the announcement title
+                title = ""
+                for col in cols[1:]:
+                    txt = col.get_text(strip=True)
+                    if len(txt) > 5 and len(txt) > len(title):
+                        title = txt
+                if title:
+                    all_titles.append(title)
+            if not all_titles:
+                break
+            page += 1
+            time.sleep(1.5)
+        except Exception as e:
+            print(f"Announcement express page {page} failed: {e}", file=sys.stderr)
+            break
+    unique = []
+    for t in all_titles:
+        if t not in unique:
+            unique.append(t)
+    print(f"Fetched {len(unique)} announcement express items", file=sys.stderr)
+    return [{"type": "Announcement express", "title": t, "content": "", "source": "10jqka announcements"} for t in unique[:20]]
+
+# ===================== Data source 4: company news =====================
+def fetch_company_news() -> list:
+    list_url = "https://stock.10jqka.com.cn/companynews_list/"
+    items = []
+    try:
+        resp = requests.get(list_url, headers=HEADERS_10JQKA, timeout=10)
+        resp.encoding = 'gbk'
+        soup = BeautifulSoup(resp.text, "html.parser")
+        for a in soup.find_all("a", href=True):
+            href = a['href']
+            title = a.get_text(strip=True)
+            if re.search(r"/\d{8}/c\d+\.shtml", href) and len(title) > 5:
+                full_url = href if href.startswith("http") else "https:" + href
+                items.append({"title": title, "url": full_url})
+                if len(items) >= 20:
+                    break
+    except Exception as e:
+        print(f"Failed to fetch company news list: {e}", file=sys.stderr)
+        return []
+    return _fetch_news_contents(items, "Company news")
+
+# ===================== Data source 5: recent news (10jqka headlines) =====================
+def fetch_recent_news(pages: int = 2) -> list:
+    headers = {"User-Agent": "Mozilla/5.0"}
+    all_items = []
+    for page in range(1, pages + 1):
+        url = f"https://news.10jqka.com.cn/tapp/news/push/stock/?page={page}&pagesize=100"
+        try:
+            resp = requests.get(url, headers=headers, timeout=10)
+            data = resp.json()
+            for item in data.get('data', {}).get('list', []):
+                title = item.get('title', '').strip()
+                if len(title) >= 5:
+                    all_items.append({
+                        "type": "Recent news",
+                        "title": title,
+                        "content": item.get('digest', ''),
+                        "publish_time": item.get('ctime', ''),
+                        "source": item.get('source', '10jqka')
+                    })
+            time.sleep(0.5)
+        except Exception as e:
+            print(f"Recent news page {page} failed: {e}", file=sys.stderr)
+            break
+    # De-duplicate by title, preserving order
+    seen = set()
+    unique = []
+    for item in all_items:
+        if item['title'] not in seen:
+            seen.add(item['title'])
+            unique.append(item)
+    return unique[:100]
+
+# ===================== Email delivery (summary email + CSV attachment) =====================
+def send_email_with_csv_attachment(analysis_time: str, all_data: list):
+    if not all_data:
+        return
+    type_counts = Counter(item['type'] for item in all_data)
+    lines = ["<h3>📋 Collection overview</h3><ul>"]
+    for t, cnt in type_counts.items():
+        lines.append(f"<li><b>{t}</b>: {cnt} items</li>")
+    lines.append("</ul>")
+    grouped = defaultdict(list)
+    for item in all_data:
+        grouped[item['type']].append(item)
+    for category in ["Guba comments", "Stock focus", "Announcement express", "Company news", "Recent news"]:
+        items = grouped.get(category, [])
+        if not items:
+            continue
+        lines.append(f"<h4>{category} samples (first 3)</h4><ul>")
+        for it in items[:3]:
+            title = it.get('title', '')[:80]
+            content = it.get('content', '')
+            snippet = content[:60] + "..." if len(content) > 60 else content
+            lines.append(f"<li><b>{title}</b> -- {snippet}</li>")
+        lines.append("</ul>")
+    html_body = f"""
+    <html>
+    <head><style>
+        body {{ font-family: 'Microsoft YaHei', Arial; }}
+        h2 {{ color: #2c3e50; }}
+        h3 {{ color: #27ae60; }}
+        ul {{ list-style: none; padding: 0; }}
+        li {{ margin: 5px 0; }}
+        .footer {{ margin-top: 30px; font-size: 12px; color: #7f8c8d; }}
+    </style></head>
+    <body>
+        <h2>📊 Multi-source financial news collection report</h2>
+        <p><strong>Generated at:</strong> {analysis_time}</p>
+        {''.join(lines)}
+        <div class="footer">
+            <p>Full data is in the attached <b>financial_news.csv</b></p>
+            <p>This email was generated automatically; for reference only</p>
+        </div>
+    </body>
+    </html>
+    """
+    fieldnames = ["type", "title", "content", "url", "publish_time", "source", "post_id"]
+    output = io.StringIO()
+    output.write('\ufeff')  # UTF-8 BOM so spreadsheet apps decode the CSV correctly
+    writer = csv.DictWriter(output, fieldnames=fieldnames, extrasaction='ignore')
+    writer.writeheader()
+    writer.writerows(all_data)
+    csv_content = output.getvalue()
+    output.close()
+    csv_filename = f"financial_news_{analysis_time.replace(' ', '_').replace(':', '-')}.csv"
+    for receiver in RECEIVER_EMAILS:
+        msg = MIMEMultipart('mixed')
+        msg['Subject'] = f"Financial data collection report_{analysis_time.replace(' ', '_')}"
+        msg['From'] = SENDER_EMAIL
+        msg['To'] = receiver
+        msg.attach(MIMEText(html_body, 'html', 'utf-8'))
+        part = MIMEBase('application', 'octet-stream')
+        part.set_payload(csv_content.encode('utf-8'))
+        encoders.encode_base64(part)
+        part.add_header('Content-Disposition', f'attachment; filename="{csv_filename}"')
+        msg.attach(part)
+        try:
+            context = ssl.create_default_context()
+            with smtplib.SMTP_SSL(SMTP_SERVER, SMTP_PORT, context=context) as server:
+                server.login(SENDER_EMAIL, AUTH_CODE)
+                server.send_message(msg)
+            print(f"✅ Email sent to {receiver}")
+            time.sleep(1)
+        except Exception as e:
+            print(f"❌ Sending to {receiver} failed: {e}")
+
+# ===================== MCP tool registration =====================
+@mcp.tool()
+def collect_all_news(stock_code: str = "002455",
+                     guba_target: int = 50,
+                     ggjj_count: int = 20,
+                     ggsd_count: int = 20,
+                     company_count: int = 20,
+                     recent_pages: int = 2) -> dict:
+    """Collect from all five financial news sources and send the email report automatically."""
+    analysis_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
+    all_data = []
+    all_data.extend(fetch_guba_posts(stock_code, guba_target))
+    all_data.extend(fetch_ggjj_news()[:ggjj_count])
+    all_data.extend(fetch_ggsd_today()[:ggsd_count])
+    all_data.extend(fetch_company_news()[:company_count])
+    all_data.extend(fetch_recent_news(recent_pages))
+    send_email_with_csv_attachment(analysis_time, all_data)
+    type_counts = Counter(item['type'] for item in all_data)
+    return {"status": "success", "analysis_time": analysis_time,
+            "total_items": len(all_data), "type_counts": dict(type_counts)}
+
+@mcp.tool()
+def crawl_guba(stock_code: str = "002455", target: int = 50) -> list:
+    """Crawl Eastmoney Guba posts only"""
+    return fetch_guba_posts(stock_code, target)
+
+@mcp.tool()
+def crawl_recent_news(pages: int = 2) -> list:
+    """Crawl recent 10jqka headline news only"""
+    return fetch_recent_news(pages)
+
+def main():
+    mcp.run()
+
+if __name__ == "__main__":
+    main()