cnks 0.1.1__py3-none-any.whl → 0.2.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cnks/__init__.py +17 -6
- cnks/chrome_extractor.py +413 -0
- cnks/extractor.py +250 -0
- cnks/server.py +125 -101
- cnks-0.2.2.dist-info/METADATA +187 -0
- cnks-0.2.2.dist-info/RECORD +8 -0
- cnks-0.1.1.dist-info/METADATA +0 -841
- cnks-0.1.1.dist-info/RECORD +0 -6
- {cnks-0.1.1.dist-info → cnks-0.2.2.dist-info}/WHEEL +0 -0
- {cnks-0.1.1.dist-info → cnks-0.2.2.dist-info}/entry_points.txt +0 -0
cnks/extractor.py
ADDED
@@ -0,0 +1,250 @@
|
|
1
|
+
from typing import Dict, List, Optional, Union
|
2
|
+
import logging
|
3
|
+
import traceback
|
4
|
+
import asyncio
|
5
|
+
from contextlib import asynccontextmanager
|
6
|
+
from datetime import datetime
|
7
|
+
from pydantic import BaseModel
|
8
|
+
|
9
|
+
from playwright.async_api import async_playwright, Browser, BrowserContext, Page
|
10
|
+
|
11
|
+
# Module-wide file logger: append everything (DEBUG and up) to a local log
# file so extraction runs can be audited after the fact.
_LOG_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
logging.basicConfig(
    filename="cnki_extractor.log",
    filemode="a",
    level=logging.DEBUG,
    format=_LOG_FORMAT,
)
logger = logging.getLogger("cnki_extractor")
|
19
|
+
|
20
|
+
# Data model for one extracted CNKI article.
class CNKIContent(BaseModel):
    """Structured content of a single CNKI paper.

    All fields default to empty so a partially-scraped page still yields
    a valid model.
    """

    title: str = ""           # paper title
    authors: List[str] = []   # author names (pydantic copies this default per instance)
    abstract: str = ""        # abstract text
    keywords: List[str] = []  # keyword list
    cite_format: str = ""     # full citation string as shown by CNKI
    url: str = ""             # source page URL, recorded for traceability
|
29
|
+
|
30
|
+
async def get_browser():
    """Start Playwright and launch the locally-installed Chrome.

    Returns:
        A ``(playwright, browser, context)`` tuple. The caller owns all
        three objects and is responsible for closing/stopping them.

    Raises:
        ValueError: If no local Chrome executable can be located.
        Exception: Propagated from Playwright if both launch strategies fail.
    """
    from . import server  # reuse the server module's Chrome discovery logic

    playwright = await async_playwright().start()
    try:
        # Locate a local Chrome installation.
        chrome_path = server.find_chrome_executable()

        if not chrome_path:
            raise ValueError('未找到Chrome可执行文件,请设置CHROME_PATH环境变量指向Chrome位置')

        logger.info(f"使用本地Chrome: {chrome_path}")

        try:
            # Prefer the managed "chrome" channel when available.
            browser = await playwright.chromium.launch(
                headless=False,
                channel="chrome"
            )
        except Exception as e:
            logger.info(f"使用channel='chrome'失败: {str(e)},尝试使用executable_path")
            # Fall back to launching the discovered executable directly.
            browser = await playwright.chromium.launch(
                headless=False,
                executable_path=chrome_path
            )

        context = await browser.new_context(viewport={'width': 1920, 'height': 1080})

        return playwright, browser, context
    except Exception:
        # Bug fix: the original leaked the Playwright driver process when
        # Chrome lookup or both launch attempts failed. Stop it before
        # propagating so repeated failures don't accumulate processes.
        await playwright.stop()
        raise
|
61
|
+
|
62
|
+
async def extract_content_from_url(url: str) -> CNKIContent:
    """Extract paper metadata (title, authors, abstract, keywords, citation)
    from a single CNKI article page.

    Args:
        url: Full CNKI article URL; must start with ``https://kns.cnki.net/``.

    Returns:
        A populated :class:`CNKIContent`; fields stay empty when a value
        cannot be found on the page.

    Raises:
        ValueError: If ``url`` is not a CNKI link.
        Exception: If the page could not be loaded or parsed.
    """
    if not url.startswith('https://kns.cnki.net/'):
        raise ValueError('URL必须是CNKI知网的链接')

    content = CNKIContent(url=url)
    playwright = None
    browser = None
    context = None
    page = None

    try:
        # Start the browser session.
        playwright, browser, context = await get_browser()

        # Open a fresh page for this URL.
        page = await context.new_page()
        logger.info(f"正在访问页面: {url}")

        try:
            # Load the article page and wait until network is idle.
            await page.goto(url, wait_until='networkidle', timeout=60000)
            logger.info("页面加载完成")

            # If a login button is visible, give the user time to log in
            # manually before scraping (CNKI gates content behind login).
            login_text = await page.evaluate('() => document.querySelector(".login-btn")?.textContent || ""')
            if "登录" in login_text:
                logger.info("需要登录,请手动登录...")
                await asyncio.sleep(10)  # grace period for manual login

            # Title: first matching selector whose text is not the
            # anti-bot "系统检测" interstitial.
            content.title = await page.evaluate('''
                () => {
                    const selectors = ['h1.title', '.wx-tit h1', '.title', 'h1'];
                    for (const selector of selectors) {
                        const element = document.querySelector(selector);
                        if (element) {
                            const text = element.textContent.trim();
                            if (!text.includes('系统检测')) {
                                return text.split(/\\s+/)[0];
                            }
                        }
                    }
                    return "";
                }
            ''')
            logger.info(f"提取到标题: {content.title}")

            # Citation format and authors: open the "cite" dialog and parse
            # the citation string out of its textarea.
            try:
                cite_button = await page.wait_for_selector(
                    'button:has-text("引用"), [class*="cite"], [class*="quote"]',
                    timeout=15000
                )
                if cite_button:
                    await cite_button.click()
                    logger.info("获取引用格式")

                    cite_result = await page.evaluate('''
                        () => {
                            const textarea = document.querySelector('.quote-r textarea.text');
                            if (textarea) {
                                const text = textarea.value.trim();
                                const cite_text = text.replace(/^\\[1\\]/, '').trim();

                                const match = cite_text.match(/^([^\\.]+)\\./);
                                const authors = match ? match[1].split(',').map(a => a.trim()) : [];

                                const titleMatch = cite_text.match(/\\.([^\\.]+?)\\[/);
                                const title = titleMatch ? titleMatch[1].trim() : '';

                                return {
                                    cite_format: cite_text,
                                    authors: authors,
                                    title: title
                                };
                            }
                            return null;
                        }
                    ''')

                    if cite_result:
                        content.cite_format = cite_result["cite_format"]
                        content.authors = cite_result["authors"]
                        # Prefer the citation's title when present: it is
                        # cleaner than the page heading.
                        if cite_result["title"]:
                            content.title = cite_result["title"]
                        logger.info(f"提取到作者: {content.authors}")
            except Exception as e:
                # Citation extraction is best-effort; continue without it.
                logger.error(f"提取引用格式时出错: {str(e)}")

            # Abstract.
            content.abstract = await page.evaluate('''
                () => {
                    const abstract = document.querySelector('.abstract-text, .abstract, .wx-tit + p');
                    return abstract ? abstract.textContent.trim() : "";
                }
            ''')
            logger.info(f"提取到摘要长度: {len(content.abstract)} 字符")

            # Keywords: dedicated elements first, then fall back to scanning
            # paragraphs for a "关键词" label.
            content.keywords = await page.evaluate('''
                () => {
                    const keywordElements = Array.from(document.querySelectorAll('.keywords a, .keywords-text, .keyword'));
                    if (keywordElements.length > 0) {
                        return keywordElements.map(k => k.textContent.trim());
                    }

                    const paragraphs = Array.from(document.querySelectorAll('p'));
                    for (const p of paragraphs) {
                        if (p.textContent.includes('关键词')) {
                            const text = p.textContent.trim();
                            const keywordText = text.split(/关键词[::]/)[1];
                            if (keywordText) {
                                return keywordText.split(/[,,;;]/)
                                    .map(k => k.trim())
                                    .filter(k => k);
                            }
                        }
                    }
                    return [];
                }
            ''')
            logger.info(f"提取到关键词: {content.keywords}")

            return content

        except Exception as e:
            logger.error(f"提取内容时出错: {str(e)}")
            if page:
                # Keep a screenshot for post-mortem debugging.
                await page.screenshot(path=f'extraction_error_{datetime.now().strftime("%Y%m%d%H%M%S")}.png')
            raise Exception(f"提取内容失败: {str(e)}")

    except Exception as e:
        logger.error(f"处理请求时出错: {str(e)}")
        logger.error(f"错误堆栈: {traceback.format_exc()}")
        raise

    finally:
        # Bug fix: the original closed resources sequentially, so a failing
        # page/context close leaked the browser and the Playwright driver.
        # Close each one independently instead.
        for closer in (page, context, browser):
            if closer:
                try:
                    await closer.close()
                except Exception:
                    logger.warning("关闭资源时出错", exc_info=True)
        if playwright:
            try:
                await playwright.stop()
            except Exception:
                logger.warning("停止Playwright时出错", exc_info=True)
|
210
|
+
|
211
|
+
async def batch_extract_contents(urls: List[str]) -> List[Dict]:
    """Extract content from several CNKI URLs and return one dict per URL.

    Failed URLs are not skipped: each failure contributes a record carrying
    the error message and empty content fields, so callers can see exactly
    which URLs did not work.
    """
    collected: List[Dict] = []
    total = len(urls)

    for index, target in enumerate(urls, start=1):
        try:
            logger.info(f"正在处理第 {index}/{total} 个URL: {target}")
            paper = await extract_content_from_url(target)
            collected.append(paper.dict())
            logger.info(f"成功提取第 {index} 个URL的内容")
            # Brief pause between requests to avoid triggering rate limits.
            await asyncio.sleep(2)
        except Exception as exc:
            logger.error(f"处理URL {target} 时出错: {str(exc)}")
            failure = {
                "url": target,
                "error": str(exc),
                "title": "",
                "authors": [],
                "abstract": "",
                "keywords": [],
                "cite_format": ""
            }
            collected.append(failure)

    return collected
|
237
|
+
|
238
|
+
# Manual smoke test for the extractor.
async def test_extractor():
    """Run the extractor against one known CNKI article and print the result."""
    test_url = "https://kns.cnki.net/kcms2/article/abstract?v=3uoqIhG8C44YLTlOAiTRKibYlV5Vjs7ioT0BO4yQ4m_wBGfVyh51O4GSy-IA63-FppCj3oNSHEUNzY35qnIKlFKtN6Av&uniplatform=NZKPT"
    try:
        extracted = await extract_content_from_url(test_url)
    except Exception as e:
        print(f"测试失败: {str(e)}")
    else:
        print(f"提取成功:\n{extracted.json(indent=2, ensure_ascii=False)}")

if __name__ == "__main__":
    # Run the smoke test when executed directly.
    asyncio.run(test_extractor())
|
cnks/server.py
CHANGED
@@ -8,6 +8,7 @@ import sys
|
|
8
8
|
import time
|
9
9
|
import logging
|
10
10
|
import webbrowser
|
11
|
+
import traceback
|
11
12
|
from pathlib import Path
|
12
13
|
from urllib.parse import quote
|
13
14
|
from typing import Dict, List, Any, Optional, Union
|
@@ -43,6 +44,16 @@ browser_instance = None
|
|
43
44
|
|
44
45
|
server = Server("cnks")
|
45
46
|
|
47
|
+
# 导入我们新创建的extractor模块
|
48
|
+
try:
|
49
|
+
from . import chrome_extractor as extractor
|
50
|
+
except ImportError:
|
51
|
+
try:
|
52
|
+
import chrome_extractor as extractor
|
53
|
+
except ImportError:
|
54
|
+
extractor = None
|
55
|
+
logger.warning("无法导入chrome_extractor模块,批量提取功能将不可用")
|
56
|
+
|
46
57
|
def find_chrome_executable():
|
47
58
|
"""查找Chrome可执行文件路径"""
|
48
59
|
system = platform.system()
|
@@ -586,31 +597,25 @@ async def handle_get_prompt(
|
|
586
597
|
@server.list_tools()
async def handle_list_tools() -> list[types.Tool]:
    """List the tools exposed by this server.

    Only the combined search-and-extract tool is offered, and only when both
    the extractor module and Playwright are available.
    """
    if extractor is None or not PLAYWRIGHT_AVAILABLE:
        return []

    search_and_extract = types.Tool(
        name="mcp_cnks_search_and_extract",
        description="搜索知网关键词并提取所有论文的详细内容",
        inputSchema={
            "type": "object",
            "properties": {
                "keywords": {"type": "string", "description": "搜索关键词"},
            },
            "required": ["keywords"],
        },
    )
    return [search_and_extract]
|
614
619
|
|
615
620
|
@server.call_tool()
|
616
621
|
async def handle_call_tool(
|
@@ -619,7 +624,7 @@ async def handle_call_tool(
|
|
619
624
|
"""处理工具执行请求"""
|
620
625
|
global current_url, page_content
|
621
626
|
|
622
|
-
if name == "
|
627
|
+
if name == "mcp_cnks_search_and_extract" and extractor is not None and PLAYWRIGHT_AVAILABLE:
|
623
628
|
if not arguments:
|
624
629
|
raise ValueError("缺少参数")
|
625
630
|
|
@@ -627,76 +632,75 @@ async def handle_call_tool(
|
|
627
632
|
if not keywords:
|
628
633
|
raise ValueError("缺少关键词")
|
629
634
|
|
630
|
-
|
631
|
-
|
635
|
+
try:
|
636
|
+
# 第一步:执行搜索
|
637
|
+
logger.info(f"开始执行搜索并提取:关键词 '{keywords}'")
|
632
638
|
links_count = await search_with_playwright(keywords)
|
633
639
|
current_url = "https://kns.cnki.net/kns8s/search"
|
634
640
|
|
635
|
-
#
|
636
|
-
if isinstance(page_content, dict):
|
641
|
+
# 检查搜索结果
|
642
|
+
if not isinstance(page_content, dict) or "links" not in page_content or not page_content["links"]:
|
637
643
|
return [
|
638
644
|
types.TextContent(
|
639
645
|
type="text",
|
640
|
-
text=json.dumps(
|
646
|
+
text=json.dumps({
|
647
|
+
"error": "搜索未返回有效链接",
|
648
|
+
"count": 0,
|
649
|
+
"results": []
|
650
|
+
}, ensure_ascii=False)
|
641
651
|
)
|
642
652
|
]
|
643
|
-
|
644
|
-
|
645
|
-
|
646
|
-
|
647
|
-
"links": [],
|
648
|
-
"error": "搜索执行失败或结果格式异常"
|
649
|
-
}
|
653
|
+
|
654
|
+
# 提取链接
|
655
|
+
urls = [link["url"] for link in page_content["links"] if "url" in link]
|
656
|
+
if not urls:
|
650
657
|
return [
|
651
658
|
types.TextContent(
|
652
659
|
type="text",
|
653
|
-
text=json.dumps(
|
660
|
+
text=json.dumps({
|
661
|
+
"error": "未找到有效链接",
|
662
|
+
"count": 0,
|
663
|
+
"results": []
|
664
|
+
}, ensure_ascii=False)
|
654
665
|
)
|
655
666
|
]
|
656
|
-
else:
|
657
|
-
# 如果没有playwright,回退到传统方式
|
658
|
-
result = search_with_direct_chrome(keywords)
|
659
|
-
current_url = "https://kns.cnki.net/kns8s/search"
|
660
667
|
|
661
|
-
#
|
662
|
-
|
663
|
-
|
664
|
-
|
665
|
-
|
666
|
-
|
667
|
-
|
668
|
-
|
668
|
+
# 第二步:执行提取
|
669
|
+
logger.info(f"搜索成功,找到 {len(urls)} 个链接,开始提取内容")
|
670
|
+
results = await extractor.batch_extract_contents(urls)
|
671
|
+
|
672
|
+
# 包装结果
|
673
|
+
result_json = {
|
674
|
+
"keywords": keywords,
|
675
|
+
"count": len(results),
|
676
|
+
"results": results,
|
677
|
+
"success_count": sum(1 for r in results if "error" not in r or not r["error"]),
|
678
|
+
"error_count": sum(1 for r in results if "error" in r and r["error"])
|
679
|
+
}
|
680
|
+
|
669
681
|
return [
|
670
682
|
types.TextContent(
|
671
683
|
type="text",
|
672
|
-
text=json.dumps(
|
684
|
+
text=json.dumps(result_json, ensure_ascii=False)
|
685
|
+
)
|
686
|
+
]
|
687
|
+
except Exception as e:
|
688
|
+
logger.error(f"搜索并提取时出错: {str(e)}")
|
689
|
+
logger.error(traceback.format_exc())
|
690
|
+
return [
|
691
|
+
types.TextContent(
|
692
|
+
type="text",
|
693
|
+
text=json.dumps({
|
694
|
+
"error": f"搜索并提取内容时出错: {str(e)}",
|
695
|
+
"keywords": keywords,
|
696
|
+
"count": 0,
|
697
|
+
"results": []
|
698
|
+
}, ensure_ascii=False)
|
673
699
|
)
|
674
700
|
]
|
675
701
|
|
676
|
-
|
677
|
-
|
678
|
-
raise ValueError("缺少参数")
|
679
|
-
|
680
|
-
note_name = arguments.get("name")
|
681
|
-
content = arguments.get("content")
|
682
|
-
|
683
|
-
if not note_name or not content:
|
684
|
-
raise ValueError("缺少名称或内容")
|
685
|
-
|
686
|
-
# 更新服务器状态
|
687
|
-
notes[note_name] = content
|
688
|
-
|
689
|
-
# 通知客户端资源已更改
|
690
|
-
await server.request_context.session.send_resource_list_changed()
|
691
|
-
|
692
|
-
return [
|
693
|
-
types.TextContent(
|
694
|
-
type="text",
|
695
|
-
text=f"已添加笔记 '{note_name}': {content}"
|
696
|
-
)
|
697
|
-
]
|
698
|
-
|
699
|
-
raise ValueError(f"未知工具: {name}")
|
702
|
+
else:
|
703
|
+
raise ValueError(f"未知工具: {name}")
|
700
704
|
|
701
705
|
async def find_and_count_abstract_links(page):
|
702
706
|
"""查找并统计包含article/abstract?v=的链接"""
|
@@ -755,7 +759,7 @@ async def main():
|
|
755
759
|
write_stream,
|
756
760
|
InitializationOptions(
|
757
761
|
server_name="cnks",
|
758
|
-
server_version="0.
|
762
|
+
server_version="0.2.2",
|
759
763
|
capabilities=server.get_capabilities(
|
760
764
|
notification_options=NotificationOptions(),
|
761
765
|
experimental_capabilities={},
|
@@ -770,34 +774,54 @@ def create_fastmcp_server():
|
|
770
774
|
from mcp.server.fastmcp import FastMCP
|
771
775
|
fast_mcp = FastMCP("知网搜索")
|
772
776
|
|
773
|
-
|
774
|
-
|
775
|
-
|
776
|
-
|
777
|
-
|
778
|
-
|
779
|
-
|
780
|
-
|
781
|
-
|
782
|
-
|
783
|
-
#
|
777
|
+
# 只添加搜索并提取的工具
|
778
|
+
if extractor is not None and PLAYWRIGHT_AVAILABLE:
|
779
|
+
@fast_mcp.tool()
|
780
|
+
async def mcp_cnks_search_and_extract(keywords: str) -> dict:
|
781
|
+
"""搜索关键词并提取所有论文的详细内容"""
|
782
|
+
logger.debug("[DEBUG] 正在使用FastMCP的mcp_cnks_search_and_extract函数")
|
783
|
+
try:
|
784
|
+
# 第一步:执行搜索
|
785
|
+
result_count = await search_with_playwright(keywords)
|
786
|
+
|
787
|
+
# 检查搜索结果
|
788
|
+
if not isinstance(page_content, dict) or "links" not in page_content or not page_content["links"]:
|
789
|
+
return {
|
790
|
+
"error": "搜索未返回有效链接",
|
791
|
+
"keywords": keywords,
|
792
|
+
"count": 0,
|
793
|
+
"results": []
|
794
|
+
}
|
795
|
+
|
796
|
+
# 提取链接
|
797
|
+
urls = [link["url"] for link in page_content["links"] if "url" in link]
|
798
|
+
if not urls:
|
799
|
+
return {
|
800
|
+
"error": "未找到有效链接",
|
801
|
+
"keywords": keywords,
|
802
|
+
"count": 0,
|
803
|
+
"results": []
|
804
|
+
}
|
805
|
+
|
806
|
+
# 第二步:执行提取
|
807
|
+
results = await extractor.batch_extract_contents(urls)
|
808
|
+
|
809
|
+
# 包装结果
|
810
|
+
return {
|
811
|
+
"keywords": keywords,
|
812
|
+
"count": len(results),
|
813
|
+
"results": results,
|
814
|
+
"success_count": sum(1 for r in results if "error" not in r or not r["error"]),
|
815
|
+
"error_count": sum(1 for r in results if "error" in r and r["error"])
|
816
|
+
}
|
817
|
+
except Exception as e:
|
818
|
+
logger.error(f"搜索并提取时出错: {str(e)}")
|
784
819
|
return {
|
820
|
+
"error": f"搜索并提取内容时出错: {str(e)}",
|
821
|
+
"keywords": keywords,
|
785
822
|
"count": 0,
|
786
|
-
"
|
787
|
-
"error": "搜索执行失败或结果格式异常"
|
823
|
+
"results": []
|
788
824
|
}
|
789
|
-
else:
|
790
|
-
# 直接Chrome方式不会返回链接
|
791
|
-
return {
|
792
|
-
"count": 0,
|
793
|
-
"links": [],
|
794
|
-
"error": "需要安装playwright以获取链接: uv add playwright"
|
795
|
-
}
|
796
|
-
|
797
|
-
@fast_mcp.resource("webpage://current")
|
798
|
-
def get_current_webpage() -> str:
|
799
|
-
"""获取当前网页内容"""
|
800
|
-
return get_page_content()
|
801
825
|
|
802
826
|
return fast_mcp
|
803
827
|
except ImportError:
|