entari-plugin-hyw 4.0.0rc11__py3-none-any.whl → 4.0.0rc12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- entari_plugin_hyw/__init__.py +175 -133
- entari_plugin_hyw/filters.py +83 -0
- entari_plugin_hyw/misc.py +42 -0
- {entari_plugin_hyw-4.0.0rc11.dist-info → entari_plugin_hyw-4.0.0rc12.dist-info}/METADATA +1 -1
- {entari_plugin_hyw-4.0.0rc11.dist-info → entari_plugin_hyw-4.0.0rc12.dist-info}/RECORD +13 -11
- hyw_core/agent.py +648 -0
- hyw_core/browser_control/service.py +75 -11
- hyw_core/core.py +148 -1
- hyw_core/definitions.py +70 -52
- hyw_core/search.py +10 -0
- hyw_core/stages/summary.py +1 -3
- {entari_plugin_hyw-4.0.0rc11.dist-info → entari_plugin_hyw-4.0.0rc12.dist-info}/WHEEL +0 -0
- {entari_plugin_hyw-4.0.0rc11.dist-info → entari_plugin_hyw-4.0.0rc12.dist-info}/top_level.txt +0 -0

hyw_core/browser_control/service.py
CHANGED

@@ -542,19 +542,54 @@ class ScreenshotService:
         return await asyncio.gather(*tasks, return_exceptions=True)
 
     async def screenshot_url(self, url: str, wait_load: bool = True, timeout: float = 15.0, full_page: bool = False, quality: int = 80) -> Optional[str]:
-        """Screenshot URL (Async wrapper for sync)."""
+        """Screenshot URL (Async wrapper for sync). Returns base64 string only."""
         loop = asyncio.get_running_loop()
-
+        result = await loop.run_in_executor(
             self._executor,
             self._screenshot_sync,
-            url, wait_load, timeout, full_page, quality
+            url, wait_load, timeout, full_page, quality, False  # extract_content=False
         )
+        # Backward compatible: return just the screenshot for old callers
+        if isinstance(result, dict):
+            return result.get("screenshot_b64")
+        return result
 
-    def
-    """
-
+    async def screenshot_with_content(self, url: str, timeout: float = 15.0, max_content_length: int = 8000) -> Dict[str, Any]:
+        """
+        Screenshot URL and extract page content.
+
+        Returns:
+            Dict with:
+            - screenshot_b64: base64 encoded screenshot
+            - content: trafilatura extracted text (truncated to max_content_length)
+            - title: page title
+            - url: final URL
+        """
+        loop = asyncio.get_running_loop()
+        result = await loop.run_in_executor(
+            self._executor,
+            self._screenshot_sync,
+            url, True, timeout, False, 65, True  # quality=65 for balance, extract_content=True
+        )
+
+        if not isinstance(result, dict):
+            return {"screenshot_b64": result, "content": "", "title": "", "url": url}
+
+        # Truncate content if needed
+        content = result.get("content", "") or ""
+        if len(content) > max_content_length:
+            content = content[:max_content_length] + "\n\n[Content truncated...]"
+        result["content"] = content
+
+        return result
+
+
+    def _screenshot_sync(self, url: str, wait_load: bool, timeout: float, full_page: bool, quality: int, extract_content: bool = False) -> Any:
+        """Synchronous screenshot. If extract_content=True, returns Dict else str."""
+        if not url:
+            return {"screenshot_b64": None, "content": "", "title": "", "url": url} if extract_content else None
         tab = None
-        capture_width =
+        capture_width = 1440  # Higher resolution for readability
 
         try:
             self._ensure_ready()
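
Both async wrappers in this hunk hand the blocking browser work to the service's thread pool via `loop.run_in_executor` and then normalize the return type so that pre-existing callers keep getting a plain base64 string. A minimal, self-contained sketch of that pattern; `MiniService` and `_blocking_capture` are stand-ins for `ScreenshotService` and `_screenshot_sync`, and only the executor attribute and the dict shape come from the diff:

```python
import asyncio
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Dict, Optional, Union


def _blocking_capture(url: str, extract_content: bool = False) -> Union[Optional[str], Dict[str, Any]]:
    """Stand-in for _screenshot_sync: returns a dict when content extraction is on."""
    if extract_content:
        return {"screenshot_b64": "aGVsbG8=", "content": "page text", "title": "Example", "url": url}
    return "aGVsbG8="  # base64 screenshot only


class MiniService:
    """Toy version of the async wrapper layer, not the real ScreenshotService."""

    def __init__(self) -> None:
        self._executor = ThreadPoolExecutor(max_workers=2)

    async def screenshot_url(self, url: str) -> Optional[str]:
        loop = asyncio.get_running_loop()
        # Blocking work runs on the pool; positional args follow the callable
        result = await loop.run_in_executor(self._executor, _blocking_capture, url, False)
        # Backward compatible: old callers only ever see the base64 string (or None)
        return result.get("screenshot_b64") if isinstance(result, dict) else result


print(asyncio.run(MiniService().screenshot_url("https://example.com")))
```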

@@ -838,13 +873,42 @@ class ScreenshotService:
             # Final scroll to top
             tab.run_js("window.scrollTo(0, 0);")
 
-            #
-
-
+            # Capture screenshot
+            screenshot_b64 = tab.get_screenshot(as_base64='jpg', full_page=False)
+
+            # Extract content if requested
+            if extract_content:
+                try:
+                    html = tab.html
+                    title = tab.title
+                    final_url = tab.url
+
+                    # Minimal trafilatura settings to reduce token consumption
+                    content = trafilatura.extract(
+                        html,
+                        include_links=False,  # No links to reduce tokens
+                        include_images=False,  # No image descriptions
+                        include_comments=False,  # No comments
+                        include_tables=False,  # No tables (can be verbose)
+                        favor_precision=True,  # Favor precision over recall
+                        output_format="txt"  # Plain text (no markdown formatting)
+                    ) or ""
+
+                    return {
+                        "screenshot_b64": screenshot_b64,
+                        "content": content,
+                        "title": title,
+                        "url": final_url
+                    }
+                except Exception as e:
+                    logger.warning(f"ScreenshotService: Content extraction failed: {e}")
+                    return {"screenshot_b64": screenshot_b64, "content": "", "title": "", "url": url}
+
+            return screenshot_b64
 
         except Exception as e:
             logger.error(f"ScreenshotService: Screenshot URL failed: {e}")
-            return None
+            return {"screenshot_b64": None, "content": "", "title": "", "url": url} if extract_content else None
         finally:
             if tab:
                 try: tab.close()
hyw_core/core.py
CHANGED

@@ -13,6 +13,7 @@ from loguru import logger
 
 from .config import HywCoreConfig, ModelConfig
 from .pipeline import ModularPipeline
+from .agent import AgentPipeline
 from .search import SearchService
 from .stages.base import StageContext
 

@@ -97,13 +98,16 @@ class HywCore:
         # Create search service
         self._search_service = SearchService(config)
 
-        # Create pipeline
+        # Create pipeline (for non-agent mode)
         self._pipeline = ModularPipeline(
             config=config,
             search_service=self._search_service,
             send_func=send_func
         )
 
+        # Agent pipeline (lazy init)
+        self._agent_pipeline = None
+
         # Create renderer (lazy init)
         self._renderer = None
 

@@ -218,6 +222,140 @@ class HywCore:
 
         except Exception as e:
             logger.error(f"HywCore query failed: {e}")
+            logger.exception("Query error details:")
+            return QueryResponse(
+                success=False,
+                content="",
+                error=str(e),
+                total_time=time.time() - start_time
+            )
+
+    async def query_agent(
+        self,
+        request: QueryRequest,
+        output_path: Optional[str] = None
+    ) -> QueryResponse:
+        """
+        Agent-mode query with tool-calling capability.
+
+        Uses AgentPipeline which can autonomously call web_tool up to 2 times.
+        Each tool call triggers an IM notification via send_notification callback.
+
+        Args:
+            request: QueryRequest with user input, images, history
+            output_path: Optional path to save rendered image
+
+        Returns:
+            QueryResponse with content, rendered image path, and metadata
+        """
+        start_time = time.time()
+
+        try:
+            # Get or create agent pipeline with current send_func
+            send_func = request.send_notification or self._send_func
+
+            if self._agent_pipeline is None or self._agent_pipeline.send_func != send_func:
+                self._agent_pipeline = AgentPipeline(
+                    config=self.config,
+                    search_service=self._search_service,
+                    send_func=send_func
+                )
+
+            # Execute agent pipeline
+            result = await self._agent_pipeline.execute(
+                user_input=request.user_input,
+                conversation_history=request.conversation_history,
+                images=request.images if request.images else None,
+                model_name=request.model_name
+            )
+
+            total_time = time.time() - start_time
+
+            # Check for refusal
+            if result.get("refuse_answer"):
+                return QueryResponse(
+                    success=True,
+                    content="",
+                    should_refuse=True,
+                    refuse_reason=result.get("refuse_reason", ""),
+                    total_time=total_time
+                )
+
+            # Check for error
+            if not result.get("success", True):
+                return QueryResponse(
+                    success=False,
+                    content="",
+                    error=result.get("error", "Unknown error"),
+                    total_time=total_time
+                )
+
+            # Extract response data
+            content = result.get("llm_response", "")
+            usage = result.get("usage", {})
+
+            # Convert web_results to references format for frontend
+            # Only include references that are actually cited in the markdown
+            import re
+            web_results = result.get("web_results", [])
+
+            # Build visible results list (excluding hidden items)
+            visible_results = [r for r in web_results if not r.get("_hidden")]
+
+            # Parse markdown to find which citations are used (pattern: [number])
+            citation_pattern = re.compile(r'\[(\d+)\]')
+            cited_ids = set()
+            for match in citation_pattern.finditer(content):
+                cited_ids.add(int(match.group(1)))
+
+            # Only include cited references, in order of first appearance
+            references = []
+            for idx in sorted(cited_ids):
+                # idx is 1-based in markdown
+                if 1 <= idx <= len(visible_results):
+                    r = visible_results[idx - 1]
+                    references.append({
+                        "title": r.get("title", ""),
+                        "url": r.get("url", ""),
+                        "snippet": r.get("content", "")[:300] if r.get("content") else "",
+                        "images": r.get("images", []),
+                        "is_fetched": r.get("_type") == "page",
+                        "raw_screenshot_b64": r.get("screenshot_b64"),
+                    })
+
+            # Build response
+            response = QueryResponse(
+                success=True,
+                content=content,
+                usage=usage,
+                total_time=total_time,
+                references=references,
+                web_results=web_results,
+                stages_used=result.get("stages_used", [])
+            )
+
+            # Render image if output path provided
+            if output_path and content:
+                await self._ensure_renderer()
+
+                render_success = await self._renderer.render(
+                    markdown_content=content,
+                    output_path=output_path,
+                    stats=result.get("stats", {}),
+                    references=references,
+                    page_references=[],
+                    stages_used=result.get("stages_used", []),
+                    theme_color=self.config.theme_color
+                )
+
+                if render_success:
+                    response.image_path = output_path
+
+            return response
+
+        except Exception as e:
+            logger.error(f"HywCore query_agent failed: {e}")
+            logger.exception("Agent query error details:")
             return QueryResponse(
                 success=False,
                 content="",
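
The reference list passed on to the renderer keeps only sources the model actually cited. The helper below is a self-contained restatement of that filtering step, using the same `[n]` regex, `_hidden` flag, `_type == "page"` check, and 1-based indexing as the hunk above; the function name and the sample data are made up for illustration:

```python
import re
from typing import Any, Dict, List


def cited_references(content: str, web_results: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Keep only visible results whose 1-based index appears as [n] in the markdown."""
    visible = [r for r in web_results if not r.get("_hidden")]
    cited_ids = {int(m.group(1)) for m in re.finditer(r"\[(\d+)\]", content)}

    refs = []
    for idx in sorted(cited_ids):
        if 1 <= idx <= len(visible):  # indices outside the visible result list are ignored
            r = visible[idx - 1]
            refs.append({
                "title": r.get("title", ""),
                "url": r.get("url", ""),
                "snippet": (r.get("content") or "")[:300],
                "is_fetched": r.get("_type") == "page",
            })
    return refs


markdown = "Feature X shipped in 2024 [1] and is documented on the wiki [2]."
results = [
    {"title": "Release notes", "url": "https://example.com/notes", "content": "Feature X released"},
    {"title": "Tracking issue", "url": "https://example.com/issue", "_hidden": True},
    {"title": "Wiki page", "url": "https://example.com/wiki", "_type": "page"},
]
print(cited_references(markdown, results))
```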

@@ -254,6 +392,15 @@ class HywCore:
         """
         # Default to full_page=True as requested for /w command
         return await self._search_service.screenshot_url(url, full_page=True)
+
+    async def screenshot_with_content(self, url: str, max_content_length: int = 8000) -> Dict[str, Any]:
+        """
+        Capture screenshot and extract page content.
+
+        Returns:
+            Dict with screenshot_b64, content (truncated), title, url
+        """
+        return await self._search_service.screenshot_with_content(url, max_content_length=max_content_length)
 
     async def screenshot_batch(self, urls: List[str]) -> List[Optional[str]]:
         """
hyw_core/definitions.py
CHANGED

@@ -6,66 +6,28 @@ All global prompts and tool definitions for the pipeline stages.
 
 from typing import Dict, Any
 
-#
-#
-
-
-
-
-
-
-
-
-
-
-
-Call `refuse_answer` in the following cases:
-- Content alluding to political events, political figures, or sitting national leaders
-- R18+ / R18G (but not ordinary galgame, educational content, etc.)
-
-## Process requirements
-- Reply in the language requested by the user (wrapped in the language tag)
-```language
-{language}
-```
-- Keep the answer within 600 characters, encyclopedia style, rigorous and not long-winded.
-- Visual information: if the input includes automatically captured webpage screenshots, analyze the information in them as reference.
-- Check whether the search results are directly relevant to the user's question; do not trust or conflate them blindly.
-- Body format:
-  - First give a `# ` main title of about 8-10 characters, with no filler; do not answer the user's question directly in it.
-  - Immediately follow with a <summary>...</summary>: a plain-text introduction of about 100 characters that clearly summarizes the key points of the long answer.
-  - Then write detailed second-level headings plus markdown body text: varied in presentation, concise, accurate, and credible.
-  - Do not produce overly long code blocks or wide tables; give only the key points and accurate data.
-  - Not supported by the renderer: links, image links, mermaid
-  - Supported: formulas and code highlighting, only when needed.
-  - Image links and link cards are rendered automatically; you do not need to write them out.
-- Citations:
-  > Important: all body content must be based on actual information, with 100% factual accuracy
-  - Sources are numbered [1], [2], [3]... in the order they were retrieved
-  - Prefer citing high-quality fetched pages, but do not cite resources that turn out to require login, a captcha, or a redirect to another site and cannot be retrieved
-  - Cite directly in the body using the [1] format; cite only sources that help the answer, and only authoritative wikis, official sites, resource sites, etc., not third-party news repost sites.
-  - Do not output a reference list; the system generates it automatically
+# Used by SummaryStage - language appended at runtime
+SUMMARY_REPORT_SP = """# You are a summarization assistant (Agent). Your job is to answer the user's question, or explain the key terms in it, based on the information returned by the search tool.
+## Core principles
+Use your own knowledge as little as possible; rely on web_tool for information whenever you can.
+
+## Tool usage guide
+- Call `refuse_answer` when appropriate
+
+## Answer format
+- A `# ` main title of about 8-10 characters
+- A <summary>...</summary> of roughly 100 characters
+- Second-level headings plus markdown body text
+- Cite sources in the body using the [1] format; do not write out the sources, the system renders them automatically
 """
 
-
-# =============================================================================
-# VISION DESCRIPTION PROMPT
-# =============================================================================
-
-
-
-
-# =============================================================================
-# TOOL DEFINITIONS
-# =============================================================================
-
 def get_refuse_answer_tool() -> Dict[str, Any]:
     """Tool for refusing to answer inappropriate content."""
     return {
         "type": "function",
         "function": {
             "name": "refuse_answer",
-            "description": "
+            "description": "Refuse to answer disallowed content: content alluding to political events, political figures, or sitting national leaders, or R18+/R18G material (but not ordinary galgame, educational content, etc.)",
             "parameters": {
                 "type": "object",
                 "properties": {

@@ -77,7 +39,63 @@ def get_refuse_answer_tool() -> Dict[str, Any]:
     }
 
 
+def get_web_tool() -> Dict[str, Any]:
+    """Tool for web search with filter syntax and URL screenshot."""
+    return {
+        "type": "function",
+        "function": {
+            "name": "web_tool",
+            "description": """Search the web or screenshot a given URL. Used to get up-to-date information and look up material.
+Web search (prefer this for most questions):
+Pass search terms directly, e.g. "python async", to get a list of search results
+
+URL screenshot (use when the user explicitly asks for a screenshot):
+Pass a full URL such as "https://example.com" to screenshot that page directly
+
+Web search + screenshots (use when you can predict what results the search will return; at most 3 screenshots in total):
+- Domain filter: "github=2: python async" → searches "python async github" and screenshots the first 2 results whose link/title contains "github"
+- Index selection: "1,2: minecraft mods" → searches "minecraft mods" and screenshots results 1 and 2
+- Multiple domains: "mcmod=1, github=1: forge mod" → searches "forge mod mcmod github" and screenshots the first result whose link/title contains "mcmod" and the first whose link/title contains "github"
+""",
+            "parameters": {
+                "type": "object",
+                "properties": {
+                    "query": {
+                        "type": "string",
+                        "description": "Search query or URL. Supports filter syntax (see description)"
+                    }
+                },
+                "required": ["query"]
+            }
+        }
+    }
+
 
+# =============================================================================
+# AGENT PROMPTS
+# =============================================================================
 
+AGENT_SYSTEM_PROMPT = """# You are an intelligent assistant (Agent). Your job is to use the `web_tool` to search the web or screenshot URLs for the user while completing the task the user assigns you.
+## Task
+Understand the task the user's intent assigns to you.
+If the user has not explicitly assigned a task, the default task is to explain the key terms in the user's question.
+
+## Core principles
+Use your own knowledge as little as possible; rely on web_tool for information whenever you can.
+
+## Tool usage guide
+- Use web_tool proactively to gather information
+- When searching, keep keywords simple, precise, and friendly to traditional search engines.
+- When capturing page screenshots, use only authoritative wikis, official sites, resource sites, etc., not third-party news repost sites.
+- You may call tools at most 2 times; after that you must give the final answer
+- Call `refuse_answer` when appropriate
+- For concrete tasks such as paraphrasing, formatting, or translation, give the final answer directly without calling tools
+
+## Answer format
+- A `# ` main title of about 8-10 characters
+- A <summary>...</summary> of roughly 100 characters
+- Second-level headings plus markdown body text
+- Cite sources in the body using the [1] format; do not write out the sources, the system renders them automatically
+"""
 
 
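
The filter syntax documented in `web_tool` ("github=2: python async", "1,2: minecraft mods", "mcmod=1, github=1: forge mod") is presumably handled by the new entari_plugin_hyw/filters.py, which this view does not show. The parser below is a hypothetical sketch that matches only the behaviour described in the tool text; every name in it is made up:

```python
import re
from dataclasses import dataclass, field
from typing import List, Tuple


@dataclass
class WebQuery:
    terms: str                                                     # what actually gets searched
    indices: List[int] = field(default_factory=list)               # "1,2:" style picks
    domains: List[Tuple[str, int]] = field(default_factory=list)   # ("github", 2) style picks


def parse_web_query(raw: str) -> WebQuery:
    """Split an optional filter prefix ("github=2, mcmod=1:" or "1,2:") from the search terms."""
    head, sep, tail = raw.partition(":")
    if not sep or raw.startswith(("http://", "https://")):
        return WebQuery(terms=raw.strip())  # no prefix, or a plain URL for direct screenshot

    indices: List[int] = []
    domains: List[Tuple[str, int]] = []
    for part in (p.strip() for p in head.split(",") if p.strip()):
        if part.isdigit():
            indices.append(int(part))
        elif m := re.fullmatch(r"([\w.-]+)=(\d+)", part):
            domains.append((m.group(1), int(m.group(2))))
        else:
            return WebQuery(terms=raw.strip())  # the prefix was not a filter after all

    # Domain names are appended to the search terms, per the tool description
    terms = " ".join([tail.strip(), *[d for d, _ in domains]]).strip()
    return WebQuery(terms=terms, indices=indices, domains=domains)


print(parse_web_query("github=2: python async"))
print(parse_web_query("1,2: minecraft mods"))
```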
hyw_core/search.py
CHANGED

@@ -159,3 +159,13 @@ class SearchService:
         """
         service = get_screenshot_service(headless=self._headless)
         return await service.screenshot_url(url, full_page=full_page)
+
+    async def screenshot_with_content(self, url: str, max_content_length: int = 8000) -> Dict[str, Any]:
+        """
+        Capture screenshot and extract page content.
+
+        Returns:
+            Dict with screenshot_b64, content (truncated), title, url
+        """
+        service = get_screenshot_service(headless=self._headless)
+        return await service.screenshot_with_content(url, max_content_length=max_content_length)
hyw_core/stages/summary.py
CHANGED

@@ -43,9 +43,7 @@ class SummaryStage(BaseStage):
         # Select prompt
         language = getattr(self.config, "language", "Simplified Chinese")
 
-        system_prompt = SUMMARY_REPORT_SP
-            language=language
-        )
+        system_prompt = SUMMARY_REPORT_SP + f"\n\nLanguage requested by the user: {language}"
 
         # Build Context Message
         context_message = f"## Web Search & Page Content\n\n```context\n{full_context}\n```"
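
The removed lines apparently formatted the prompt with `language=language`, which no longer works now that the trimmed `SUMMARY_REPORT_SP` has no `{language}` placeholder; the fix appends the language at runtime instead. A small runnable sketch of that assembly step; the prompt constant is a stand-in and the way the user message is composed is an assumption, only the language suffix and the context block come from the diff:

```python
SUMMARY_REPORT_SP = "# You are a summarization assistant."  # stand-in for the real prompt constant


def build_messages(language: str, full_context: str, user_input: str) -> list[dict]:
    # The language is appended to the system prompt at runtime instead of via str.format()
    system_prompt = SUMMARY_REPORT_SP + f"\n\nLanguage requested by the user: {language}"
    context_message = f"## Web Search & Page Content\n\n```context\n{full_context}\n```"
    return [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": f"{context_message}\n\n{user_input}"},  # message layout assumed
    ]


print(build_messages("Simplified Chinese", "search snippets here", "What is Entari?")[0]["content"])
```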
{entari_plugin_hyw-4.0.0rc11.dist-info → entari_plugin_hyw-4.0.0rc12.dist-info}/WHEEL
RENAMED

File without changes

{entari_plugin_hyw-4.0.0rc11.dist-info → entari_plugin_hyw-4.0.0rc12.dist-info}/top_level.txt
RENAMED

File without changes