entari-plugin-hyw 4.0.0rc6__py3-none-any.whl → 4.0.0rc8__py3-none-any.whl

This diff shows the contents of publicly released package versions as published to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in that registry.

Potentially problematic release: this version of entari-plugin-hyw might be problematic.
Files changed (114)
  1. entari_plugin_hyw/Untitled-1 +1865 -0
  2. entari_plugin_hyw/__init__.py +733 -379
  3. entari_plugin_hyw/history.py +60 -57
  4. entari_plugin_hyw/misc.py +3 -0
  5. entari_plugin_hyw/search_cache.py +154 -0
  6. {entari_plugin_hyw-4.0.0rc6.dist-info → entari_plugin_hyw-4.0.0rc8.dist-info}/METADATA +3 -1
  7. entari_plugin_hyw-4.0.0rc8.dist-info/RECORD +68 -0
  8. {entari_plugin_hyw-4.0.0rc6.dist-info → entari_plugin_hyw-4.0.0rc8.dist-info}/WHEEL +1 -1
  9. {entari_plugin_hyw-4.0.0rc6.dist-info → entari_plugin_hyw-4.0.0rc8.dist-info}/top_level.txt +1 -0
  10. hyw_core/__init__.py +94 -0
  11. hyw_core/browser_control/__init__.py +65 -0
  12. hyw_core/browser_control/assets/card-dist/index.html +409 -0
  13. hyw_core/browser_control/assets/index.html +5691 -0
  14. hyw_core/browser_control/engines/__init__.py +17 -0
  15. hyw_core/browser_control/engines/default.py +166 -0
  16. {entari_plugin_hyw/browser → hyw_core/browser_control}/engines/duckduckgo.py +42 -8
  17. {entari_plugin_hyw/browser → hyw_core/browser_control}/engines/google.py +1 -1
  18. {entari_plugin_hyw/browser → hyw_core/browser_control}/manager.py +15 -8
  19. entari_plugin_hyw/render_vue.py → hyw_core/browser_control/renderer.py +29 -14
  20. hyw_core/browser_control/service.py +720 -0
  21. hyw_core/config.py +154 -0
  22. hyw_core/core.py +322 -0
  23. hyw_core/definitions.py +83 -0
  24. entari_plugin_hyw/modular_pipeline.py → hyw_core/pipeline.py +204 -86
  25. {entari_plugin_hyw → hyw_core}/search.py +60 -19
  26. hyw_core/stages/__init__.py +21 -0
  27. entari_plugin_hyw/stage_base.py → hyw_core/stages/base.py +3 -0
  28. entari_plugin_hyw/stage_summary.py → hyw_core/stages/summary.py +36 -7
  29. entari_plugin_hyw/assets/card-dist/index.html +0 -387
  30. entari_plugin_hyw/browser/__init__.py +0 -10
  31. entari_plugin_hyw/browser/engines/bing.py +0 -95
  32. entari_plugin_hyw/browser/service.py +0 -304
  33. entari_plugin_hyw/card-ui/.gitignore +0 -24
  34. entari_plugin_hyw/card-ui/README.md +0 -5
  35. entari_plugin_hyw/card-ui/index.html +0 -16
  36. entari_plugin_hyw/card-ui/package-lock.json +0 -2342
  37. entari_plugin_hyw/card-ui/package.json +0 -31
  38. entari_plugin_hyw/card-ui/public/logos/anthropic.svg +0 -1
  39. entari_plugin_hyw/card-ui/public/logos/cerebras.svg +0 -9
  40. entari_plugin_hyw/card-ui/public/logos/deepseek.png +0 -0
  41. entari_plugin_hyw/card-ui/public/logos/gemini.svg +0 -1
  42. entari_plugin_hyw/card-ui/public/logos/google.svg +0 -1
  43. entari_plugin_hyw/card-ui/public/logos/grok.png +0 -0
  44. entari_plugin_hyw/card-ui/public/logos/huggingface.png +0 -0
  45. entari_plugin_hyw/card-ui/public/logos/microsoft.svg +0 -15
  46. entari_plugin_hyw/card-ui/public/logos/minimax.png +0 -0
  47. entari_plugin_hyw/card-ui/public/logos/mistral.png +0 -0
  48. entari_plugin_hyw/card-ui/public/logos/nvida.png +0 -0
  49. entari_plugin_hyw/card-ui/public/logos/openai.svg +0 -1
  50. entari_plugin_hyw/card-ui/public/logos/openrouter.png +0 -0
  51. entari_plugin_hyw/card-ui/public/logos/perplexity.svg +0 -24
  52. entari_plugin_hyw/card-ui/public/logos/qwen.png +0 -0
  53. entari_plugin_hyw/card-ui/public/logos/xai.png +0 -0
  54. entari_plugin_hyw/card-ui/public/logos/xiaomi.png +0 -0
  55. entari_plugin_hyw/card-ui/public/logos/zai.png +0 -0
  56. entari_plugin_hyw/card-ui/public/vite.svg +0 -1
  57. entari_plugin_hyw/card-ui/src/App.vue +0 -756
  58. entari_plugin_hyw/card-ui/src/assets/vue.svg +0 -1
  59. entari_plugin_hyw/card-ui/src/components/HelloWorld.vue +0 -41
  60. entari_plugin_hyw/card-ui/src/components/MarkdownContent.vue +0 -382
  61. entari_plugin_hyw/card-ui/src/components/SectionCard.vue +0 -41
  62. entari_plugin_hyw/card-ui/src/components/StageCard.vue +0 -240
  63. entari_plugin_hyw/card-ui/src/main.ts +0 -5
  64. entari_plugin_hyw/card-ui/src/style.css +0 -29
  65. entari_plugin_hyw/card-ui/src/test_regex.js +0 -103
  66. entari_plugin_hyw/card-ui/src/types.ts +0 -61
  67. entari_plugin_hyw/card-ui/tsconfig.app.json +0 -16
  68. entari_plugin_hyw/card-ui/tsconfig.json +0 -7
  69. entari_plugin_hyw/card-ui/tsconfig.node.json +0 -26
  70. entari_plugin_hyw/card-ui/vite.config.ts +0 -16
  71. entari_plugin_hyw/definitions.py +0 -155
  72. entari_plugin_hyw/stage_instruct.py +0 -345
  73. entari_plugin_hyw/stage_instruct_deepsearch.py +0 -104
  74. entari_plugin_hyw-4.0.0rc6.dist-info/RECORD +0 -100
  75. {entari_plugin_hyw → hyw_core/browser_control}/assets/card-dist/logos/anthropic.svg +0 -0
  76. {entari_plugin_hyw → hyw_core/browser_control}/assets/card-dist/logos/cerebras.svg +0 -0
  77. {entari_plugin_hyw → hyw_core/browser_control}/assets/card-dist/logos/deepseek.png +0 -0
  78. {entari_plugin_hyw → hyw_core/browser_control}/assets/card-dist/logos/gemini.svg +0 -0
  79. {entari_plugin_hyw → hyw_core/browser_control}/assets/card-dist/logos/google.svg +0 -0
  80. {entari_plugin_hyw → hyw_core/browser_control}/assets/card-dist/logos/grok.png +0 -0
  81. {entari_plugin_hyw → hyw_core/browser_control}/assets/card-dist/logos/huggingface.png +0 -0
  82. {entari_plugin_hyw → hyw_core/browser_control}/assets/card-dist/logos/microsoft.svg +0 -0
  83. {entari_plugin_hyw → hyw_core/browser_control}/assets/card-dist/logos/minimax.png +0 -0
  84. {entari_plugin_hyw → hyw_core/browser_control}/assets/card-dist/logos/mistral.png +0 -0
  85. {entari_plugin_hyw → hyw_core/browser_control}/assets/card-dist/logos/nvida.png +0 -0
  86. {entari_plugin_hyw → hyw_core/browser_control}/assets/card-dist/logos/openai.svg +0 -0
  87. {entari_plugin_hyw → hyw_core/browser_control}/assets/card-dist/logos/openrouter.png +0 -0
  88. {entari_plugin_hyw → hyw_core/browser_control}/assets/card-dist/logos/perplexity.svg +0 -0
  89. {entari_plugin_hyw → hyw_core/browser_control}/assets/card-dist/logos/qwen.png +0 -0
  90. {entari_plugin_hyw → hyw_core/browser_control}/assets/card-dist/logos/xai.png +0 -0
  91. {entari_plugin_hyw → hyw_core/browser_control}/assets/card-dist/logos/xiaomi.png +0 -0
  92. {entari_plugin_hyw → hyw_core/browser_control}/assets/card-dist/logos/zai.png +0 -0
  93. {entari_plugin_hyw → hyw_core/browser_control}/assets/card-dist/vite.svg +0 -0
  94. {entari_plugin_hyw/assets/icon → hyw_core/browser_control/assets/logos}/anthropic.svg +0 -0
  95. {entari_plugin_hyw/assets/icon → hyw_core/browser_control/assets/logos}/cerebras.svg +0 -0
  96. {entari_plugin_hyw/assets/icon → hyw_core/browser_control/assets/logos}/deepseek.png +0 -0
  97. {entari_plugin_hyw/assets/icon → hyw_core/browser_control/assets/logos}/gemini.svg +0 -0
  98. {entari_plugin_hyw/assets/icon → hyw_core/browser_control/assets/logos}/google.svg +0 -0
  99. {entari_plugin_hyw/assets/icon → hyw_core/browser_control/assets/logos}/grok.png +0 -0
  100. {entari_plugin_hyw/assets/icon → hyw_core/browser_control/assets/logos}/huggingface.png +0 -0
  101. {entari_plugin_hyw/assets/icon → hyw_core/browser_control/assets/logos}/microsoft.svg +0 -0
  102. {entari_plugin_hyw/assets/icon → hyw_core/browser_control/assets/logos}/minimax.png +0 -0
  103. {entari_plugin_hyw/assets/icon → hyw_core/browser_control/assets/logos}/mistral.png +0 -0
  104. {entari_plugin_hyw/assets/icon → hyw_core/browser_control/assets/logos}/nvida.png +0 -0
  105. {entari_plugin_hyw/assets/icon → hyw_core/browser_control/assets/logos}/openai.svg +0 -0
  106. {entari_plugin_hyw/assets/icon → hyw_core/browser_control/assets/logos}/openrouter.png +0 -0
  107. {entari_plugin_hyw/assets/icon → hyw_core/browser_control/assets/logos}/perplexity.svg +0 -0
  108. {entari_plugin_hyw/assets/icon → hyw_core/browser_control/assets/logos}/qwen.png +0 -0
  109. {entari_plugin_hyw/assets/icon → hyw_core/browser_control/assets/logos}/xai.png +0 -0
  110. {entari_plugin_hyw/assets/icon → hyw_core/browser_control/assets/logos}/xiaomi.png +0 -0
  111. {entari_plugin_hyw/assets/icon → hyw_core/browser_control/assets/logos}/zai.png +0 -0
  112. {entari_plugin_hyw/browser → hyw_core/browser_control}/engines/base.py +0 -0
  113. {entari_plugin_hyw/browser → hyw_core/browser_control}/landing.html +0 -0
  114. {entari_plugin_hyw → hyw_core}/image_cache.py +0 -0
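
Most of this release is a package split: the browser, rendering, search, and pipeline code moves out of entari_plugin_hyw/ into a new top-level hyw_core package (hence the extra entry in top_level.txt), while the plugin keeps only the Entari-facing glue (__init__.py, history.py, misc.py, and the new search_cache.py). The source diff shown below is entari_plugin_hyw/__init__.py. As a rough illustration of what the split means for imports, the sketch below restates paths visible in that diff; it is illustrative, not an exhaustive mapping.

    # Illustrative only: import paths before and after the hyw_core split,
    # taken from the entari_plugin_hyw/__init__.py diff below.

    # 4.0.0rc6: core pieces were plugin-internal modules
    #   from .modular_pipeline import ModularPipeline
    #   from .render_vue import ContentRenderer, get_content_renderer

    # 4.0.0rc8: core pieces live in the separate hyw_core package
    from hyw_core import HywCore, HywCoreConfig, QueryRequest
    from hyw_core.browser_control import ContentRenderer, get_content_renderer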
@@ -1,62 +1,137 @@
+ """
+ entari-plugin-hyw - Entari Plugin for HYW
+
+ Use large language models to interpret chat messages.
+ """
+
  from dataclasses import dataclass, field
  from importlib.metadata import version as get_version
- from typing import List, Dict, Any, Optional, Union
+ from typing import List, Dict, Any, Optional
  import time
  import asyncio
+ import os
+ import secrets
+ import base64
+ import re
+ import tempfile

- # 从 pyproject.toml 读取版本号,避免重复维护
- try:
-     __version__ = get_version("entari_plugin_hyw")
- except Exception:
-     __version__ = "0.0.0"
-
- from arclet.alconna import Alconna, Args, AllParam, CommandMeta, Option, Arparma, MultiVar, store_true
- from arclet.entari import metadata, listen, Session, plugin_config, BasicConfModel, plugin, command
- from arclet.letoderea import on
+ from arclet.alconna import Alconna, Args, AllParam, Arparma
+ from arclet.entari import metadata, listen, Session, plugin_config, BasicConfModel, command
  from arclet.entari import MessageChain, Text, Image, MessageCreatedEvent, Quote, At
  from satori.element import Custom
  from loguru import logger
- import arclet.letoderea as leto
  from arclet.entari.event.command import CommandReceive
-
- from .modular_pipeline import ModularPipeline
- from .history import HistoryManager
- from .render_vue import ContentRenderer, get_content_renderer
- from .misc import process_onebot_json, process_images, resolve_model_name, render_refuse_answer, render_image_unsupported, REFUSE_ANSWER_MARKDOWN
  from arclet.entari.event.lifespan import Cleanup

- import os
- import secrets
- import base64
+ # Import from internal hyw_core
+ from hyw_core import HywCore, HywCoreConfig, QueryRequest
+ from hyw_core.browser_control import (
+     ContentRenderer,
+     get_content_renderer,
+     set_global_renderer,
+     close_screenshot_service,
+ )
+ from hyw_core.browser_control.manager import close_shared_browser

- import re
+ # Local modules
+ from .history import HistoryManager
+ from .misc import (
+     process_onebot_json,
+     process_images,
+     resolve_model_name,
+     render_refuse_answer,
+     render_image_unsupported,
+ )
+ from .search_cache import SearchResultCache, parse_single_index, parse_multi_indices


- def parse_color(color: str) -> str:
+ def parse_filter_syntax(query: str, max_count: int = 3):
      """
-     Parse color from hex or RGB tuple to hex format.
-     Supports: #ff0000, ff0000, (255, 0, 0), 255,0,0
+     Parse enhanced filter syntax supporting:
+     - Chinese/English colons (： :) and commas (， ,)
+     - Multiple filters: "mcmod=2, github=1 : xxx"
+     - Index lists: "1, 2, 3 : xxx"
+     - Max total selections
+
+     Returns:
+         (filters, search_query, error_msg)
+         filters: list of (filter_type, filter_value, count) tuples
+             filter_type: 'index' or 'link'
+             count: how many to get (default 1)
+         search_query: the actual search query
+         error_msg: error message if exceeded max
      """
+     if not query:
+         return [], query, None
+
+     # Normalize Chinese punctuation to English
+     normalized = query.replace('：', ':').replace('，', ',').replace('、', ',')
+
+     # Handle escaped colons: \: or /: -> placeholder
+     normalized = re.sub(r'[/\\]:', '\x00COLON\x00', normalized)
+
+     # Split by colon - last part is the search query
+     parts = normalized.split(':')
+     if len(parts) < 2:
+         # No colon found, restore escaped colons and return as-is
+         return [], query.replace('\\:', ':').replace('/:', ':'), None
+
+     # Everything after the last colon is the search query
+     search_query = parts[-1].strip().replace('\x00COLON\x00', ':')
+
+     # Everything before is the filter specification
+     filter_spec = ':'.join(parts[:-1]).strip().replace('\x00COLON\x00', ':')
+
+     if not filter_spec or not search_query:
+         return [], query.replace('\\:', ':').replace('/:', ':'), None
+
+     # Parse filter specifications (comma-separated)
+     filter_items = [f.strip() for f in filter_spec.split(',') if f.strip()]
+
+     filters = []
+     for item in filter_items:
+         # Check for "filter=count" pattern (e.g., "mcmod=2")
+         eq_match = re.match(r'^(\w+)\s*=\s*(\d+)$', item)
+         if eq_match:
+             filter_name = eq_match.group(1).lower()
+             count = int(eq_match.group(2))
+             filters.append(('link', filter_name, count))
+         elif item.isdigit():
+             # Pure index
+             filters.append(('index', int(item), 1))
+         else:
+             # Filter name without count (default count=1)
+             filters.append(('link', item.lower(), 1))
+
+     # Calculate total count
+     total = sum(f[2] for f in filters)
+     if total > max_count:
+         return [], search_query, f"最多选择{max_count}个结果 (当前选择了{total}个)"
+
+     return filters, search_query, None
+
+
+ try:
+     __version__ = get_version("entari_plugin_hyw")
+ except Exception:
+     __version__ = "4.0.0-rc8"
+
+
+ def parse_color(color: str) -> str:
      if not color:
          return "#ef4444"
-
      color = str(color).strip()
-
-     # Hex format: #fff or #ffffff or ffffff
      if color.startswith('#') and len(color) in [4, 7]:
          return color
      if re.match(r'^[0-9a-fA-F]{6}$', color):
          return f'#{color}'
-
-     # RGB tuple: (r, g, b) or r,g,b
      rgb_match = re.match(r'^\(?(\d+)[,\s]+(\d+)[,\s]+(\d+)\)?$', color)
      if rgb_match:
          r, g, b = (max(0, min(255, int(x))) for x in rgb_match.groups())
          return f'#{r:02x}{g:02x}{b:02x}'
-
-     logger.warning(f"Invalid color '{color}', using default #ef4444")
      return "#ef4444"

+
  class _RecentEventDeduper:
      def __init__(self, ttl_seconds: float = 30.0, max_size: int = 2048):
          self.ttl_seconds = ttl_seconds
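
For reference, the parse_filter_syntax helper added in the hunk above drives the filter syntax used by the /q and /w handlers later in this diff. A quick sketch of the values it returns, following the code as written (the queries are made-up examples and the outputs are read off the implementation, not separately verified against the published wheel):

    parse_filter_syntax("mcmod=2, github=1 : fabric api")
    # -> ([('link', 'mcmod', 2), ('link', 'github', 1)], 'fabric api', None)

    parse_filter_syntax("1, 3 : fabric api")
    # -> ([('index', 1, 1), ('index', 3, 1)], 'fabric api', None)

    parse_filter_syntax("plain query without a colon")
    # -> ([], 'plain query without a colon', None)

    parse_filter_syntax("1,2,3,4 : too many", max_count=3)
    # -> ([], 'too many', '最多选择3个结果 (当前选择了4个)')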
@@ -77,152 +152,104 @@ class _RecentEventDeduper:
          expired = [k for k, ts in self._seen.items() if now - ts > self.ttl_seconds]
          for k in expired:
              self._seen.pop(k, None)
-         if len(self._seen) > self.max_size:
-             for k, _ in sorted(self._seen.items(), key=lambda kv: kv[1])[: len(self._seen) - self.max_size]:
-                 self._seen.pop(k, None)

  _event_deduper = _RecentEventDeduper()

- @dataclass
- class ModelConfig:
-     """Model configuration for a specific stage."""
-     model_name: Optional[str] = None
-     api_key: Optional[str] = None
-     base_url: Optional[str] = None
-     extra_body: Optional[Dict[str, Any]] = None
-     model_provider: Optional[str] = None
-     input_price: Optional[float] = None
-     output_price: Optional[float] = None
-     image_input: bool = True
-

  @dataclass
  class HywConfig(BasicConfModel):
-     # Core Settings
+     """Plugin configuration"""
      admins: List[str] = field(default_factory=list)
      models: List[Dict[str, Any]] = field(default_factory=list)
      question_command: str = "/q"
      language: str = "Simplified Chinese"
      temperature: float = 0.4

-     # Root-level defaults (backward compatible)
      model_name: Optional[str] = None
      api_key: Optional[str] = None
      base_url: str = "https://openrouter.ai/api/v1"
-     extra_body: Optional[Dict[str, Any]] = None
-     model_provider: Optional[str] = None
-     input_price: Optional[float] = None
-     output_price: Optional[float] = None

-     # Nested Stage Configs
-     instruct: Optional[ModelConfig] = None
-     qa: Optional[ModelConfig] = None
-     main: Optional[ModelConfig] = None # Summary stage
-
-     # Search/Fetch Settings
      search_engine: str = "google"

-     # Rendering Settings
      headless: bool = False
-     render_timeout_ms: int = 6000
-     render_image_timeout_ms: int = 3000
-
-     # Bot Behavior
      save_conversation: bool = False
      reaction: bool = False
      quote: bool = False
-
-     # UI Theme
      theme_color: str = "#ff0000"
+
+     # Nested configurations
+     main: Optional[Dict[str, Any]] = None
+     instruct: Optional[Dict[str, Any]] = None
+     vision: Optional[Dict[str, Any]] = None
+     deepsearch_instruct: Optional[Dict[str, Any]] = None
+     deepsearch_agent: Optional[Dict[str, Any]] = None

      def __post_init__(self):
-         """Parse and normalize theme color after initialization."""
          self.theme_color = parse_color(self.theme_color)
-         # Convert dicts to ModelConfig if needed
-         if isinstance(self.instruct, dict):
-             self.instruct = ModelConfig(**self.instruct)
-         if isinstance(self.qa, dict):
-             self.qa = ModelConfig(**self.qa)
-         if isinstance(self.main, dict):
-             self.main = ModelConfig(**self.main)

-     def get_model_config(self, stage: str) -> Dict[str, Any]:
-         """
-         Get resolved model config for a stage.
-
-         Args:
-             stage: "instruct", "qa", or "main" (summary)
-
-         Returns:
-             Dict with model_name, api_key, base_url, extra_body, etc.
-         """
-         # Determine primary and secondary config sources
-         primary = None
-         secondary = None
-
-         if stage == "instruct":
-             primary = self.instruct
-             secondary = self.main # Fallback to main
-         elif stage == "qa":
-             # QA fallback to main as well if ever used
-             primary = self.qa
-             secondary = self.main
-         elif stage == "main":
-             primary = self.main
-
-         # Build result with fallback logic
-         def resolve(field_name: str, is_essential: bool = True):
-             """Resolve a field with fallback: Primary -> Secondary -> Root."""
-             # 1. Try Primary
-             val = getattr(primary, field_name, None) if primary else None
-
-             # 2. Try Secondary (if value missing)
-             if val is None and secondary:
-                 val = getattr(secondary, field_name, None)
-
-             # 3. Try Root (if value still missing)
-             if val is None:
-                 val = getattr(self, field_name, None)
-             return val
+     def to_hyw_core_config(self) -> HywCoreConfig:
+         main_cfg = self.main or {}
+         instruct_cfg = self.instruct or {}

+         return HywCoreConfig.from_dict({
+             "models": self.models,
+             "model_name": self.model_name or "",
+             "api_key": self.api_key or "",
+             "base_url": self.base_url,
+             "temperature": self.temperature,
+             "search_engine": self.search_engine,
+             "headless": self.headless,
+             "language": self.language,
+             "theme_color": self.theme_color,
+
+             # Map nested 'main' config to summary stage
+             "summary_model": main_cfg.get("model_name"),
+             "summary_api_key": main_cfg.get("api_key"),
+             "summary_base_url": main_cfg.get("base_url"),
+             "summary_extra_body": main_cfg.get("extra_body"),
+
+             # Map nested 'instruct' config to instruct stage
+             "instruct_model": instruct_cfg.get("model_name"),
+             "instruct_api_key": instruct_cfg.get("api_key"),
+             "instruct_base_url": instruct_cfg.get("base_url"),
+             "instruct_extra_body": instruct_cfg.get("extra_body"),
+         })
+
+     def get_model_config(self, stage: str) -> Dict[str, Any]:
          return {
-             "model_name": resolve("model_name"),
-             "api_key": resolve("api_key"),
-             "base_url": resolve("base_url"),
-             "extra_body": resolve("extra_body", is_essential=False),
-             "model_provider": resolve("model_provider", is_essential=False),
-             "input_price": resolve("input_price", is_essential=False),
-             "output_price": resolve("output_price", is_essential=False),
+             "model_name": self.model_name,
+             "api_key": self.api_key,
+             "base_url": self.base_url,
          }


  conf = plugin_config(HywConfig)
  history_manager = HistoryManager()
  renderer = ContentRenderer(headless=conf.headless)
- from .render_vue import set_global_renderer
  set_global_renderer(renderer)
+ search_cache = SearchResultCache(ttl_seconds=600.0) # 10 minutes

- # Pre-start Crawl4AI browser for fast fetching/screenshots
- from .browser.service import prestart_browser, close_screenshot_service
- # prestart_browser(headless=conf.headless) # Removed to avoid RuntimeError: no running event loop
-
+ _hyw_core: Optional[HywCore] = None

- class GlobalCache:
-     models_image_path: Optional[str] = None
-
- global_cache = GlobalCache()
+ def get_hyw_core() -> HywCore:
+     global _hyw_core
+     if _hyw_core is None:
+         _hyw_core = HywCore(conf.to_hyw_core_config())
+     return _hyw_core


  @listen(Cleanup)
  async def cleanup_screenshot_service():
-     """Cleanup shared browser on shutdown."""
+     global _hyw_core
      try:
+         if _hyw_core:
+             await _hyw_core.close()
+             _hyw_core = None
          await close_screenshot_service()
-         # Also close the shared browser manager
-         from .browser.manager import close_shared_browser
-         await close_shared_browser()
+         close_shared_browser()
      except Exception as e:
-         logger.warning(f"Failed to cleanup browser services: {e}")
+         logger.warning(f"Failed to cleanup: {e}")
+

  async def react(session: Session, emoji: str):
      if not conf.reaction: return
@@ -231,52 +258,35 @@ async def react(session: Session, emoji: str):
      except Exception as e:
          logger.warning(f"Reaction failed: {e}")

+
  async def process_request(
      session: Session[MessageCreatedEvent],
      all_param: Optional[MessageChain] = None,
      selected_model: Optional[str] = None,
-     selected_vision_model: Optional[str] = None,
-     conversation_key_override: Optional[str] = None,
-     local_mode: bool = False,
  ) -> None:
      mc = MessageChain(all_param)
      if session.reply:
          try:
-             # Check if reply is from self (the bot)
-             # 1. Check by Message ID (reliable for bot's own messages if recorded)
              reply_msg_id = str(session.reply.origin.id) if hasattr(session.reply.origin, 'id') else None
-             is_bot = False
-
-             if reply_msg_id and history_manager.is_bot_message(reply_msg_id):
-                 is_bot = True
-
-             if is_bot:
-                 pass # Reply is from bot - ignoring
-             else:
+             if not (reply_msg_id and history_manager.is_bot_message(reply_msg_id)):
                  mc.extend(MessageChain(" ") + session.reply.origin.message)
-         except Exception as e:
-             logger.warning(f"Failed to process reply origin: {e}")
+         except Exception:
              mc.extend(MessageChain(" ") + session.reply.origin.message)

-     # Filter and reconstruct MessageChain
-     filtered_elements = mc.get(Text) + mc.get(Image) + mc.get(Custom)
-     mc = MessageChain(filtered_elements)
-
-
+     filtered = mc.get(Text) + mc.get(Image) + mc.get(Custom)
+     mc = MessageChain(filtered)
+
      text_content = str(mc.get(Text)).strip()
-     # Remove HTML image tags from text content to prevent "unreasonable code behavior"
      text_content = re.sub(r'<img[^>]+>', '', text_content, flags=re.IGNORECASE)
-
+
      if not text_content and not mc.get(Image) and not mc.get(Custom):
          return

-     # History & Context
-     hist_key = conversation_key_override
-     if not hist_key and session.reply and hasattr(session.reply.origin, 'id'):
+     hist_key = None
+     if session.reply and hasattr(session.reply.origin, 'id'):
          hist_key = history_manager.get_conversation_id(str(session.reply.origin.id))

      hist_payload = history_manager.get_history(hist_key) if hist_key else []
-     meta = history_manager.get_metadata(hist_key) if hist_key else {}
      context_id = f"guild_{session.guild.id}" if session.guild else f"user_{session.user.id}"

      if conf.reaction: await react(session, "✨")
@@ -285,287 +295,631 @@ async def process_request(
285
295
  msg_text = str(mc.get(Text)).strip() if mc.get(Text) else ""
286
296
  msg_text = re.sub(r'<img[^>]+>', '', msg_text, flags=re.IGNORECASE)
287
297
 
288
- # If message is empty but has images, use a placeholder
289
298
  if not msg_text and (mc.get(Image) or mc.get(Custom)):
290
- msg_text = "[图片]"
299
+ msg_text = "[图片]"
291
300
 
292
301
  for custom in [e for e in mc if isinstance(e, Custom)]:
293
302
  if custom.tag == 'onebot:json':
294
- if decoded := process_onebot_json(custom.attributes()): msg_text += f"\n{decoded}"
303
+ if decoded := process_onebot_json(custom.attributes()):
304
+ msg_text += f"\n{decoded}"
295
305
  break
296
306
 
297
- # Model Selection (Step 1)
298
- # Resolve model names from config if they are short names/keywords
299
- model = selected_model or meta.get("model")
300
- if model and model != "off":
301
- resolved, err = resolve_model_name(model, conf.models)
307
+ model = selected_model
308
+ if model:
309
+ resolved, _ = resolve_model_name(model, conf.models)
302
310
  if resolved:
303
311
  model = resolved
304
- elif err:
305
- logger.warning(f"Model resolution warning for {model}: {err}")
306
312
 
307
- vision_model = selected_vision_model or meta.get("vision_model")
308
- if vision_model and vision_model != "off":
309
- resolved_v, err_v = resolve_model_name(vision_model, conf.models)
310
- if resolved_v:
311
- vision_model = resolved_v
312
- elif err_v:
313
- logger.warning(f"Vision model resolution warning for {vision_model}: {err_v}")
314
-
315
- images, err = await process_images(mc, vision_model)
316
-
317
- # Check image input support
318
- model_cfg_dict = next((m for m in conf.models if m.get("name") == model), None)
319
- image_input_supported = True
320
- if model_cfg_dict:
321
- image_input_supported = model_cfg_dict.get("image_input", True)
313
+ images, _ = await process_images(mc, None)
322
314
 
323
- # Log inferenced content mode
324
- inferred_content_mode = "image" if image_input_supported else "text"
325
- logger.info(f"Process Request: Model '{model}' Image Input: {image_input_supported} -> Mode: {inferred_content_mode}")
326
-
327
- if images and not image_input_supported:
328
- logger.warning(f"Model '{model}' does not support images, but user sent {len(images)} images.")
329
-
330
- # Start renderer for the unsupported card
331
- renderer = await get_content_renderer()
332
- render_tab_task = asyncio.create_task(renderer.prepare_tab())
333
-
334
- # Wait for tab and render unsupported
315
+ # Prepare renderer
316
+ local_renderer = await get_content_renderer()
317
+ render_tab_task = asyncio.create_task(local_renderer.prepare_tab())
318
+
319
+ async def send_noti(msg: str):
335
320
  try:
336
- tab_id = await render_tab_task
321
+ if conf.quote:
322
+ await session.send([Quote(session.event.message.id), msg])
323
+ else:
324
+ await session.send(msg)
337
325
  except Exception as e:
338
- tab_id = None
339
-
340
- import tempfile
341
- with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tf:
342
- output_path = tf.name
343
-
344
- render_ok = await render_image_unsupported(
345
- renderer=renderer,
346
- output_path=output_path,
347
- theme_color=conf.theme_color,
348
- tab_id=tab_id
349
- )
350
-
351
- if render_ok:
352
- with open(output_path, "rb") as f:
353
- img_data = base64.b64encode(f.read()).decode()
354
- await session.send(MessageChain(Image(src=f'data:image/png;base64,{img_data}')))
355
- if os.path.exists(output_path):
356
- os.remove(output_path)
357
- return
358
-
359
- renderer = await get_content_renderer()
360
- render_tab_task = asyncio.create_task(renderer.prepare_tab())
361
- tab_id = None
362
-
363
- # Call Pipeline directly
364
- safe_input = msg_text
365
- pipeline = ModularPipeline(conf)
366
- try:
367
- resp = await pipeline.execute(
368
- safe_input,
369
- hist_payload,
370
- model_name=model,
371
- images=images,
372
- selected_vision_model=vision_model,
373
- )
374
- finally:
375
- await pipeline.close()
376
-
377
- # Step 1 Results
378
- step1_vision_model = resp.get("vision_model_used")
379
- step1_model = resp.get("model_used")
380
- step1_history = resp.get("conversation_history", [])
381
- step1_stats = resp.get("stats", {})
382
-
383
- final_resp = resp
326
+ logger.warning(f"Failed to send notification: {e}")
327
+
328
+ request = QueryRequest(
329
+ user_input=msg_text,
330
+ images=images,
331
+ conversation_history=hist_payload,
332
+ model_name=model,
333
+ send_notification=send_noti
334
+ )
384
335
 
385
- # Step 2 (Optional)
386
-
387
-
336
+ with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tf:
337
+ output_path = tf.name
388
338
 
389
- # Extract Response Data
390
- content = final_resp.get("llm_response", "")
391
- structured = final_resp.get("structured_response", {})
339
+ core = get_hyw_core()
340
+ # 1. Query ONLY (no render path provided)
341
+ # Pass output_path=None so it returns raw response without internal rendering
342
+ response = await core.query(request, output_path=None)
392
343
 
393
- # Wait for tab preparation if needed (should be ready by now)
344
+ # 2. Get the warmed-up tab
394
345
  try:
395
346
  tab_id = await render_tab_task
396
- except Exception as e:
397
- logger.warning(f"Failed to prepare render tab: {e}")
347
+ except Exception:
398
348
  tab_id = None
399
349
 
400
- # Render
401
- import tempfile
402
- with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tf:
403
- output_path = tf.name
404
- model_used = final_resp.get("model_used")
350
+ display_session_id = history_manager.generate_short_code()
405
351
 
406
- # Determine session short code
407
- if hist_key:
408
- display_session_id = history_manager.get_code_by_key(hist_key)
409
- if not display_session_id:
410
- display_session_id = history_manager.generate_short_code()
411
- else:
412
- display_session_id = history_manager.generate_short_code()
413
-
414
- # Use stats_list if available, otherwise standard stats
415
- stats_to_render = final_resp.get("stats_list", final_resp.get("stats", {}))
416
-
417
- # Check if refuse_answer was triggered
418
- if final_resp.get("refuse_answer"):
419
- logger.info(f"Refuse answer triggered. Rendering refuse image. Reason: {final_resp.get('refuse_reason', '')}")
352
+ if response.should_refuse:
420
353
  render_ok = await render_refuse_answer(
421
- renderer=renderer,
354
+ renderer=local_renderer,
422
355
  output_path=output_path,
423
- reason=final_resp.get('refuse_reason', 'Instruct 专家分配此任务流程失败,请尝试提出其他问题~'),
356
+ reason=response.refuse_reason or 'Refused',
424
357
  theme_color=conf.theme_color,
425
358
  tab_id=tab_id,
426
359
  )
360
+ elif not response.success:
361
+ await session.send(f"Error: {response.error}")
362
+ return
427
363
  else:
428
- render_ok = await renderer.render(
429
- markdown_content=content,
364
+ # 3. Explicit External Render using the Parallel Tab
365
+ render_ok = await core.render(
366
+ markdown_content=response.content,
430
367
  output_path=output_path,
431
- tab_id=tab_id,
432
- stats=stats_to_render,
433
- references=structured.get("references", []),
434
- page_references=structured.get("page_references", []),
435
- image_references=structured.get("image_references", []),
436
- stages_used=final_resp.get("stages_used", []),
437
- theme_color=conf.theme_color,
368
+ stats={"total_time": response.total_time},
369
+ references=response.references,
370
+ page_references=response.page_references,
371
+ image_references=response.image_references,
372
+ stages_used=response.stages_used,
373
+ tab_id=tab_id
438
374
  )
375
+ if render_ok:
376
+ response.image_path = output_path
439
377
 
440
- # Send & Save
441
- if not render_ok:
442
- logger.error("Render failed; skipping reply.")
443
- if os.path.exists(output_path):
444
- try:
445
- os.remove(output_path)
446
- except Exception as exc:
447
- logger.warning(f"Failed to delete render output {output_path}: {exc}")
448
- sent = None
449
- else:
450
- # Convert to base64
378
+ if render_ok:
451
379
  with open(output_path, "rb") as f:
452
380
  img_data = base64.b64encode(f.read()).decode()
453
-
454
- # Build single reply chain (image only now)
455
- elements = []
456
- elements.append(Image(src=f'data:image/png;base64,{img_data}'))
457
-
458
- msg_chain = MessageChain(*elements)
459
381
 
382
+ msg_chain = MessageChain(Image(src=f'data:image/png;base64,{img_data}'))
460
383
  if conf.quote:
461
384
  msg_chain = MessageChain(Quote(session.event.message.id)) + msg_chain
462
-
463
- # Use reply_to instead of manual Quote insertion to avoid ActionFailed errors
385
+
464
386
  sent = await session.send(msg_chain)
465
-
466
- sent_id = next((str(e.id) for e in sent if hasattr(e, 'id')), None) if sent else None
467
- msg_id = str(session.event.message.id) if hasattr(session.event, 'message') else str(session.event.id)
468
- related = [msg_id] + ([str(session.reply.origin.id)] if session.reply and hasattr(session.reply.origin, 'id') else [])
469
-
470
- history_manager.remember(
471
- sent_id,
472
- final_resp.get("conversation_history", []),
473
- related,
474
- {
475
- "model": model_used,
476
- "trace_markdown": final_resp.get("trace_markdown"),
477
- },
478
- context_id,
479
- code=display_session_id,
480
- )
481
-
482
- if conf.save_conversation and sent_id:
483
- try:
484
- # Pass web_results to save fetched pages as markdown, and output image
387
+
388
+ sent_id = next((str(e.id) for e in sent if hasattr(e, 'id')), None) if sent else None
389
+ msg_id = str(session.event.message.id) if hasattr(session.event, 'message') else str(session.event.id)
390
+
391
+ updated_history = hist_payload + [
392
+ {"role": "user", "content": msg_text},
393
+ {"role": "assistant", "content": response.content}
394
+ ]
395
+
396
+ # Save to Memory
397
+ history_manager.remember(
398
+ sent_id, updated_history, [msg_id],
399
+ {"model": model}, context_id, code=display_session_id,
400
+ )
401
+
402
+ # Save to Disk (Debug/Logging)
403
+ if conf.save_conversation:
404
+ # Extract traces from response
405
+ trace = response.stages_trace
406
+ instruct_traces = trace.get("instruct_rounds") if trace else None
407
+
408
+ # Check for web_results in response (needs Core update)
409
+ web_results = getattr(response, "web_results", [])
410
+
485
411
  history_manager.save_to_disk(
486
- sent_id,
487
- web_results=final_resp.get("web_results"),
488
- image_path=output_path if 'output_path' in locals() else None
412
+ key=sent_id,
413
+ image_path=output_path,
414
+ web_results=web_results,
415
+ instruct_traces=instruct_traces,
416
+ vision_trace=None # Vision integrated into Instruct now
489
417
  )
490
- except Exception as e:
491
- logger.warning(f"Failed to save conversation: {e}")
492
418
 
493
- # Cleanup temp image
494
- if 'output_path' in locals() and output_path and os.path.exists(output_path):
495
- try:
496
- os.remove(output_path)
497
- except Exception:
498
- pass
499
-
500
-
501
-
419
+ if os.path.exists(output_path):
420
+ os.remove(output_path)
502
421
 
503
422
  except Exception as e:
504
423
  logger.exception(f"Error: {e}")
505
- err_msg = f"Error: {e}"
506
- if conf.quote:
507
- await session.send([Quote(session.event.message.id), err_msg])
508
- else:
509
- await session.send(err_msg)
510
-
511
- # Save conversation on error if response was generated
512
- if 'resp' in locals() and resp and conf.save_conversation:
513
- try:
514
- # Use a temporary ID for error cases
515
- error_id = f"error_{int(time.time())}_{secrets.token_hex(4)}"
516
-
517
- # Try to salvage history
518
- partial_hist = []
519
- if 'resp' in locals() and resp:
520
- partial_hist = resp.get("conversation_history", [])
521
- elif 'context' in locals() and context and hasattr(context, 'instruct_history'):
522
- partial_hist = context.instruct_history
523
-
524
- related_ids = []
525
- if 'session' in locals():
526
- msg_id = str(session.event.message.id) if hasattr(session.event, 'message') else str(session.event.id)
527
- related_ids = [msg_id]
528
-
529
- history_manager.remember(error_id, partial_hist, related_ids, {"model": "error", "error": str(e)}, context_id, code=display_session_id if 'display_session_id' in locals() else None)
530
-
531
- # Save debug data on error
532
- web_res = context.web_results if 'context' in locals() and context else []
533
-
534
- history_manager.save_to_disk(
535
- error_id,
536
- web_results=web_res
537
- )
424
+ await session.send(f"Error: {e}")
538
425
 
539
- except Exception as save_err:
540
- logger.error(f"Failed to save error conversation: {save_err}")
541
426
 
542
427
 
543
- alc = Alconna(
544
- conf.question_command,
545
- Args["all_param;?", AllParam],
546
- )
428
+ alc = Alconna(conf.question_command, Args["all_param;?", AllParam])
547
429
 
548
430
  @command.on(alc)
549
431
  async def handle_question_command(session: Session[MessageCreatedEvent], result: Arparma):
550
- """Handle main Question command"""
551
432
  try:
552
- logger.info(f"Question Command Triggered. Message: {result}")
553
433
  mid = str(session.event.message.id) if getattr(session.event, "message", None) else str(session.event.id)
554
434
  dedupe_key = f"{getattr(session.account, 'id', 'account')}:{mid}"
555
435
  if _event_deduper.seen_recently(dedupe_key):
556
- logger.warning(f"Duplicate command event ignored: {dedupe_key}")
557
436
  return
558
437
  except Exception:
559
438
  pass
560
-
561
- logger.info(f"Question Command Triggered. Message: {session.event.message}")
562
439
 
563
440
  args = result.all_matched_args
441
+ all_param = args.get("all_param")
442
+
443
+ # Extract query text
444
+ if all_param:
445
+ if isinstance(all_param, MessageChain):
446
+ query_text = str(all_param.get(Text)).strip()
447
+ else:
448
+ query_text = str(all_param).strip()
449
+ else:
450
+ query_text = ""
451
+
452
+ # Check if replying to a cached search result
453
+ reply_msg_id = None
454
+ if session.reply and hasattr(session.reply.origin, 'id'):
455
+ reply_msg_id = str(session.reply.origin.id)
456
+
457
+ # Quote mode: Use cached search results
458
+ if reply_msg_id:
459
+ cached = search_cache.get(reply_msg_id)
460
+ if cached:
461
+ # Parse indices if provided
462
+ indices = parse_multi_indices(query_text, max_count=3) if query_text else None
463
+
464
+ # Check if too many indices requested (parse_multi_indices returns None if > max_count)
465
+ if query_text and indices is None:
466
+ # Check if it looks like indices but exceeded limit
467
+ import re
468
+ if re.match(r'^[\d,、\s\-–]+$', query_text):
469
+ await session.send("最多选择3个结果进行总结")
470
+ search_cache.cleanup()
471
+ return
472
+
473
+ if conf.reaction:
474
+ asyncio.create_task(react(session, "✨"))
475
+
476
+ core = get_hyw_core()
477
+ local_renderer = await get_content_renderer()
478
+ tab_task = asyncio.create_task(local_renderer.prepare_tab())
479
+
480
+ # Collect screenshots for selected pages
481
+ screenshots = []
482
+ if indices:
483
+ # Screenshot mode: capture pages for selected indices
484
+ for idx in indices:
485
+ if idx < len(cached.results):
486
+ url = cached.results[idx].get("url", "")
487
+ if url:
488
+ b64_img = await core.screenshot(url)
489
+ if b64_img:
490
+ screenshots.append(b64_img)
491
+
492
+ if not screenshots:
493
+ try: await tab_task
494
+ except: pass
495
+ await session.send("无法截图所选页面")
496
+ search_cache.cleanup()
497
+ return
498
+
499
+ user_query = f"总结关于 \"{cached.query}\" 的内容"
500
+ else:
501
+ # No indices - summarize based on cached snippets (no screenshots)
502
+ context_parts = []
503
+ for i, res in enumerate(cached.results[:10]):
504
+ title = res.get("title", f"Result {i+1}")
505
+ snippet = res.get("content", "") or res.get("snippet", "")
506
+ context_parts.append(f"## {title}\n{snippet}")
507
+
508
+ context_message = f"基于搜索 \"{cached.query}\" 的结果摘要回答用户问题:\n\n" + "\n\n".join(context_parts)
509
+ user_query = query_text if query_text else f"总结关于 \"{cached.query}\" 的搜索结果"
510
+
511
+ # Build request with screenshots (if any)
512
+ if screenshots:
513
+ request = QueryRequest(
514
+ user_input=user_query,
515
+ images=screenshots,
516
+ conversation_history=[],
517
+ model_name=None,
518
+ )
519
+ else:
520
+ request = QueryRequest(
521
+ user_input=f"{context_message}\n\n用户问题: {user_query}",
522
+ images=[],
523
+ conversation_history=[],
524
+ model_name=None,
525
+ )
526
+
527
+ with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tf:
528
+ output_path = tf.name
529
+
530
+ response = await core.query(request, output_path=None)
531
+
532
+ try:
533
+ tab_id = await tab_task
534
+ except Exception:
535
+ tab_id = None
536
+
537
+ if response.success and response.content:
538
+ render_ok = await core.render(
539
+ markdown_content=response.content,
540
+ output_path=output_path,
541
+ stats={"total_time": response.total_time},
542
+ references=[],
543
+ page_references=[],
544
+ tab_id=tab_id
545
+ )
546
+
547
+ if render_ok and os.path.exists(output_path):
548
+ with open(output_path, "rb") as f:
549
+ img_data = base64.b64encode(f.read()).decode()
550
+
551
+ msg_chain = MessageChain(Image(src=f'data:image/png;base64,{img_data}'))
552
+ if conf.quote:
553
+ msg_chain = MessageChain(Quote(session.event.message.id)) + msg_chain
554
+
555
+ await session.send(msg_chain)
556
+ os.remove(output_path)
557
+ else:
558
+ await session.send(response.content[:500])
559
+ else:
560
+ await session.send(f"总结失败: {response.error or 'Unknown error'}")
561
+
562
+ search_cache.cleanup()
563
+ return
564
+
565
+ # === Filter Mode: Search + Find matching links + Summarize ===
566
+ filters, search_query, filter_error = parse_filter_syntax(query_text, max_count=3)
567
+
568
+ if filter_error:
569
+ await session.send(filter_error)
570
+ return
571
+
572
+ if filters:
573
+ if conf.reaction:
574
+ asyncio.create_task(react(session, "✨"))
575
+
576
+ core = get_hyw_core()
577
+ local_renderer = await get_content_renderer()
578
+
579
+ # Run search and prepare tab in parallel
580
+ search_task = asyncio.create_task(core.search([search_query]))
581
+ tab_task = asyncio.create_task(local_renderer.prepare_tab())
582
+
583
+ results = await search_task
584
+ flat_results = results[0] if results else []
585
+
586
+ if not flat_results:
587
+ try: await tab_task
588
+ except: pass
589
+ await session.send("Search returned no results.")
590
+ return
591
+
592
+ visible = [r for r in flat_results if not r.get("_hidden", False)]
593
+
594
+ # Collect URLs to screenshot
595
+ urls_to_screenshot = []
596
+ for filter_type, filter_value, count in filters:
597
+ if filter_type == 'index':
598
+ idx = filter_value - 1
599
+ if 0 <= idx < len(visible):
600
+ url = visible[idx].get("url", "")
601
+ if url and url not in urls_to_screenshot:
602
+ urls_to_screenshot.append(url)
603
+ else:
604
+ try: await tab_task
605
+ except: pass
606
+ await session.send(f"序号 {filter_value} 超出范围 (1-{len(visible)})")
607
+ return
608
+ else:
609
+ found_count = 0
610
+ for res in visible:
611
+ url = res.get("url", "")
612
+ if filter_value in url.lower() and url not in urls_to_screenshot:
613
+ urls_to_screenshot.append(url)
614
+ found_count += 1
615
+ if found_count >= count:
616
+ break
617
+
618
+ if found_count == 0:
619
+ try: await tab_task
620
+ except: pass
621
+ await session.send(f"未找到包含 \"{filter_value}\" 的链接")
622
+ return
623
+
624
+ if not urls_to_screenshot:
625
+ try: await tab_task
626
+ except: pass
627
+ await session.send("未找到匹配的链接")
628
+ return
629
+
630
+ # Take screenshots concurrently
631
+ screenshot_tasks = [core.screenshot(url) for url in urls_to_screenshot]
632
+ screenshot_results = await asyncio.gather(*screenshot_tasks)
633
+ screenshots = [b64 for b64 in screenshot_results if b64]
634
+
635
+ if not screenshots:
636
+ try: await tab_task
637
+ except: pass
638
+ await session.send("无法截图页面")
639
+ return
640
+
641
+ # Pass screenshots to LLM for summarization
642
+ user_query = f"总结关于 \"{search_query}\" 的内容"
643
+
644
+ request = QueryRequest(
645
+ user_input=user_query,
646
+ images=screenshots,
647
+ conversation_history=[],
648
+ model_name=None,
649
+ )
650
+
651
+ with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tf:
652
+ output_path = tf.name
653
+
654
+ response = await core.query(request, output_path=None)
655
+
656
+ try:
657
+ tab_id = await tab_task
658
+ except Exception:
659
+ tab_id = None
660
+
661
+ if response.success and response.content:
662
+ render_ok = await core.render(
663
+ markdown_content=response.content,
664
+ output_path=output_path,
665
+ stats={"total_time": response.total_time},
666
+ references=[],
667
+ page_references=[],
668
+ tab_id=tab_id
669
+ )
670
+
671
+ if render_ok and os.path.exists(output_path):
672
+ with open(output_path, "rb") as f:
673
+ img_data = base64.b64encode(f.read()).decode()
674
+
675
+ msg_chain = MessageChain(Image(src=f'data:image/png;base64,{img_data}'))
676
+ if conf.quote:
677
+ msg_chain = MessageChain(Quote(session.event.message.id)) + msg_chain
678
+
679
+ await session.send(msg_chain)
680
+ os.remove(output_path)
681
+ else:
682
+ await session.send(response.content[:500])
683
+ else:
684
+ await session.send(f"总结失败: {response.error or 'Unknown error'}")
685
+
686
+ return
564
687
 
565
- await process_request(session, args.get("all_param"), selected_model=None, selected_vision_model=None, conversation_key_override=None)
688
+ # Normal query mode (no cache context)
689
+ await process_request(session, all_param)
566
690
 
567
- metadata("hyw", author=[{"name": "kumoSleeping", "email": "zjr2992@outlook.com"}], version=__version__, config=HywConfig)
568
691
 
692
+ # Search/Web Command (/w)
693
+ alc_search = Alconna("/w", Args["query;?", AllParam])
694
+
695
+ @command.on(alc_search)
696
+ async def handle_web_command(session: Session[MessageCreatedEvent], result: Arparma):
697
+ """
698
+ Handle web command /w:
699
+ - If query is index + Quote -> Screenshot cached result
700
+ - If query is URL -> Screenshot
701
+ - If query is text -> Search
702
+ """
703
+ query = result.all_matched_args.get("query")
704
+
705
+ # Extract query text
706
+ if query:
707
+ if isinstance(query, MessageChain):
708
+ query = str(query.get(Text)).strip()
709
+ query = str(query).strip()
710
+ else:
711
+ query = ""
712
+
713
+ # Check if replying to a cached search result
714
+ reply_msg_id = None
715
+ if session.reply and hasattr(session.reply.origin, 'id'):
716
+ reply_msg_id = str(session.reply.origin.id)
717
+
718
+ # Quote + Index mode: Screenshot specific cached result
719
+ if reply_msg_id:
720
+ cached = search_cache.get(reply_msg_id)
721
+ if cached:
722
+ # Parse index from query
723
+ idx = parse_single_index(query)
724
+ if idx is None:
725
+ # No valid index - show prompt
726
+ await session.send("请指定序号 (1-10)")
727
+ search_cache.cleanup() # Lazy cleanup
728
+ return
729
+
730
+ if idx >= len(cached.results):
731
+ await session.send(f"序号超出范围 (1-{len(cached.results)})")
732
+ search_cache.cleanup()
733
+ return
734
+
735
+ # Screenshot the cached URL
736
+ target_result = cached.results[idx]
737
+ target_url = target_result.get("url", "")
738
+ if not target_url:
739
+ await session.send("该结果无有效URL")
740
+ search_cache.cleanup()
741
+ return
742
+
743
+ if conf.reaction:
744
+ asyncio.create_task(react(session, "📸"))
745
+
746
+ core = get_hyw_core()
747
+ b64_img = await core.screenshot(target_url)
748
+
749
+ if b64_img:
750
+ msg_chain = MessageChain(Image(src=f'data:image/jpeg;base64,{b64_img}'))
751
+ if conf.quote:
752
+ msg_chain = MessageChain(Quote(session.event.message.id)) + msg_chain
753
+ await session.send(msg_chain)
754
+ else:
755
+ await session.send(f"截图失败: {target_url}")
756
+
757
+ search_cache.cleanup()
758
+ return
759
+
760
+ # No query and no cache context - nothing to do
761
+ if not query:
762
+ return
763
+
764
+ try:
765
+ core = get_hyw_core()
766
+
767
+ # 1. URL Detection
768
+ url_pattern = re.compile(r'^https?://(?:[-\w./?=&%#]+)')
769
+ if url_pattern.match(query):
770
+ # === URL Screenshot Mode ===
771
+ if conf.reaction: asyncio.create_task(react(session, "📸"))
772
+
773
+ with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tf:
774
+ output_path = tf.name
775
+
776
+ b64_img = await core.screenshot(query)
777
+
778
+ if b64_img:
779
+ with open(output_path, "wb") as f:
780
+ f.write(base64.b64decode(b64_img))
781
+
782
+ msg_chain = MessageChain(Image(src=f'data:image/jpeg;base64,{b64_img}'))
783
+ if conf.quote:
784
+ msg_chain = MessageChain(Quote(session.event.message.id)) + msg_chain
785
+
786
+ await session.send(msg_chain)
787
+
788
+ if conf.save_conversation:
789
+ mid = str(session.event.message.id) if getattr(session.event, "message", None) else str(session.event.id)
790
+ context_id = f"guild_{session.guild.id}" if session.guild else "user"
791
+ history_manager.remember(mid, [{"role": "user", "content": f"/w {query}"}], [], {}, context_id=context_id)
792
+ history_manager.save_to_disk(mid, image_path=output_path, web_results=[{"url": query, "title": "Screenshot", "_type": "screenshot"}])
793
+
794
+ os.remove(output_path)
795
+ else:
796
+ await session.send(f"Failed to screenshot URL: {query}")
797
+ return
798
+
799
+ # 2. Search Mode (Fallthrough)
800
+
801
+ # Parse enhanced filter syntax
802
+ filters, search_query, filter_error = parse_filter_syntax(query, max_count=3)
803
+
804
+ if filter_error:
805
+ await session.send(filter_error)
806
+ return
807
+
808
+ # Search first
809
+ search_task = asyncio.create_task(core.search([search_query]))
810
+
811
+ if conf.reaction:
812
+ asyncio.create_task(react(session, "🔍"))
813
+
814
+ results = await search_task
815
+ flat_results = results[0] if results else []
816
+
817
+ if not flat_results:
818
+ await session.send("Search returned no results.")
819
+ return
820
+
821
+ visible = [r for r in flat_results if not r.get("_hidden", False)]
822
+
823
+ if not visible:
824
+ await session.send("Search returned no visible results.")
825
+ return
826
+
827
+ # === Filter Mode: Screenshot matching links ===
828
+ if filters:
829
+ urls_to_screenshot = []
830
+
831
+ for filter_type, filter_value, count in filters:
832
+ if filter_type == 'index':
833
+ # Index-based (1-based)
834
+ idx = filter_value - 1
835
+ if 0 <= idx < len(visible):
836
+ url = visible[idx].get("url", "")
837
+ if url and url not in urls_to_screenshot:
838
+ urls_to_screenshot.append(url)
839
+ else:
840
+ await session.send(f"序号 {filter_value} 超出范围 (1-{len(visible)})")
841
+ return
842
+ else:
843
+ # Link filter: find URLs containing filter term
844
+ found_count = 0
845
+ for res in visible:
846
+ url = res.get("url", "")
847
+ if filter_value in url.lower() and url not in urls_to_screenshot:
848
+ urls_to_screenshot.append(url)
849
+ found_count += 1
850
+ if found_count >= count:
851
+ break
852
+
853
+ if found_count == 0:
854
+ await session.send(f"未找到包含 \"{filter_value}\" 的链接")
855
+ return
856
+
857
+ if not urls_to_screenshot:
858
+ await session.send("未找到匹配的链接")
859
+ return
860
+
861
+ if conf.reaction:
862
+ asyncio.create_task(react(session, "📸"))
863
+
864
+ # Take screenshots concurrently
865
+ screenshot_tasks = [core.screenshot(url) for url in urls_to_screenshot]
866
+ screenshot_results = await asyncio.gather(*screenshot_tasks)
867
+
868
+ images = [Image(src=f'data:image/jpeg;base64,{b64}') for b64 in screenshot_results if b64]
869
+
870
+ if images:
871
+ msg_chain = MessageChain(images)
872
+ if conf.quote:
873
+ msg_chain = MessageChain(Quote(session.event.message.id)) + msg_chain
874
+ await session.send(msg_chain)
875
+
876
+ if conf.save_conversation:
877
+ mid = str(session.event.message.id) if getattr(session.event, "message", None) else str(session.event.id)
878
+ context_id = f"guild_{session.guild.id}" if session.guild else "user"
879
+ history_manager.remember(mid, [{"role": "user", "content": f"/w {query}"}], [], {}, context_id=context_id)
880
+ else:
881
+ await session.send("截图失败")
882
+ return
883
+
884
+ # === Normal Search Mode: Screenshot search results page ===
885
+ search_service = core._search_service
886
+ search_url = search_service._build_search_url(search_query)
887
+
888
+ # Handle address bar search marker
889
+ if search_url.startswith("__ADDRESS_BAR_SEARCH__:"):
890
+ import urllib.parse
891
+ encoded_query = urllib.parse.quote_plus(search_query)
892
+ search_url = f"https://www.google.com/search?q={encoded_query}"
893
+
894
+ b64_img = await core.screenshot(search_url)
895
+
896
+ if b64_img:
897
+ msg_chain = MessageChain(Image(src=f'data:image/jpeg;base64,{b64_img}'))
898
+ if conf.quote:
899
+ msg_chain = MessageChain(Quote(session.event.message.id)) + msg_chain
900
+
901
+ sent = await session.send(msg_chain)
902
+
903
+ # Store in cache for future /w and /q lookups
904
+ sent_id = next((str(e.id) for e in sent if hasattr(e, 'id')), None) if sent else None
905
+ if sent_id:
906
+ search_cache.store(sent_id, visible[:10], search_query)
907
+
908
+ if conf.save_conversation:
909
+ mid = str(session.event.message.id) if getattr(session.event, "message", None) else str(session.event.id)
910
+ context_id = f"guild_{session.guild.id}" if session.guild else "user"
911
+ history_manager.remember(mid, [{"role": "user", "content": f"/w {query}"}], [], {}, context_id=context_id)
912
+ else:
913
+ await session.send(f"截图搜索页面失败: {search_url}")
914
+
915
+ search_cache.cleanup() # Lazy cleanup
916
+
917
+ except Exception as e:
918
+ logger.error(f"Search command failed: {e}")
919
+ await session.send(f"Search error: {e}")
920
+
921
+
922
+ metadata("hyw", author=[{"name": "kumoSleeping", "email": "zjr2992@outlook.com"}], version=__version__, config=HywConfig)
569
923
 
570
924
  @listen(CommandReceive)
571
925
  async def remove_at(content: MessageChain):