entari-plugin-hyw 3.2.113__py3-none-any.whl → 3.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of entari-plugin-hyw might be problematic. Click here for more details.

Files changed (49) hide show
  1. entari_plugin_hyw/__init__.py +309 -758
  2. entari_plugin_hyw/hyw_core.py +700 -0
  3. {entari_plugin_hyw-3.2.113.dist-info → entari_plugin_hyw-3.3.1.dist-info}/METADATA +25 -17
  4. entari_plugin_hyw-3.3.1.dist-info/RECORD +6 -0
  5. entari_plugin_hyw/assets/icon/anthropic.svg +0 -1
  6. entari_plugin_hyw/assets/icon/deepseek.png +0 -0
  7. entari_plugin_hyw/assets/icon/gemini.svg +0 -1
  8. entari_plugin_hyw/assets/icon/google.svg +0 -1
  9. entari_plugin_hyw/assets/icon/grok.png +0 -0
  10. entari_plugin_hyw/assets/icon/microsoft.svg +0 -15
  11. entari_plugin_hyw/assets/icon/minimax.png +0 -0
  12. entari_plugin_hyw/assets/icon/mistral.png +0 -0
  13. entari_plugin_hyw/assets/icon/nvida.png +0 -0
  14. entari_plugin_hyw/assets/icon/openai.svg +0 -1
  15. entari_plugin_hyw/assets/icon/openrouter.png +0 -0
  16. entari_plugin_hyw/assets/icon/perplexity.svg +0 -24
  17. entari_plugin_hyw/assets/icon/qwen.png +0 -0
  18. entari_plugin_hyw/assets/icon/xai.png +0 -0
  19. entari_plugin_hyw/assets/icon/zai.png +0 -0
  20. entari_plugin_hyw/assets/libs/highlight.css +0 -10
  21. entari_plugin_hyw/assets/libs/highlight.js +0 -1213
  22. entari_plugin_hyw/assets/libs/katex-auto-render.js +0 -1
  23. entari_plugin_hyw/assets/libs/katex.css +0 -1
  24. entari_plugin_hyw/assets/libs/katex.js +0 -1
  25. entari_plugin_hyw/assets/libs/tailwind.css +0 -1
  26. entari_plugin_hyw/assets/package-lock.json +0 -953
  27. entari_plugin_hyw/assets/package.json +0 -16
  28. entari_plugin_hyw/assets/tailwind.config.js +0 -12
  29. entari_plugin_hyw/assets/tailwind.input.css +0 -235
  30. entari_plugin_hyw/assets/template.html +0 -157
  31. entari_plugin_hyw/assets/template.html.bak +0 -157
  32. entari_plugin_hyw/assets/template.j2 +0 -259
  33. entari_plugin_hyw/core/__init__.py +0 -0
  34. entari_plugin_hyw/core/config.py +0 -36
  35. entari_plugin_hyw/core/history.py +0 -146
  36. entari_plugin_hyw/core/hyw.py +0 -41
  37. entari_plugin_hyw/core/pipeline.py +0 -840
  38. entari_plugin_hyw/core/render.py +0 -531
  39. entari_plugin_hyw/core/render.py.bak +0 -926
  40. entari_plugin_hyw/utils/__init__.py +0 -3
  41. entari_plugin_hyw/utils/browser.py +0 -61
  42. entari_plugin_hyw/utils/mcp_playwright.py +0 -128
  43. entari_plugin_hyw/utils/misc.py +0 -93
  44. entari_plugin_hyw/utils/playwright_tool.py +0 -46
  45. entari_plugin_hyw/utils/prompts.py +0 -94
  46. entari_plugin_hyw/utils/search.py +0 -193
  47. entari_plugin_hyw-3.2.113.dist-info/RECORD +0 -47
  48. {entari_plugin_hyw-3.2.113.dist-info → entari_plugin_hyw-3.3.1.dist-info}/WHEEL +0 -0
  49. {entari_plugin_hyw-3.2.113.dist-info → entari_plugin_hyw-3.3.1.dist-info}/top_level.txt +0 -0
@@ -1,813 +1,364 @@
1
- from dataclasses import dataclass, field
2
- from typing import List, Dict, Any, Optional, Union
1
+ from dataclasses import dataclass
2
+ import html
3
3
  import time
4
-
5
- from arclet.alconna import Alconna, Args, AllParam, CommandMeta, Option, Arparma, MultiVar, store_true
6
- from arclet.entari import metadata, listen, Session, plugin_config, BasicConfModel, plugin, command
7
- from arclet.entari import MessageChain, Text, Image, MessageCreatedEvent, Quote, At
8
- from satori.element import Custom
4
+ from collections import deque
5
+ from typing import Any, Deque, Dict, List, Optional, Set, Text, Tuple, Union, TYPE_CHECKING, cast
6
+ from typing_extensions import override
7
+ from arclet.entari import metadata
8
+ from arclet.entari import MessageChain, Session
9
+ from arclet.entari.event.base import MessageEvent
9
10
  from loguru import logger
11
+ from satori.exception import ActionFailed
12
+ from arclet.entari import MessageChain, Image, Quote, Text
10
13
  import arclet.letoderea as leto
11
- from arclet.entari.event.command import CommandReceive
14
+ from arclet.entari import MessageCreatedEvent, Session
15
+ from arclet.entari import BasicConfModel, metadata, plugin_config
16
+ import httpx
17
+ import asyncio
18
+ import json
19
+ import re
20
+ from arclet.alconna import (
21
+ Args,
22
+ Alconna,
23
+ AllParam,
24
+ MultiVar,
25
+ CommandMeta,
26
+ Option,
27
+ )
28
+ from arclet.entari import MessageChain, Session, command
29
+ from arclet.entari import plugin, Ready, Cleanup, Startup
30
+ from satori.element import Custom, E
31
+ from .hyw_core import HYW, HYWConfig
32
+
33
+ # 全局变量
34
+ hyw_core = None
35
+
36
+ class HistoryManager:
37
+ def __init__(self, max_records: int = 20):
38
+ self.max_records = max_records
39
+ self._order: Deque[str] = deque()
40
+ self._store: Dict[str, List[dict]] = {}
41
+ self._bindings: Dict[str, Set[str]] = {}
42
+ self._msg_map: Dict[str, str] = {}
43
+
44
+ def extract_message_id(self, message_like: Any) -> Optional[str]:
45
+ if message_like is None:
46
+ return None
47
+ if isinstance(message_like, (list, tuple)):
48
+ for item in message_like:
49
+ mid = self.extract_message_id(item)
50
+ if mid:
51
+ return mid
52
+ return None
53
+ if isinstance(message_like, dict):
54
+ for key in ("message_id", "id"):
55
+ value = message_like.get(key)
56
+ if value:
57
+ return str(value)
58
+ for attr in ("message_id", "id"):
59
+ value = getattr(message_like, attr, None)
60
+ if value:
61
+ return str(value)
62
+ nested = getattr(message_like, "message", None)
63
+ if nested is not None and nested is not message_like:
64
+ return self.extract_message_id(nested)
65
+ return None
66
+
67
+ def remove(self, conversation_id: Optional[str], *, remove_from_order: bool = True) -> None:
68
+ if not conversation_id:
69
+ return
70
+ cid = str(conversation_id)
71
+ if remove_from_order:
72
+ try:
73
+ self._order.remove(cid)
74
+ except ValueError:
75
+ pass
76
+ bindings = self._bindings.pop(cid, set())
77
+ for msg_id in bindings:
78
+ self._msg_map.pop(msg_id, None)
79
+ self._store.pop(cid, None)
80
+
81
+ def _enforce_limit(self) -> None:
82
+ while len(self._order) > self.max_records:
83
+ obsolete = self._order.popleft()
84
+ self.remove(obsolete, remove_from_order=False)
85
+
86
+ def remember(self, conversation_id: Optional[str], history: Optional[List[dict]], related_ids: List[Optional[str]]) -> None:
87
+ if not conversation_id or not history:
88
+ return
89
+ cid = str(conversation_id)
90
+ self._store[cid] = list(history)
91
+ binding_ids = {str(mid) for mid in related_ids if mid}
92
+ self._bindings[cid] = binding_ids
93
+ for mid in binding_ids:
94
+ self._msg_map[mid] = cid
95
+ self._order.append(cid)
96
+ self._enforce_limit()
97
+
98
+ def get_history(self, msg_id: str) -> Optional[List[dict]]:
99
+ cid = self._msg_map.get(msg_id)
100
+ if cid:
101
+ return list(self._store.get(cid, []))
102
+ return None
103
+
104
+ def get_conversation_id(self, msg_id: str) -> Optional[str]:
105
+ return self._msg_map.get(msg_id)
12
106
 
13
- from .core.hyw import HYW
14
- from .core.history import HistoryManager
15
- from .core.render import ContentRenderer
16
- from .utils.misc import process_onebot_json, process_images, resolve_model_name
17
- from arclet.entari.event.lifespan import Startup, Ready, Cleanup
107
+ history_manager = HistoryManager()
18
108
 
19
- import os
20
- import secrets
21
- import base64
109
+ # Request lock for HYW agent
110
+ _hyw_request_lock: Optional[asyncio.Lock] = None
111
+
112
+ def _get_hyw_request_lock() -> asyncio.Lock:
113
+ global _hyw_request_lock
114
+ if _hyw_request_lock is None:
115
+ _hyw_request_lock = asyncio.Lock()
116
+ return _hyw_request_lock
22
117
 
23
- import re
24
118
 
25
- class _RecentEventDeduper:
26
- def __init__(self, ttl_seconds: float = 30.0, max_size: int = 2048):
27
- self.ttl_seconds = ttl_seconds
28
- self.max_size = max_size
29
- self._seen: Dict[str, float] = {}
30
-
31
- def seen_recently(self, key: str) -> bool:
32
- now = time.time()
33
- if len(self._seen) > self.max_size:
34
- self._prune(now)
35
- ts = self._seen.get(key)
36
- if ts is None or now - ts > self.ttl_seconds:
37
- self._seen[key] = now
38
- return False
39
- return True
40
-
41
- def _prune(self, now: float):
42
- expired = [k for k, ts in self._seen.items() if now - ts > self.ttl_seconds]
43
- for k in expired:
44
- self._seen.pop(k, None)
45
- if len(self._seen) > self.max_size:
46
- for k, _ in sorted(self._seen.items(), key=lambda kv: kv[1])[: len(self._seen) - self.max_size]:
47
- self._seen.pop(k, None)
48
-
49
- _event_deduper = _RecentEventDeduper()
50
-
51
- @dataclass
52
119
  class HywConfig(BasicConfModel):
53
- admins: List[str] = field(default_factory=list)
54
- models: List[Dict[str, Any]] = field(default_factory=list)
55
- question_command: str = "/q"
56
- model_name: Optional[str] = None
57
- api_key: Optional[str] = None
120
+ command_name_list: Union[str, List[str]] = "hyw"
121
+ model_name: str
122
+ api_key: str
58
123
  base_url: str = "https://openrouter.ai/api/v1"
59
- vision_model_name: Optional[str] = None
60
- vision_api_key: Optional[str] = None
61
- vision_base_url: Optional[str] = None
62
- vision_system_prompt: Optional[str] = None
63
- intruct_model_name: Optional[str] = None
64
- intruct_api_key: Optional[str] = None
65
- intruct_base_url: Optional[str] = None
66
- intruct_system_prompt: Optional[str] = None
67
- agent_system_prompt: Optional[str] = None
68
- playwright_mcp_command: str = "npx"
69
- playwright_mcp_args: Optional[List[str]] = None
70
- search_base_url: str = "https://duckduckgo.com/?q={query}&format=json&results_per_page={limit}"
71
- image_search_base_url: str = "https://duckduckgo.com/?q={query}&iax=images&ia=images&format=json&results_per_page={limit}"
72
124
  headless: bool = False
73
125
  save_conversation: bool = False
74
- icon: str = "openai"
75
- render_timeout_ms: int = 6000
76
- extra_body: Optional[Dict[str, Any]] = None
77
- enable_browser_fallback: bool = False
78
- reaction: bool = True
79
- quote: bool = True
80
- temperature: float = 0.4
81
- # Billing configuration (price per million tokens)
82
- input_price: Optional[float] = None # $ per 1M input tokens
83
- output_price: Optional[float] = None # $ per 1M output tokens
84
- # Vision model pricing overrides (defaults to main model pricing if not set)
85
- vision_input_price: Optional[float] = None
86
- vision_output_price: Optional[float] = None
87
- # Instruct model pricing overrides (defaults to main model pricing if not set)
88
- intruct_input_price: Optional[float] = None
89
- intruct_output_price: Optional[float] = None
90
126
 
91
- # Provider Names
92
- search_name: str = "SearXNG"
93
- search_provider: str = "SearXNG"
94
- model_provider: Optional[str] = None
95
- vision_model_provider: Optional[str] = None
96
- intruct_model_provider: Optional[str] = None
127
+ browser_tool: str = "jina"
128
+ jina_api_key: Optional[str] = None
97
129
 
98
- start_test: Optional[Union[str, bool]] = None
99
-
100
- conf = plugin_config(HywConfig)
101
- history_manager = HistoryManager()
102
- renderer = ContentRenderer()
103
- hyw = HYW(config=conf)
104
-
105
- @listen(Ready, once=True)
106
- async def _hyw_warmup_mcp():
107
- try:
108
- await hyw.pipeline.warmup_mcp()
109
- except Exception as e:
110
- logger.warning(f"MCP Playwright warmup error: {e}")
111
-
112
- @listen(Ready, once=True)
113
- async def _run_ui_test():
114
- """Run UI rendering test on startup if configured."""
115
- # Debug log to confirm listener is active
116
- logger.info(f"UI TEST Listener Active. start_test config: {conf.start_test} (type: {type(conf.start_test)})")
130
+ vision_model_name: Optional[str] = None
131
+ vision_base_url: Optional[str] = None
132
+ vision_api_key: Optional[str] = None
117
133
 
118
- if not conf.start_test:
119
- return
120
-
121
- test_file = ""
122
- if isinstance(conf.start_test, str):
123
- test_file = conf.start_test
124
- elif conf.start_test is True:
125
- # User enabled boolean toggle, assume default path
126
- # Try a few locations
127
- candidates = ["data/conversations/ui-test.md", "ui-test.md", "README.md"]
128
- for c in candidates:
129
- if os.path.exists(c):
130
- test_file = c
131
- break
132
- if not test_file:
133
- logger.warning("UI TEST: start_test=True but no default test file found (tried: data/conversations/ui-test.md, ui-test.md, README.md)")
134
- return
135
-
136
- logger.info(f"UI TEST: Starting render test with file {test_file}")
134
+ extra_body: Optional[Dict[str, Any]] = None
137
135
 
138
- if not os.path.exists(test_file):
139
- logger.error(f"UI TEST: File not found: {test_file}")
140
- return
141
-
142
- try:
143
- with open(test_file, "r", encoding="utf-8") as f:
144
- content = f.read()
145
-
146
- # Mock Data for Full UI Test
147
- stats = {
148
- "time": 12.5,
149
- "vision_duration": 3.2,
150
- "cost": 0.0015
151
- }
152
-
153
- stages = [
154
- {"name": "Vision", "model": "google/gemini-pro-vision", "time": 3.2, "cost": 0.0005, "provider": "Google"},
155
- {"name": "Search", "model": "duckduckgo", "time": 1.5, "cost": 0.0, "provider": "DDG"},
156
- {"name": "Agent", "model": "anthropic/claude-3-5-sonnet", "time": 7.8, "cost": 0.0010, "provider": "Anthropic"}
157
- ]
158
-
159
- mcp_steps = [
160
- {"name": "search_google", "description": "searching for 'latest entari news'", "icon": "search"},
161
- {"name": "visit_page", "description": "visiting python.org", "icon": "navigate"},
162
- {"name": "click_element", "description": "clicking 'Downloads'", "icon": "click"}
163
- ]
164
-
165
- references = [
166
- {"title": "Entari Docs", "url": "https://entari.onebot.dev", "domain": "entari.onebot.dev"},
167
- {"title": "Python Language", "url": "https://python.org", "domain": "python.org"}
168
- ]
169
-
170
- output_dir = "data/cache"
171
- os.makedirs(output_dir, exist_ok=True)
172
- output_path = f"{output_dir}/ui_test_result.png"
173
-
174
- logger.info(f"UI TEST: Rendering to {output_path}...")
175
-
176
- start = time.time()
177
- success = await renderer.render(
178
- markdown_content=content,
179
- output_path=output_path,
180
- stats=stats,
181
- stages_used=stages,
182
- mcp_steps=mcp_steps,
183
- references=references,
184
- model_name="CLAUDE-3-5-SONNET",
185
- provider_name="Anthropic",
186
- behavior_summary="Automated Test",
187
- icon_config="anthropic",
188
- render_timeout_ms=10000
189
- )
190
-
191
- if success:
192
- logger.success(f"UI TEST: Render completed in {time.time() - start:.2f}s. Saved to {output_path}")
193
- else:
194
- logger.error("UI TEST: Render FAILED.")
195
-
196
- except Exception as e:
197
- logger.error(f"UI TEST: Exception during test: {e}")
198
-
199
-
200
- @listen(Cleanup, once=True)
201
- async def _hyw_cleanup():
202
- try:
203
- await hyw.close()
204
- except Exception as e:
205
- logger.warning(f"HYW cleanup error: {e}")
206
-
207
- class GlobalCache:
208
- models_image_path: Optional[str] = None
136
+ enable_browser_fallback: bool = False
137
+ # verbose: bool = False
138
+
139
+ metadata(
140
+ "hyw",
141
+ author=[{"name": "kumoSleeping", "email": "zjr2992@outlook.com"}],
142
+ version="3.3.1",
143
+ description="",
144
+ config=HywConfig,
145
+ )
209
146
 
210
- global_cache = GlobalCache()
147
+ conf = plugin_config(HywConfig)
148
+ alc = Alconna(
149
+ conf.command_name_list,
150
+ Option("-t|--text", dest="text_only", default=False, help_text="仅文本模式(禁用图片识别)"),
151
+ Args["all_param", AllParam],
152
+ # Option("-v|--verbose", dest="verbose", default=False, help_text="启用详细日志输出"),
153
+ meta=CommandMeta(compact=False)
154
+ )
211
155
 
212
- from satori.exception import ActionFailed
213
- from satori.adapters.onebot11.reverse import _Connection
156
+ # Create HYW configuration
157
+ hyw_config = HYWConfig(
158
+ api_key=conf.api_key,
159
+ model_name=conf.model_name,
160
+ base_url=conf.base_url,
161
+ save_conversation=conf.save_conversation,
162
+ headless=conf.headless,
163
+ browser_tool=conf.browser_tool,
164
+ jina_api_key=conf.jina_api_key,
165
+ vision_model_name=conf.vision_model_name,
166
+ vision_base_url=conf.vision_base_url,
167
+ vision_api_key=conf.vision_api_key,
168
+ extra_body=conf.extra_body,
169
+ enable_browser_fallback=conf.enable_browser_fallback
170
+ )
214
171
 
215
- # Monkeypatch to suppress ActionFailed for get_msg
216
- original_call_api = _Connection.call_api
172
+ hyw = HYW(config=hyw_config)
217
173
 
218
- async def patched_call_api(self, action: str, params: dict = None):
219
- try:
220
- return await original_call_api(self, action, params)
221
- except ActionFailed as e:
222
- if action == "get_msg":
223
- logger.warning(f"Suppressed ActionFailed for get_msg: {e}")
224
- return None
225
- raise e
226
174
 
227
- _Connection.call_api = patched_call_api
228
175
 
176
+ # Emoji到代码的映射字典
229
177
  EMOJI_TO_CODE = {
178
+ "🐳": "128051",
179
+ "❌": "10060",
180
+ "🍧": "127847",
230
181
  "✨": "10024",
231
- "": "10004",
232
- "❌": "10060"
182
+ "📫": "128235"
233
183
  }
234
184
 
235
- async def react(session: Session, emoji: str):
236
- if not conf.reaction: return
185
+ async def download_image(url: str) -> bytes:
186
+ """下载图片"""
237
187
  try:
238
- if session.event.login.platform == "onebot":
239
- code = EMOJI_TO_CODE.get(emoji, "10024")
240
- # OneBot specific reaction
241
- await session.account.protocol.call_api(
242
- "internal/set_group_reaction",
243
- {
244
- "group_id": str(session.guild.id),
245
- "message_id": str(session.event.message.id),
246
- "code": code,
247
- "is_add": True
248
- }
249
- )
250
- else:
251
- # Standard Satori reaction
252
- await session.reaction_create(emoji=emoji)
253
- except ActionFailed:
254
- pass
255
- except Exception as e:
256
- logger.warning(f"Reaction failed: {e}")
257
-
258
- async def process_request(session: Session[MessageCreatedEvent], all_param: Optional[MessageChain] = None,
259
- selected_model: Optional[str] = None, selected_vision_model: Optional[str] = None,
260
- conversation_key_override: Optional[str] = None, local_mode: bool = False,
261
- next_prompt: Optional[str] = None, next_text_model: Optional[str] = None, next_vision_model: Optional[str] = None):
262
- logger.info(f"Processing request: {all_param}")
263
- mc = MessageChain(all_param)
264
- logger.info(f"reply: {session.reply}")
265
- if session.reply:
266
- try:
267
- # Check if reply is from self (the bot)
268
- # 1. Check by Message ID (reliable for bot's own messages if recorded)
269
- reply_msg_id = str(session.reply.origin.id) if hasattr(session.reply.origin, 'id') else None
270
- is_bot = False
271
-
272
- if reply_msg_id and history_manager.is_bot_message(reply_msg_id):
273
- is_bot = True
274
- logger.info(f"Reply target {reply_msg_id} identified as bot message via history")
275
-
276
- if is_bot:
277
- logger.info("Reply is from me - ignoring content")
188
+ async with httpx.AsyncClient(timeout=30.0) as client:
189
+ resp = await client.get(url)
190
+ if resp.status_code == 200:
191
+ return resp.content
278
192
  else:
279
- logger.info(f"Reply is from user (or unknown) - including content")
280
- mc.extend(MessageChain(" ") + session.reply.origin.message)
281
- except Exception as e:
282
- logger.warning(f"Failed to process reply origin: {e}")
283
- mc.extend(MessageChain(" ") + session.reply.origin.message)
284
-
285
- # Filter and reconstruct MessageChain
286
- filtered_elements = mc.get(Text) + mc.get(Image) + mc.get(Custom)
287
- mc = MessageChain(filtered_elements)
288
- logger.info(f"mc: {mc}")
289
-
290
- text_content = str(mc.get(Text)).strip()
291
- # Remove HTML image tags from text content to prevent "unreasonable code behavior"
292
- text_content = re.sub(r'<img[^>]+>', '', text_content, flags=re.IGNORECASE)
293
-
294
- if not text_content and not mc.get(Image) and not mc.get(Custom):
295
- return
296
-
297
- # History & Context
298
- hist_key = conversation_key_override
299
- if not hist_key and session.reply and hasattr(session.reply.origin, 'id'):
300
- hist_key = history_manager.get_conversation_id(str(session.reply.origin.id))
301
-
302
- hist_payload = history_manager.get_history(hist_key) if hist_key else []
303
- meta = history_manager.get_metadata(hist_key) if hist_key else {}
304
- context_id = f"guild_{session.guild.id}" if session.guild else f"user_{session.user.id}"
193
+ raise ActionFailed(f"下载图片失败,状态码: {resp.status_code}")
194
+ except Exception as e:
195
+ raise ActionFailed(f"下载图片失败: {url}, 错误: {str(e)}")
305
196
 
306
- if conf.reaction: await react(session, "✨")
307
197
 
198
+ def process_onebot_json(json_data_str: str) -> str:
308
199
  try:
309
- msg_text = str(mc.get(Text)).strip() if mc.get(Text) else ""
310
- msg_text = re.sub(r'<img[^>]+>', '', msg_text, flags=re.IGNORECASE)
311
-
312
- # If message is empty but has images, use a placeholder
313
- if not msg_text and (mc.get(Image) or mc.get(Custom)):
314
- msg_text = "[图片]"
315
-
316
- for custom in [e for e in mc if isinstance(e, Custom)]:
317
- if custom.tag == 'onebot:json':
318
- if decoded := process_onebot_json(custom.attributes()): msg_text += f"\n{decoded}"
319
- break
320
-
321
- # Model Selection (Step 1)
322
- # Resolve model names from config if they are short names/keywords
323
- model = selected_model or meta.get("model")
324
- if model and model != "off":
325
- resolved, err = resolve_model_name(model, conf.models)
326
- if resolved:
327
- model = resolved
328
- elif err:
329
- logger.warning(f"Model resolution warning for {model}: {err}")
330
-
331
- vision_model = selected_vision_model or meta.get("vision_model")
332
- if vision_model and vision_model != "off":
333
- resolved_v, err_v = resolve_model_name(vision_model, conf.models)
334
- if resolved_v:
335
- vision_model = resolved_v
336
- elif err_v:
337
- logger.warning(f"Vision model resolution warning for {vision_model}: {err_v}")
338
-
339
- images, err = await process_images(mc, vision_model)
340
-
341
- # Call Agent (Step 1)
342
- # Sanitize user_input: use extracted text only
343
- safe_input = msg_text
344
-
345
- resp = await hyw.agent(safe_input, conversation_history=hist_payload, images=images,
346
- selected_model=model, selected_vision_model=vision_model, local_mode=local_mode)
347
-
348
- # Step 1 Results
349
- step1_vision_model = resp.get("vision_model_used")
350
- step1_model = resp.get("model_used")
351
- step1_history = resp.get("conversation_history", [])
352
- step1_stats = resp.get("stats", {})
353
-
354
- final_resp = resp
355
-
356
- # Step 2 (Optional)
357
- if next_prompt:
358
- logger.info(f"Executing Step 2 with prompt: {next_prompt}")
359
-
360
- # Use Step 1 history as base for Step 2
361
- # hyw.agent already returns the updated history including the new turn
362
- # So we just pass step1_history
363
-
364
- # Determine Step 2 models
365
- # If not specified, inherit from Step 1 or config?
366
- # Usually inherit from config or meta if not specified in -n
367
- step2_model = next_text_model or model
368
- if step2_model and step2_model != "off":
369
- resolved_s2, err_s2 = resolve_model_name(step2_model, conf.models)
370
- if resolved_s2:
371
- step2_model = resolved_s2
372
-
373
- step2_vision_model = next_vision_model or vision_model # Probably not used if no new images, but consistent
374
- if step2_vision_model and step2_vision_model != "off":
375
- resolved_s2v, err_s2v = resolve_model_name(step2_vision_model, conf.models)
376
- if resolved_s2v:
377
- step2_vision_model = resolved_s2v
378
-
379
- # No new images for Step 2 usually, unless we want to carry over images?
380
- # The user said "First round image model, second round text model".
381
- # Usually Step 2 is text-only follow-up.
382
- # But hyw.agent stateless? No, we pass history.
383
- # We don't pass 'images' again to Step 2 unless we want them re-analyzed.
384
- # If Step 1 analyzed images, the analysis is in history (as assistant message or system message?).
385
- # In hyw.agent, image analysis result is added to history.
386
- # So we don't need to pass images again.
387
-
388
- resp2 = await hyw.agent(str(next_prompt), conversation_history=step1_history, images=None,
389
- selected_model=step2_model, selected_vision_model=step2_vision_model, local_mode=local_mode)
390
-
391
- final_resp = resp2
392
-
393
- # Merge Stats
394
- # Instead of merging into a single dict, we prepare a list of stats for the renderer
395
- # But we also need a combined stats for history recording?
396
- # History manager likely expects a single dict or doesn't care much (it stores what we give)
397
-
398
- # Let's keep step1_stats and resp2["stats"] separate for rendering
399
- # But for history, maybe we still want a merged one?
400
- # The code below uses final_resp["stats"] for rendering AND history.
401
-
402
- # Let's create a list for rendering
403
- stats_for_render = [step1_stats, resp2.get("stats", {})]
404
-
405
- # And a merged one for history/final_resp
406
- merged_stats = step1_stats.copy()
407
- if "stats" in resp2:
408
- for k, v in resp2["stats"].items():
409
- if isinstance(v, (int, float)) and k in merged_stats:
410
- merged_stats[k] += v
411
- elif k == "visited_domains":
412
- merged_stats[k] = list(set(merged_stats.get(k, []) + v))
413
- else:
414
- merged_stats[k] = v
415
-
416
- final_resp["stats"] = merged_stats
417
- final_resp["stats_list"] = stats_for_render # Pass this to renderer if available
418
-
419
- # Merge Model Info for Display
420
- # We want to show Step 1 Vision Model AND Step 2 Text Model
421
- if step1_vision_model:
422
- final_resp["vision_model_used"] = step1_vision_model
423
- # final_resp["model_used"] is already from Step 2
424
-
425
-
426
- # Extract Response Data
427
- content = final_resp.get("llm_response", "")
428
- structured = final_resp.get("structured_response", {})
429
-
430
- # Render
431
- import tempfile
432
- with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tf:
433
- output_path = tf.name
434
- model_used = final_resp.get("model_used")
435
- vision_model_used = final_resp.get("vision_model_used")
436
-
437
- # Helper to infer icon from model name
438
- def infer_icon_from_model(model_name: str) -> str:
439
- """Infer icon name from model name (e.g. 'google/gemini-3-flash' -> 'google' or 'gemini')"""
440
- if not model_name:
441
- return conf.icon
442
- name_lower = model_name.lower()
443
- # Check for known providers/models in the name
444
- known_icons = ["google", "gemini", "openai", "anthropic", "deepseek", "mistral",
445
- "qwen", "grok", "xai", "perplexity", "microsoft", "minimax", "nvidia"]
446
- for icon_name in known_icons:
447
- if icon_name in name_lower:
448
- return icon_name
449
- return conf.icon
450
-
451
- icon = conf.icon
452
- m_conf = None
453
- if model_used:
454
- m_conf = next((m for m in conf.models if m.get("name") == model_used), None)
455
- if m_conf:
456
- icon = m_conf.get("icon", infer_icon_from_model(model_used))
457
- else:
458
- # Model not in config list, infer from name
459
- icon = infer_icon_from_model(model_used)
460
-
461
- # Determine session short code
462
- if hist_key:
463
- display_session_id = history_manager.get_code_by_key(hist_key)
464
- if not display_session_id:
465
- # Should not happen if key exists, but fallback
466
- display_session_id = history_manager.generate_short_code()
467
- else:
468
- # New conversation, pre-generate code
469
- display_session_id = history_manager.generate_short_code()
470
-
471
- # Determine vision base url and icon
472
- vision_base_url = None
473
- vision_icon = None
474
-
475
- if vision_model_used:
476
- v_conf = next((m for m in conf.models if m.get("name") == vision_model_used), None)
477
- if v_conf:
478
- vision_base_url = v_conf.get("base_url")
479
- vision_icon = v_conf.get("icon", infer_icon_from_model(vision_model_used))
480
- else:
481
- vision_icon = infer_icon_from_model(vision_model_used)
482
-
483
- # Handle Vision Only Mode (suppress text model display)
484
- render_model_name = model_used or conf.model_name or "unknown"
485
- render_icon = icon
486
- render_base_url = m_conf.get("base_url", conf.base_url) if m_conf else conf.base_url
487
-
488
- if not model_used and vision_model_used:
489
- render_model_name = ""
490
- render_icon = ""
491
-
492
- # Use stats_list if available, otherwise standard stats
493
- stats_to_render = final_resp.get("stats_list", final_resp.get("stats", {}))
494
-
495
- # Determine Behavior Summary & Provider Name
496
-
497
- # 1. Behavior Summary
498
- behavior_summary = "Text Generation"
499
- if structured.get("mcp_steps"):
500
- behavior_summary = "Agentic Loop"
501
- elif vision_model_used:
502
- behavior_summary = "Visual Analysis"
503
-
504
- # 2. Provider Name
505
- # Try to get from m_conf (resolved above)
506
- provider_name = "Unknown Provider"
507
- if model_used and m_conf:
508
- provider_name = m_conf.get("provider", "Unknown Provider")
509
- elif not model_used and vision_model_used:
510
- # If only vision model used (unlikely but possible in code logic)
511
- if 'v_conf' in locals() and v_conf:
512
- provider_name = v_conf.get("provider", "Unknown Provider")
513
-
514
- # If still unknown and we have base_url, maybe use domain as last resort fallback?
515
- # User said: "provider does not automatically get from url if not filled"
516
- # So if it's "Unknown Provider", we leave it or maybe empty string?
517
- # Let's stick to "Unknown Provider" or just empty if we want to be clean.
518
- # But for UI validation it's better to show something if missing config.
519
-
520
- render_ok = await renderer.render(
521
- markdown_content=content,
522
- output_path=output_path,
523
- suggestions=[],
524
- stats=stats_to_render,
525
- references=structured.get("references", []),
526
- mcp_steps=structured.get("mcp_steps", []),
527
- stages_used=final_resp.get("stages_used", []),
528
- model_name=render_model_name,
529
- provider_name=provider_name,
530
- behavior_summary=behavior_summary,
531
- icon_config=render_icon,
532
- vision_model_name=vision_model_used,
533
- vision_base_url=vision_base_url,
534
- vision_icon_config=vision_icon,
535
- base_url=render_base_url,
536
- billing_info=final_resp.get("billing_info"),
537
- render_timeout_ms=conf.render_timeout_ms
538
- )
539
-
540
- # Send & Save
541
- if not render_ok:
542
- logger.error("Render failed; skipping reply. Check browser/playwright status.")
543
- if os.path.exists(output_path):
544
- try:
545
- os.remove(output_path)
546
- except Exception as exc:
547
- logger.warning(f"Failed to delete render output {output_path}: {exc}")
548
- sent = None
549
- else:
550
- # Convert to base64
551
- with open(output_path, "rb") as f:
552
- img_data = base64.b64encode(f.read()).decode()
553
-
554
- # Build single reply chain (image only now)
555
- elements = []
556
- elements.append(Image(src=f'data:image/png;base64,{img_data}'))
557
-
558
- msg_chain = MessageChain(*elements)
559
-
560
- if conf.quote:
561
- msg_chain = MessageChain(Quote(session.event.message.id)) + msg_chain
562
-
563
- # Use reply_to instead of manual Quote insertion to avoid ActionFailed errors
564
- sent = await session.send(msg_chain)
565
-
566
- sent_id = next((str(e.id) for e in sent if hasattr(e, 'id')), None) if sent else None
567
- msg_id = str(session.event.message.id) if hasattr(session.event, 'message') else str(session.event.id)
568
- related = [msg_id] + ([str(session.reply.origin.id)] if session.reply and hasattr(session.reply.origin, 'id') else [])
569
-
570
- history_manager.remember(
571
- sent_id,
572
- final_resp.get("conversation_history", []),
573
- related,
574
- {
575
- "model": model_used,
576
- "trace_markdown": final_resp.get("trace_markdown"),
577
- },
578
- context_id,
579
- code=display_session_id,
580
- )
581
-
582
- if conf.save_conversation and sent_id:
583
- history_manager.save_to_disk(sent_id)
584
-
585
-
200
+ # 解码HTML实体
201
+ json_str = html.unescape(json_data_str)
202
+ return json_str
586
203
  except Exception as e:
587
- logger.exception(f"Error: {e}")
588
- err_msg = f"Error: {e}"
589
- if conf.quote:
590
- await session.send([Quote(session.event.message.id), err_msg])
591
- else:
592
- await session.send(err_msg)
593
-
594
- # Save conversation on error if response was generated
595
- if 'resp' in locals() and resp and conf.save_conversation:
596
- try:
597
- # Use a temporary ID for error cases
598
- error_id = f"error_{int(time.time())}_{secrets.token_hex(4)}"
599
- history_manager.remember(error_id, resp.get("conversation_history", []), [], {"model": model_used if 'model_used' in locals() else "unknown", "error": str(e)}, context_id, code=display_session_id if 'display_session_id' in locals() else None)
600
- history_manager.save_to_disk(error_id)
601
- logger.info(f"Saved error conversation to {error_id}")
602
- except Exception as save_err:
603
- logger.error(f"Failed to save error conversation: {save_err}")
604
-
605
- # Secondary Parser for -n content
606
- next_alc = Alconna(
607
- "next",
608
- Option("-v|--vision", Args["vision_model", str], help_text="设置视觉模型(设为off禁用)"),
609
- Option("-t|--text", Args["text_model", str], help_text="设置文本模型"),
610
- Args["prompt", AllParam],
611
- )
204
+ return json_data_str
612
205
 
613
- # Main Command (Question)
614
- alc = Alconna(
615
- conf.question_command,
616
- Option("-v|--vision", Args["vision_model", str]),
617
- Option("-t|--text", Args["text_model", str]),
618
- Option("-c|--code", Args["code", str]),
619
- Option("-n|--next", Args["next_input", AllParam]),
620
- Args["list_models;?", "-m|--models"],
621
- Args["all_chat;?", "-a"],
622
- Args["local_mode;?", "-l"],
623
- Args["all_param?", MultiVar(str | Image | Custom)],
624
- meta=CommandMeta(
625
- compact=False,
626
- description=f"""使用方法:
627
- {conf.question_command} -a : 列出所有会话
628
- {conf.question_command} -m : 列出所有模型
629
- {conf.question_command} -v <模型名> : 设置主要视觉模型, 设为 off 禁用
630
- {conf.question_command} -t <模型名> : 设置主要文本模型
631
- {conf.question_command} -l : 开启本地模式 (关闭Web索引)
632
- {conf.question_command} -c <4位消息码> : 继续指定会话
633
- {conf.question_command} -n <后续提示词> : 在第一步完成后执行后续操作 (支持 -t/-v)
634
- {conf.question_command} <问题> : 发起问题
635
- 特性:
636
- """
637
- )
638
- )
639
206
 
640
- @command.on(alc)
641
- async def handle_question_command(session: Session[MessageCreatedEvent], result: Arparma):
642
- """Handle main Question command"""
207
async def react(session: Session, emoji: str):
    """Best-effort: attach an emoji reaction to the triggering message.

    OneBot has no generic Satori reaction API, so its platform-internal
    group-reaction endpoint is called with a numeric emoji code instead.
    Failures are swallowed on purpose — a missing reaction must never
    interrupt message handling.
    """
    try:
        # Guard the onebot branch on an actual guild: session.guild is
        # presumably None in direct messages, and the AttributeError raised
        # by `session.guild.id` would NOT be caught by `except ActionFailed`
        # below. Fall through to the generic reaction API in that case.
        if session.event.login.platform == "onebot" and session.guild is not None:
            # "10024" (sparkles) is the fallback code for unmapped emojis.
            code = EMOJI_TO_CODE.get(emoji, "10024")
            await session.account.protocol.call_api(
                "internal/set_group_reaction",
                {
                    "group_id": int(session.guild.id),
                    "message_id": int(session.event.message.id),
                    "code": code,
                    "is_add": True,
                },
            )
        else:
            await session.reaction_create(emoji=emoji)
    except ActionFailed:
        # Reactions are purely cosmetic; ignore platform refusals.
        pass
651
216
 
652
- logger.info(f"Question Command Triggered. Message: {session.event.message}")
217
def handle_shortcut(message_chain: MessageChain) -> Tuple[bool, str]:
    """Detect a leading-slash shortcut in the chain's text content.

    Returns ``(is_shortcut, replacement)``: when the trimmed text starts
    with ``"/"``, ``is_shortcut`` is True and ``replacement`` is that text
    with the slash removed; otherwise ``(False, "")``.
    """
    text_part = message_chain.get(Text)
    trimmed = (str(text_part) if text_part else "").strip()
    if trimmed.startswith("/"):
        return True, trimmed[1:]
    return False, ""
225
+
226
async def process_images(mc: MessageChain, parse_result: Any) -> Tuple[List[str], Optional[str]]:
    """Download every image in the chain and return them base64-encoded.

    Text-only mode — either the parsed ``text_only`` flag or a bare
    ``-t``/``--text`` token left in the text — suppresses image handling
    entirely. Returns ``(images, error)``; ``error`` is always ``None`` in
    the current implementation.
    """
    text_only = False
    if parse_result.matched:
        flag = getattr(parse_result, 'text_only', False)
        # Alconna may wrap option values; unwrap .value when present.
        text_only = bool(flag.value) if hasattr(flag, 'value') else bool(flag)

    plain_text = str(mc.get(Text) or "")
    if not text_only and re.search(r'(?:^|\s)(-t|--text)(?:$|\s)', plain_text):
        text_only = True

    if text_only:
        logger.info("检测到仅文本模式参数,跳过图片分析")
        return [], None

    encoded: List[str] = []
    if mc.get(Image):
        sources = mc[Image].map(lambda seg: seg.src)
        # Fetch all images concurrently.
        downloads = await asyncio.gather(*(download_image(src) for src in sources))
        import base64
        encoded = [base64.b64encode(blob).decode('utf-8') for blob in downloads]

    return encoded, None
253
+
254
@leto.on(MessageCreatedEvent)
async def on_message_created(message_chain: MessageChain, session: Session[MessageEvent]):
    """Entry point for every incoming message.

    Decides whether the message addresses this plugin (an explicit command
    match, or a "/" shortcut while replying to one of our previous answers),
    then schedules the actual LLM round-trip as a background task so this
    event handler returns immediately.
    """
    # Skip if no substantial content in original message
    original_text = str(message_chain.get(Text)).strip()
    has_images = bool(message_chain.get(Image))
    has_custom = bool(message_chain.get(Custom))
    if not original_text and not has_images and not has_custom:
        return

    # Fold the quoted message's content into the chain so both the parser
    # and the model see it; best-effort (origin message may be unavailable).
    if session.reply:
        try:
            message_chain.extend(MessageChain(" ") + session.reply.origin.message)
        except Exception:
            pass

    # Keep only the element types we handle (drops mentions, faces, etc.).
    message_chain = message_chain.get(Text) + message_chain.get(Image) + message_chain.get(Custom)

    quoted_message_id: Optional[str] = None
    conversation_history_key: Optional[str] = None
    conversation_history_payload: List[dict] = []

    if session.reply:
        try:
            quoted_message_id = str(session.reply.origin.id) if hasattr(session.reply.origin, 'id') else None
        except Exception as e:
            logger.warning(f"提取引用消息ID失败: {e}")
            quoted_message_id = None

    # Replying to a message we previously answered continues that conversation.
    if quoted_message_id:
        conversation_history_key = history_manager.get_conversation_id(quoted_message_id)
        if conversation_history_key:
            conversation_history_payload = history_manager.get_history(quoted_message_id) or []
            logger.info(f"继续对话模式触发, 引用消息ID: {quoted_message_id}, 历史长度: {len(conversation_history_payload)}")

    parse_result = alc.parse(message_chain)
    is_shortcut, shortcut_replacement = handle_shortcut(message_chain)

    # Process on an explicit command match, or on a "/" shortcut that
    # continues a known conversation.
    should_process = parse_result.matched or (bool(conversation_history_key) and is_shortcut)

    if not should_process:
        return

    raw_param_chain: MessageChain = parse_result.all_param if parse_result.matched else message_chain  # type: ignore
    if not parse_result.matched and is_shortcut:
        logger.debug(f"触发快捷指令,替换内容: {shortcut_replacement}")

    mc = MessageChain(raw_param_chain)

    async def process_request() -> None:
        # One full request: react -> gather input -> agent call -> reply -> record history.
        await react(session, "✨")
        try:
            if is_shortcut and not parse_result.matched:
                msg = shortcut_replacement
            else:
                # NOTE(review): assumes the Text sub-chain supports .strip();
                # confirm against the MessageChain API.
                msg = mc.get(Text).strip() if mc.get(Text) else ""

            # Append decoded OneBot JSON card content (first card only).
            if mc.get(Custom):  # type: ignore
                custom_elements = [e for e in mc if isinstance(e, Custom)]
                for custom in custom_elements:
                    if custom.tag == 'onebot:json':
                        decoded_json = process_onebot_json(custom.attributes())
                        msg += decoded_json
                        break

            time_start = time.perf_counter()  # NOTE(review): currently unused
            images, error_msg = await process_images(mc, parse_result)

            if error_msg:
                await session.send(error_msg)
                return

            # Serialize agent calls: only one in-flight LLM request at a time.
            lock = _get_hyw_request_lock()
            async with lock:
                response = await hyw.agent(str(msg), conversation_history=conversation_history_payload, images=images)

            response_content = response.get("llm_response", "") if isinstance(response, dict) else ""
            new_history = response.get("conversation_history", []) if isinstance(response, dict) else []

            try:
                send_result = await session.send([Quote(session.event.message.id), response_content])
            except ActionFailed as e:
                # Error code 9057 — presumably "message too long"; retry with
                # a truncated body instead of failing outright.
                if "9057" in str(e):
                    logger.warning(f"发送消息失败(9057),尝试截断发送: {e}")
                    truncated_content = response_content[:1000] + "\n\n[...内容过长,已大幅截断...]"
                    send_result = await session.send([Quote(session.event.message.id), truncated_content])
                else:
                    raise e

            sent_message_id = history_manager.extract_message_id(send_result)
            current_user_message_id = str(session.event.message.id)
            related_ids: List[Optional[str]] = [current_user_message_id, sent_message_id]

            # A continued conversation is re-keyed under the new reply id.
            if conversation_history_key:
                history_manager.remove(conversation_history_key)
                related_ids.append(quoted_message_id)

            # Check turn limit
            user_turns = len([m for m in new_history if m.get("role") == "user"])
            if user_turns < 5:
                history_manager.remember(sent_message_id, new_history, related_ids)
            else:
                logger.info(f"对话轮数达到上限 ({user_turns}),停止记录历史")

        except Exception as exc:
            await react(session, "❌")
            logger.exception("处理HYW消息失败: {}", exc)

    # Fire-and-forget so the event pipeline is not blocked by the LLM call.
    asyncio.create_task(process_request())
    return
807
363
 
808
- metadata("hyw", author=[{"name": "kumoSleeping", "email": "zjr2992@outlook.com"}], version="3.2.105", config=HywConfig)
809
364
 
810
- @leto.on(CommandReceive)
811
- async def remove_at(content: MessageChain):
812
- content = content.lstrip(At)
813
- return content