entari-plugin-hyw 3.3.0__py3-none-any.whl → 3.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of entari-plugin-hyw might be problematic.

Files changed (48)
  1. entari_plugin_hyw/__init__.py +309 -763
  2. entari_plugin_hyw/hyw_core.py +700 -0
  3. {entari_plugin_hyw-3.3.0.dist-info → entari_plugin_hyw-3.3.1.dist-info}/METADATA +28 -20
  4. entari_plugin_hyw-3.3.1.dist-info/RECORD +6 -0
  5. entari_plugin_hyw/assets/icon/anthropic.svg +0 -1
  6. entari_plugin_hyw/assets/icon/deepseek.png +0 -0
  7. entari_plugin_hyw/assets/icon/gemini.svg +0 -1
  8. entari_plugin_hyw/assets/icon/google.svg +0 -1
  9. entari_plugin_hyw/assets/icon/grok.png +0 -0
  10. entari_plugin_hyw/assets/icon/microsoft.svg +0 -15
  11. entari_plugin_hyw/assets/icon/minimax.png +0 -0
  12. entari_plugin_hyw/assets/icon/mistral.png +0 -0
  13. entari_plugin_hyw/assets/icon/nvida.png +0 -0
  14. entari_plugin_hyw/assets/icon/openai.svg +0 -1
  15. entari_plugin_hyw/assets/icon/openrouter.png +0 -0
  16. entari_plugin_hyw/assets/icon/perplexity.svg +0 -24
  17. entari_plugin_hyw/assets/icon/qwen.png +0 -0
  18. entari_plugin_hyw/assets/icon/xai.png +0 -0
  19. entari_plugin_hyw/assets/icon/zai.png +0 -0
  20. entari_plugin_hyw/assets/libs/highlight.css +0 -10
  21. entari_plugin_hyw/assets/libs/highlight.js +0 -1213
  22. entari_plugin_hyw/assets/libs/katex-auto-render.js +0 -1
  23. entari_plugin_hyw/assets/libs/katex.css +0 -1
  24. entari_plugin_hyw/assets/libs/katex.js +0 -1
  25. entari_plugin_hyw/assets/libs/tailwind.css +0 -1
  26. entari_plugin_hyw/assets/package-lock.json +0 -953
  27. entari_plugin_hyw/assets/package.json +0 -16
  28. entari_plugin_hyw/assets/tailwind.config.js +0 -12
  29. entari_plugin_hyw/assets/tailwind.input.css +0 -235
  30. entari_plugin_hyw/assets/template.html +0 -157
  31. entari_plugin_hyw/assets/template.html.bak +0 -157
  32. entari_plugin_hyw/assets/template.j2 +0 -307
  33. entari_plugin_hyw/core/__init__.py +0 -0
  34. entari_plugin_hyw/core/config.py +0 -35
  35. entari_plugin_hyw/core/history.py +0 -146
  36. entari_plugin_hyw/core/hyw.py +0 -41
  37. entari_plugin_hyw/core/pipeline.py +0 -1065
  38. entari_plugin_hyw/core/render.py +0 -596
  39. entari_plugin_hyw/core/render.py.bak +0 -926
  40. entari_plugin_hyw/utils/__init__.py +0 -2
  41. entari_plugin_hyw/utils/browser.py +0 -40
  42. entari_plugin_hyw/utils/misc.py +0 -93
  43. entari_plugin_hyw/utils/playwright_tool.py +0 -36
  44. entari_plugin_hyw/utils/prompts.py +0 -128
  45. entari_plugin_hyw/utils/search.py +0 -241
  46. entari_plugin_hyw-3.3.0.dist-info/RECORD +0 -46
  47. {entari_plugin_hyw-3.3.0.dist-info → entari_plugin_hyw-3.3.1.dist-info}/WHEEL +0 -0
  48. {entari_plugin_hyw-3.3.0.dist-info → entari_plugin_hyw-3.3.1.dist-info}/top_level.txt +0 -0
@@ -1,818 +1,364 @@
-from dataclasses import dataclass, field
-from typing import List, Dict, Any, Optional, Union
+from dataclasses import dataclass
+import html
 import time
-
-from arclet.alconna import Alconna, Args, AllParam, CommandMeta, Option, Arparma, MultiVar, store_true
-from arclet.entari import metadata, listen, Session, plugin_config, BasicConfModel, plugin, command
-from arclet.entari import MessageChain, Text, Image, MessageCreatedEvent, Quote, At
-from satori.element import Custom
+from collections import deque
+from typing import Any, Deque, Dict, List, Optional, Set, Text, Tuple, Union, TYPE_CHECKING, cast
+from typing_extensions import override
+from arclet.entari import metadata
+from arclet.entari import MessageChain, Session
+from arclet.entari.event.base import MessageEvent
 from loguru import logger
+from satori.exception import ActionFailed
+from arclet.entari import MessageChain, Image, Quote, Text
 import arclet.letoderea as leto
-from arclet.entari.event.command import CommandReceive
+from arclet.entari import MessageCreatedEvent, Session
+from arclet.entari import BasicConfModel, metadata, plugin_config
+import httpx
+import asyncio
+import json
+import re
+from arclet.alconna import (
+    Args,
+    Alconna,
+    AllParam,
+    MultiVar,
+    CommandMeta,
+    Option,
+)
+from arclet.entari import MessageChain, Session, command
+from arclet.entari import plugin, Ready, Cleanup, Startup
+from satori.element import Custom, E
+from .hyw_core import HYW, HYWConfig
+
+# Global state
+hyw_core = None
+
+class HistoryManager:
+    def __init__(self, max_records: int = 20):
+        self.max_records = max_records
+        self._order: Deque[str] = deque()
+        self._store: Dict[str, List[dict]] = {}
+        self._bindings: Dict[str, Set[str]] = {}
+        self._msg_map: Dict[str, str] = {}
+
+    def extract_message_id(self, message_like: Any) -> Optional[str]:
+        if message_like is None:
+            return None
+        if isinstance(message_like, (list, tuple)):
+            for item in message_like:
+                mid = self.extract_message_id(item)
+                if mid:
+                    return mid
+            return None
+        if isinstance(message_like, dict):
+            for key in ("message_id", "id"):
+                value = message_like.get(key)
+                if value:
+                    return str(value)
+        for attr in ("message_id", "id"):
+            value = getattr(message_like, attr, None)
+            if value:
+                return str(value)
+        nested = getattr(message_like, "message", None)
+        if nested is not None and nested is not message_like:
+            return self.extract_message_id(nested)
+        return None
+
+    def remove(self, conversation_id: Optional[str], *, remove_from_order: bool = True) -> None:
+        if not conversation_id:
+            return
+        cid = str(conversation_id)
+        if remove_from_order:
+            try:
+                self._order.remove(cid)
+            except ValueError:
+                pass
+        bindings = self._bindings.pop(cid, set())
+        for msg_id in bindings:
+            self._msg_map.pop(msg_id, None)
+        self._store.pop(cid, None)
+
+    def _enforce_limit(self) -> None:
+        while len(self._order) > self.max_records:
+            obsolete = self._order.popleft()
+            self.remove(obsolete, remove_from_order=False)
+
+    def remember(self, conversation_id: Optional[str], history: Optional[List[dict]], related_ids: List[Optional[str]]) -> None:
+        if not conversation_id or not history:
+            return
+        cid = str(conversation_id)
+        self._store[cid] = list(history)
+        binding_ids = {str(mid) for mid in related_ids if mid}
+        self._bindings[cid] = binding_ids
+        for mid in binding_ids:
+            self._msg_map[mid] = cid
+        self._order.append(cid)
+        self._enforce_limit()
+
+    def get_history(self, msg_id: str) -> Optional[List[dict]]:
+        cid = self._msg_map.get(msg_id)
+        if cid:
+            return list(self._store.get(cid, []))
+        return None
+
+    def get_conversation_id(self, msg_id: str) -> Optional[str]:
+        return self._msg_map.get(msg_id)

-from .core.hyw import HYW
-from .core.history import HistoryManager
-from .core.render import ContentRenderer
-from .utils.misc import process_onebot_json, process_images, resolve_model_name
-from arclet.entari.event.lifespan import Startup, Ready, Cleanup
+history_manager = HistoryManager()
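The HistoryManager added above (replacing the removed .core.history module) keys each stored conversation by the bot's outgoing message id, binds every related message id to the same record, and evicts the oldest conversations once max_records is exceeded. A minimal usage sketch with hypothetical ids, not taken from the package:

    # Hypothetical ids: remember() binds each related id to one conversation.
    manager = HistoryManager(max_records=20)
    history = [{"role": "user", "content": "hi"},
               {"role": "assistant", "content": "hello"}]
    manager.remember("sent-42", history, related_ids=["user-41", "sent-42"])
    assert manager.get_conversation_id("user-41") == "sent-42"
    assert manager.get_history("sent-42") == history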
 
-import os
-import secrets
-import base64
+# Request lock for HYW agent
+_hyw_request_lock: Optional[asyncio.Lock] = None
+
+def _get_hyw_request_lock() -> asyncio.Lock:
+    global _hyw_request_lock
+    if _hyw_request_lock is None:
+        _hyw_request_lock = asyncio.Lock()
+    return _hyw_request_lock
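`_get_hyw_request_lock` builds the asyncio.Lock lazily, on the first request handled inside a running event loop (older asyncio versions bound a Lock to the loop that was current at construction, so a module-level Lock() could land on the wrong loop). The message handler further down acquires it around every hyw.agent call, so requests are processed one at a time. A sketch of the pattern, assuming the helper above:

    # Concurrent handlers queue on the shared lock; one agent call runs at a time.
    async def handle(prompt: str) -> None:
        lock = _get_hyw_request_lock()
        async with lock:
            await hyw.agent(prompt, conversation_history=[], images=[])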
 
-import re

-class _RecentEventDeduper:
-    def __init__(self, ttl_seconds: float = 30.0, max_size: int = 2048):
-        self.ttl_seconds = ttl_seconds
-        self.max_size = max_size
-        self._seen: Dict[str, float] = {}
-
-    def seen_recently(self, key: str) -> bool:
-        now = time.time()
-        if len(self._seen) > self.max_size:
-            self._prune(now)
-        ts = self._seen.get(key)
-        if ts is None or now - ts > self.ttl_seconds:
-            self._seen[key] = now
-            return False
-        return True
-
-    def _prune(self, now: float):
-        expired = [k for k, ts in self._seen.items() if now - ts > self.ttl_seconds]
-        for k in expired:
-            self._seen.pop(k, None)
-        if len(self._seen) > self.max_size:
-            for k, _ in sorted(self._seen.items(), key=lambda kv: kv[1])[: len(self._seen) - self.max_size]:
-                self._seen.pop(k, None)
-
-_event_deduper = _RecentEventDeduper()
-
-@dataclass
 class HywConfig(BasicConfModel):
-    admins: List[str] = field(default_factory=list)
-    models: List[Dict[str, Any]] = field(default_factory=list)
-    question_command: str = "/q"
-    model_name: Optional[str] = None
-    api_key: Optional[str] = None
+    command_name_list: Union[str, List[str]] = "hyw"
+    model_name: str
+    api_key: str
     base_url: str = "https://openrouter.ai/api/v1"
-    vision_model_name: Optional[str] = None
-    vision_api_key: Optional[str] = None
-    vision_base_url: Optional[str] = None
-    vision_system_prompt: Optional[str] = None
-    intruct_model_name: Optional[str] = None
-    intruct_api_key: Optional[str] = None
-    intruct_base_url: Optional[str] = None
-    intruct_system_prompt: Optional[str] = None
-    agent_system_prompt: Optional[str] = None
-    search_base_url: str = "https://lite.duckduckgo.com/lite/?q={query}"
-    image_search_base_url: str = "https://duckduckgo.com/?q={query}&iax=images&ia=images"
     headless: bool = False
     save_conversation: bool = False
-    icon: str = "openai"
-    render_timeout_ms: int = 6000
-    extra_body: Optional[Dict[str, Any]] = None
-    enable_browser_fallback: bool = False
-    reaction: bool = True
-    quote: bool = True
-    temperature: float = 0.4
-    # Billing configuration (price per million tokens)
-    input_price: Optional[float] = None  # $ per 1M input tokens
-    output_price: Optional[float] = None  # $ per 1M output tokens
-    # Vision model pricing overrides (defaults to main model pricing if not set)
-    vision_input_price: Optional[float] = None
-    vision_output_price: Optional[float] = None
-    # Instruct model pricing overrides (defaults to main model pricing if not set)
-    intruct_input_price: Optional[float] = None
-    intruct_output_price: Optional[float] = None

-    # Provider Names
-    search_name: str = "DuckDuckGo"
-    search_provider: str = "Crawl4AI"
-    model_provider: Optional[str] = None
-    vision_model_provider: Optional[str] = None
-    intruct_model_provider: Optional[str] = None
+    browser_tool: str = "jina"
+    jina_api_key: Optional[str] = None

-    start_test: Optional[Union[str, bool]] = None
-
-conf = plugin_config(HywConfig)
-history_manager = HistoryManager()
-renderer = ContentRenderer()
-hyw = HYW(config=conf)
-
-@listen(Ready, once=True)
-async def _run_ui_test():
-    """Run UI rendering test on startup if configured."""
-    # Debug log to confirm listener is active
-    logger.info(f"UI TEST Listener Active. start_test config: {conf.start_test} (type: {type(conf.start_test)})")
+    vision_model_name: Optional[str] = None
+    vision_base_url: Optional[str] = None
+    vision_api_key: Optional[str] = None

-    if not conf.start_test:
-        return
-
-    test_file = ""
-    if isinstance(conf.start_test, str):
-        test_file = conf.start_test
-    elif conf.start_test is True:
-        # User enabled boolean toggle, assume default path
-        # Try a few locations
-        candidates = ["data/conversations/ui-test.md", "ui-test.md", "README.md"]
-        for c in candidates:
-            if os.path.exists(c):
-                test_file = c
-                break
-        if not test_file:
-            logger.warning("UI TEST: start_test=True but no default test file found (tried: data/conversations/ui-test.md, ui-test.md, README.md)")
-            return
-
-    logger.info(f"UI TEST: Starting render test with file {test_file}")
+    extra_body: Optional[Dict[str, Any]] = None

-    if not os.path.exists(test_file):
-        logger.error(f"UI TEST: File not found: {test_file}")
-        return
-
-    try:
-        with open(test_file, "r", encoding="utf-8") as f:
-            content = f.read()
-
-        # Mock Data for Full UI Test
-        stats = {
-            "total_time": 12.5,
-            "vision_duration": 3.2,
-            "cost": 0.0015
-        }
-
-        stages = [
-            {"name": "Vision", "model": "google/gemini-pro-vision", "time": 3.2, "cost": 0.0005, "provider": "Google", "icon_config": "google"},
-            {"name": "Search", "model": "duckduckgo", "time": 1.5, "cost": 0.0, "provider": "DDG", "icon_config": "search",
-             "children": {"references": [
-                 {"title": "Crawl4AI, Open-source LLM-Friendly Web Crawler & Scraper", "url": "https://docs.crawl4ai.com/core/llmtxt", "domain": "docs.crawl4ai.com"}
-             ]}},
-            {"name": "Crawler", "model": "Crawl4AI", "time": 2.5, "cost": 0.0, "provider": "Page Fetcher", "icon_config": "browser",
-             "children": {"crawled_pages": [
-                 {"title": "Quick Start - Crawl4AI Documentation (v0.7.x)", "url": "https://docs.crawl4ai.com/core/quickstart/", "domain": "docs.crawl4ai.com"},
-                 {"title": "Crawl4AI Explained: The AI-Friendly Web Crawling Framework", "url": "https://scrapfly.io/blog/posts/crawl4AI-explained/", "domain": "scrapfly.io"},
-                 {"title": "Llmtxt - Crawl4AI Documentation (v0.7.x)", "url": "https://docs.crawl4ai.com/core/llmtxt/", "domain": "docs.crawl4ai.com"},
-                 {"title": "Multi-URL Crawling - Crawl4AI Documentation (v0.7.x)", "url": "https://docs.crawl4ai.com/advanced/multi-url-crawling/", "domain": "docs.crawl4ai.com"}
-             ]}},
-            {"name": "Agent", "model": "anthropic/claude-3-5-sonnet", "time": 7.8, "cost": 0.0010, "provider": "Anthropic", "icon_config": "anthropic"}
-        ]
-
-        # References come from search results
-        references = [
-            {"title": "Crawl4AI, Open-source LLM-Friendly Web Crawler & Scraper", "url": "https://docs.crawl4ai.com/core/llmtxt", "domain": "docs.crawl4ai.com"}
-        ]
-
-        # Page references come from crawled pages
-        page_references = [
-            {"title": "Quick Start - Crawl4AI Documentation (v0.7.x)", "url": "https://docs.crawl4ai.com/core/quickstart/", "domain": "docs.crawl4ai.com"},
-            {"title": "Crawl4AI Explained: The AI-Friendly Web Crawling Framework", "url": "https://scrapfly.io/blog/posts/crawl4AI-explained/", "domain": "scrapfly.io"},
-            {"title": "Llmtxt - Crawl4AI Documentation (v0.7.x)", "url": "https://docs.crawl4ai.com/core/llmtxt/", "domain": "docs.crawl4ai.com"},
-            {"title": "Multi-URL Crawling - Crawl4AI Documentation (v0.7.x)", "url": "https://docs.crawl4ai.com/advanced/multi-url-crawling/", "domain": "docs.crawl4ai.com"}
-        ]
-
-        output_dir = "data/cache"
-        os.makedirs(output_dir, exist_ok=True)
-        output_path = f"{output_dir}/ui_test_result.jpg"
-
-        logger.info(f"UI TEST: Rendering to {output_path}...")
-
-        start = time.time()
-        success = await renderer.render(
-            markdown_content=content,
-            output_path=output_path,
-            stats=stats,
-            stages_used=stages,
-            references=references,
-            page_references=page_references,
-            flow_steps=[],
-            model_name="CLAUDE-3-5-SONNET",
-            provider_name="Anthropic",
-            behavior_summary="Automated Test",
-            icon_config="anthropic",
-            render_timeout_ms=10000
-        )
-
-        if success:
-            logger.success(f"UI TEST: Render completed in {time.time() - start:.2f}s. Saved to {output_path}")
-        else:
-            logger.error("UI TEST: Render FAILED.")
-
-    except Exception as e:
-        logger.error(f"UI TEST: Exception during test: {e}")
-
-
-@listen(Cleanup, once=True)
-async def _hyw_cleanup():
-    try:
-        await hyw.close()
-    except Exception as e:
-        logger.warning(f"HYW cleanup error: {e}")
-
-class GlobalCache:
-    models_image_path: Optional[str] = None
+    enable_browser_fallback: bool = False
+    # verbose: bool = False
+
+metadata(
+    "hyw",
+    author=[{"name": "kumoSleeping", "email": "zjr2992@outlook.com"}],
+    version="3.3.1",
+    description="",
+    config=HywConfig,
+)

-global_cache = GlobalCache()
+conf = plugin_config(HywConfig)
+alc = Alconna(
+    conf.command_name_list,
+    Option("-t|--text", dest="text_only", default=False, help_text="仅文本模式(禁用图片识别)"),
+    Args["all_param", AllParam],
+    # Option("-v|--verbose", dest="verbose", default=False, help_text="启用详细日志输出"),
+    meta=CommandMeta(compact=False)
+)
216
- from satori.exception import ActionFailed
217
- from satori.adapters.onebot11.reverse import _Connection
156
+ # Create HYW configuration
157
+ hyw_config = HYWConfig(
158
+ api_key=conf.api_key,
159
+ model_name=conf.model_name,
160
+ base_url=conf.base_url,
161
+ save_conversation=conf.save_conversation,
162
+ headless=conf.headless,
163
+ browser_tool=conf.browser_tool,
164
+ jina_api_key=conf.jina_api_key,
165
+ vision_model_name=conf.vision_model_name,
166
+ vision_base_url=conf.vision_base_url,
167
+ vision_api_key=conf.vision_api_key,
168
+ extra_body=conf.extra_body,
169
+ enable_browser_fallback=conf.enable_browser_fallback
170
+ )
218
171
 
219
- # Monkeypatch to suppress ActionFailed for get_msg
220
- original_call_api = _Connection.call_api
172
+ hyw = HYW(config=hyw_config)
221
173
 
222
- async def patched_call_api(self, action: str, params: dict = None):
223
- try:
224
- return await original_call_api(self, action, params)
225
- except ActionFailed as e:
226
- if action == "get_msg":
227
- logger.warning(f"Suppressed ActionFailed for get_msg: {e}")
228
- return None
229
- raise e
230
174
 
231
- _Connection.call_api = patched_call_api
232
175
 
176
+ # Emoji到代码的映射字典
233
177
  EMOJI_TO_CODE = {
178
+ "🐳": "128051",
179
+ "❌": "10060",
180
+ "🍧": "127847",
234
181
  "✨": "10024",
235
- "": "10004",
236
- "❌": "10060"
182
+ "📫": "128235"
237
183
  }
238
184
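The EMOJI_TO_CODE values are the decimal Unicode code points of the keys, passed as strings to OneBot's internal/set_group_reaction by react() below; each entry can be checked directly:

    # Every mapping value equals str(ord(emoji)) for these single-codepoint emoji.
    assert str(ord("✨")) == "10024"
    assert str(ord("❌")) == "10060"
    assert str(ord("🐳")) == "128051"
    assert str(ord("📫")) == "128235"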
 
-async def react(session: Session, emoji: str):
-    if not conf.reaction: return
+async def download_image(url: str) -> bytes:
+    """Download an image."""
     try:
-        if session.event.login.platform == "onebot":
-            code = EMOJI_TO_CODE.get(emoji, "10024")
-            # OneBot specific reaction
-            await session.account.protocol.call_api(
-                "internal/set_group_reaction",
-                {
-                    "group_id": str(session.guild.id),
-                    "message_id": str(session.event.message.id),
-                    "code": code,
-                    "is_add": True
-                }
-            )
-        else:
-            # Standard Satori reaction
-            await session.reaction_create(emoji=emoji)
-    except ActionFailed:
-        pass
-    except Exception as e:
-        logger.warning(f"Reaction failed: {e}")
-
-async def process_request(session: Session[MessageCreatedEvent], all_param: Optional[MessageChain] = None,
-                          selected_model: Optional[str] = None, selected_vision_model: Optional[str] = None,
-                          conversation_key_override: Optional[str] = None, local_mode: bool = False,
-                          next_prompt: Optional[str] = None, next_text_model: Optional[str] = None, next_vision_model: Optional[str] = None):
-    logger.info(f"Processing request: {all_param}")
-    mc = MessageChain(all_param)
-    logger.info(f"reply: {session.reply}")
-    if session.reply:
-        try:
-            # Check if reply is from self (the bot)
-            # 1. Check by Message ID (reliable for bot's own messages if recorded)
-            reply_msg_id = str(session.reply.origin.id) if hasattr(session.reply.origin, 'id') else None
-            is_bot = False
-
-            if reply_msg_id and history_manager.is_bot_message(reply_msg_id):
-                is_bot = True
-                logger.info(f"Reply target {reply_msg_id} identified as bot message via history")
-
-            if is_bot:
-                logger.info("Reply is from me - ignoring content")
+        async with httpx.AsyncClient(timeout=30.0) as client:
+            resp = await client.get(url)
+            if resp.status_code == 200:
+                return resp.content
             else:
-                logger.info(f"Reply is from user (or unknown) - including content")
-                mc.extend(MessageChain(" ") + session.reply.origin.message)
-        except Exception as e:
-            logger.warning(f"Failed to process reply origin: {e}")
-            mc.extend(MessageChain(" ") + session.reply.origin.message)
-
-    # Filter and reconstruct MessageChain
-    filtered_elements = mc.get(Text) + mc.get(Image) + mc.get(Custom)
-    mc = MessageChain(filtered_elements)
-    logger.info(f"mc: {mc}")
-
-    text_content = str(mc.get(Text)).strip()
-    # Remove HTML image tags from text content to prevent "unreasonable code behavior"
-    text_content = re.sub(r'<img[^>]+>', '', text_content, flags=re.IGNORECASE)
-
-    if not text_content and not mc.get(Image) and not mc.get(Custom):
-        return
-
-    # History & Context
-    hist_key = conversation_key_override
-    if not hist_key and session.reply and hasattr(session.reply.origin, 'id'):
-        hist_key = history_manager.get_conversation_id(str(session.reply.origin.id))
-
-    hist_payload = history_manager.get_history(hist_key) if hist_key else []
-    meta = history_manager.get_metadata(hist_key) if hist_key else {}
-    context_id = f"guild_{session.guild.id}" if session.guild else f"user_{session.user.id}"
+                raise ActionFailed(f"下载图片失败,状态码: {resp.status_code}")
+    except Exception as e:
+        raise ActionFailed(f"下载图片失败: {url}, 错误: {str(e)}")

-    if conf.reaction: await react(session, "✨")

+def process_onebot_json(json_data_str: str) -> str:
     try:
-        msg_text = str(mc.get(Text)).strip() if mc.get(Text) else ""
-        msg_text = re.sub(r'<img[^>]+>', '', msg_text, flags=re.IGNORECASE)
-
-        # If message is empty but has images, use a placeholder
-        if not msg_text and (mc.get(Image) or mc.get(Custom)):
-            msg_text = "[图片]"
-
-        for custom in [e for e in mc if isinstance(e, Custom)]:
-            if custom.tag == 'onebot:json':
-                if decoded := process_onebot_json(custom.attributes()): msg_text += f"\n{decoded}"
-                break
-
-        # Model Selection (Step 1)
-        # Resolve model names from config if they are short names/keywords
-        model = selected_model or meta.get("model")
-        if model and model != "off":
-            resolved, err = resolve_model_name(model, conf.models)
-            if resolved:
-                model = resolved
-            elif err:
-                logger.warning(f"Model resolution warning for {model}: {err}")
-
-        vision_model = selected_vision_model or meta.get("vision_model")
-        if vision_model and vision_model != "off":
-            resolved_v, err_v = resolve_model_name(vision_model, conf.models)
-            if resolved_v:
-                vision_model = resolved_v
-            elif err_v:
-                logger.warning(f"Vision model resolution warning for {vision_model}: {err_v}")
-
-        images, err = await process_images(mc, vision_model)
-
-        # Call Agent (Step 1)
-        # Sanitize user_input: use extracted text only
-        safe_input = msg_text
-
-        resp = await hyw.agent(safe_input, conversation_history=hist_payload, images=images,
-                               selected_model=model, selected_vision_model=vision_model, local_mode=local_mode)
-
-        # Step 1 Results
-        step1_vision_model = resp.get("vision_model_used")
-        step1_model = resp.get("model_used")
-        step1_history = resp.get("conversation_history", [])
-        step1_stats = resp.get("stats", {})
-
-        final_resp = resp
-
-        # Step 2 (Optional)
-        if next_prompt:
-            logger.info(f"Executing Step 2 with prompt: {next_prompt}")
-
-            # Use Step 1 history as base for Step 2
-            # hyw.agent already returns the updated history including the new turn
-            # So we just pass step1_history
-
-            # Determine Step 2 models
-            # If not specified, inherit from Step 1 or config?
-            # Usually inherit from config or meta if not specified in -n
-            step2_model = next_text_model or model
-            if step2_model and step2_model != "off":
-                resolved_s2, err_s2 = resolve_model_name(step2_model, conf.models)
-                if resolved_s2:
-                    step2_model = resolved_s2
-
-            step2_vision_model = next_vision_model or vision_model  # Probably not used if no new images, but consistent
-            if step2_vision_model and step2_vision_model != "off":
-                resolved_s2v, err_s2v = resolve_model_name(step2_vision_model, conf.models)
-                if resolved_s2v:
-                    step2_vision_model = resolved_s2v
-
-            # No new images for Step 2 usually, unless we want to carry over images?
-            # The user said "First round image model, second round text model".
-            # Usually Step 2 is text-only follow-up.
-            # But hyw.agent stateless? No, we pass history.
-            # We don't pass 'images' again to Step 2 unless we want them re-analyzed.
-            # If Step 1 analyzed images, the analysis is in history (as assistant message or system message?).
-            # In hyw.agent, image analysis result is added to history.
-            # So we don't need to pass images again.
-
-            resp2 = await hyw.agent(str(next_prompt), conversation_history=step1_history, images=None,
-                                    selected_model=step2_model, selected_vision_model=step2_vision_model, local_mode=local_mode)
-
-            final_resp = resp2
-
-            # Merge Stats
-            # Instead of merging into a single dict, we prepare a list of stats for the renderer
-            # But we also need a combined stats for history recording?
-            # History manager likely expects a single dict or doesn't care much (it stores what we give)
-
-            # Let's keep step1_stats and resp2["stats"] separate for rendering
-            # But for history, maybe we still want a merged one?
-            # The code below uses final_resp["stats"] for rendering AND history.
-
-            # Let's create a list for rendering
-            stats_for_render = [step1_stats, resp2.get("stats", {})]
-
-            # And a merged one for history/final_resp
-            merged_stats = step1_stats.copy()
-            if "stats" in resp2:
-                for k, v in resp2["stats"].items():
-                    if isinstance(v, (int, float)) and k in merged_stats:
-                        merged_stats[k] += v
-                    elif k == "visited_domains":
-                        merged_stats[k] = list(set(merged_stats.get(k, []) + v))
-                    else:
-                        merged_stats[k] = v
-
-            final_resp["stats"] = merged_stats
-            final_resp["stats_list"] = stats_for_render  # Pass this to renderer if available
-
-            # Merge Model Info for Display
-            # We want to show Step 1 Vision Model AND Step 2 Text Model
-            if step1_vision_model:
-                final_resp["vision_model_used"] = step1_vision_model
-            # final_resp["model_used"] is already from Step 2
-
-
-        # Extract Response Data
-        content = final_resp.get("llm_response", "")
-        structured = final_resp.get("structured_response", {})
-
-        # Render
-        import tempfile
-        with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tf:
-            output_path = tf.name
-        model_used = final_resp.get("model_used")
-        vision_model_used = final_resp.get("vision_model_used")
-
-        # Helper to infer icon from model name
-        def infer_icon_from_model(model_name: str) -> str:
-            """Infer icon name from model name (e.g. 'google/gemini-3-flash' -> 'google' or 'gemini')"""
-            if not model_name:
-                return conf.icon
-            name_lower = model_name.lower()
-            # Check for known providers/models in the name
-            known_icons = ["google", "gemini", "openai", "anthropic", "deepseek", "mistral",
-                           "qwen", "grok", "xai", "perplexity", "microsoft", "minimax", "nvidia"]
-            for icon_name in known_icons:
-                if icon_name in name_lower:
-                    return icon_name
-            return conf.icon
-
-        icon = conf.icon
-        m_conf = None
-        if model_used:
-            m_conf = next((m for m in conf.models if m.get("name") == model_used), None)
-            if m_conf:
-                icon = m_conf.get("icon", infer_icon_from_model(model_used))
-            else:
-                # Model not in config list, infer from name
-                icon = infer_icon_from_model(model_used)
-
-        # Determine session short code
-        if hist_key:
-            display_session_id = history_manager.get_code_by_key(hist_key)
-            if not display_session_id:
-                # Should not happen if key exists, but fallback
-                display_session_id = history_manager.generate_short_code()
-        else:
-            # New conversation, pre-generate code
-            display_session_id = history_manager.generate_short_code()
-
-        # Determine vision base url and icon
-        vision_base_url = None
-        vision_icon = None
-
-        if vision_model_used:
-            v_conf = next((m for m in conf.models if m.get("name") == vision_model_used), None)
-            if v_conf:
-                vision_base_url = v_conf.get("base_url")
-                vision_icon = v_conf.get("icon", infer_icon_from_model(vision_model_used))
-            else:
-                vision_icon = infer_icon_from_model(vision_model_used)
-
-        # Handle Vision Only Mode (suppress text model display)
-        render_model_name = model_used or conf.model_name or "unknown"
-        render_icon = icon
-        render_base_url = m_conf.get("base_url", conf.base_url) if m_conf else conf.base_url
-
-        if not model_used and vision_model_used:
-            render_model_name = ""
-            render_icon = ""
-
-        # Use stats_list if available, otherwise standard stats
-        stats_to_render = final_resp.get("stats_list", final_resp.get("stats", {}))
-
-        # Determine Behavior Summary & Provider Name
-
-        # 1. Behavior Summary
-        behavior_summary = "Text Generation"
-        if vision_model_used:
-            behavior_summary = "Visual Analysis"
-        elif any(s.get("name") == "Search" for s in final_resp.get("stages_used", []) or []):
-            behavior_summary = "Search-Augmented"
-
-        # 2. Provider Name
-        # Try to get from m_conf (resolved above)
-        provider_name = "Unknown Provider"
-        if model_used and m_conf:
-            provider_name = m_conf.get("provider", "Unknown Provider")
-        elif not model_used and vision_model_used:
-            # If only vision model used (unlikely but possible in code logic)
-            if 'v_conf' in locals() and v_conf:
-                provider_name = v_conf.get("provider", "Unknown Provider")
-
-        # If still unknown and we have base_url, maybe use domain as last resort fallback?
-        # User said: "provider does not automatically get from url if not filled"
-        # So if it's "Unknown Provider", we leave it or maybe empty string?
-        # Let's stick to "Unknown Provider" or just empty if we want to be clean.
-        # But for UI validation it's better to show something if missing config.
-
-        render_ok = await renderer.render(
-            markdown_content=content,
-            output_path=output_path,
-            suggestions=[],
-            stats=stats_to_render,
-            references=structured.get("references", []),
-            page_references=structured.get("page_references", []),
-            flow_steps=structured.get("flow_steps", []),
-            stages_used=final_resp.get("stages_used", []),
-            model_name=render_model_name,
-            provider_name=provider_name,
-            behavior_summary=behavior_summary,
-            icon_config=render_icon,
-            vision_model_name=vision_model_used,
-            vision_base_url=vision_base_url,
-            vision_icon_config=vision_icon,
-            base_url=render_base_url,
-            billing_info=final_resp.get("billing_info"),
-            render_timeout_ms=conf.render_timeout_ms
-        )
-
-        # Send & Save
-        if not render_ok:
-            logger.error("Render failed; skipping reply. Check Crawl4AI rendering status.")
-            if os.path.exists(output_path):
-                try:
-                    os.remove(output_path)
-                except Exception as exc:
-                    logger.warning(f"Failed to delete render output {output_path}: {exc}")
-            sent = None
-        else:
-            # Convert to base64
-            with open(output_path, "rb") as f:
-                img_data = base64.b64encode(f.read()).decode()
-
-            # Build single reply chain (image only now)
-            elements = []
-            elements.append(Image(src=f'data:image/png;base64,{img_data}'))
-
-            msg_chain = MessageChain(*elements)
-
-            if conf.quote:
-                msg_chain = MessageChain(Quote(session.event.message.id)) + msg_chain
-
-            # Use reply_to instead of manual Quote insertion to avoid ActionFailed errors
-            sent = await session.send(msg_chain)
-
-        sent_id = next((str(e.id) for e in sent if hasattr(e, 'id')), None) if sent else None
-        msg_id = str(session.event.message.id) if hasattr(session.event, 'message') else str(session.event.id)
-        related = [msg_id] + ([str(session.reply.origin.id)] if session.reply and hasattr(session.reply.origin, 'id') else [])
-
-        history_manager.remember(
-            sent_id,
-            final_resp.get("conversation_history", []),
-            related,
-            {
-                "model": model_used,
-                "trace_markdown": final_resp.get("trace_markdown"),
-            },
-            context_id,
-            code=display_session_id,
-        )
-
-        if conf.save_conversation and sent_id:
-            history_manager.save_to_disk(sent_id)
-
-
+        # Decode HTML entities
+        json_str = html.unescape(json_data_str)
+        return json_str
     except Exception as e:
-        logger.exception(f"Error: {e}")
-        err_msg = f"Error: {e}"
-        if conf.quote:
-            await session.send([Quote(session.event.message.id), err_msg])
-        else:
-            await session.send(err_msg)
-
-        # Save conversation on error if response was generated
-        if 'resp' in locals() and resp and conf.save_conversation:
-            try:
-                # Use a temporary ID for error cases
-                error_id = f"error_{int(time.time())}_{secrets.token_hex(4)}"
-                history_manager.remember(error_id, resp.get("conversation_history", []), [], {"model": model_used if 'model_used' in locals() else "unknown", "error": str(e)}, context_id, code=display_session_id if 'display_session_id' in locals() else None)
-                history_manager.save_to_disk(error_id)
-                logger.info(f"Saved error conversation to {error_id}")
-            except Exception as save_err:
-                logger.error(f"Failed to save error conversation: {save_err}")
-
-# Secondary Parser for -n content
-next_alc = Alconna(
-    "next",
-    Option("-v|--vision", Args["vision_model", str], help_text="设置视觉模型(设为off禁用)"),
-    Option("-t|--text", Args["text_model", str], help_text="设置文本模型"),
-    Args["prompt", AllParam],
-)
+        return json_data_str

-# Main Command (Question)
-alc = Alconna(
-    conf.question_command,
-    Option("-v|--vision", Args["vision_model", str]),
-    Option("-t|--text", Args["text_model", str]),
-    Option("-c|--code", Args["code", str]),
-    Option("-n|--next", Args["next_input", AllParam]),
-    Args["list_models;?", "-m|--models"],
-    Args["all_chat;?", "-a"],
-    Args["local_mode;?", "-l"],
-    Args["all_param?", MultiVar(str | Image | Custom)],
-    meta=CommandMeta(
-        compact=False,
-        description=f"""使用方法:
-        {conf.question_command} -a : 列出所有会话
-        {conf.question_command} -m : 列出所有模型
-        {conf.question_command} -v <模型名> : 设置主要视觉模型, 设为 off 禁用
-        {conf.question_command} -t <模型名> : 设置主要文本模型
-        {conf.question_command} -l : 开启本地模式 (关闭Web索引)
-        {conf.question_command} -c <4位消息码> : 继续指定会话
-        {conf.question_command} -n <后续提示词> : 在第一步完成后执行后续操作 (支持 -t/-v)
-        {conf.question_command} <问题> : 发起问题
-        特性:
-        """
-    )
-)

-@command.on(alc)
-async def handle_question_command(session: Session[MessageCreatedEvent], result: Arparma):
-    """Handle main Question command"""
+async def react(session: Session, emoji: str):
     try:
-        mid = str(session.event.message.id) if getattr(session.event, "message", None) else str(session.event.id)
-        dedupe_key = f"{getattr(session.account, 'id', 'account')}:{mid}"
-        if _event_deduper.seen_recently(dedupe_key):
-            logger.warning(f"Duplicate command event ignored: {dedupe_key}")
-            return
-    except Exception:
+        if session.event.login.platform == "onebot":
+            code = EMOJI_TO_CODE.get(emoji, "10024")
+            await session.account.protocol.call_api("internal/set_group_reaction", {"group_id": int(session.guild.id), "message_id": int(session.event.message.id), "code": code, "is_add": True})
+        else:
+            await session.reaction_create(emoji=emoji)
+    except ActionFailed:
         pass

-    logger.info(f"Question Command Triggered. Message: {session.event.message}")
+def handle_shortcut(message_chain: MessageChain) -> Tuple[bool, str]:
+    current_msg_text = str(message_chain.get(Text)) if message_chain.get(Text) else ""
+    is_shortcut = False
+    shortcut_replacement = ""
+    if current_msg_text.strip().startswith("/"):
+        is_shortcut = True
+        shortcut_replacement = current_msg_text.strip()[1:]
+    return is_shortcut, shortcut_replacement
+
+async def process_images(mc: MessageChain, parse_result: Any) -> Tuple[List[str], Optional[str]]:
+    is_text_only = False
+    if parse_result.matched:
+        def get_bool_value(val):
+            if hasattr(val, 'value'):
+                return bool(val.value)
+            return bool(val)
+        is_text_only = get_bool_value(getattr(parse_result, 'text_only', False))

-    args = result.all_matched_args
-    logger.info(f"Matched Args: {args}")
+    text_str = str(mc.get(Text) or "")
+    if not is_text_only and re.search(r'(?:^|\s)(-t|--text)(?:$|\s)', text_str):
+        is_text_only = True

-    text_model_val = args.get("text_model")
-    vision_model_val = args.get("vision_model")
-    code_val = args.get("code")
-    all_flag_val = args.get("all_chat")
-    list_models_val = args.get("list_models")
-    local_mode_val = True if args.get("local_mode") else False
-    logger.info(f"Local mode: {local_mode_val} (type: {type(local_mode_val)})")
+    if is_text_only:
+        logger.info("检测到仅文本模式参数,跳过图片分析")
+        return [], None
+
+    has_images = bool(mc.get(Image))
+    images = []
+    if has_images:
+        urls = mc[Image].map(lambda x: x.src)
+        tasks = [download_image(url) for url in urls]
+        raw_images = await asyncio.gather(*tasks)
+        import base64
+        images = [base64.b64encode(img).decode('utf-8') for img in raw_images]

-    # Handle -m (List Models)
-    if list_models_val:
-        # global_cache is already imported/defined in __init__.py
-
-        if global_cache.models_image_path and os.path.exists(global_cache.models_image_path):
-            logger.info(f"Using cached models list: {global_cache.models_image_path}")
-            with open(global_cache.models_image_path, "rb") as f:
-                img_data = base64.b64encode(f.read()).decode()
-            msg = MessageChain(Image(src=f'data:image/png;base64,{img_data}'))
-            if conf.quote: msg = MessageChain(Quote(session.event.message.id)) + msg
-            await session.send(msg)
-            return
-
-        output_dir = "data/cache"
-        os.makedirs(output_dir, exist_ok=True)
-        output_path = f"{output_dir}/models_list_cache.png"
-
-        await renderer.render_models_list(
-            conf.models,
-            output_path,
-            default_base_url=conf.base_url,
-            render_timeout_ms=conf.render_timeout_ms,
-        )
-        global_cache.models_image_path = os.path.abspath(output_path)
-
-        with open(output_path, "rb") as f:
-            img_data = base64.b64encode(f.read()).decode()
-        msg = MessageChain(Image(src=f'data:image/png;base64,{img_data}'))
-        if conf.quote: msg = MessageChain(Quote(session.event.message.id)) + msg
-        await session.send(msg)
+    return images, None
+
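process_images hands attachments back as plain base64 strings, which the handler below forwards to hyw.agent(..., images=...); downloads run concurrently through asyncio.gather. A condensed sketch of that round trip, assuming the download_image helper above:

    # Concurrent download + base64 encoding, mirroring process_images.
    import asyncio, base64

    async def encode_all(urls):
        raw_images = await asyncio.gather(*(download_image(u) for u in urls))
        return [base64.b64encode(img).decode("utf-8") for img in raw_images]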
+@leto.on(MessageCreatedEvent)
+async def on_message_created(message_chain: MessageChain, session: Session[MessageEvent]):
+    # Skip if no substantial content in original message
+    original_text = str(message_chain.get(Text)).strip()
+    has_images = bool(message_chain.get(Image))
+    has_custom = bool(message_chain.get(Custom))
+    if not original_text and not has_images and not has_custom:
         return

-    # Handle -a (List History)
-    if all_flag_val:
-        context_id = f"guild_{session.guild.id}" if session.guild else f"user_{session.user.id}"
-        keys = history_manager.list_by_context(context_id, limit=10)
-        if not keys:
-            msg = "暂无历史会话"
-            if conf.quote: await session.send([Quote(session.event.message.id), msg])
-            else: await session.send(msg)
-            return
+    if session.reply:
+        try:
+            message_chain.extend(MessageChain(" ") + session.reply.origin.message)
+        except Exception:
+            pass

-        msg = "历史会话 [最近10条]\n"
-        for i, key in enumerate(keys):
-            short_code = history_manager.get_code_by_key(key) or "????"
-            hist = history_manager.get_history(key)
-            preview = "..."
-            if hist and len(hist) > 0:
-                last_content = hist[-1].get("content", "")
-                preview = (last_content[:20] + "...") if len(last_content) > 20 else last_content
+    message_chain = message_chain.get(Text) + message_chain.get(Image) + message_chain.get(Custom)
+
+    quoted_message_id: Optional[str] = None
+    conversation_history_key: Optional[str] = None
+    conversation_history_payload: List[dict] = []
+
+    if session.reply:
+        try:
+            quoted_message_id = str(session.reply.origin.id) if hasattr(session.reply.origin, 'id') else None
+        except Exception as e:
+            logger.warning(f"提取引用消息ID失败: {e}")
+            quoted_message_id = None

-            msg += f"{short_code} {preview}\n"
-        if conf.quote: await session.send([Quote(session.event.message.id), msg])
-        else: await session.send(msg)
-        return
+    if quoted_message_id:
+        conversation_history_key = history_manager.get_conversation_id(quoted_message_id)
+        if conversation_history_key:
+            conversation_history_payload = history_manager.get_history(quoted_message_id) or []
+            logger.info(f"继续对话模式触发, 引用消息ID: {quoted_message_id}, 历史长度: {len(conversation_history_payload)}")
+
+    parse_result = alc.parse(message_chain)
+    is_shortcut, shortcut_replacement = handle_shortcut(message_chain)

-    selected_vision_model = None
-    selected_text_model = None
+    should_process = parse_result.matched or (bool(conversation_history_key) and is_shortcut)

-    if vision_model_val:
-        if vision_model_val.lower() == "off":
-            selected_vision_model = "off"
-        else:
-            selected_vision_model, err = resolve_model_name(vision_model_val, conf.models)
-            if err:
-                if conf.quote: await session.send([Quote(session.event.message.id), err])
-                else: await session.send(err)
-                return
-        logger.info(f"Selected vision model: {selected_vision_model}")
-
-    if text_model_val:
-        selected_text_model, err = resolve_model_name(text_model_val, conf.models)
-        if err:
-            if conf.quote: await session.send([Quote(session.event.message.id), err])
-            else: await session.send(err)
-            return
-        logger.info(f"Selected text model: {selected_text_model}")
+    if not should_process:
+        return
+
+    raw_param_chain: MessageChain = parse_result.all_param if parse_result.matched else message_chain  # type: ignore
+    if not parse_result.matched and is_shortcut:
+        logger.debug(f"触发快捷指令,替换内容: {shortcut_replacement}")

-    # Determine History to Continue
-    target_key = None
-    context_id = f"guild_{session.guild.id}" if session.guild else f"user_{session.user.id}"
-
-    # 1. Explicit Code
-    if code_val:
-        target_code = code_val
-        target_key = history_manager.get_key_by_code(target_code)
-        if not target_key:
-            msg = f"未找到代码为 {target_code} 的会话"
-            if conf.quote: await session.send([Quote(session.event.message.id), msg])
-            else: await session.send(msg)
-            return
-        logger.info(f"Question: Continuing session {target_code} -> {target_key}")
-
-    next_input_val = args.get("next_input")
-    next_text_model = None
-    next_vision_model = None
-    next_prompt = None
+    mc = MessageChain(raw_param_chain)

-    if next_input_val:
-        # Parse secondary command
-        # next_input_val is likely a MessageChain or string depending on AllParam behavior with Alconna
-        # We need to ensure it's a string or compatible input for parse
-        logger.info(f"Parsing next input: {next_input_val}")
+    async def process_request() -> None:
+        await react(session, "✨")
         try:
-            # Convert next_input_val to string
-            if isinstance(next_input_val, list):
-                # It's a list of segments (e.g. [Text(...)])
-                # We need to join them into a string
-                # Assuming they are Satori elements or similar
-                cmd_str = "".join(str(x) for x in next_input_val)
+            if is_shortcut and not parse_result.matched:
+                msg = shortcut_replacement
             else:
-                cmd_str = str(next_input_val)
+                msg = mc.get(Text).strip() if mc.get(Text) else ""

-            # Prepend 'next' header for Alconna
-            parse_target = f"next {cmd_str}"
+            if mc.get(Custom):  # type: ignore
+                custom_elements = [e for e in mc if isinstance(e, Custom)]
+                for custom in custom_elements:
+                    if custom.tag == 'onebot:json':
+                        decoded_json = process_onebot_json(custom.attributes())
+                        msg += decoded_json
+                        break

-            next_res = next_alc.parse(parse_target)
-            if next_res.matched:
-                next_args = next_res.all_matched_args
-                next_text_model = next_args.get("text_model")
-                next_vision_model = next_args.get("vision_model")
-                next_prompt = next_args.get("prompt")
-
-                # If prompt is AllParam, it might be captured as a list or string depending on Alconna version
-                # If it's a list, join it back to string
-                if isinstance(next_prompt, list):
-                    next_prompt = "".join(str(x) for x in next_prompt)
-
-                logger.info(f"Next Command Parsed: text={next_text_model}, vision={next_vision_model}, prompt={next_prompt}")
+            time_start = time.perf_counter()
+            images, error_msg = await process_images(mc, parse_result)
+
+            if error_msg:
+                await session.send(error_msg)
+                return
+
+            lock = _get_hyw_request_lock()
+            async with lock:
+                response = await hyw.agent(str(msg), conversation_history=conversation_history_payload, images=images)
+
+            response_content = response.get("llm_response", "") if isinstance(response, dict) else ""
+            new_history = response.get("conversation_history", []) if isinstance(response, dict) else []
+
+            try:
+                send_result = await session.send([Quote(session.event.message.id), response_content])
+            except ActionFailed as e:
+                if "9057" in str(e):
+                    logger.warning(f"发送消息失败(9057),尝试截断发送: {e}")
+                    truncated_content = response_content[:1000] + "\n\n[...内容过长,已大幅截断...]"
+                    send_result = await session.send([Quote(session.event.message.id), truncated_content])
+                else:
+                    raise e
+
+            sent_message_id = history_manager.extract_message_id(send_result)
+            current_user_message_id = str(session.event.message.id)
+            related_ids: List[Optional[str]] = [current_user_message_id, sent_message_id]
+
+            if conversation_history_key:
+                history_manager.remove(conversation_history_key)
+                related_ids.append(quoted_message_id)
+
+            # Check turn limit
+            user_turns = len([m for m in new_history if m.get("role") == "user"])
+            if user_turns < 5:
+                history_manager.remember(sent_message_id, new_history, related_ids)
             else:
-                logger.warning(f"Next command parsing failed or no match for: {parse_target}")
-                # Fallback: treat the whole string as prompt if parsing failed (e.g. if it didn't match options but Alconna should have matched prompt)
-                # But next_alc has Args["prompt", AllParam], so it should match everything else.
-                # If it failed, maybe something else is wrong.
-                # Let's assume if it failed, we just use the raw string as prompt?
-                # But wait, if we prepend "next ", and next_alc starts with "next", it should match.
-                pass
-        except Exception as e:
-            logger.error(f"Failed to parse next command: {e}")
+                logger.info(f"对话轮数达到上限 ({user_turns}),停止记录历史")
+
+        except Exception as exc:
+            await react(session, "❌")
+            logger.exception("处理HYW消息失败: {}", exc)

-    await process_request(session, args.get("all_param"), selected_model=selected_text_model, selected_vision_model=selected_vision_model, conversation_key_override=target_key, local_mode=local_mode_val,
-                          next_prompt=next_prompt, next_text_model=next_text_model, next_vision_model=next_vision_model)
+    asyncio.create_task(process_request())
+    return

-metadata("hyw", author=[{"name": "kumoSleeping", "email": "zjr2992@outlook.com"}], version="3.2.105", config=HywConfig)

-@leto.on(CommandReceive)
-async def remove_at(content: MessageChain):
-    content = content.lstrip(At)
-    return content