entari-plugin-hyw 3.2.105__py3-none-any.whl → 3.5.0rc6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (95)
  1. entari_plugin_hyw/__init__.py +120 -428
  2. entari_plugin_hyw/assets/card-dist/index.html +396 -0
  3. entari_plugin_hyw/assets/card-dist/logos/anthropic.svg +1 -0
  4. entari_plugin_hyw/assets/card-dist/logos/cerebras.svg +9 -0
  5. entari_plugin_hyw/assets/card-dist/logos/deepseek.png +0 -0
  6. entari_plugin_hyw/assets/card-dist/logos/gemini.svg +1 -0
  7. entari_plugin_hyw/assets/card-dist/logos/google.svg +1 -0
  8. entari_plugin_hyw/assets/card-dist/logos/grok.png +0 -0
  9. entari_plugin_hyw/assets/card-dist/logos/huggingface.png +0 -0
  10. entari_plugin_hyw/assets/card-dist/logos/microsoft.svg +15 -0
  11. entari_plugin_hyw/assets/card-dist/logos/minimax.png +0 -0
  12. entari_plugin_hyw/assets/card-dist/logos/mistral.png +0 -0
  13. entari_plugin_hyw/assets/card-dist/logos/nvida.png +0 -0
  14. entari_plugin_hyw/assets/card-dist/logos/openai.svg +1 -0
  15. entari_plugin_hyw/assets/card-dist/logos/openrouter.png +0 -0
  16. entari_plugin_hyw/assets/card-dist/logos/perplexity.svg +24 -0
  17. entari_plugin_hyw/assets/card-dist/logos/qwen.png +0 -0
  18. entari_plugin_hyw/assets/card-dist/logos/xai.png +0 -0
  19. entari_plugin_hyw/assets/card-dist/logos/xiaomi.png +0 -0
  20. entari_plugin_hyw/assets/card-dist/logos/zai.png +0 -0
  21. entari_plugin_hyw/assets/card-dist/vite.svg +1 -0
  22. entari_plugin_hyw/assets/icon/cerebras.svg +9 -0
  23. entari_plugin_hyw/assets/icon/huggingface.png +0 -0
  24. entari_plugin_hyw/assets/icon/xiaomi.png +0 -0
  25. entari_plugin_hyw/card-ui/.gitignore +24 -0
  26. entari_plugin_hyw/card-ui/README.md +5 -0
  27. entari_plugin_hyw/card-ui/index.html +16 -0
  28. entari_plugin_hyw/card-ui/package-lock.json +2342 -0
  29. entari_plugin_hyw/card-ui/package.json +31 -0
  30. entari_plugin_hyw/card-ui/public/logos/anthropic.svg +1 -0
  31. entari_plugin_hyw/card-ui/public/logos/cerebras.svg +9 -0
  32. entari_plugin_hyw/card-ui/public/logos/deepseek.png +0 -0
  33. entari_plugin_hyw/card-ui/public/logos/gemini.svg +1 -0
  34. entari_plugin_hyw/card-ui/public/logos/google.svg +1 -0
  35. entari_plugin_hyw/card-ui/public/logos/grok.png +0 -0
  36. entari_plugin_hyw/card-ui/public/logos/huggingface.png +0 -0
  37. entari_plugin_hyw/card-ui/public/logos/microsoft.svg +15 -0
  38. entari_plugin_hyw/card-ui/public/logos/minimax.png +0 -0
  39. entari_plugin_hyw/card-ui/public/logos/mistral.png +0 -0
  40. entari_plugin_hyw/card-ui/public/logos/nvida.png +0 -0
  41. entari_plugin_hyw/card-ui/public/logos/openai.svg +1 -0
  42. entari_plugin_hyw/card-ui/public/logos/openrouter.png +0 -0
  43. entari_plugin_hyw/card-ui/public/logos/perplexity.svg +24 -0
  44. entari_plugin_hyw/card-ui/public/logos/qwen.png +0 -0
  45. entari_plugin_hyw/card-ui/public/logos/xai.png +0 -0
  46. entari_plugin_hyw/card-ui/public/logos/xiaomi.png +0 -0
  47. entari_plugin_hyw/card-ui/public/logos/zai.png +0 -0
  48. entari_plugin_hyw/card-ui/public/vite.svg +1 -0
  49. entari_plugin_hyw/card-ui/src/App.vue +412 -0
  50. entari_plugin_hyw/card-ui/src/assets/vue.svg +1 -0
  51. entari_plugin_hyw/card-ui/src/components/HelloWorld.vue +41 -0
  52. entari_plugin_hyw/card-ui/src/components/MarkdownContent.vue +386 -0
  53. entari_plugin_hyw/card-ui/src/components/SectionCard.vue +41 -0
  54. entari_plugin_hyw/card-ui/src/components/StageCard.vue +237 -0
  55. entari_plugin_hyw/card-ui/src/main.ts +5 -0
  56. entari_plugin_hyw/card-ui/src/style.css +29 -0
  57. entari_plugin_hyw/card-ui/src/test_regex.js +103 -0
  58. entari_plugin_hyw/card-ui/src/types.ts +52 -0
  59. entari_plugin_hyw/card-ui/tsconfig.app.json +16 -0
  60. entari_plugin_hyw/card-ui/tsconfig.json +7 -0
  61. entari_plugin_hyw/card-ui/tsconfig.node.json +26 -0
  62. entari_plugin_hyw/card-ui/vite.config.ts +16 -0
  63. entari_plugin_hyw/{core/history.py → history.py} +25 -1
  64. entari_plugin_hyw/image_cache.py +274 -0
  65. entari_plugin_hyw/{utils/misc.py → misc.py} +38 -3
  66. entari_plugin_hyw/pipeline.py +1338 -0
  67. entari_plugin_hyw/prompts.py +108 -0
  68. entari_plugin_hyw/render_vue.py +314 -0
  69. entari_plugin_hyw/search.py +696 -0
  70. entari_plugin_hyw-3.5.0rc6.dist-info/METADATA +116 -0
  71. entari_plugin_hyw-3.5.0rc6.dist-info/RECORD +88 -0
  72. entari_plugin_hyw/assets/libs/highlight.css +0 -10
  73. entari_plugin_hyw/assets/libs/highlight.js +0 -1213
  74. entari_plugin_hyw/assets/libs/katex-auto-render.js +0 -1
  75. entari_plugin_hyw/assets/libs/katex.css +0 -1
  76. entari_plugin_hyw/assets/libs/katex.js +0 -1
  77. entari_plugin_hyw/assets/libs/tailwind.css +0 -1
  78. entari_plugin_hyw/assets/tailwind.config.js +0 -12
  79. entari_plugin_hyw/assets/tailwind.input.css +0 -235
  80. entari_plugin_hyw/assets/template.html +0 -157
  81. entari_plugin_hyw/core/__init__.py +0 -0
  82. entari_plugin_hyw/core/config.py +0 -36
  83. entari_plugin_hyw/core/hyw.py +0 -41
  84. entari_plugin_hyw/core/pipeline.py +0 -816
  85. entari_plugin_hyw/core/render.py +0 -926
  86. entari_plugin_hyw/utils/__init__.py +0 -3
  87. entari_plugin_hyw/utils/browser.py +0 -61
  88. entari_plugin_hyw/utils/mcp_playwright.py +0 -128
  89. entari_plugin_hyw/utils/playwright_tool.py +0 -46
  90. entari_plugin_hyw/utils/prompts.py +0 -91
  91. entari_plugin_hyw/utils/search.py +0 -193
  92. entari_plugin_hyw-3.2.105.dist-info/METADATA +0 -141
  93. entari_plugin_hyw-3.2.105.dist-info/RECORD +0 -42
  94. {entari_plugin_hyw-3.2.105.dist-info → entari_plugin_hyw-3.5.0rc6.dist-info}/WHEEL +0 -0
  95. {entari_plugin_hyw-3.2.105.dist-info → entari_plugin_hyw-3.5.0rc6.dist-info}/top_level.txt +0 -0
@@ -1,20 +1,29 @@
  from dataclasses import dataclass, field
- from typing import List, Dict, Any, Optional
+ from importlib.metadata import version as get_version
+ from typing import List, Dict, Any, Optional, Union
  import time
+ import asyncio
+
+ # Read the version from pyproject.toml to avoid maintaining it in two places
+ try:
+     __version__ = get_version("entari_plugin_hyw")
+ except Exception:
+     __version__ = "0.0.0"
 
  from arclet.alconna import Alconna, Args, AllParam, CommandMeta, Option, Arparma, MultiVar, store_true
  from arclet.entari import metadata, listen, Session, plugin_config, BasicConfModel, plugin, command
+ from arclet.letoderea import on
  from arclet.entari import MessageChain, Text, Image, MessageCreatedEvent, Quote, At
  from satori.element import Custom
  from loguru import logger
  import arclet.letoderea as leto
  from arclet.entari.event.command import CommandReceive
 
- from .core.hyw import HYW
- from .core.history import HistoryManager
- from .core.render import ContentRenderer
- from .utils.misc import process_onebot_json, process_images, resolve_model_name
- from arclet.entari.event.lifespan import Startup, Ready, Cleanup
+ from .pipeline import ProcessingPipeline
+ from .history import HistoryManager
+ from .render_vue import ContentRenderer
+ from .misc import process_onebot_json, process_images, resolve_model_name, render_refuse_answer, REFUSE_ANSWER_MARKDOWN
+ from arclet.entari.event.lifespan import Cleanup
 
  import os
  import secrets
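Editorial note: importlib.metadata raises the narrower PackageNotFoundError when the distribution is not installed (for example, a source checkout), so the broad except above could be scoped to it. A minimal sketch, not part of the diff:

    from importlib.metadata import PackageNotFoundError, version

    try:
        __version__ = version("entari_plugin_hyw")
    except PackageNotFoundError:
        # Distribution metadata missing, e.g. running from a source tree
        __version__ = "0.0.0"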
@@ -22,6 +31,32 @@ import base64
 
  import re
 
+
+ def parse_color(color: str) -> str:
+     """
+     Parse color from hex or RGB tuple to hex format.
+     Supports: #ff0000, ff0000, (255, 0, 0), 255,0,0
+     """
+     if not color:
+         return "#ef4444"
+
+     color = str(color).strip()
+
+     # Hex format: #fff or #ffffff or ffffff
+     if color.startswith('#') and len(color) in [4, 7]:
+         return color
+     if re.match(r'^[0-9a-fA-F]{6}$', color):
+         return f'#{color}'
+
+     # RGB tuple: (r, g, b) or r,g,b
+     rgb_match = re.match(r'^\(?(\d+)[,\s]+(\d+)[,\s]+(\d+)\)?$', color)
+     if rgb_match:
+         r, g, b = (max(0, min(255, int(x))) for x in rgb_match.groups())
+         return f'#{r:02x}{g:02x}{b:02x}'
+
+     logger.warning(f"Invalid color '{color}', using default #ef4444")
+     return "#ef4444"
+
  class _RecentEventDeduper:
      def __init__(self, ttl_seconds: float = 30.0, max_size: int = 2048):
          self.ttl_seconds = ttl_seconds
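The new parse_color helper is self-contained. Assuming the plugin module is importable outside the bot runtime, it behaves like this (illustrative, not part of the diff); note that anything that is neither hex nor an RGB triple, including CSS color names, falls back to the default with a warning:

    from entari_plugin_hyw import parse_color

    parse_color("#ff0000")      # "#ff0000"  hex passes through
    parse_color("ff0000")       # "#ff0000"  bare hex gains the leading "#"
    parse_color("(255, 0, 0)")  # "#ff0000"  RGB tuple converted to hex
    parse_color("255,0,0")      # "#ff0000"
    parse_color("red")          # "#ef4444"  unparseable -> warning + default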
@@ -58,24 +93,23 @@ class HywConfig(BasicConfModel):
      base_url: str = "https://openrouter.ai/api/v1"
      vision_model_name: Optional[str] = None
      vision_api_key: Optional[str] = None
+     language: str = "Simplified Chinese"
      vision_base_url: Optional[str] = None
-     vision_system_prompt: Optional[str] = None
-     intruct_model_name: Optional[str] = None
-     intruct_api_key: Optional[str] = None
-     intruct_base_url: Optional[str] = None
-     intruct_system_prompt: Optional[str] = None
-     agent_system_prompt: Optional[str] = None
-     playwright_mcp_command: str = "npx"
-     playwright_mcp_args: Optional[List[str]] = None
-     search_base_url: str = "https://duckduckgo.com/?q={query}&format=json&results_per_page={limit}"
-     image_search_base_url: str = "https://duckduckgo.com/?q={query}&iax=images&ia=images&format=json&results_per_page={limit}"
+     instruct_model_name: Optional[str] = None
+     instruct_api_key: Optional[str] = None
+     instruct_base_url: Optional[str] = None
+     search_base_url: str = "https://lite.duckduckgo.com/lite/?q={query}"
+     image_search_base_url: str = "https://duckduckgo.com/?q={query}&iax=images&ia=images"
      headless: bool = False
      save_conversation: bool = False
      icon: str = "openai"
      render_timeout_ms: int = 6000
+     render_image_timeout_ms: int = 3000
      extra_body: Optional[Dict[str, Any]] = None
+     vision_extra_body: Optional[Dict[str, Any]] = None
+     instruct_extra_body: Optional[Dict[str, Any]] = None
      enable_browser_fallback: bool = False
-     reaction: bool = True
+     reaction: bool = False
      quote: bool = True
      temperature: float = 0.4
      # Billing configuration (price per million tokens)
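The new search_base_url points at DuckDuckGo's lite HTML endpoint and keeps a {query} placeholder. Presumably search.py fills it with str.format after URL-encoding, along the lines of this sketch (the quote_plus call is an assumption, not shown in this diff):

    from urllib.parse import quote_plus

    url = conf.search_base_url.format(query=quote_plus("rust async runtime"))
    # -> "https://lite.duckduckgo.com/lite/?q=rust+async+runtime"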
@@ -85,84 +119,50 @@ class HywConfig(BasicConfModel):
      vision_input_price: Optional[float] = None
      vision_output_price: Optional[float] = None
      # Instruct model pricing overrides (defaults to main model pricing if not set)
-     intruct_input_price: Optional[float] = None
-     intruct_output_price: Optional[float] = None
+     instruct_input_price: Optional[float] = None
+     instruct_output_price: Optional[float] = None
+     # Provider Names
+     search_name: str = "DuckDuckGo"
+     search_provider: str = "crawl4ai"  # crawl4ai | httpx | ddgs
+     fetch_provider: str = "crawl4ai"  # crawl4ai | jinaai
+     jina_api_key: Optional[str] = None  # Optional API key for Jina AI
+     model_provider: Optional[str] = None
+     vision_model_provider: Optional[str] = None
+     instruct_model_provider: Optional[str] = None
+     # UI Theme
+     theme_color: str = "#ef4444"  # Tailwind red-500; accepts hex or RGB (unrecognized values fall back to this default)
+
+     def __post_init__(self):
+         """Parse and normalize theme color after initialization."""
+         self.theme_color = parse_color(self.theme_color)
+
+
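Since HywConfig now pushes every theme_color through parse_color in __post_init__, any accepted spelling is stored as hex. A sketch, assuming BasicConfModel initializes like a dataclass (so __post_init__ fires), the other fields keep their defaults, and the same importability caveat as above:

    from entari_plugin_hyw import HywConfig

    conf = HywConfig(theme_color="255, 0, 0")
    print(conf.theme_color)  # "#ff0000" -- normalized by __post_init__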
 
  conf = plugin_config(HywConfig)
  history_manager = HistoryManager()
  renderer = ContentRenderer()
- hyw = HYW(config=conf)
 
- @listen(Ready, once=True)
- async def _hyw_warmup_mcp():
-     try:
-         await hyw.pipeline.warmup_mcp()
-     except Exception as e:
-         logger.warning(f"MCP Playwright warmup error: {e}")
-
-
- @listen(Cleanup, once=True)
- async def _hyw_cleanup():
-     try:
-         await hyw.close()
-     except Exception as e:
-         logger.warning(f"HYW cleanup error: {e}")
 
  class GlobalCache:
      models_image_path: Optional[str] = None
 
  global_cache = GlobalCache()
 
- from satori.exception import ActionFailed
- from satori.adapters.onebot11.reverse import _Connection
-
- # Monkeypatch to suppress ActionFailed for get_msg
- original_call_api = _Connection.call_api
-
- async def patched_call_api(self, action: str, params: dict = None):
-     try:
-         return await original_call_api(self, action, params)
-     except ActionFailed as e:
-         if action == "get_msg":
-             logger.warning(f"Suppressed ActionFailed for get_msg: {e}")
-             return None
-         raise e
-
- _Connection.call_api = patched_call_api
-
- EMOJI_TO_CODE = {
-     "✨": "10024",
-     "✅": "10004",
-     "❌": "10060"
- }
-
  async def react(session: Session, emoji: str):
      if not conf.reaction: return
      try:
-         if session.event.login.platform == "onebot":
-             code = EMOJI_TO_CODE.get(emoji, "10024")
-             # OneBot specific reaction
-             await session.account.protocol.call_api(
-                 "internal/set_group_reaction",
-                 {
-                     "group_id": str(session.guild.id),
-                     "message_id": str(session.event.message.id),
-                     "code": code,
-                     "is_add": True
-                 }
-             )
-         else:
-             # Standard Satori reaction
-             await session.reaction_create(emoji=emoji)
-     except ActionFailed:
-         pass
+         await session.reaction_create(emoji=emoji)
      except Exception as e:
          logger.warning(f"Reaction failed: {e}")
 
- async def process_request(session: Session[MessageCreatedEvent], all_param: Optional[MessageChain] = None,
-                           selected_model: Optional[str] = None, selected_vision_model: Optional[str] = None,
-                           conversation_key_override: Optional[str] = None, local_mode: bool = False,
-                           next_prompt: Optional[str] = None, next_text_model: Optional[str] = None, next_vision_model: Optional[str] = None):
+ async def process_request(
+     session: Session[MessageCreatedEvent],
+     all_param: Optional[MessageChain] = None,
+     selected_model: Optional[str] = None,
+     selected_vision_model: Optional[str] = None,
+     conversation_key_override: Optional[str] = None,
+     local_mode: bool = False,
+ ) -> None:
      logger.info(f"Processing request: {all_param}")
      mc = MessageChain(all_param)
      logger.info(f"reply: {session.reply}")
@@ -242,12 +242,19 @@ async def process_request(session: Session[MessageCreatedEvent], all_param: Opti
 
      images, err = await process_images(mc, vision_model)
 
-     # Call Agent (Step 1)
-     # Sanitize user_input: use extracted text only
+     # Call Pipeline directly
      safe_input = msg_text
-
-     resp = await hyw.agent(safe_input, conversation_history=hist_payload, images=images,
-                            selected_model=model, selected_vision_model=vision_model, local_mode=local_mode)
+     pipeline = ProcessingPipeline(conf)
+     try:
+         resp = await pipeline.execute(
+             safe_input,
+             hist_payload,
+             model_name=model,
+             images=images,
+             selected_vision_model=vision_model,
+         )
+     finally:
+         await pipeline.close()
 
      # Step 1 Results
      step1_vision_model = resp.get("vision_model_used")
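The long-lived HYW singleton and its Ready/Cleanup hooks are gone: each request now constructs a ProcessingPipeline and tears it down in a finally block, so a failed request cannot leak resources into the next one. If ProcessingPipeline grew async context-manager support, the call site could shrink further; a hypothetical sketch (pipeline_scope is not part of the package):

    import contextlib

    @contextlib.asynccontextmanager
    async def pipeline_scope(conf):
        # Hypothetical wrapper: guarantees close() even when execute() raises
        pipeline = ProcessingPipeline(conf)
        try:
            yield pipeline
        finally:
            await pipeline.close()

    # async with pipeline_scope(conf) as pipeline:
    #     resp = await pipeline.execute(safe_input, hist_payload, model_name=model)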
@@ -258,73 +265,7 @@ async def process_request(session: Session[MessageCreatedEvent], all_param: Opti
      final_resp = resp
 
      # Step 2 (Optional)
-     if next_prompt:
-         logger.info(f"Executing Step 2 with prompt: {next_prompt}")
-
-         # Use Step 1 history as base for Step 2
-         # hyw.agent already returns the updated history including the new turn
-         # So we just pass step1_history
-
-         # Determine Step 2 models
-         # If not specified, inherit from Step 1 or config?
-         # Usually inherit from config or meta if not specified in -n
-         step2_model = next_text_model or model
-         if step2_model and step2_model != "off":
-             resolved_s2, err_s2 = resolve_model_name(step2_model, conf.models)
-             if resolved_s2:
-                 step2_model = resolved_s2
-
-         step2_vision_model = next_vision_model or vision_model  # Probably not used if no new images, but consistent
-         if step2_vision_model and step2_vision_model != "off":
-             resolved_s2v, err_s2v = resolve_model_name(step2_vision_model, conf.models)
-             if resolved_s2v:
-                 step2_vision_model = resolved_s2v
-
-         # No new images for Step 2 usually, unless we want to carry over images?
-         # The user said "First round image model, second round text model".
-         # Usually Step 2 is text-only follow-up.
-         # But hyw.agent stateless? No, we pass history.
-         # We don't pass 'images' again to Step 2 unless we want them re-analyzed.
-         # If Step 1 analyzed images, the analysis is in history (as assistant message or system message?).
-         # In hyw.agent, image analysis result is added to history.
-         # So we don't need to pass images again.
-
-         resp2 = await hyw.agent(str(next_prompt), conversation_history=step1_history, images=None,
-                                 selected_model=step2_model, selected_vision_model=step2_vision_model, local_mode=local_mode)
-
-         final_resp = resp2
-
-         # Merge Stats
-         # Instead of merging into a single dict, we prepare a list of stats for the renderer
-         # But we also need a combined stats for history recording?
-         # History manager likely expects a single dict or doesn't care much (it stores what we give)
-
-         # Let's keep step1_stats and resp2["stats"] separate for rendering
-         # But for history, maybe we still want a merged one?
-         # The code below uses final_resp["stats"] for rendering AND history.
-
-         # Let's create a list for rendering
-         stats_for_render = [step1_stats, resp2.get("stats", {})]
-
-         # And a merged one for history/final_resp
-         merged_stats = step1_stats.copy()
-         if "stats" in resp2:
-             for k, v in resp2["stats"].items():
-                 if isinstance(v, (int, float)) and k in merged_stats:
-                     merged_stats[k] += v
-                 elif k == "visited_domains":
-                     merged_stats[k] = list(set(merged_stats.get(k, []) + v))
-                 else:
-                     merged_stats[k] = v
-
-         final_resp["stats"] = merged_stats
-         final_resp["stats_list"] = stats_for_render  # Pass this to renderer if available
-
-         # Merge Model Info for Display
-         # We want to show Step 1 Vision Model AND Step 2 Text Model
-         if step1_vision_model:
-             final_resp["vision_model_used"] = step1_vision_model
-         # final_resp["model_used"] is already from Step 2
+
 
 
      # Extract Response Data
333
274
 
334
275
  # Render
335
276
  import tempfile
336
- with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tf:
277
+ with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as tf:
337
278
  output_path = tf.name
338
279
  model_used = final_resp.get("model_used")
339
- vision_model_used = final_resp.get("vision_model_used")
340
-
341
- # Helper to infer icon from model name
342
- def infer_icon_from_model(model_name: str) -> str:
343
- """Infer icon name from model name (e.g. 'google/gemini-3-flash' -> 'google' or 'gemini')"""
344
- if not model_name:
345
- return conf.icon
346
- name_lower = model_name.lower()
347
- # Check for known providers/models in the name
348
- known_icons = ["google", "gemini", "openai", "anthropic", "deepseek", "mistral",
349
- "qwen", "grok", "xai", "perplexity", "microsoft", "minimax", "nvidia"]
350
- for icon_name in known_icons:
351
- if icon_name in name_lower:
352
- return icon_name
353
- return conf.icon
354
-
355
- icon = conf.icon
356
- m_conf = None
357
- if model_used:
358
- m_conf = next((m for m in conf.models if m.get("name") == model_used), None)
359
- if m_conf:
360
- icon = m_conf.get("icon", infer_icon_from_model(model_used))
361
- else:
362
- # Model not in config list, infer from name
363
- icon = infer_icon_from_model(model_used)
364
280
 
365
281
  # Determine session short code
366
282
  if hist_key:
367
283
  display_session_id = history_manager.get_code_by_key(hist_key)
368
284
  if not display_session_id:
369
- # Should not happen if key exists, but fallback
370
285
  display_session_id = history_manager.generate_short_code()
371
286
  else:
372
- # New conversation, pre-generate code
373
287
  display_session_id = history_manager.generate_short_code()
374
288
 
375
- # Determine vision base url and icon
376
- vision_base_url = None
377
- vision_icon = None
378
-
379
- if vision_model_used:
380
- v_conf = next((m for m in conf.models if m.get("name") == vision_model_used), None)
381
- if v_conf:
382
- vision_base_url = v_conf.get("base_url")
383
- vision_icon = v_conf.get("icon", infer_icon_from_model(vision_model_used))
384
- else:
385
- vision_icon = infer_icon_from_model(vision_model_used)
386
-
387
- # Handle Vision Only Mode (suppress text model display)
388
- render_model_name = model_used or conf.model_name or "unknown"
389
- render_icon = icon
390
- render_base_url = m_conf.get("base_url", conf.base_url) if m_conf else conf.base_url
391
-
392
- if not model_used and vision_model_used:
393
- render_model_name = ""
394
- render_icon = ""
395
-
396
289
  # Use stats_list if available, otherwise standard stats
397
290
  stats_to_render = final_resp.get("stats_list", final_resp.get("stats", {}))
398
-
399
- # Determine Behavior Summary & Provider Name
400
-
401
- # 1. Behavior Summary
402
- behavior_summary = "Text Generation"
403
- if structured.get("mcp_steps"):
404
- behavior_summary = "Agentic Loop"
405
- elif vision_model_used:
406
- behavior_summary = "Visual Analysis"
407
-
408
- # 2. Provider Name
409
- # Try to get from m_conf (resolved above)
410
- provider_name = "Unknown Provider"
411
- if model_used and m_conf:
412
- provider_name = m_conf.get("provider", "Unknown Provider")
413
- elif not model_used and vision_model_used:
414
- # If only vision model used (unlikely but possible in code logic)
415
- if 'v_conf' in locals() and v_conf:
416
- provider_name = v_conf.get("provider", "Unknown Provider")
417
291
 
418
- # If still unknown and we have base_url, maybe use domain as last resort fallback?
419
- # User said: "provider does not automatically get from url if not filled"
420
- # So if it's "Unknown Provider", we leave it or maybe empty string?
421
- # Let's stick to "Unknown Provider" or just empty if we want to be clean.
422
- # But for UI validation it's better to show something if missing config.
423
-
424
- render_ok = await renderer.render(
425
- markdown_content=content,
426
- output_path=output_path,
427
- suggestions=[],
428
- stats=stats_to_render,
429
- references=structured.get("references", []),
430
- mcp_steps=structured.get("mcp_steps", []),
431
- stages_used=final_resp.get("stages_used", []),
432
- model_name=render_model_name,
433
- provider_name=provider_name,
434
- behavior_summary=behavior_summary,
435
- icon_config=render_icon,
436
- vision_model_name=vision_model_used,
437
- vision_base_url=vision_base_url,
438
- vision_icon_config=vision_icon,
439
- base_url=render_base_url,
440
- billing_info=final_resp.get("billing_info"),
441
- render_timeout_ms=conf.render_timeout_ms
442
- )
292
+ # Check if refuse_answer was triggered
293
+ if final_resp.get("refuse_answer"):
294
+ logger.info(f"Refuse answer triggered. Rendering refuse image. Reason: {final_resp.get('refuse_reason', '')}")
295
+ render_ok = await render_refuse_answer(
296
+ renderer=renderer,
297
+ output_path=output_path,
298
+ theme_color=conf.theme_color,
299
+ )
300
+ else:
301
+ render_ok = await renderer.render(
302
+ markdown_content=content,
303
+ output_path=output_path,
304
+ stats=stats_to_render,
305
+ references=structured.get("references", []),
306
+ page_references=structured.get("page_references", []),
307
+ image_references=structured.get("image_references", []),
308
+ stages_used=final_resp.get("stages_used", []),
309
+ image_timeout=conf.render_image_timeout_ms,
310
+ theme_color=conf.theme_color,
311
+ )
443
312
 
444
313
  # Send & Save
445
314
  if not render_ok:
446
- logger.error("Render failed; skipping reply. Check browser/playwright status.")
315
+ logger.error("Render failed; skipping reply. Check Crawl4AI rendering status.")
447
316
  if os.path.exists(output_path):
448
317
  try:
449
318
  os.remove(output_path)
@@ -506,45 +375,17 @@ async def process_request(session: Session[MessageCreatedEvent], all_param: Opti
         except Exception as save_err:
             logger.error(f"Failed to save error conversation: {save_err}")
 
- # Secondary Parser for -n content
- next_alc = Alconna(
-     "next",
-     Option("-v|--vision", Args["vision_model", str], help_text="Set the vision model (off to disable)"),
-     Option("-t|--text", Args["text_model", str], help_text="Set the text model"),
-     Args["prompt", AllParam],
- )
 
- # Main Command (Question)
  alc = Alconna(
      conf.question_command,
-     Option("-v|--vision", Args["vision_model", str]),
-     Option("-t|--text", Args["text_model", str]),
-     Option("-c|--code", Args["code", str]),
-     Option("-n|--next", Args["next_input", AllParam]),
-     Args["list_models;?", "-m|--models"],
-     Args["all_chat;?", "-a"],
-     Args["local_mode;?", "-l"],
-     Args["all_param?", MultiVar(str | Image | Custom)],
-     meta=CommandMeta(
-         compact=False,
-         description=f"""Usage:
- {conf.question_command} -a : list all conversations
- {conf.question_command} -m : list all models
- {conf.question_command} -v <model> : set the primary vision model (off to disable)
- {conf.question_command} -t <model> : set the primary text model
- {conf.question_command} -l : enable local mode (disables web indexing)
- {conf.question_command} -c <4-char code> : continue the given conversation
- {conf.question_command} -n <follow-up prompt> : run a follow-up step after the first completes (supports -t/-v)
- {conf.question_command} <question> : ask a question
- Features:
- """
-     )
+     Args["all_param;?", AllParam],
  )
 
- @command.on(alc)
+ @command.on(alc)
  async def handle_question_command(session: Session[MessageCreatedEvent], result: Arparma):
      """Handle main Question command"""
      try:
+         logger.info(f"Question Command Triggered. Message: {result}")
          mid = str(session.event.message.id) if getattr(session.event, "message", None) else str(session.event.id)
          dedupe_key = f"{getattr(session.account, 'id', 'account')}:{mid}"
          if _event_deduper.seen_recently(dedupe_key):
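The command surface collapses from seven options plus a secondary "-n" parser to a single free-form tail. An illustrative parse, assuming question_command defaults to something like "hyw" (the Arparma accessors below are the same ones the handler uses):

    from arclet.alconna import Alconna, Args, AllParam

    alc = Alconna("hyw", Args["all_param;?", AllParam])
    res = alc.parse("hyw why is the sky blue")
    print(res.matched)                         # True
    print(res.all_matched_args["all_param"])   # the free-form tail after the command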
@@ -558,160 +399,11 @@ async def handle_question_command(session: Session[MessageCreatedEvent], result:
          args = result.all_matched_args
          logger.info(f"Matched Args: {args}")
 
-         text_model_val = args.get("text_model")
-         vision_model_val = args.get("vision_model")
-         code_val = args.get("code")
-         all_flag_val = args.get("all_chat")
-         list_models_val = args.get("list_models")
-         local_mode_val = True if args.get("local_mode") else False
-         logger.info(f"Local mode: {local_mode_val} (type: {type(local_mode_val)})")
-
-         # Handle -m (List Models)
-         if list_models_val:
-             # global_cache is already imported/defined in __init__.py
-
-             if global_cache.models_image_path and os.path.exists(global_cache.models_image_path):
-                 logger.info(f"Using cached models list: {global_cache.models_image_path}")
-                 with open(global_cache.models_image_path, "rb") as f:
-                     img_data = base64.b64encode(f.read()).decode()
-                 msg = MessageChain(Image(src=f'data:image/png;base64,{img_data}'))
-                 if conf.quote: msg = MessageChain(Quote(session.event.message.id)) + msg
-                 await session.send(msg)
-                 return
-
-             output_dir = "data/cache"
-             os.makedirs(output_dir, exist_ok=True)
-             output_path = f"{output_dir}/models_list_cache.png"
-
-             await renderer.render_models_list(
-                 conf.models,
-                 output_path,
-                 default_base_url=conf.base_url,
-                 render_timeout_ms=conf.render_timeout_ms,
-             )
-             global_cache.models_image_path = os.path.abspath(output_path)
-
-             with open(output_path, "rb") as f:
-                 img_data = base64.b64encode(f.read()).decode()
-             msg = MessageChain(Image(src=f'data:image/png;base64,{img_data}'))
-             if conf.quote: msg = MessageChain(Quote(session.event.message.id)) + msg
-             await session.send(msg)
-             return
-
-         # Handle -a (List History)
-         if all_flag_val:
-             context_id = f"guild_{session.guild.id}" if session.guild else f"user_{session.user.id}"
-             keys = history_manager.list_by_context(context_id, limit=10)
-             if not keys:
-                 msg = "No conversation history yet"
-                 if conf.quote: await session.send([Quote(session.event.message.id), msg])
-                 else: await session.send(msg)
-                 return
-
-             msg = "Conversation history [last 10]\n"
-             for i, key in enumerate(keys):
-                 short_code = history_manager.get_code_by_key(key) or "????"
-                 hist = history_manager.get_history(key)
-                 preview = "..."
-                 if hist and len(hist) > 0:
-                     last_content = hist[-1].get("content", "")
-                     preview = (last_content[:20] + "...") if len(last_content) > 20 else last_content
-
-                 msg += f"{short_code} {preview}\n"
-             if conf.quote: await session.send([Quote(session.event.message.id), msg])
-             else: await session.send(msg)
-             return
-
-         selected_vision_model = None
-         selected_text_model = None
-
-         if vision_model_val:
-             if vision_model_val.lower() == "off":
-                 selected_vision_model = "off"
-             else:
-                 selected_vision_model, err = resolve_model_name(vision_model_val, conf.models)
-                 if err:
-                     if conf.quote: await session.send([Quote(session.event.message.id), err])
-                     else: await session.send(err)
-                     return
-             logger.info(f"Selected vision model: {selected_vision_model}")
-
-         if text_model_val:
-             selected_text_model, err = resolve_model_name(text_model_val, conf.models)
-             if err:
-                 if conf.quote: await session.send([Quote(session.event.message.id), err])
-                 else: await session.send(err)
-                 return
-             logger.info(f"Selected text model: {selected_text_model}")
-
-         # Determine History to Continue
-         target_key = None
-         context_id = f"guild_{session.guild.id}" if session.guild else f"user_{session.user.id}"
-
-         # 1. Explicit Code
-         if code_val:
-             target_code = code_val
-             target_key = history_manager.get_key_by_code(target_code)
-             if not target_key:
-                 msg = f"No conversation found with code {target_code}"
-                 if conf.quote: await session.send([Quote(session.event.message.id), msg])
-                 else: await session.send(msg)
-                 return
-             logger.info(f"Question: Continuing session {target_code} -> {target_key}")
-
-         next_input_val = args.get("next_input")
-         next_text_model = None
-         next_vision_model = None
-         next_prompt = None
-
-         if next_input_val:
-             # Parse secondary command
-             # next_input_val is likely a MessageChain or string depending on AllParam behavior with Alconna
-             # We need to ensure it's a string or compatible input for parse
-             logger.info(f"Parsing next input: {next_input_val}")
-             try:
-                 # Convert next_input_val to string
-                 if isinstance(next_input_val, list):
-                     # It's a list of segments (e.g. [Text(...)])
-                     # We need to join them into a string
-                     # Assuming they are Satori elements or similar
-                     cmd_str = "".join(str(x) for x in next_input_val)
-                 else:
-                     cmd_str = str(next_input_val)
-
-                 # Prepend 'next' header for Alconna
-                 parse_target = f"next {cmd_str}"
-
-                 next_res = next_alc.parse(parse_target)
-                 if next_res.matched:
-                     next_args = next_res.all_matched_args
-                     next_text_model = next_args.get("text_model")
-                     next_vision_model = next_args.get("vision_model")
-                     next_prompt = next_args.get("prompt")
-
-                     # If prompt is AllParam, it might be captured as a list or string depending on Alconna version
-                     # If it's a list, join it back to string
-                     if isinstance(next_prompt, list):
-                         next_prompt = "".join(str(x) for x in next_prompt)
-
-                     logger.info(f"Next Command Parsed: text={next_text_model}, vision={next_vision_model}, prompt={next_prompt}")
-                 else:
-                     logger.warning(f"Next command parsing failed or no match for: {parse_target}")
-                     # Fallback: treat the whole string as prompt if parsing failed (e.g. if it didn't match options but Alconna should have matched prompt)
-                     # But next_alc has Args["prompt", AllParam], so it should match everything else.
-                     # If it failed, maybe something else is wrong.
-                     # Let's assume if it failed, we just use the raw string as prompt?
-                     # But wait, if we prepend "next ", and next_alc starts with "next", it should match.
-                     pass
-             except Exception as e:
-                 logger.error(f"Failed to parse next command: {e}")
+         await process_request(session, args.get("all_param"), selected_model=None, selected_vision_model=None, conversation_key_override=None, local_mode=False)
 
-         await process_request(session, args.get("all_param"), selected_model=selected_text_model, selected_vision_model=selected_vision_model, conversation_key_override=target_key, local_mode=local_mode_val,
-                               next_prompt=next_prompt, next_text_model=next_text_model, next_vision_model=next_vision_model)
+ metadata("hyw", author=[{"name": "kumoSleeping", "email": "zjr2992@outlook.com"}], version=__version__, config=HywConfig)
 
- metadata("hyw", author=[{"name": "kumoSleeping", "email": "zjr2992@outlook.com"}], version="3.2.105", config=HywConfig)
 
- @leto.on(CommandReceive)
+ @listen(CommandReceive)
  async def remove_at(content: MessageChain):
-     content = content.lstrip(At)
-     return content
+     return content.lstrip(At)
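The CommandReceive hook now strips a leading @-mention in one expression. A sketch of the effect, assuming the usual satori element constructors (At takes the target id):

    from arclet.entari import MessageChain, Text, At

    mc = MessageChain([At("12345"), Text("hyw hello")])
    print(mc.lstrip(At))  # MessageChain with only Text("hyw hello") left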