entari-plugin-hyw 0.3.5__py3-none-any.whl → 4.0.0rc14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of entari-plugin-hyw might be problematic. Click here for more details.

Files changed (78) hide show
  1. entari_plugin_hyw/Untitled-1 +1865 -0
  2. entari_plugin_hyw/__init__.py +979 -116
  3. entari_plugin_hyw/filters.py +83 -0
  4. entari_plugin_hyw/history.py +251 -0
  5. entari_plugin_hyw/misc.py +214 -0
  6. entari_plugin_hyw/search_cache.py +154 -0
  7. entari_plugin_hyw-4.0.0rc14.dist-info/METADATA +118 -0
  8. entari_plugin_hyw-4.0.0rc14.dist-info/RECORD +72 -0
  9. {entari_plugin_hyw-0.3.5.dist-info → entari_plugin_hyw-4.0.0rc14.dist-info}/WHEEL +1 -1
  10. {entari_plugin_hyw-0.3.5.dist-info → entari_plugin_hyw-4.0.0rc14.dist-info}/top_level.txt +1 -0
  11. hyw_core/__init__.py +94 -0
  12. hyw_core/agent.py +768 -0
  13. hyw_core/browser_control/__init__.py +63 -0
  14. hyw_core/browser_control/assets/card-dist/index.html +425 -0
  15. hyw_core/browser_control/assets/card-dist/logos/anthropic.svg +1 -0
  16. hyw_core/browser_control/assets/card-dist/logos/cerebras.svg +9 -0
  17. hyw_core/browser_control/assets/card-dist/logos/deepseek.png +0 -0
  18. hyw_core/browser_control/assets/card-dist/logos/gemini.svg +1 -0
  19. hyw_core/browser_control/assets/card-dist/logos/google.svg +1 -0
  20. hyw_core/browser_control/assets/card-dist/logos/grok.png +0 -0
  21. hyw_core/browser_control/assets/card-dist/logos/huggingface.png +0 -0
  22. hyw_core/browser_control/assets/card-dist/logos/microsoft.svg +15 -0
  23. hyw_core/browser_control/assets/card-dist/logos/minimax.png +0 -0
  24. hyw_core/browser_control/assets/card-dist/logos/mistral.png +0 -0
  25. hyw_core/browser_control/assets/card-dist/logos/nvida.png +0 -0
  26. hyw_core/browser_control/assets/card-dist/logos/openai.svg +1 -0
  27. hyw_core/browser_control/assets/card-dist/logos/openrouter.png +0 -0
  28. hyw_core/browser_control/assets/card-dist/logos/perplexity.svg +24 -0
  29. hyw_core/browser_control/assets/card-dist/logos/qwen.png +0 -0
  30. hyw_core/browser_control/assets/card-dist/logos/xai.png +0 -0
  31. hyw_core/browser_control/assets/card-dist/logos/xiaomi.png +0 -0
  32. hyw_core/browser_control/assets/card-dist/logos/zai.png +0 -0
  33. hyw_core/browser_control/assets/card-dist/vite.svg +1 -0
  34. hyw_core/browser_control/assets/index.html +5691 -0
  35. hyw_core/browser_control/assets/logos/anthropic.svg +1 -0
  36. hyw_core/browser_control/assets/logos/cerebras.svg +9 -0
  37. hyw_core/browser_control/assets/logos/deepseek.png +0 -0
  38. hyw_core/browser_control/assets/logos/gemini.svg +1 -0
  39. hyw_core/browser_control/assets/logos/google.svg +1 -0
  40. hyw_core/browser_control/assets/logos/grok.png +0 -0
  41. hyw_core/browser_control/assets/logos/huggingface.png +0 -0
  42. hyw_core/browser_control/assets/logos/microsoft.svg +15 -0
  43. hyw_core/browser_control/assets/logos/minimax.png +0 -0
  44. hyw_core/browser_control/assets/logos/mistral.png +0 -0
  45. hyw_core/browser_control/assets/logos/nvida.png +0 -0
  46. hyw_core/browser_control/assets/logos/openai.svg +1 -0
  47. hyw_core/browser_control/assets/logos/openrouter.png +0 -0
  48. hyw_core/browser_control/assets/logos/perplexity.svg +24 -0
  49. hyw_core/browser_control/assets/logos/qwen.png +0 -0
  50. hyw_core/browser_control/assets/logos/xai.png +0 -0
  51. hyw_core/browser_control/assets/logos/xiaomi.png +0 -0
  52. hyw_core/browser_control/assets/logos/zai.png +0 -0
  53. hyw_core/browser_control/engines/__init__.py +15 -0
  54. hyw_core/browser_control/engines/base.py +13 -0
  55. hyw_core/browser_control/engines/default.py +166 -0
  56. hyw_core/browser_control/engines/duckduckgo.py +171 -0
  57. hyw_core/browser_control/landing.html +172 -0
  58. hyw_core/browser_control/manager.py +173 -0
  59. hyw_core/browser_control/renderer.py +446 -0
  60. hyw_core/browser_control/service.py +940 -0
  61. hyw_core/config.py +154 -0
  62. hyw_core/core.py +462 -0
  63. hyw_core/crawling/__init__.py +18 -0
  64. hyw_core/crawling/completeness.py +437 -0
  65. hyw_core/crawling/models.py +88 -0
  66. hyw_core/definitions.py +104 -0
  67. hyw_core/image_cache.py +274 -0
  68. hyw_core/pipeline.py +502 -0
  69. hyw_core/search.py +171 -0
  70. hyw_core/stages/__init__.py +21 -0
  71. hyw_core/stages/base.py +95 -0
  72. hyw_core/stages/summary.py +191 -0
  73. entari_plugin_hyw/agent.py +0 -419
  74. entari_plugin_hyw/compressor.py +0 -59
  75. entari_plugin_hyw/tools.py +0 -236
  76. entari_plugin_hyw/vision.py +0 -35
  77. entari_plugin_hyw-0.3.5.dist-info/METADATA +0 -112
  78. entari_plugin_hyw-0.3.5.dist-info/RECORD +0 -9
hyw_core/agent.py ADDED
@@ -0,0 +1,768 @@
1
+ """
2
+ Agent Pipeline
3
+
4
+ Tool-calling agent that can autonomously use web_tool to search/screenshot.
5
+ Maximum 2 rounds of tool calls, up to 3 parallel calls per round.
6
+ """
7
+
8
+ import asyncio
9
+ import json
10
+ import re
11
+ import time
12
+ from dataclasses import dataclass, field
13
+ from typing import Any, Callable, Awaitable, Dict, List, Optional
14
+
15
+ from loguru import logger
16
+ from openai import AsyncOpenAI
17
+
18
+ from .definitions import get_web_tool, get_refuse_answer_tool, AGENT_SYSTEM_PROMPT
19
+ from .stages.base import StageContext, StageResult
20
+ from .search import SearchService
21
+
22
+
23
@dataclass
class AgentSession:
    """Agent session with tool call tracking.

    Mutable per-request state for a single AgentPipeline.execute() run:
    the LLM message list, every tool call and its result, and the
    timing/usage counters surfaced in the final stats UI.
    """
    session_id: str  # opaque id; execute() fills it with str(time.time())
    user_query: str
    tool_calls: List[Dict[str, Any]] = field(default_factory=list)  # {"name", "args"} per call
    tool_results: List[Dict[str, Any]] = field(default_factory=list)  # parallel to tool_calls
    conversation_history: List[Dict] = field(default_factory=list)
    messages: List[Dict] = field(default_factory=list)  # LLM conversation
    created_at: float = field(default_factory=time.time)

    # Round tracking (each round can have up to 3 parallel tool calls)
    round_count: int = 0

    # Image tracking
    user_image_count: int = 0  # Number of images from user input
    total_image_count: int = 0  # Total images including web screenshots

    # Time tracking
    search_time: float = 0.0  # Total time spent on search/screenshot
    llm_time: float = 0.0  # Total time spent on LLM calls
    first_llm_time: float = 0.0  # Time for first LLM call (understanding intent)

    # Usage tracking
    usage_totals: Dict[str, int] = field(default_factory=lambda: {"input_tokens": 0, "output_tokens": 0})

    @property
    def call_count(self) -> int:
        """Total number of individual tool calls (summed across all rounds)."""
        return len(self.tool_calls)

    @property
    def should_force_summary(self) -> bool:
        """Force summary after 2 rounds of tool calls."""
        # NOTE(review): hard-codes 2 rather than referencing
        # AgentPipeline.MAX_TOOL_ROUNDS — keep the two in sync if the limit changes.
        return self.round_count >= 2
58
+
59
+
60
def parse_filter_syntax(query: str, max_count: int = 3):
    """
    Parse enhanced filter syntax supporting:
    - Chinese/English colons (":" / fullwidth U+FF1A) and commas ("," / U+FF0C / U+3001)
    - Multiple filters: "mcmod=2, github=1 : xxx"
    - Index lists: "1, 2, 3 : xxx"
    - Max total selections

    Args:
        query: Raw user query, optionally prefixed with "filters:".
        max_count: Maximum total selections allowed across all filters.

    Returns:
        (filters, search_query, error_msg) where
        filters: list of (filter_type, filter_value, count) tuples,
            or None when the total exceeds max_count
            filter_type: 'index' or 'link'
            filter_value: int (for index) or str (for link match term)
            count: how many to get (default 1)
        search_query: the actual search query
        error_msg: error message if exceeded max, else None
    """
    # Skip filter parsing if the query contains a URL (has :// pattern) —
    # the scheme's colon would otherwise be mistaken for the filter separator.
    if re.search(r'https?://', query):
        return [], query.strip(), None

    # Normalize fullwidth (Chinese) colons to ASCII before splitting.
    # (Written as an escape so the fullwidth character survives re-encoding.)
    query = query.replace('\uff1a', ':')

    if ':' not in query:
        return [], query.strip(), None

    # Split on the FIRST colon only: the search query itself may contain colons.
    filter_part, _, search_query = query.partition(':')
    filter_part = filter_part.strip()
    search_query = search_query.strip()

    # Either side empty -> treat the whole input as a plain query.
    if not filter_part or not search_query:
        return [], query.strip(), None

    # Parse filter expressions
    filters = []
    total_count = 0

    # Normalize fullwidth comma (U+FF0C) and ideographic comma (U+3001).
    filter_part = filter_part.replace('\uff0c', ',').replace('\u3001', ',')
    filter_items = [f.strip() for f in filter_part.split(',') if f.strip()]

    for item in filter_items:
        # "term=count" format -> link filter (term matched against URL/title).
        if '=' in item:
            term, count_str = item.split('=', 1)
            term = term.strip().lower()
            try:
                count = int(count_str.strip())
            except ValueError:
                count = 1  # malformed count falls back to 1
            if term and count > 0:
                filters.append(('link', term, count))
                total_count += count
        # Pure number -> index filter (1-based position in search results).
        elif item.isdigit():
            idx = int(item)
            if 1 <= idx <= 10:
                filters.append(('index', idx, 1))
                total_count += 1
        # Anything else is silently ignored (matches original behavior).

    if total_count > max_count:
        return None, search_query, f"⚠️ 最多选择{max_count}个结果"

    return filters, search_query, None
129
+
130
+
131
class AgentPipeline:
    """
    Tool-calling agent pipeline.

    Flow:
    1. User input -> LLM (with tools)
    2. If tool_call: execute all tools in parallel -> notify user with batched message -> loop
    3. If call_count >= 2 rounds: force summary on next call
    4. Return final content
    """

    MAX_TOOL_ROUNDS = 2  # Maximum rounds of tool calls
    MAX_PARALLEL_TOOLS = 3  # Maximum parallel tool calls per round
    # NOTE(review): MAX_PARALLEL_TOOLS is declared but never enforced in this
    # class — the loop runs however many tool_calls the model emits.
    MAX_LLM_RETRIES = 3  # Maximum retries for empty API responses
    LLM_RETRY_DELAY = 1.0  # Delay between retries in seconds

    def __init__(
        self,
        config: Any,
        search_service: SearchService,
        send_func: Optional[Callable[[str], Awaitable[None]]] = None
    ):
        # send_func, when provided, delivers progress notifications to the user;
        # every call site swallows its exceptions so messaging failures are non-fatal.
        self.config = config
        self.search_service = search_service
        self.send_func = send_func
        # NOTE(review): this client is never used by execute(), which builds its
        # own per-call client from the "main" model config below.
        self.client = AsyncOpenAI(base_url=config.base_url, api_key=config.api_key)

    async def execute(
        self,
        user_input: str,
        conversation_history: List[Dict],
        images: List[str] = None,
        model_name: str = None,
    ) -> Dict[str, Any]:
        """Execute agent with tool-calling loop.

        Args:
            user_input: The user's query text.
            conversation_history: Prior turns; mutated in place with the new
                user/assistant pair on success.
            images: Optional base64-encoded images attached to the user turn.
            model_name: Optional model override; falls back to the "main"
                model config, then the global config.

        Returns:
            Result dict with "llm_response", "success", "stats", "usage", and —
            on the happy path — "model_used", "web_results", "tool_calls_count",
            "stages_used". Refusals return early with "refuse_answer"/"refuse_reason".
        """
        start_time = time.time()

        # Get model config
        model_cfg = self.config.get_model_config("main")
        model = model_name or model_cfg.model_name or self.config.model_name

        client = AsyncOpenAI(
            base_url=model_cfg.base_url or self.config.base_url,
            api_key=model_cfg.api_key or self.config.api_key
        )

        # Create session
        session = AgentSession(
            session_id=str(time.time()),
            user_query=user_input,
            conversation_history=conversation_history.copy()
        )

        # Create context for results
        context = StageContext(
            user_input=user_input,
            images=images or [],
            conversation_history=conversation_history,
        )

        # Build initial messages (system prompt carries answer language + clock)
        language = getattr(self.config, "language", "Simplified Chinese")
        from datetime import datetime
        current_time = datetime.now().strftime("%Y-%m-%d %H:%M")
        system_prompt = AGENT_SYSTEM_PROMPT + f"\n\n用户要求的语言: {language}\n当前时间: {current_time}"

        # Build user content with images if provided
        user_image_count = len(images) if images else 0
        session.user_image_count = user_image_count
        session.total_image_count = user_image_count

        if images:
            # Multimodal content: text part first, then one image_url part per image.
            user_content: List[Dict[str, Any]] = [{"type": "text", "text": user_input}]
            for img_b64 in images:
                # Wrap raw base64 in a data URL; pass pre-formed data URLs through.
                url = f"data:image/jpeg;base64,{img_b64}" if not img_b64.startswith("data:") else img_b64
                user_content.append({"type": "image_url", "image_url": {"url": url}})
        else:
            user_content = user_input

        session.messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_content}
        ]

        # Add image source hint for user images (tells the model which
        # attached images came from the user as opposed to web screenshots)
        if user_image_count > 0:
            if user_image_count == 1:
                hint = "第1张图片来自用户输入,请将这张图片作为用户输入的参考"
            else:
                hint = f"第1-{user_image_count}张图片来自用户输入,请将这{user_image_count}张图片作为用户输入的参考"
            session.messages.append({"role": "system", "content": hint})

        # Tool definitions
        web_tool = get_web_tool()
        refuse_tool = get_refuse_answer_tool()
        tools = [web_tool, refuse_tool]

        usage_totals = {"input_tokens": 0, "output_tokens": 0}
        final_content = ""

        # Send initial status notification (best-effort)
        if self.send_func:
            try:
                await self.send_func("💭 正在理解用户意图...")
            except Exception as e:
                logger.warning(f"AgentPipeline: Failed to send initial notification: {e}")

        # Agent loop
        while True:
            # Check if we need to force summary (no tools)
            if session.should_force_summary:
                logger.info(f"AgentPipeline: Max tool rounds ({self.MAX_TOOL_ROUNDS}) reached, forcing summary")
                # Add context message about collected info
                if context.web_results:
                    context_msg = self._format_web_context(context)
                    session.messages.append({
                        "role": "system",
                        "content": f"你已经完成了{session.call_count}次工具调用。请基于已收集的信息给出最终回答。\n\n{context_msg}"
                    })

                # Final call without tools (with retry)
                response = None
                for retry in range(self.MAX_LLM_RETRIES):
                    try:
                        response = await client.chat.completions.create(
                            model=model,
                            messages=session.messages,
                            temperature=self.config.temperature,
                        )

                        # Usage is accrued per attempt here (every billed call counts).
                        if response.usage:
                            usage_totals["input_tokens"] += response.usage.prompt_tokens or 0
                            usage_totals["output_tokens"] += response.usage.completion_tokens or 0

                        # Check for valid response
                        if response.choices:
                            break  # Success, exit retry loop

                        # Empty choices, retry
                        logger.warning(f"AgentPipeline: Empty choices in force-summary (attempt {retry + 1}/{self.MAX_LLM_RETRIES}): {response}")
                        if retry < self.MAX_LLM_RETRIES - 1:
                            await asyncio.sleep(self.LLM_RETRY_DELAY)
                    except Exception as e:
                        logger.warning(f"AgentPipeline: LLM error (attempt {retry + 1}/{self.MAX_LLM_RETRIES}): {e}")
                        if retry < self.MAX_LLM_RETRIES - 1:
                            await asyncio.sleep(self.LLM_RETRY_DELAY)
                        else:
                            # Last attempt raised: give up with an error payload.
                            return {
                                "llm_response": f"Error: {e}",
                                "success": False,
                                "error": str(e),
                                "stats": {"total_time": time.time() - start_time}
                            }

                # Final check after all retries
                if not response or not response.choices:
                    logger.error(f"AgentPipeline: All retries failed for force-summary")
                    return {
                        "llm_response": "抱歉,AI 服务返回了空响应,请稍后重试。",
                        "success": False,
                        "error": "Empty response from API after retries",
                        "stats": {"total_time": time.time() - start_time},
                        "usage": usage_totals,
                    }

                final_content = response.choices[0].message.content or ""
                break

            # Normal call with tools (with retry)
            llm_start = time.time()
            response = None

            for retry in range(self.MAX_LLM_RETRIES):
                try:
                    response = await client.chat.completions.create(
                        model=model,
                        messages=session.messages,
                        temperature=self.config.temperature,
                        tools=tools,
                        tool_choice="auto",
                    )

                    # Check for valid response
                    if response.choices:
                        break  # Success, exit retry loop

                    # Empty choices, retry
                    logger.warning(f"AgentPipeline: Empty choices (attempt {retry + 1}/{self.MAX_LLM_RETRIES}): {response}")
                    if retry < self.MAX_LLM_RETRIES - 1:
                        await asyncio.sleep(self.LLM_RETRY_DELAY)
                except Exception as e:
                    logger.warning(f"AgentPipeline: LLM error (attempt {retry + 1}/{self.MAX_LLM_RETRIES}): {e}")
                    if retry < self.MAX_LLM_RETRIES - 1:
                        await asyncio.sleep(self.LLM_RETRY_DELAY)
                    else:
                        logger.error(f"AgentPipeline: All retries failed: {e}")
                        return {
                            "llm_response": f"Error: {e}",
                            "success": False,
                            "error": str(e),
                            "stats": {"total_time": time.time() - start_time}
                        }

            llm_duration = time.time() - llm_start
            session.llm_time += llm_duration

            # Track first LLM call time (intent-understanding stage in the UI)
            if session.call_count == 0 and session.first_llm_time == 0:
                session.first_llm_time = llm_duration

            # Final check after all retries
            if not response or not response.choices:
                logger.error(f"AgentPipeline: All retries failed, empty choices")
                return {
                    "llm_response": "抱歉,AI 服务返回了空响应,请稍后重试。",
                    "success": False,
                    "error": "Empty response from API after retries",
                    "stats": {"total_time": time.time() - start_time},
                    "usage": usage_totals,
                }

            # NOTE(review): unlike the force-summary branch, usage here is
            # counted only for the response that survived the retry loop.
            if response.usage:
                usage_totals["input_tokens"] += response.usage.prompt_tokens or 0
                usage_totals["output_tokens"] += response.usage.completion_tokens or 0

            message = response.choices[0].message

            # Check for tool calls
            if not message.tool_calls:
                # Model chose to answer directly
                final_content = message.content or ""
                logger.info(f"AgentPipeline: Model answered directly after {session.call_count} tool calls")
                break

            # Add assistant message with tool calls (echoed back so each
            # subsequent "tool" message can reference its tool_call_id)
            session.messages.append({
                "role": "assistant",
                "content": message.content,
                "tool_calls": [
                    {
                        "id": tc.id,
                        "type": "function",
                        "function": {"name": tc.function.name, "arguments": tc.function.arguments}
                    }
                    for tc in message.tool_calls
                ]
            })

            # Execute all tool calls in parallel
            tool_tasks = []  # NOTE(review): unused — tasks are collected in tasks_to_run below
            tool_call_ids = []
            tool_call_names = []
            tool_call_args_list = []

            for tool_call in message.tool_calls:
                tc_id = tool_call.id
                func_name = tool_call.function.name

                try:
                    args = json.loads(tool_call.function.arguments)
                except json.JSONDecodeError:
                    args = {}  # malformed arguments degrade to an empty-arg call

                tool_call_ids.append(tc_id)
                tool_call_names.append(func_name)
                tool_call_args_list.append(args)
                logger.info(f"AgentPipeline: Queueing tool '{func_name}' with args: {args}")

            # Check for refuse_answer first (handle immediately — short-circuits
            # the whole run; any sibling tool calls in this round are dropped)
            for idx, func_name in enumerate(tool_call_names):
                if func_name == "refuse_answer":
                    args = tool_call_args_list[idx]
                    reason = args.get("reason", "Refused")
                    context.should_refuse = True
                    context.refuse_reason = reason

                    session.messages.append({
                        "role": "tool",
                        "tool_call_id": tool_call_ids[idx],
                        "content": f"已拒绝回答: {reason}"
                    })

                    return {
                        "llm_response": "",
                        "success": True,
                        "refuse_answer": True,
                        "refuse_reason": reason,
                        "stats": {"total_time": time.time() - start_time},
                        "usage": usage_totals,
                    }

            # Execute web_tool calls in parallel
            search_start = time.time()
            tasks_to_run = []
            task_indices = []

            for idx, func_name in enumerate(tool_call_names):
                if func_name == "web_tool":
                    tasks_to_run.append(self._execute_web_tool(tool_call_args_list[idx], context))
                    task_indices.append(idx)

            # Run all web_tool calls in parallel
            if tasks_to_run:
                results = await asyncio.gather(*tasks_to_run, return_exceptions=True)
            else:
                results = []

            session.search_time += time.time() - search_start

            # Process results and collect notifications
            notifications = []
            result_map = {}  # Map task index to result

            for i, result in enumerate(results):
                task_idx = task_indices[i]
                if isinstance(result, Exception):
                    # Per-task failure becomes an error summary, not a crash.
                    result_map[task_idx] = {"summary": f"执行失败: {result}", "results": []}
                else:
                    result_map[task_idx] = result

            # Add all tool results to messages and collect notifications
            for idx, func_name in enumerate(tool_call_names):
                tc_id = tool_call_ids[idx]
                args = tool_call_args_list[idx]

                if func_name == "web_tool":
                    result = result_map.get(idx, {"summary": "未执行", "results": []})

                    # Track tool call
                    session.tool_calls.append({"name": func_name, "args": args})
                    session.tool_results.append(result)

                    # Collect notification
                    notifications.append(f"🔍 {result['summary']}")

                    # Add tool result to messages
                    result_content = f"搜索完成: {result['summary']}\n\n找到 {len(result.get('results', []))} 个结果"
                    session.messages.append({
                        "role": "tool",
                        "tool_call_id": tc_id,
                        "content": result_content
                    })

                    # Add image source hint for web screenshots (image indices
                    # continue on from the user-supplied images)
                    screenshot_count = result.get("screenshot_count", 0)
                    if screenshot_count > 0:
                        start_idx_img = session.total_image_count + 1
                        end_idx_img = session.total_image_count + screenshot_count
                        session.total_image_count = end_idx_img

                        source_desc = result.get("source_desc", "网页截图")
                        if start_idx_img == end_idx_img:
                            hint = f"第{start_idx_img}张图片来自{source_desc},作为查询的参考资料"
                        else:
                            hint = f"第{start_idx_img}-{end_idx_img}张图片来自{source_desc},作为查询的参考资料"
                        session.messages.append({"role": "system", "content": hint})
                else:
                    # Unknown tool
                    session.messages.append({
                        "role": "tool",
                        "tool_call_id": tc_id,
                        "content": f"Unknown tool: {func_name}"
                    })

            # Send batched notification (up to 3 lines, best-effort)
            if self.send_func and notifications:
                try:
                    # Join notifications with newlines, max 3 lines
                    notification_msg = "\n".join(notifications[:3])
                    await self.send_func(notification_msg)
                except Exception as e:
                    logger.warning(f"AgentPipeline: Failed to send notification: {e}")

            # Increment round count after processing all tool calls in this round
            if tasks_to_run:
                session.round_count += 1

        # Build final response
        total_time = time.time() - start_time
        stats = {"total_time": total_time}

        # Update conversation history (caller's list, mutated in place)
        conversation_history.append({"role": "user", "content": user_input})
        conversation_history.append({"role": "assistant", "content": final_content})

        stages_used = self._build_stages_ui(session, context, usage_totals, total_time)
        logger.info(f"AgentPipeline: Built stages_used = {stages_used}")

        return {
            "llm_response": final_content,
            "success": True,
            "stats": stats,
            "model_used": model,
            "conversation_history": conversation_history,
            "usage": usage_totals,
            "web_results": context.web_results,
            "tool_calls_count": session.call_count,
            "stages_used": stages_used,
        }

    async def _execute_web_tool(self, args: Dict, context: StageContext) -> Dict[str, Any]:
        """Execute web_tool — reuses the /w command logic, supports filter syntax.

        Three modes, decided from the query string:
        1. URL present -> screenshot that URL (with extracted text content);
        2. filter prefix present -> search, then screenshot the matching hits;
        3. otherwise -> plain search, no screenshots.

        Returns a dict with "summary", "results", "screenshot_count" and,
        when screenshots were taken, "source_desc".
        """
        query = args.get("query", "")

        # 1. URL screenshot mode — detect whether the query contains a URL
        url_match = re.search(r'https?://\S+', query)
        if url_match:
            url = url_match.group(0)
            # Send URL screenshot notification (best-effort)
            if self.send_func:
                try:
                    short_url = url[:40] + "..." if len(url) > 40 else url
                    await self.send_func(f"📸 正在截图: {short_url}")
                except Exception:
                    pass

            logger.info(f"AgentPipeline: Screenshot URL with content: {url}")
            # Use screenshot_with_content to get both screenshot and text
            result = await self.search_service.screenshot_with_content(url)
            screenshot_b64 = result.get("screenshot_b64")
            content = result.get("content", "")
            title = result.get("title", "")

            if screenshot_b64:
                context.web_results.append({
                    "_id": context.next_id(),
                    "_type": "page",
                    "url": url,
                    "title": title or "Screenshot",
                    "screenshot_b64": screenshot_b64,
                    "content": content,  # Text content for LLM
                })
                return {
                    "summary": f"已截图: {url[:50]}{'...' if len(url) > 50 else ''}",
                    "results": [{"_type": "screenshot", "url": url}],
                    "screenshot_count": 1,
                    "source_desc": f"URL截图 ({url[:30]}...)"
                }
            return {
                "summary": f"截图失败: {url[:50]}",
                "results": [],
                "screenshot_count": 0
            }

        # 2. Parse the filter syntax (e.g. "mcmod=2, 1 : query")
        filters, search_query, error = parse_filter_syntax(query, max_count=3)

        if error:
            return {"summary": error, "results": []}

        # 3. If filters are present, announce the search+screenshot plan (best-effort)
        if filters and self.send_func:
            try:
                # Build filter description
                filter_desc_parts = []
                for f_type, f_val, f_count in filters:
                    if f_type == 'index':
                        filter_desc_parts.append(f"第{f_val}个")
                    else:
                        filter_desc_parts.append(f"{f_val}={f_count}")
                filter_desc = ", ".join(filter_desc_parts)
                await self.send_func(f"🔍 正在搜索 \"{search_query}\" 并匹配 [{filter_desc}]...")
            except Exception:
                pass

        logger.info(f"AgentPipeline: Searching for: {search_query}")
        results = await self.search_service.search(search_query)
        visible = [r for r in results if not r.get("_hidden")]

        # Add search results to context (including hidden ones)
        for r in results:
            r["_id"] = context.next_id()
            if "_type" not in r:
                r["_type"] = "search"
            r["query"] = search_query
            context.web_results.append(r)

        # 4. If filters are present, screenshot the matching links
        if filters:
            urls = self._collect_filter_urls(filters, visible)
            if urls:
                logger.info(f"AgentPipeline: Taking screenshots with content of {len(urls)} URLs")
                # Use screenshot_with_content to get both screenshot and text.
                # NOTE(review): `results` is rebound here, shadowing the search
                # results above; gather() has no return_exceptions, so a raising
                # screenshot task propagates to the caller's gather wrapper.
                screenshot_tasks = [self.search_service.screenshot_with_content(u) for u in urls]
                results = await asyncio.gather(*screenshot_tasks)

                # Add screenshots and content to context
                successful_count = 0
                for url, result in zip(urls, results):
                    screenshot_b64 = result.get("screenshot_b64") if isinstance(result, dict) else None
                    content = result.get("content", "") if isinstance(result, dict) else ""
                    title = result.get("title", "") if isinstance(result, dict) else ""

                    if screenshot_b64:
                        successful_count += 1
                        # Find and update the matching result
                        for r in context.web_results:
                            if r.get("url") == url:
                                r["screenshot_b64"] = screenshot_b64
                                r["content"] = content  # Text content for LLM
                                r["title"] = title or r.get("title", "")
                                r["_type"] = "page"
                                break

                return {
                    "summary": f"搜索 \"{search_query}\" 并截图 {successful_count} 个匹配结果",
                    "results": [{"url": u, "_type": "page"} for u in urls],
                    "screenshot_count": successful_count,
                    "source_desc": f"搜索 \"{search_query}\" 的网页截图"
                }

        # 5. Plain search mode (no screenshots)
        return {
            "summary": f"搜索 \"{search_query}\" 找到 {len(visible)} 条结果",
            "results": visible,
            "screenshot_count": 0
        }

    def _collect_filter_urls(self, filters: List, visible: List[Dict]) -> List[str]:
        """Collect URLs based on filter specifications.

        Index filters pick by 1-based position in `visible`; link filters take
        up to `count` results whose URL or title contains the (lowercase) term.
        Duplicates are skipped, preserving first-seen order.
        """
        urls = []

        for filter_type, filter_value, count in filters:
            if filter_type == 'index':
                idx = filter_value - 1  # Convert to 0-based
                if 0 <= idx < len(visible):
                    url = visible[idx].get("url", "")
                    if url and url not in urls:
                        urls.append(url)
            else:
                # Link filter
                found_count = 0
                for res in visible:
                    url = res.get("url", "")
                    title = res.get("title", "")
                    # Match filter against both URL and title
                    # (filter_value is already lowercased by parse_filter_syntax)
                    if (filter_value in url.lower() or filter_value in title.lower()) and url not in urls:
                        urls.append(url)
                        found_count += 1
                        if found_count >= count:
                            break

        return urls

    def _format_web_context(self, context: StageContext) -> str:
        """Format collected web results into a text digest for the forced-summary prompt."""
        if not context.web_results:
            return ""

        lines = ["## 已收集的信息\n"]
        for r in context.web_results:
            idx = r.get("_id", "?")
            title = r.get("title", "Untitled")
            url = r.get("url", "")
            content = r.get("content", "")[:500] if r.get("content") else ""
            has_screenshot = "有截图" if r.get("screenshot_b64") else ""

            lines.append(f"[{idx}] {title}")
            if url:
                lines.append(f" URL: {url}")
            if has_screenshot:
                lines.append(f" {has_screenshot}")
            if content:
                lines.append(f" 摘要: {content[:200]}...")
            lines.append("")

        return "\n".join(lines)

    def _build_stages_ui(self, session: AgentSession, context: StageContext, usage_totals: Dict, total_time: float) -> List[Dict[str, Any]]:
        """Build stages UI for rendering - compatible with App.vue flow section.

        Flow: Instruct (intent) -> Search -> Summary.
        Token/cost figures for Instruct vs Summary are ESTIMATES — a fixed
        40/60 split of the run's totals, since per-call usage isn't tracked.
        """
        stages = []

        # Get model config for pricing (prices are per 1M tokens, see cost math below)
        model_cfg = self.config.get_model_config("main")
        model_name = model_cfg.model_name or self.config.model_name
        input_price = getattr(model_cfg, "input_price", 0) or 0
        output_price = getattr(model_cfg, "output_price", 0) or 0

        # 1. Instruct stage (intent understanding — the first LLM call)
        if session.first_llm_time > 0:
            # Estimate tokens for first call (rough split based on proportion)
            # Since we track total usage, we approximate first call as ~40% of total
            first_call_ratio = 0.4 if session.call_count > 0 else 1.0
            instruct_input = int(usage_totals.get("input_tokens", 0) * first_call_ratio)
            instruct_output = int(usage_totals.get("output_tokens", 0) * first_call_ratio)
            instruct_cost = (instruct_input * input_price + instruct_output * output_price) / 1_000_000

            stages.append({
                "name": "Instruct",
                "model": model_name,
                "provider": model_cfg.model_provider or "OpenRouter",
                "description": "理解用户意图",
                "time": session.first_llm_time,
                "usage": {"input_tokens": instruct_input, "output_tokens": instruct_output},
                "cost": instruct_cost,
            })

        # 2. Search stage (web tool calls)
        if session.tool_calls:
            # Collect all search descriptions
            search_descriptions = []
            for tc, result in zip(session.tool_calls, session.tool_results):
                desc = result.get("summary", "")
                if desc:
                    search_descriptions.append(desc)

            stages.append({
                "name": "Search",
                "model": "",
                "provider": "Web",
                "description": " → ".join(search_descriptions) if search_descriptions else "Web Search",
                "time": session.search_time,
            })

        # 3. Summary stage (final answer generation)
        # Calculate remaining tokens after instruct (the other 60% of the estimate)
        summary_ratio = 0.6 if session.call_count > 0 else 0.0
        summary_input = int(usage_totals.get("input_tokens", 0) * summary_ratio)
        summary_output = int(usage_totals.get("output_tokens", 0) * summary_ratio)
        summary_cost = (summary_input * input_price + summary_output * output_price) / 1_000_000
        summary_time = session.llm_time - session.first_llm_time

        if summary_time > 0 or session.call_count > 0:
            stages.append({
                "name": "Summary",
                "model": model_name,
                "provider": model_cfg.model_provider or "OpenRouter",
                "description": f"生成回答 ({session.call_count} 次工具调用)",
                "time": max(0, summary_time),
                "usage": {"input_tokens": summary_input, "output_tokens": summary_output},
                "cost": summary_cost,
            })

        return stages