entari-plugin-hyw 4.0.0rc4__py3-none-any.whl → 4.0.0rc6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of entari-plugin-hyw might be problematic.
- entari_plugin_hyw/__init__.py +216 -75
- entari_plugin_hyw/assets/card-dist/index.html +70 -79
- entari_plugin_hyw/browser/__init__.py +10 -0
- entari_plugin_hyw/browser/engines/base.py +13 -0
- entari_plugin_hyw/browser/engines/bing.py +95 -0
- entari_plugin_hyw/browser/engines/duckduckgo.py +137 -0
- entari_plugin_hyw/browser/engines/google.py +155 -0
- entari_plugin_hyw/browser/landing.html +172 -0
- entari_plugin_hyw/browser/manager.py +153 -0
- entari_plugin_hyw/browser/service.py +304 -0
- entari_plugin_hyw/card-ui/src/App.vue +526 -182
- entari_plugin_hyw/card-ui/src/components/MarkdownContent.vue +7 -11
- entari_plugin_hyw/card-ui/src/components/StageCard.vue +33 -30
- entari_plugin_hyw/card-ui/src/types.ts +9 -0
- entari_plugin_hyw/definitions.py +155 -0
- entari_plugin_hyw/history.py +111 -33
- entari_plugin_hyw/misc.py +34 -0
- entari_plugin_hyw/modular_pipeline.py +384 -0
- entari_plugin_hyw/render_vue.py +326 -239
- entari_plugin_hyw/search.py +95 -708
- entari_plugin_hyw/stage_base.py +92 -0
- entari_plugin_hyw/stage_instruct.py +345 -0
- entari_plugin_hyw/stage_instruct_deepsearch.py +104 -0
- entari_plugin_hyw/stage_summary.py +164 -0
- {entari_plugin_hyw-4.0.0rc4.dist-info → entari_plugin_hyw-4.0.0rc6.dist-info}/METADATA +4 -4
- {entari_plugin_hyw-4.0.0rc4.dist-info → entari_plugin_hyw-4.0.0rc6.dist-info}/RECORD +28 -16
- entari_plugin_hyw/pipeline.py +0 -1219
- entari_plugin_hyw/prompts.py +0 -47
- {entari_plugin_hyw-4.0.0rc4.dist-info → entari_plugin_hyw-4.0.0rc6.dist-info}/WHEEL +0 -0
- {entari_plugin_hyw-4.0.0rc4.dist-info → entari_plugin_hyw-4.0.0rc6.dist-info}/top_level.txt +0 -0
entari_plugin_hyw/stage_base.py
@@ -0,0 +1,92 @@
+"""
+Stage Base Classes
+
+Abstract base classes for pipeline stages.
+Each stage is a self-contained unit of work.
+"""
+
+from abc import ABC, abstractmethod
+from dataclasses import dataclass, field
+from typing import Any, Dict, List, Optional
+
+from openai import AsyncOpenAI
+
+
+@dataclass
+class StageContext:
+    """Shared context passed between stages."""
+    user_input: str
+    images: List[str] = field(default_factory=list)
+    conversation_history: List[Dict] = field(default_factory=list)
+    instruct_history: List[Dict] = field(default_factory=list)  # History for Instruct stage rounds
+
+    # Accumulated data
+    web_results: List[Dict] = field(default_factory=list)
+    agent_context: str = ""
+    review_context: str = ""  # Context passed from Instruct to Review stage
+
+    # Mode info (set by Instruct stage)
+    task_list: List[str] = field(default_factory=list)
+
+    # Control flags
+    should_refuse: bool = False
+    refuse_reason: str = ""
+    selected_mode: str = "fast"  # "fast" or "deepsearch"
+
+    # ID counter for unified referencing
+    global_id_counter: int = 0
+
+    # Model capabilities
+    image_input_supported: bool = True
+
+    def next_id(self) -> int:
+        """Get next global ID."""
+        self.global_id_counter += 1
+        return self.global_id_counter
+
+
+@dataclass
+class StageResult:
+    """Result from a stage execution."""
+    success: bool
+    data: Dict[str, Any] = field(default_factory=dict)
+    usage: Dict[str, int] = field(default_factory=lambda: {"input_tokens": 0, "output_tokens": 0})
+    trace: Dict[str, Any] = field(default_factory=dict)
+    error: Optional[str] = None
+
+
+class BaseStage(ABC):
+    """Abstract base class for pipeline stages."""
+
+    def __init__(self, config: Any, search_service: Any, client: AsyncOpenAI):
+        self.config = config
+        self.search_service = search_service
+        self.client = client
+
+    @property
+    @abstractmethod
+    def name(self) -> str:
+        """Stage name for logging and tracing."""
+        pass
+
+    @abstractmethod
+    async def execute(self, context: StageContext) -> StageResult:
+        """
+        Execute the stage.
+
+        Args:
+            context: Shared context with accumulated data
+
+        Returns:
+            StageResult with success status, data, usage, and trace info
+        """
+        pass
+
+    def _client_for(self, api_key: Optional[str], base_url: Optional[str]) -> AsyncOpenAI:
+        """Get or create client with custom credentials."""
+        if api_key or base_url:
+            return AsyncOpenAI(
+                base_url=base_url or self.config.base_url,
+                api_key=api_key or self.config.api_key
+            )
+        return self.client
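A minimal sketch of how a concrete stage plugs into these base classes (EchoStage, the placeholder credentials, and the None stand-ins are illustrative, not part of the package):

```python
import asyncio

from openai import AsyncOpenAI

from entari_plugin_hyw.stage_base import BaseStage, StageContext, StageResult


class EchoStage(BaseStage):
    """Toy stage: tags the input with a fresh global ID, as real stages do for web results."""

    @property
    def name(self) -> str:
        return "Echo"

    async def execute(self, context: StageContext) -> StageResult:
        item_id = context.next_id()
        return StageResult(success=True, data={"echo": f"[{item_id}] {context.user_input}"})


async def main() -> None:
    # config/search_service are unused by this toy stage, so None placeholders suffice
    client = AsyncOpenAI(api_key="sk-placeholder", base_url="https://example.invalid/v1")
    stage = EchoStage(config=None, search_service=None, client=client)
    result = await stage.execute(StageContext(user_input="hello"))
    print(stage.name, result.success, result.data)


asyncio.run(main())
```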
entari_plugin_hyw/stage_instruct.py
@@ -0,0 +1,345 @@
+"""
+Instruct Stage
+
+Handles initial task planning and search generation.
+Analyzes the user query and executes initial searches.
+"""
+
+import json
+import time
+import asyncio
+from typing import Any, Dict, List, Optional, Tuple
+from loguru import logger
+from openai import AsyncOpenAI
+
+from .stage_base import BaseStage, StageContext, StageResult
+from .definitions import (
+    get_refuse_answer_tool,
+    get_web_search_tool,
+    get_crawl_page_tool,
+    get_set_mode_tool,
+    INSTRUCT_SP
+)
+
+class InstructStage(BaseStage):
+    @property
+    def name(self) -> str:
+        return "Instruct"
+
+    def __init__(self, config: Any, search_service: Any, client: AsyncOpenAI):
+        super().__init__(config, search_service, client)
+
+        self.refuse_answer_tool = get_refuse_answer_tool()
+        self.web_search_tool = get_web_search_tool()
+        self.crawl_page_tool = get_crawl_page_tool()
+        self.set_mode_tool = get_set_mode_tool()
+
+    async def execute(self, context: StageContext) -> StageResult:
+        start_time = time.time()
+
+        # --- Round 1: Initial Discovery ---
+        logger.info("Instruct: Starting Round 1 (Initial Discovery)")
+
+        # Build Round 1 user message
+        r1_user_content = self._build_user_message(context)
+        r1_messages = [
+            {"role": "system", "content": INSTRUCT_SP},
+            {"role": "user", "content": r1_user_content}
+        ]
+
+        # Execute Round 1 LLM call
+        r1_response, r1_usage, r1_tool_calls, r1_content = await self._call_llm(
+            messages=r1_messages,
+            tools=[self.refuse_answer_tool, self.web_search_tool, self.crawl_page_tool, self.set_mode_tool],
+            tool_choice="auto"
+        )
+
+        if context.should_refuse:
+            # If refused in Round 1, stop here
+            return self._build_result(start_time, r1_usage, r1_content, len(r1_tool_calls or []))
+
+        # Execute Round 1 tools
+        r1_tool_outputs = []
+        if r1_tool_calls:
+            r1_tool_outputs = await self._process_tool_calls(context, r1_tool_calls)
+
+        # --- Context assembly for Round 2 ---
+
+        # Summarize Round 1 actions for context
+        r1_summary_text = "## Round 1 Execution Summary\n"
+        if r1_content:
+            r1_summary_text += f"Thought: {r1_content}\n"
+
+        if r1_tool_outputs:
+            r1_summary_text += "Tools Executed & Results:\n"
+            for output in r1_tool_outputs:
+                # content here is the tool output (e.g. search results text or crawl preview)
+                r1_summary_text += f"- Action: {output['name']}\n"
+                r1_summary_text += f" Result: {output['content']}\n"
+        else:
+            r1_summary_text += "No tools were executed in Round 1.\n"
+
+        r2_context_str = f"""User Query: {context.user_input}
+
+{r1_summary_text}
+"""
+        # Save to context for the next stage
+        context.review_context = r2_context_str
+
+        # Update instruct_history for logging/record purposes
+        context.instruct_history.append({
+            "role": "assistant",
+            "content": f"[Round 1 Thought]: {r1_content}\n[Round 1 Actions]: {len(r1_tool_outputs)} tools"
+        })
+
+        return self._build_result(start_time, r1_usage, r1_content, len(r1_tool_calls or []))
+
+    def _build_user_message(self, context: StageContext) -> Any:
+        text_prompt = f"User Query: {context.user_input}"
+        if context.images:
+            user_content: List[Dict[str, Any]] = [{"type": "text", "text": text_prompt}]
+            for img_b64 in context.images:
+                url = f"data:image/jpeg;base64,{img_b64}" if not img_b64.startswith("data:") else img_b64
+                user_content.append({"type": "image_url", "image_url": {"url": url}})
+            return user_content
+        return text_prompt
+
+    async def _call_llm(self, messages, tools, tool_choice="auto"):
+        model_cfg = self.config.get_model_config("instruct")
+        client = self._client_for(
+            api_key=model_cfg.get("api_key"),
+            base_url=model_cfg.get("base_url")
+        )
+        model = model_cfg.get("model_name") or self.config.model_name
+
+        try:
+            response = await client.chat.completions.create(
+                model=model,
+                messages=messages,
+                tools=tools,
+                tool_choice=tool_choice,
+                temperature=self.config.temperature,
+                extra_body=model_cfg.get("extra_body"),
+            )
+        except Exception as e:
+            logger.error(f"InstructStage LLM Error: {e}")
+            raise e
+
+        usage = {"input_tokens": 0, "output_tokens": 0}
+        if hasattr(response, "usage") and response.usage:
+            usage["input_tokens"] = getattr(response.usage, "prompt_tokens", 0) or 0
+            usage["output_tokens"] = getattr(response.usage, "completion_tokens", 0) or 0
+
+        message = response.choices[0].message
+        content = message.content or ""
+        tool_calls = message.tool_calls
+
+        if content:
+            logger.debug(f"Instruct: Agent Thought -> {content[:100]}...")
+
+        return response, usage, tool_calls, content
+
+    async def _process_tool_calls(self, context: StageContext, tool_calls: List[Any]) -> List[Dict[str, Any]]:
+        """
+        Executes tool calls and returns a list of outputs for context building.
+        Updates context.web_results globally.
+        """
+        pending_crawls = []  # List of (url, tool_call_id)
+        pending_searches = []  # List of (query, tool_call_id)
+
+        results_for_context = []
+
+        for tc in tool_calls:
+            name = tc.function.name
+            tc_id = tc.id
+            try:
+                args = json.loads(tc.function.arguments)
+            except json.JSONDecodeError:
+                results_for_context.append({
+                    "id": tc_id, "name": name, "content": "Error: Invalid JSON arguments"
+                })
+                continue
+
+            if name == "refuse_answer":
+                reason = args.get("reason", "Refused")
+                logger.warning(f"Instruct: Model Refused Answer. Reason: {reason}")
+                context.should_refuse = True
+                context.refuse_reason = reason
+                results_for_context.append({
+                    "id": tc_id, "name": name, "content": f"Refused: {reason}"
+                })
+
+            elif name == "web_search":
+                query = args.get("query")
+                if query:
+                    logger.info(f"Instruct: Planned search query -> '{query}'")
+                    pending_searches.append((query, tc_id))
+
+            elif name == "crawl_page":
+                url = args.get("url")
+                if url:
+                    logger.info(f"Instruct: Planned page crawl -> {url}")
+                    pending_crawls.append((url, tc_id))
+
+            elif name == "set_mode":
+                mode = args.get("mode", "fast")
+                if mode in ("fast", "deepsearch"):
+                    context.selected_mode = mode
+                    logger.info(f"Instruct: Mode set to '{mode}'")
+                    results_for_context.append({
+                        "id": tc_id, "name": name, "content": f"Mode set to: {mode}"
+                    })
+                else:
+                    logger.warning(f"Instruct: Invalid mode '{mode}', defaulting to 'fast'")
+                    context.selected_mode = "fast"
+
+        # Execute batches
+
+        # 1. Crawls
+        if pending_crawls:
+            urls = [u for u, _ in pending_crawls]
+            logger.info(f"Instruct: Executing {len(urls)} crawls via batch...")
+
+            # Start fetch
+            fetch_task = asyncio.create_task(self.search_service.fetch_pages_batch(urls))
+
+            # Use image capability from context to determine content mode
+            is_image_mode = getattr(context, "image_input_supported", True)
+            tab_ids = []
+            if is_image_mode:
+                from .render_vue import get_content_renderer
+                renderer = await get_content_renderer()
+                loop = asyncio.get_running_loop()
+                tab_tasks = [
+                    loop.run_in_executor(renderer._executor, renderer._prepare_tab_sync)
+                    for _ in urls
+                ]
+                tab_ids = await asyncio.gather(*tab_tasks, return_exceptions=True)
+                logger.debug(f"Instruct: Prepared {len(tab_ids)} tabs: {tab_ids}")
+
+            crawl_results_list = await fetch_task
+
+            if is_image_mode and tab_ids:
+                theme_color = getattr(self.config, "theme_color", "#ef4444")
+                render_tasks = []
+                valid_pairs = []
+                MAX_CHARS = 3000
+                for i, (page_data, tab_id) in enumerate(zip(crawl_results_list, tab_ids)):
+                    if isinstance(tab_id, Exception):
+                        logger.warning(f"Instruct: Skip rendering page {i} due to tab error: {tab_id}")
+                        continue
+
+                    # Truncate content to avoid excessive size
+                    content = page_data.get("content", "")
+                    if len(content) > MAX_CHARS:
+                        content = content[:MAX_CHARS] + "\n\n...(content truncated for length)..."
+                        page_data["content"] = content
+
+                    if not content:
+                        logger.warning(f"Instruct: Skip rendering page {i} due to empty content")
+                        continue
+
+                    valid_pairs.append((i, page_data))
+                    render_tasks.append(
+                        loop.run_in_executor(
+                            renderer._executor,
+                            renderer._render_page_to_b64_sync,
+                            {"title": page_data.get("title", "Page"), "content": content},
+                            tab_id,
+                            theme_color
+                        )
+                    )
+
+                if render_tasks:
+                    logger.debug(f"Instruct: Parallel rendering {len(render_tasks)} pages...")
+                    screenshots = await asyncio.gather(*render_tasks, return_exceptions=True)
+                    logger.debug(f"Instruct: Parallel rendering finished. Results count: {len(screenshots)}")
+                    for j, (orig_idx, page_data) in enumerate(valid_pairs):
+                        if j < len(screenshots) and not isinstance(screenshots[j], Exception):
+                            crawl_results_list[orig_idx]["screenshot_b64"] = screenshots[j]
+
+            for i, (url, tc_id) in enumerate(pending_crawls):
+                page_data = crawl_results_list[i]
+                title = page_data.get("title", "Unknown")
+
+                # Update global context
+                page_item = {
+                    "_id": context.next_id(),
+                    "_type": "page",
+                    "title": page_data.get("title", "Page"),
+                    "url": page_data.get("url", url),
+                    "content": page_data.get("content", ""),
+                    "is_crawled": True,
+                }
+                if page_data.get("screenshot_b64"):
+                    page_item["screenshot_b64"] = page_data["screenshot_b64"]
+                if page_data.get("raw_screenshot_b64"):
+                    page_item["raw_screenshot_b64"] = page_data["raw_screenshot_b64"]
+                if page_data.get("images"):
+                    page_item["images"] = page_data["images"]
+
+                context.web_results.append(page_item)
+
+                # Output for context assembly
+                content_preview = page_data.get("content", "")[:500]
+                results_for_context.append({
+                    "id": tc_id,
+                    "name": "crawl_page",
+                    "content": f"Crawled '{title}' ({url}):\n{content_preview}..."
+                })
+
+        # 2. Searches
+        if pending_searches:
+            queries = [q for q, _ in pending_searches]
+            logger.info(f"Instruct: Executing {len(queries)} searches via batch...")
+
+            search_results_list = await self.search_service.search_batch(queries)
+
+            for i, (query, tc_id) in enumerate(pending_searches):
+                web_results = search_results_list[i]
+                visible_results = [r for r in web_results if not r.get("_hidden")]
+
+                # Update global context
+                total_images = sum(len(item.get("images", []) or []) for item in web_results)
+                logger.debug(f"Instruct: Search '{query}' returned {len(web_results)} items with {total_images} images total")
+                for item in web_results:
+                    item["_id"] = context.next_id()
+                    if "type" in item:
+                        item["_type"] = item["type"]
+                    elif "_type" not in item:
+                        item["_type"] = "search"
+                    item["query"] = query
+                    context.web_results.append(item)
+
+                # Output for context assembly
+                summary = f"Found {len(visible_results)} results for '{query}':\n"
+                for r in visible_results[:5]:
+                    summary += f"- {r.get('title')} ({r.get('url')}): {(r.get('content') or '')[:100]}...\n"
+
+                results_for_context.append({
+                    "id": tc_id,
+                    "name": "web_search",
+                    "content": summary
+                })
+
+        return results_for_context
+
+    def _build_result(self, start_time, usage, content, tool_calls_count):
+        model_cfg = self.config.get_model_config("instruct")
+        model = model_cfg.get("model_name") or self.config.model_name
+
+        trace = {
+            "stage": "Instruct",
+            "model": model,
+            "usage": usage,
+            "output": content,
+            "tool_calls": tool_calls_count,
+            "time": time.time() - start_time,
+        }
+
+        return StageResult(
+            success=True,
+            data={"reasoning": content},
+            usage=usage,
+            trace=trace
+        )
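The dispatch pattern in _process_tool_calls — queue tool calls by kind first, then run each batch concurrently instead of awaiting tools one at a time — looks roughly like the following sketch in isolation; fake_search and fake_crawl are stand-ins for the package's search service, not its actual API:

```python
import asyncio


async def fake_search(query: str) -> str:
    # Stand-in for one query of search_service.search_batch
    await asyncio.sleep(0.1)
    return f"results for {query!r}"


async def fake_crawl(url: str) -> str:
    # Stand-in for one URL of search_service.fetch_pages_batch
    await asyncio.sleep(0.1)
    return f"content of {url}"


async def process(tool_calls: list[tuple[str, str]]) -> list[str]:
    # Phase 1: sort planned calls into per-kind queues, as the dispatch loop does
    pending_searches = [arg for name, arg in tool_calls if name == "web_search"]
    pending_crawls = [arg for name, arg in tool_calls if name == "crawl_page"]

    # Phase 2: start the crawl batch first so it overlaps with the searches,
    # mirroring how the stage kicks off fetch_pages_batch before other work
    crawl_batch = asyncio.gather(*(fake_crawl(u) for u in pending_crawls))
    search_results = await asyncio.gather(*(fake_search(q) for q in pending_searches))
    crawl_results = await crawl_batch
    return search_results + list(crawl_results)


print(asyncio.run(process([
    ("web_search", "entari plugin hyw"),
    ("web_search", "pipeline stages"),
    ("crawl_page", "https://example.com"),
])))
```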
entari_plugin_hyw/stage_instruct_deepsearch.py
@@ -0,0 +1,104 @@
+"""
+Instruct Deepsearch Stage
+
+Handles the deepsearch loop: supplements information until it is sufficient or the maximum number of iterations is reached.
+Inherits from InstructStage to reuse tool execution logic.
+"""
+
+import time
+from typing import Any, List
+from loguru import logger
+from openai import AsyncOpenAI
+
+from .stage_base import StageContext, StageResult
+from .stage_instruct import InstructStage
+from .definitions import INSTRUCT_DEEPSEARCH_SP
+
+class InstructDeepsearchStage(InstructStage):
+    @property
+    def name(self) -> str:
+        return "Instruct Deepsearch"
+
+    def __init__(self, config: Any, search_service: Any, client: AsyncOpenAI):
+        super().__init__(config, search_service, client)
+        # Inherits tools from InstructStage (web_search, crawl_page)
+
+    async def execute(self, context: StageContext) -> StageResult:
+        start_time = time.time()
+        logger.info("Instruct Deepsearch: Starting supplementary research")
+
+        # Check whether there is any context to review
+        if not context.review_context:
+            logger.warning("Instruct Deepsearch: No context found. Skipping.")
+            return StageResult(
+                success=True,
+                data={"reasoning": "Skipped due to missing context.", "should_stop": True}
+            )
+
+        # Build system prompt (clean)
+        system_prompt = INSTRUCT_DEEPSEARCH_SP
+
+        # Build messages.
+        # Inject context as a separate user message explaining the background.
+        context_message = f"## 已收集的信息\n\n```context\n{context.review_context}\n```"  # heading: "Collected information"
+
+        messages = [
+            {"role": "system", "content": system_prompt},
+            {"role": "user", "content": context_message},
+            {"role": "user", "content": self._build_user_message(context)}
+        ]
+
+        # Call the LLM.
+        # Only the web_search and crawl_page tools are used (no set_mode, no refuse_answer in this stage).
+        tools = [self.web_search_tool, self.crawl_page_tool]
+
+        response, usage, tool_calls, content = await self._call_llm(
+            messages=messages,
+            tools=tools,
+            tool_choice="auto"
+        )
+
+        # An empty response is the signal to stop
+        should_stop = False
+        if not tool_calls or len(tool_calls) == 0:
+            logger.info("Instruct Deepsearch: No tool calls, signaling to stop loop.")
+            should_stop = True
+        else:
+            # Execute tools
+            tool_outputs = await self._process_tool_calls(context, tool_calls)
+
+            # Update context for the next iteration
+            iteration_summary = f"\n## Deepsearch Iteration\n"
+            if content:
+                iteration_summary += f"Thought: {content}\n"
+            for output in tool_outputs:
+                iteration_summary += f"- {output['name']}: {output['content'][:200]}...\n"
+            context.review_context += iteration_summary
+
+            # Update history
+            context.instruct_history.append({
+                "role": "assistant",
+                "content": f"[Deepsearch]: {content}\n[Actions]: {len(tool_outputs)} tools"
+            })
+
+        return self._build_result(start_time, usage, content, len(tool_calls or []), should_stop)
+
+    def _build_result(self, start_time, usage, content, tool_calls_count, should_stop=False):
+        model_cfg = self.config.get_model_config("instruct")
+        model = model_cfg.get("model_name") or self.config.model_name
+
+        trace = {
+            "stage": "Instruct Deepsearch",
+            "model": model,
+            "usage": usage,
+            "output": content,
+            "tool_calls": tool_calls_count,
+            "time": time.time() - start_time,
+        }
+
+        return StageResult(
+            success=True,
+            data={"reasoning": content, "should_stop": should_stop},
+            usage=usage,
+            trace=trace
+        )
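The stage itself only signals should_stop; the loop the module docstring describes has to live in a driver (presumably modular_pipeline.py, which this release also adds). A schematic sketch of such a driver, with a fake stage and an assumed max_iterations cap:

```python
import asyncio
from dataclasses import dataclass, field


@dataclass
class FakeResult:
    data: dict = field(default_factory=dict)


class FakeDeepsearchStage:
    """Stand-in for InstructDeepsearchStage: reports sufficiency after two rounds."""

    def __init__(self) -> None:
        self.rounds = 0

    async def execute(self, context: dict) -> FakeResult:
        self.rounds += 1
        done = self.rounds >= 2  # pretend the collected information suffices after round 2
        return FakeResult(data={"should_stop": done})


async def run_deepsearch(stage, context, max_iterations: int = 3) -> int:
    # Re-run the stage until it reports should_stop or the iteration cap is hit,
    # mirroring the loop described in the module docstring
    for i in range(1, max_iterations + 1):
        result = await stage.execute(context)
        if result.data.get("should_stop"):
            return i
    return max_iterations


print(asyncio.run(run_deepsearch(FakeDeepsearchStage(), context={})))  # -> 2
```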