entari-plugin-hyw 4.0.0rc5__py3-none-any.whl → 4.0.0rc7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of entari-plugin-hyw might be problematic.
- entari_plugin_hyw/__init__.py +71 -9
- entari_plugin_hyw/assets/card-dist/index.html +26 -26
- entari_plugin_hyw/browser/engines/default.py +166 -0
- entari_plugin_hyw/browser/engines/{searxng.py → duckduckgo.py} +4 -4
- entari_plugin_hyw/browser/engines/google.py +155 -0
- entari_plugin_hyw/browser/manager.py +1 -1
- entari_plugin_hyw/browser/service.py +323 -53
- entari_plugin_hyw/card-ui/src/App.vue +32 -1
- entari_plugin_hyw/definitions.py +55 -11
- entari_plugin_hyw/history.py +34 -44
- entari_plugin_hyw/misc.py +34 -0
- entari_plugin_hyw/modular_pipeline.py +177 -50
- entari_plugin_hyw/search.py +67 -25
- entari_plugin_hyw/stage_base.py +7 -0
- entari_plugin_hyw/stage_instruct.py +34 -7
- entari_plugin_hyw/stage_instruct_deepsearch.py +104 -0
- entari_plugin_hyw/stage_summary.py +6 -0
- entari_plugin_hyw/stage_vision.py +113 -0
- {entari_plugin_hyw-4.0.0rc5.dist-info → entari_plugin_hyw-4.0.0rc7.dist-info}/METADATA +1 -1
- {entari_plugin_hyw-4.0.0rc5.dist-info → entari_plugin_hyw-4.0.0rc7.dist-info}/RECORD +22 -19
- entari_plugin_hyw/stage_instruct_review.py +0 -92
- {entari_plugin_hyw-4.0.0rc5.dist-info → entari_plugin_hyw-4.0.0rc7.dist-info}/WHEEL +0 -0
- {entari_plugin_hyw-4.0.0rc5.dist-info → entari_plugin_hyw-4.0.0rc7.dist-info}/top_level.txt +0 -0
entari_plugin_hyw/stage_instruct.py

@@ -8,7 +8,7 @@ Analyze user query and execute initial searches.
 import json
 import time
 import asyncio
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any, Dict, List, Optional, Tuple, Callable, Awaitable
 from loguru import logger
 from openai import AsyncOpenAI
 
@@ -17,6 +17,7 @@ from .definitions import (
     get_refuse_answer_tool,
     get_web_search_tool,
     get_crawl_page_tool,
+    get_set_mode_tool,
     INSTRUCT_SP
 )
 
@@ -25,13 +26,15 @@ class InstructStage(BaseStage):
     def name(self) -> str:
         return "Instruct"
 
-    def __init__(self, config: Any, search_service: Any, client: AsyncOpenAI):
+    def __init__(self, config: Any, search_service: Any, client: AsyncOpenAI, send_func: Optional[Callable[[str], Awaitable[None]]] = None):
         super().__init__(config, search_service, client)
+        self.send_func = send_func
 
         self.refuse_answer_tool = get_refuse_answer_tool()
         self.web_search_tool = get_web_search_tool()
         self.crawl_page_tool = get_crawl_page_tool()
-
+        self.set_mode_tool = get_set_mode_tool()
+
     async def execute(self, context: StageContext) -> StageResult:
         start_time = time.time()
 
@@ -48,7 +51,7 @@
         # Execute Round 1 LLM
         r1_response, r1_usage, r1_tool_calls, r1_content = await self._call_llm(
             messages=r1_messages,
-            tools=[self.refuse_answer_tool, self.web_search_tool, self.crawl_page_tool],
+            tools=[self.refuse_answer_tool, self.web_search_tool, self.crawl_page_tool, self.set_mode_tool],
             tool_choice="auto"
         )
 
@@ -111,6 +114,7 @@
         model = model_cfg.get("model_name") or self.config.model_name
 
         try:
+            logger.info(f"Instruct: Sending LLM request to {model}...")
             response = await client.chat.completions.create(
                 model=model,
                 messages=messages,
@@ -167,7 +171,7 @@
                     "id": tc_id, "name": name, "content": f"Refused: {reason}"
                 })
 
-            elif name == "
+            elif name == "web_search":
                 query = args.get("query")
                 if query:
                     logger.info(f"Instruct: Planned search query -> '{query}'")
@@ -178,6 +182,26 @@
                 if url:
                     logger.info(f"Instruct: Planned page crawl -> {url}")
                     pending_crawls.append((url, tc_id))
+
+            elif name == "set_mode":
+                mode = args.get("mode", "fast")
+                if mode in ("fast", "deepsearch"):
+                    context.selected_mode = mode
+                    logger.info(f"Instruct: Mode set to '{mode}'")
+
+                    # Notify immediately if deepsearch
+                    if mode == "deepsearch" and self.send_func:
+                        try:
+                            await self.send_func("🔍 正在进行深度研究,可能需要一些时间,请耐心等待...")
+                        except Exception as e:
+                            logger.warning(f"Instruct: Failed to send notification: {e}")
+
+                    results_for_context.append({
+                        "id": tc_id, "name": name, "content": f"Mode set to: {mode}"
+                    })
+                else:
+                    logger.warning(f"Instruct: Invalid mode '{mode}', defaulting to 'fast'")
+                    context.selected_mode = "fast"
 
         # Execute Batches
 
@@ -189,7 +213,8 @@
         # Start fetch
         fetch_task = asyncio.create_task(self.search_service.fetch_pages_batch(urls))
 
-
+        # Use image capability from context to determine content mode
+        is_image_mode = getattr(context, "image_input_supported", True)
         tab_ids = []
         if is_image_mode:
             from .render_vue import get_content_renderer
@@ -285,6 +310,8 @@
        visible_results = [r for r in web_results if not r.get("_hidden")]
 
        # Update global context
+       total_images = sum(len(item.get("images", []) or []) for item in web_results)
+       logger.debug(f"Instruct: Search '{query}' returned {len(web_results)} items with {total_images} images total")
        for item in web_results:
            item["_id"] = context.next_id()
            if "type" in item:
@@ -301,7 +328,7 @@
 
        results_for_context.append({
            "id": tc_id,
-           "name": "
+           "name": "web_search",
            "content": summary
        })
 
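The set_mode branch above depends on get_set_mode_tool(), imported from definitions.py; that file's diff (+55 -11) is not expanded on this page. Below is a minimal sketch of what such a tool definition could look like, assuming the standard OpenAI function-calling schema — only the tool name "set_mode" and the "fast"/"deepsearch" values are taken from the code above, the rest is illustrative.

# Hypothetical sketch of get_set_mode_tool() (the real definition lives in definitions.py,
# whose diff is not expanded here). Field wording and structure are assumptions.
def get_set_mode_tool() -> dict:
    return {
        "type": "function",
        "function": {
            "name": "set_mode",
            "description": "Choose how much research to do before answering.",
            "parameters": {
                "type": "object",
                "properties": {
                    "mode": {
                        "type": "string",
                        "enum": ["fast", "deepsearch"],
                        "description": "fast: answer from the initial searches; deepsearch: run supplementary research iterations.",
                    }
                },
                "required": ["mode"],
            },
        },
    }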
entari_plugin_hyw/stage_instruct_deepsearch.py (new file)

@@ -0,0 +1,104 @@
+"""
+Instruct Deepsearch Stage
+
+Handles the deepsearch loop: Supplement information until sufficient or max iterations reached.
+Inherits from InstructStage to reuse tool execution logic.
+"""
+
+import time
+from typing import Any, List
+from loguru import logger
+from openai import AsyncOpenAI
+
+from .stage_base import StageContext, StageResult
+from .stage_instruct import InstructStage
+from .definitions import INSTRUCT_DEEPSEARCH_SP
+
+class InstructDeepsearchStage(InstructStage):
+    @property
+    def name(self) -> str:
+        return "Instruct Deepsearch"
+
+    def __init__(self, config: Any, search_service: Any, client: AsyncOpenAI):
+        super().__init__(config, search_service, client)
+        # Inherits tools from InstructStage (web_search, crawl_page)
+
+    async def execute(self, context: StageContext) -> StageResult:
+        start_time = time.time()
+        logger.info("Instruct Deepsearch: Starting supplementary research")
+
+        # Check if we have context to review
+        if not context.review_context:
+            logger.warning("Instruct Deepsearch: No context found. Skipping.")
+            return StageResult(
+                success=True,
+                data={"reasoning": "Skipped due to missing context.", "should_stop": True}
+            )
+
+        # Build System Prompt (Clean)
+        system_prompt = INSTRUCT_DEEPSEARCH_SP
+
+        # Build Messages
+        # Inject context as a separate user message explaining the background
+        context_message = f"## 已收集的信息\n\n```context\n{context.review_context}\n```"
+
+        messages = [
+            {"role": "system", "content": system_prompt},
+            {"role": "user", "content": context_message},
+            {"role": "user", "content": self._build_user_message(context)}
+        ]
+
+        # Call LLM
+        # We use only web_search and crawl_page tools (no set_mode, no refuse_answer in this stage)
+        tools = [self.web_search_tool, self.crawl_page_tool]
+
+        response, usage, tool_calls, content = await self._call_llm(
+            messages=messages,
+            tools=tools,
+            tool_choice="auto"
+        )
+
+        # Check for empty response = signal to stop
+        should_stop = False
+        if not tool_calls or len(tool_calls) == 0:
+            logger.info("Instruct Deepsearch: No tool calls, signaling to stop loop.")
+            should_stop = True
+        else:
+            # Execute Tools
+            tool_outputs = await self._process_tool_calls(context, tool_calls)
+
+            # Update context for next iteration
+            iteration_summary = f"\n## Deepsearch Iteration\n"
+            if content:
+                iteration_summary += f"Thought: {content}\n"
+            for output in tool_outputs:
+                iteration_summary += f"- {output['name']}: {output['content'][:200]}...\n"
+            context.review_context += iteration_summary
+
+            # Update history
+            context.instruct_history.append({
+                "role": "assistant",
+                "content": f"[Deepsearch]: {content}\n[Actions]: {len(tool_outputs)} tools"
+            })
+
+        return self._build_result(start_time, usage, content, len(tool_calls or []), should_stop)
+
+    def _build_result(self, start_time, usage, content, tool_calls_count, should_stop=False):
+        model_cfg = self.config.get_model_config("instruct")
+        model = model_cfg.get("model_name") or self.config.model_name
+
+        trace = {
+            "stage": "Instruct Deepsearch",
+            "model": model,
+            "usage": usage,
+            "output": content,
+            "tool_calls": tool_calls_count,
+            "time": time.time() - start_time,
+        }
+
+        return StageResult(
+            success=True,
+            data={"reasoning": content, "should_stop": should_stop},
+            usage=usage,
+            trace=trace
+        )
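The docstring above describes a loop that keeps supplementing information until it is sufficient or a maximum number of iterations is reached, but the loop driver itself lives in modular_pipeline.py (+177 -50), which is not expanded on this page. Below is a rough sketch of how such a driver might use the should_stop flag; run_deepsearch_loop and the default budget of 3 iterations are assumptions, not the plugin's actual code.

from entari_plugin_hyw.stage_base import StageContext
from entari_plugin_hyw.stage_instruct_deepsearch import InstructDeepsearchStage

# Hypothetical loop driver; the real one is in modular_pipeline.py (not shown here).
async def run_deepsearch_loop(stage: InstructDeepsearchStage, context: StageContext, max_iterations: int = 3) -> None:
    """Run the deepsearch stage until it signals should_stop or the iteration budget is exhausted."""
    for _ in range(max_iterations):
        result = await stage.execute(context)
        # execute() sets should_stop when the model made no further tool calls.
        if not result.success or result.data.get("should_stop"):
            break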
entari_plugin_hyw/stage_summary.py

@@ -47,6 +47,11 @@ class SummaryStage(BaseStage):
         # Build Context Message
         context_message = f"## Web Search & Page Content\n\n```context\n{full_context}\n```"
 
+        # Add vision description if present (from VisionStage)
+        if context.vision_description:
+            vision_context = f"## 用户图片描述\n\n{context.vision_description}"
+            context_message = f"{vision_context}\n\n{context_message}"
+
         # Build user content
         user_text = context.user_input or "..."
         if images:
@@ -104,6 +109,7 @@
             "provider": model_cfg.get("model_provider") or "Unknown",
             "usage": usage,
             "system_prompt": system_prompt,
+            "context_message": context_message,  # Includes vision description + search results
             "output": content,
             "time": time.time() - start_time,
             "images_count": len(images) if images else 0,
entari_plugin_hyw/stage_vision.py (new file)

@@ -0,0 +1,113 @@
+"""
+Vision Stage
+
+Generates image description using a vision-capable model.
+The description is then passed as context to subsequent stages.
+"""
+
+import time
+from typing import Any, Dict, List, Optional
+
+from loguru import logger
+from openai import AsyncOpenAI
+
+from .stage_base import BaseStage, StageContext, StageResult
+from .definitions import VISION_DESCRIPTION_SP
+
+
+class VisionStage(BaseStage):
+    """
+    Vision Stage: Generate image description.
+
+    Takes user images and text, calls a vision model to produce
+    a detailed description of the image content.
+    """
+
+    @property
+    def name(self) -> str:
+        return "Vision"
+
+    async def execute(
+        self,
+        context: StageContext,
+        images: List[str] = None
+    ) -> StageResult:
+        """Generate image description."""
+        start_time = time.time()
+
+        if not images:
+            return StageResult(
+                success=True,
+                data={"description": ""},
+                trace={"skipped": True, "reason": "No images provided"}
+            )
+
+        # Get model config for vision stage
+        model_cfg = self.config.get_model_config("vision")
+        model = model_cfg.get("model_name")
+
+        if not model:
+            logger.warning("VisionStage: No vision model configured, skipping")
+            return StageResult(
+                success=True,
+                data={"description": ""},
+                trace={"skipped": True, "reason": "No vision model configured"}
+            )
+
+        client = self._client_for(
+            api_key=model_cfg.get("api_key"),
+            base_url=model_cfg.get("base_url")
+        )
+
+        # Build user content with images
+        user_text = context.user_input or "请描述这张图片"
+        user_content: List[Dict[str, Any]] = [{"type": "text", "text": user_text}]
+
+        for img_b64 in images:
+            url = f"data:image/jpeg;base64,{img_b64}" if not img_b64.startswith("data:") else img_b64
+            user_content.append({"type": "image_url", "image_url": {"url": url}})
+
+        messages = [
+            {"role": "system", "content": VISION_DESCRIPTION_SP},
+            {"role": "user", "content": user_content}
+        ]
+
+        try:
+            logger.info(f"VisionStage: Calling model '{model}' with {len(images)} image(s)")
+            response = await client.chat.completions.create(
+                model=model,
+                messages=messages,
+                temperature=0.3,  # Lower temperature for factual description
+                extra_body=model_cfg.get("extra_body"),
+            )
+        except Exception as e:
+            logger.error(f"VisionStage LLM error: {e}")
+            return StageResult(
+                success=False,
+                error=str(e),
+                data={"description": ""},
+                trace={"error": str(e)}
+            )
+
+        usage = {"input_tokens": 0, "output_tokens": 0}
+        if hasattr(response, "usage") and response.usage:
+            usage["input_tokens"] = getattr(response.usage, "prompt_tokens", 0) or 0
+            usage["output_tokens"] = getattr(response.usage, "completion_tokens", 0) or 0
+
+        description = (response.choices[0].message.content or "").strip()
+
+        logger.info(f"VisionStage: Generated description ({len(description)} chars)")
+
+        return StageResult(
+            success=True,
+            data={"description": description},
+            usage=usage,
+            trace={
+                "model": model,
+                "provider": model_cfg.get("model_provider") or "Unknown",
+                "usage": usage,
+                "output": description,
+                "time": time.time() - start_time,
+                "images_count": len(images),
+            }
+        )
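VisionStage returns its result in data["description"], while the stage_summary.py hunk above reads context.vision_description; the glue between the two sits in modular_pipeline.py, which is not expanded on this page. Below is a minimal sketch of that hand-off, with describe_user_images being a hypothetical helper name rather than anything in the package.

from entari_plugin_hyw.stage_base import StageContext
from entari_plugin_hyw.stage_vision import VisionStage

# Hypothetical hand-off; the actual wiring is in modular_pipeline.py (not shown in this diff).
async def describe_user_images(vision_stage: VisionStage, context: StageContext, images: list[str]) -> None:
    """Store the vision model's description where SummaryStage expects to find it."""
    result = await vision_stage.execute(context, images=images)
    # SummaryStage prepends context.vision_description to its context message when it is non-empty.
    context.vision_description = result.data.get("description", "") if result.success else ""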
{entari_plugin_hyw-4.0.0rc5.dist-info → entari_plugin_hyw-4.0.0rc7.dist-info}/RECORD

@@ -1,16 +1,17 @@
-entari_plugin_hyw/__init__.py,sha256=
-entari_plugin_hyw/definitions.py,sha256=
-entari_plugin_hyw/history.py,sha256=
+entari_plugin_hyw/__init__.py,sha256=mh0aArXgQxFMNa3XUN2bfYv2IORn1CODNRRwKKz8oi0,22950
+entari_plugin_hyw/definitions.py,sha256=WOTweqy-qZm6Y3jJblETGqYGVDWJs5ztIuDepOuCdMM,7288
+entari_plugin_hyw/history.py,sha256=d3lsQevhtq0PbPR_6n53q0-kqmPwPGvQp8yo3dmyAbw,11343
 entari_plugin_hyw/image_cache.py,sha256=t8pr1kgH2ngK9IhrBAhzUqhBWERNztUywMzgCFZEtQk,9899
-entari_plugin_hyw/misc.py,sha256=
-entari_plugin_hyw/modular_pipeline.py,sha256=
+entari_plugin_hyw/misc.py,sha256=ZGDXeXbSugG4tRrBfUDVd94i2SCaPOmaGtVmPz8mqtY,5413
+entari_plugin_hyw/modular_pipeline.py,sha256=M8wmpfDuGUpPEf-Fvp9YcefzEaK5MiT8czXacs9QJEg,21747
 entari_plugin_hyw/render_vue.py,sha256=4i5xTZCb9amPgSWo6f7Ev279ZOk-D8Kfmxe2HdcA_vI,14737
-entari_plugin_hyw/search.py,sha256=
-entari_plugin_hyw/stage_base.py,sha256
-entari_plugin_hyw/stage_instruct.py,sha256=
-entari_plugin_hyw/
-entari_plugin_hyw/stage_summary.py,sha256=
-entari_plugin_hyw/
+entari_plugin_hyw/search.py,sha256=rcEUpE7AVUerbQfMQA2X00KTlf2NN_nz3kBLky2eP2k,7162
+entari_plugin_hyw/stage_base.py,sha256=YvHU2bYRRKjjaJXbKUyHCRKfeZbULqIUyCwfxxHuHUM,2834
+entari_plugin_hyw/stage_instruct.py,sha256=qHJpG6vnQQaNItX4ihGEQtEwKpobciBLjAzSFynngfE,15243
+entari_plugin_hyw/stage_instruct_deepsearch.py,sha256=_I_xZ-M1xovM4I7NL5eyc21Wg9xdFKnJDQVQActTjUI,3969
+entari_plugin_hyw/stage_summary.py,sha256=o5SjgWuWEG__oKLot5sDl6gB8E_6cdOiYQ6FYbp9ncs,6104
+entari_plugin_hyw/stage_vision.py,sha256=fbudjmzcS3lfOjRKqNwzz1AGDrxHJBiaWVXlQfy1_S4,3864
+entari_plugin_hyw/assets/card-dist/index.html,sha256=jiRhPVQQDv3dgEQ-pqPUm0R2wA6wlInI8pLJyTQN1wM,2205760
 entari_plugin_hyw/assets/card-dist/vite.svg,sha256=SnSK_UQ5GLsWWRyDTEAdrjPoeGGrXbrQgRw6O0qSFPs,1497
 entari_plugin_hyw/assets/card-dist/logos/anthropic.svg,sha256=ASsy1ypo3osNc3n-B0R81tk_dIFsVgg7qQORrd5T2kA,558
 entari_plugin_hyw/assets/card-dist/logos/cerebras.svg,sha256=bpmiiYTODwc06knTmPj3GQ7NNtosMog5lkggvB_Z-7M,44166
@@ -50,11 +51,13 @@ entari_plugin_hyw/assets/icon/xiaomi.png,sha256=WHxlDFGU5FCjb-ure3ngdGG18-efYZUU
 entari_plugin_hyw/assets/icon/zai.png,sha256=K-gnabdsjMLInppHA1Op7Nyt33iegrx1x-yNlvCZ0Tc,2351
 entari_plugin_hyw/browser/__init__.py,sha256=Cht-i5MowfAdmfW3kiY4sV7oXKDb-DmhZ-_eKwDl6r0,321
 entari_plugin_hyw/browser/landing.html,sha256=wgqldumdylz69T83pvOkrigT1Mdb9GY0_KU0ceLGwdY,4642
-entari_plugin_hyw/browser/manager.py,sha256=
-entari_plugin_hyw/browser/service.py,sha256=
+entari_plugin_hyw/browser/manager.py,sha256=aT1DQTDC5WMYUEt5678Xbb5XkFEFYwgFb3XrGDbB0Rc,5233
+entari_plugin_hyw/browser/service.py,sha256=cqvS-O7taTvOdvbnwI-6GlHmJpgfGglh6JqfQgYQvek,23040
 entari_plugin_hyw/browser/engines/base.py,sha256=q5y4SM1G6xS7-6TQ-nZz9iTWw3XonjJn01fWzoTxr6c,414
 entari_plugin_hyw/browser/engines/bing.py,sha256=rIWcvjzvm700xji_OBl6COUAtwXg87DcXQ97DlTzleA,3838
-entari_plugin_hyw/browser/engines/
+entari_plugin_hyw/browser/engines/default.py,sha256=BlHCQI4-rN9cEzLLfqvRD4bvhyP2G2KUGlo92J4kFNw,6092
+entari_plugin_hyw/browser/engines/duckduckgo.py,sha256=jlCIXR1mpJ0LjSbuHMYNGMYjHiQ9Lw_x-A8242Zajgo,5154
+entari_plugin_hyw/browser/engines/google.py,sha256=vCtyOxr63F40hDMW70sS1CyoMsqc0HzyESYrK_qcZLg,6091
 entari_plugin_hyw/card-ui/.gitignore,sha256=_nGOe6uxTzy60tl_CIibnOUhXtP-DkOyuM-_s7m4ROg,253
 entari_plugin_hyw/card-ui/README.md,sha256=fN9IawCcxEcJ8LM-RfKiAH835fRqyY_iqrRsgSkxiSk,442
 entari_plugin_hyw/card-ui/index.html,sha256=Hd7vk8v8PtATbfiEWLYoKDpUT0dlyozW_K5gR_cObfo,328
@@ -83,7 +86,7 @@ entari_plugin_hyw/card-ui/public/logos/qwen.png,sha256=eqLbnIPbjh2_PsODU_mmqjeD8
 entari_plugin_hyw/card-ui/public/logos/xai.png,sha256=uSulvvDVqoA4RUOW0ZAkdvBVM2rpyGJRZIbn5dEFspw,362
 entari_plugin_hyw/card-ui/public/logos/xiaomi.png,sha256=WHxlDFGU5FCjb-ure3ngdGG18-efYZUUfqA3_lqCUN0,4084
 entari_plugin_hyw/card-ui/public/logos/zai.png,sha256=K-gnabdsjMLInppHA1Op7Nyt33iegrx1x-yNlvCZ0Tc,2351
-entari_plugin_hyw/card-ui/src/App.vue,sha256=
+entari_plugin_hyw/card-ui/src/App.vue,sha256=yuZtrI3PeY8F7WvsiF7PUFkWKTKa5wRCnyC70cuXgik,30693
 entari_plugin_hyw/card-ui/src/main.ts,sha256=rm653lPnK5fuTIj-iNLpgr8GAmayuCoKop7IWfo0IBk,111
 entari_plugin_hyw/card-ui/src/style.css,sha256=Qm0sv9em6k4VI_j5PI0_E_ng6u20eFpf2lN44H19zzc,671
 entari_plugin_hyw/card-ui/src/test_regex.js,sha256=cWmclm6LRKYfjeN1RT5HECdltmo1HvS2BwGCYY_4l14,3040
@@ -93,7 +96,7 @@ entari_plugin_hyw/card-ui/src/components/HelloWorld.vue,sha256=yvBIzJua9BfikUOR1
 entari_plugin_hyw/card-ui/src/components/MarkdownContent.vue,sha256=SV95Vuj99tQN2yrU9GqiyhiemWAW8omhYnS8AsH1YIU,13325
 entari_plugin_hyw/card-ui/src/components/SectionCard.vue,sha256=owcDNx2JYVmF2J5SYCroR2gvg_cPApQsNunjK1WJpVI,1433
 entari_plugin_hyw/card-ui/src/components/StageCard.vue,sha256=MgpOaBlPR--LJoRenN37i72BV8qVgzDdurpoKCvzKyk,11133
-entari_plugin_hyw-4.0.
-entari_plugin_hyw-4.0.
-entari_plugin_hyw-4.0.
-entari_plugin_hyw-4.0.
+entari_plugin_hyw-4.0.0rc7.dist-info/METADATA,sha256=4wYIdgmoSZRZ4VFfd14jVS5_3kHjvHik_4zPUnJPZrs,3766
+entari_plugin_hyw-4.0.0rc7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+entari_plugin_hyw-4.0.0rc7.dist-info/top_level.txt,sha256=TIDsn6XPs6KA5e3ezsE65JoXsy03ejDdrB41I4SPjmo,18
+entari_plugin_hyw-4.0.0rc7.dist-info/RECORD,,
entari_plugin_hyw/stage_instruct_review.py (deleted)

@@ -1,92 +0,0 @@
-"""
-Instruct Review Stage
-
-Handles the second round of instruction: Review and Refine.
-Inherits from InstructStage to reuse tool execution logic.
-"""
-
-import time
-from typing import Any, List
-from loguru import logger
-from openai import AsyncOpenAI
-
-from .stage_base import StageContext, StageResult
-from .stage_instruct import InstructStage
-from .definitions import INSTRUCT_REVIEW_SP
-
-class InstructReviewStage(InstructStage):
-    @property
-    def name(self) -> str:
-        return "Instruct Review"
-
-    def __init__(self, config: Any, search_service: Any, client: AsyncOpenAI):
-        super().__init__(config, search_service, client)
-        # Inherits tools from InstructStage
-
-    async def execute(self, context: StageContext) -> StageResult:
-        start_time = time.time()
-        logger.info("Instruct Review: Starting Round 2 (Review & Refine)")
-
-        # Check if we have context to review
-        if not context.review_context:
-            logger.warning("Instruct Review: No context found from Round 1. Skipping.")
-            return StageResult(success=True, data={"reasoning": "Skipped due to missing context."})
-
-        # Build System Prompt (Clean)
-        system_prompt = INSTRUCT_REVIEW_SP
-
-        # Build Messages
-        # Inject context as a separate user message explaining the background
-        context_message = f"## Previous Round Context\n\n```context\n{context.review_context}\n```"
-
-        messages = [
-            {"role": "system", "content": system_prompt},
-            {"role": "user", "content": context_message},
-            {"role": "user", "content": self._build_user_message(context)}
-        ]
-
-        # Call LLM
-        # We reuse _call_llm from parent
-        # We reuse tools from parent (refuse_answer might be redundant but harmless, or we can filter)
-        tools = [self.web_search_tool, self.crawl_page_tool]  # Review prompt doesn't mention refuse_answer explicitly, but usually fine.
-
-        response, usage, tool_calls, content = await self._call_llm(
-            messages=messages,
-            tools=tools,
-            tool_choice="auto"
-        )
-
-        # Execute Tools
-        tool_outputs = []
-        if tool_calls:
-            tool_outputs = await self._process_tool_calls(context, tool_calls)
-
-        # Update history logic?
-        # The prompt says "上下文". It is "independent".
-        # But for the record, we might want to log it.
-        context.instruct_history.append({
-            "role": "assistant",
-            "content": f"[Round 2 Review]: {content}\n[Round 2 Actions]: {len(tool_outputs)} tools"
-        })
-
-        return self._build_result(start_time, usage, content, len(tool_calls or []))
-
-    def _build_result(self, start_time, usage, content, tool_calls_count):
-        model_cfg = self.config.get_model_config("instruct")
-        model = model_cfg.get("model_name") or self.config.model_name
-
-        trace = {
-            "stage": "Instruct Review",
-            "model": model,
-            "usage": usage,
-            "output": content,
-            "tool_calls": tool_calls_count,
-            "time": time.time() - start_time,
-        }
-
-        return StageResult(
-            success=True,
-            data={"reasoning": content},
-            usage=usage,
-            trace=trace
-        )
{entari_plugin_hyw-4.0.0rc5.dist-info → entari_plugin_hyw-4.0.0rc7.dist-info}/WHEEL: File without changes
{entari_plugin_hyw-4.0.0rc5.dist-info → entari_plugin_hyw-4.0.0rc7.dist-info}/top_level.txt: File without changes