deepcraft-agent 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- deepcraft/__init__.py +3 -0
- deepcraft/__main__.py +6 -0
- deepcraft/agent/__init__.py +23 -0
- deepcraft/agent/context.py +114 -0
- deepcraft/agent/corrector.py +120 -0
- deepcraft/agent/llm.py +65 -0
- deepcraft/agent/loop.py +299 -0
- deepcraft/agent/mcp/__init__.py +5 -0
- deepcraft/agent/mcp/client.py +384 -0
- deepcraft/agent/modes.py +58 -0
- deepcraft/agent/planner.py +185 -0
- deepcraft/agent/repl.py +213 -0
- deepcraft/agent/skills/__init__.py +3 -0
- deepcraft/agent/skills/builtin/api_design.md +110 -0
- deepcraft/agent/skills/builtin/code_review.md +63 -0
- deepcraft/agent/skills/builtin/git_workflow.md +95 -0
- deepcraft/agent/skills/builtin/python_style.md +71 -0
- deepcraft/agent/skills/builtin/testing.md +136 -0
- deepcraft/agent/skills/loader.py +164 -0
- deepcraft/agent/subagent.py +203 -0
- deepcraft/agent/tools/__init__.py +17 -0
- deepcraft/agent/tools/base.py +84 -0
- deepcraft/agent/tools/file.py +183 -0
- deepcraft/agent/tools/git_tool.py +100 -0
- deepcraft/agent/tools/terminal.py +84 -0
- deepcraft/agent/types.py +123 -0
- deepcraft/cli.py +41 -0
- deepcraft/config.py +57 -0
- deepcraft_agent-0.1.0.dist-info/METADATA +204 -0
- deepcraft_agent-0.1.0.dist-info/RECORD +32 -0
- deepcraft_agent-0.1.0.dist-info/WHEEL +4 -0
- deepcraft_agent-0.1.0.dist-info/entry_points.txt +2 -0
deepcraft/__init__.py
ADDED
deepcraft/__main__.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
# deepcraft.agent — public package surface.
# Re-exports the core agent classes so callers can simply write
# `from deepcraft.agent import Agent`.
from deepcraft.agent.context import ContextManager
from deepcraft.agent.corrector import SelfCorrector
from deepcraft.agent.llm import DeepSeekLLM
from deepcraft.agent.loop import Agent
from deepcraft.agent.mcp.client import MCPManager
from deepcraft.agent.modes import AgentMode, ModeSelector
from deepcraft.agent.planner import Planner
from deepcraft.agent.subagent import Delegator, SubAgent
from deepcraft.agent.tools.base import ToolRegistry

# Explicit public API for `from deepcraft.agent import *`.
__all__ = [
    "Agent",
    "AgentMode",
    "ContextManager",
    "DeepSeekLLM",
    "Delegator",
    "MCPManager",
    "ModeSelector",
    "Planner",
    "SelfCorrector",
    "SubAgent",
    "ToolRegistry",
]
|
|
@@ -0,0 +1,114 @@
|
|
|
1
|
+
"""Context and memory management — conversation history, compression, system prompt."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from deepcraft.agent.types import ChatMessage, Role
|
|
6
|
+
|
|
7
|
+
# Approximate tokens per character (rough estimate for mixed Chinese/English)
|
|
8
|
+
TOKENS_PER_CHAR = 0.4
|
|
9
|
+
MAX_CONTEXT_TOKENS = 100_000 # Leave headroom below 128K
|
|
10
|
+
COMPRESS_THRESHOLD = 0.8 # Compress at 80% capacity
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
SYSTEM_PROMPT = """You are DeepCraft, an AI coding agent powered by DeepSeek.
|
|
14
|
+
|
|
15
|
+
Your capabilities:
|
|
16
|
+
- Read, write, and search files in the project workspace
|
|
17
|
+
- Execute shell commands via terminal
|
|
18
|
+
- Manage git repositories
|
|
19
|
+
- Analyze code, find bugs, and suggest fixes
|
|
20
|
+
|
|
21
|
+
Guidelines:
|
|
22
|
+
- Be concise and direct. No fluff.
|
|
23
|
+
- When you need to see file contents, use read_file.
|
|
24
|
+
- When you need to create/modify files, use write_file.
|
|
25
|
+
- When you need to run commands, use terminal.
|
|
26
|
+
- Think step by step: understand the problem first, then act.
|
|
27
|
+
- If a tool fails, analyze the error and try a different approach."""
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class ContextManager:
    """Holds the running conversation: the system prompt, the ordered
    message list, a rough token estimate, and automatic compression of
    old history once the estimate approaches the context budget.
    """

    def __init__(self, system_prompt: str | None = None) -> None:
        self._system_prompt = system_prompt or SYSTEM_PROMPT
        # The conversation always starts with the system prompt at index 0.
        self._messages: list[ChatMessage] = [
            ChatMessage(role="system", content=self._system_prompt)
        ]

    @property
    def token_count(self) -> int:
        """Rough token estimate over every message (content + tool calls)."""
        def estimate(text: str) -> int:
            # Char-count heuristic; truncated per message, matching the budget math.
            return int(len(text) * TOKENS_PER_CHAR)

        total = 0
        for message in self._messages:
            if message.content:
                total += estimate(message.content)
            if message.tool_calls:
                total += estimate(str(message.tool_calls))
        return total

    def add_message(
        self,
        role: Role | str,
        content: str | None = None,
        tool_call_id: str | None = None,
        name: str | None = None,
    ) -> None:
        """Append one message, then compress if the budget threshold is crossed."""
        role_str = role.value if isinstance(role, Role) else role
        self._messages.append(
            ChatMessage(
                role=role_str,
                content=content,
                tool_call_id=tool_call_id,
                name=name,
            )
        )

        # Trigger compression once we pass 80% of the token budget.
        if self.token_count > int(MAX_CONTEXT_TOKENS * COMPRESS_THRESHOLD):
            self._compress()

    def get_messages(self) -> list[ChatMessage]:
        """Return a shallow copy of the message list for the LLM call."""
        return self._messages.copy()

    def _compress(self) -> None:
        """Fold everything except the system prompt and the 20 newest
        messages into a single short summary message.
        """
        if len(self._messages) <= 22:
            return  # Not enough history to be worth compressing

        system = self._messages[0]
        recent = self._messages[-20:]

        # One summary line per compressed message, truncated aggressively.
        lines: list[str] = []
        for old in self._messages[1:-20]:
            text = old.content or ""
            if old.role == "user":
                lines.append(f"User asked: {text[:200]}")
            elif old.role == "assistant" and text:
                lines.append(f"Assistant responded: {text[:200]}")
            elif old.role == "tool":
                lines.append(f"Tool result ({old.tool_call_id}): {text[:100]}")

        # Cap the digest itself at the last 20 summary lines.
        digest = "[Conversation history compressed]\n" + "\n".join(lines[-20:])

        self._messages = [system, ChatMessage(role="user", content=digest)] + recent

    def reset(self) -> None:
        """Drop the conversation, keeping only a fresh system message."""
        self._messages = [ChatMessage(role="system", content=self._system_prompt)]

    def __len__(self) -> int:
        return len(self._messages)
|
|
@@ -0,0 +1,120 @@
|
|
|
1
|
+
"""Self-correction — analyze errors and retry with adjusted approach."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from dataclasses import dataclass
|
|
6
|
+
from typing import Any
|
|
7
|
+
|
|
8
|
+
from deepcraft.agent.llm import DeepSeekLLM
|
|
9
|
+
from deepcraft.agent.types import ChatMessage
|
|
10
|
+
|
|
11
|
+
CORRECTION_PROMPT = """你是一个调试专家。一个工具执行失败了,请分析错误原因并给出修正后的参数。
|
|
12
|
+
|
|
13
|
+
原始调用:
|
|
14
|
+
工具: {tool_name}
|
|
15
|
+
参数: {tool_args}
|
|
16
|
+
错误: {error}
|
|
17
|
+
|
|
18
|
+
请输出 JSON:
|
|
19
|
+
```json
|
|
20
|
+
{{
|
|
21
|
+
"analysis": "错误原因分析",
|
|
22
|
+
"retry": true或false,
|
|
23
|
+
"corrected_args": {{"参数名": "修正后的值"}} 或 {{}},
|
|
24
|
+
"fix_description": "修正说明"
|
|
25
|
+
}}
|
|
26
|
+
```
|
|
27
|
+
如果错误无法通过修改参数解决(如网络问题、权限问题),retry 设为 false。"""
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
@dataclass
class CorrectionResult:
    """Result of error analysis and correction."""

    # Human-readable explanation of why the tool call failed.
    analysis: str
    # Whether the caller should retry the tool with corrected_args.
    retry: bool
    # Replacement arguments for the retry; None (or {}) when not applicable.
    corrected_args: dict[str, Any] | None = None
    # Short description of what was changed for the retry.
    fix_description: str = ""
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
class SelfCorrector:
    """Analyze tool failures and suggest corrections.

    Asks the LLM why a tool call failed and whether a retry with adjusted
    arguments is worthwhile. Per-call retry counts cap how many times the
    same failing invocation is re-analyzed.
    """

    MAX_RETRIES = 2  # Max retries per identical (tool, args) invocation

    def __init__(self, llm: DeepSeekLLM | None = None) -> None:
        # Track ownership: an injected client belongs to the caller (e.g. the
        # Agent's shared LLM) and must NOT be closed by close(); previously it
        # was closed unconditionally, killing the caller's client.
        self._owns_llm = llm is None
        self.llm = llm or DeepSeekLLM()
        self._retry_counts: dict[str, int] = {}

    async def analyze_error(
        self,
        tool_name: str,
        tool_args: dict[str, Any],
        error: str,
    ) -> CorrectionResult:
        """Analyze a tool failure and suggest a correction.

        Args:
            tool_name: The tool that failed.
            tool_args: The arguments that were used.
            error: The error message.

        Returns:
            CorrectionResult with analysis and optionally corrected args.
        """
        # Check retry limit; sorted items make the key order-independent.
        key = f"{tool_name}:{str(sorted(tool_args.items()))}"
        count = self._retry_counts.get(key, 0)
        if count >= self.MAX_RETRIES:
            return CorrectionResult(
                analysis=f"已重试 {count} 次,放弃",
                retry=False,
            )
        self._retry_counts[key] = count + 1

        # Ask LLM to analyze; error text is truncated to keep the prompt small.
        prompt = CORRECTION_PROMPT.format(
            tool_name=tool_name,
            tool_args=str(tool_args),
            error=error[:500],
        )

        messages = [
            ChatMessage(role="system", content=prompt),
            ChatMessage(role="user", content="请分析并建议修正"),
        ]

        # Accumulate the streamed reply into one string.
        response = ""
        async for chunk in self.llm.chat(messages):
            if chunk.delta and chunk.delta.content:
                response += chunk.delta.content

        parsed = self._parse_correction(response)
        if parsed is not None:
            return parsed

        # Fallback: could not extract structured advice — don't retry.
        return CorrectionResult(
            analysis=response[:200] if response else "无法分析错误",
            retry=False,
        )

    @staticmethod
    def _parse_correction(response: str) -> CorrectionResult | None:
        """Extract a CorrectionResult from the LLM reply, or None if unparsable."""
        import json
        import re

        # Prefer a fenced ```json block; fall back to the first bare {...}
        # object so a reply without a code fence is still usable.
        candidates: list[str] = []
        fence = re.search(r"```(?:json)?\s*\n(.*?)\n```", response, re.DOTALL)
        if fence:
            candidates.append(fence.group(1))
        else:
            bare = re.search(r"\{.*\}", response, re.DOTALL)
            if bare:
                candidates.append(bare.group(0))

        for text in candidates:
            try:
                data = json.loads(text)
            except json.JSONDecodeError:
                continue
            if not isinstance(data, dict):
                continue
            corrected = data.get("corrected_args")
            return CorrectionResult(
                analysis=data.get("analysis", ""),
                retry=bool(data.get("retry", False)),
                # Guard against the model returning a non-dict here.
                corrected_args=corrected if isinstance(corrected, dict) else None,
                fix_description=data.get("fix_description", ""),
            )
        return None

    def reset_retries(self) -> None:
        """Reset retry counters for a new task."""
        self._retry_counts.clear()

    async def close(self) -> None:
        """Close the LLM client — only if this instance created it."""
        if self._owns_llm:
            await self.llm.close()
|
deepcraft/agent/llm.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
"""DeepSeek API client with streaming and tool-call support."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from collections.abc import AsyncIterator
|
|
6
|
+
|
|
7
|
+
import httpx
|
|
8
|
+
|
|
9
|
+
from deepcraft.agent.types import (
|
|
10
|
+
ChatMessage,
|
|
11
|
+
LLMResponse,
|
|
12
|
+
ToolDefinition,
|
|
13
|
+
)
|
|
14
|
+
from deepcraft.config import (
|
|
15
|
+
DEEPSEEK_API_KEY,
|
|
16
|
+
DEEPSEEK_BASE_URL,
|
|
17
|
+
DEEPSEEK_MAX_TOKENS,
|
|
18
|
+
DEEPSEEK_MODEL,
|
|
19
|
+
)
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class DeepSeekLLM:
    """Async DeepSeek chat API client.

    Thin wrapper over a long-lived ``httpx.AsyncClient`` speaking the
    OpenAI-compatible SSE streaming protocol. Callers must ``await close()``
    when finished to release the connection pool.
    """

    def __init__(self, model: str | None = None) -> None:
        # Model name falls back to the configured default.
        self.model = model or DEEPSEEK_MODEL
        # One shared client per instance; auth header is fixed at construction.
        self._client = httpx.AsyncClient(
            base_url=DEEPSEEK_BASE_URL,
            headers={
                "Authorization": f"Bearer {DEEPSEEK_API_KEY}",
                "Content-Type": "application/json",
            },
            # Generous timeout: streamed completions can run long.
            timeout=httpx.Timeout(120.0),
        )

    async def chat(
        self,
        messages: list[ChatMessage],
        tools: list[ToolDefinition] | None = None,
        stream: bool = True,
    ) -> AsyncIterator[LLMResponse]:
        """Send a chat completion request, yielding responses as they stream.

        Args:
            messages: Full conversation (system/user/assistant/tool messages).
            tools: Optional tool definitions to expose to the model.
            stream: Request streaming output from the API.
                NOTE(review): the response is always parsed as SSE
                ("data: " lines) below; with stream=False the API would
                return a single JSON body and this loop would yield nothing —
                confirm callers never pass stream=False.

        Yields:
            Parsed LLMResponse chunks; non-data lines and [DONE] are skipped.
        """
        body: dict[str, object] = {
            "model": self.model,
            # exclude_none keeps the payload minimal (no null tool_call_id etc.).
            "messages": [m.model_dump(exclude_none=True) for m in messages],
            "max_tokens": DEEPSEEK_MAX_TOKENS,
            "stream": stream,
        }
        if tools:
            body["tools"] = [t.model_dump(exclude_none=True) for t in tools]

        async with self._client.stream("POST", "/v1/chat/completions", json=body) as resp:
            # Fail fast on HTTP errors before consuming the stream.
            resp.raise_for_status()
            async for line in resp.aiter_lines():
                # SSE frames look like "data: <json>"; ignore anything else.
                if not line.startswith("data: "):
                    continue
                data = line[6:].strip()
                if data == "[DONE]":
                    break
                # from_chunk returns None for unparsable/keep-alive chunks.
                result = LLMResponse.from_chunk(data)
                if result is not None:
                    yield result

    async def close(self) -> None:
        """Release the underlying HTTP connection pool."""
        await self._client.aclose()
|
deepcraft/agent/loop.py
ADDED
|
@@ -0,0 +1,299 @@
|
|
|
1
|
+
"""Agent main loop — ReAct pattern with safety guards."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import json
|
|
6
|
+
from typing import Any
|
|
7
|
+
|
|
8
|
+
from rich.console import Console
|
|
9
|
+
|
|
10
|
+
from deepcraft.agent.context import ContextManager
|
|
11
|
+
from deepcraft.agent.corrector import SelfCorrector
|
|
12
|
+
from deepcraft.agent.llm import DeepSeekLLM
|
|
13
|
+
from deepcraft.agent.modes import AgentMode, ModeSelector
|
|
14
|
+
from deepcraft.agent.planner import Planner
|
|
15
|
+
from deepcraft.agent.subagent import Delegator, SubTask
|
|
16
|
+
from deepcraft.agent.tools.base import ToolRegistry
|
|
17
|
+
from deepcraft.agent.types import (
|
|
18
|
+
ChatMessage,
|
|
19
|
+
Role,
|
|
20
|
+
)
|
|
21
|
+
from deepcraft.agent.types import (
|
|
22
|
+
ToolResult as TypedToolResult,
|
|
23
|
+
)
|
|
24
|
+
from deepcraft.config import MAX_TURNS
|
|
25
|
+
|
|
26
|
+
console = Console()
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class Agent:
    """ReAct agent with DeepSeek backend.

    Drives the think→act→observe loop: stream an LLM reply, execute any
    requested tool calls, feed the results back, and repeat until the model
    answers without tools or MAX_TURNS is reached.
    """

    def __init__(
        self,
        llm: DeepSeekLLM | None = None,
        tools: ToolRegistry | None = None,
        context: ContextManager | None = None,
    ) -> None:
        # Collaborators are injectable for testing; defaults built here.
        self.llm = llm or DeepSeekLLM()
        self.tools = tools or ToolRegistry()
        self.ctx = context or ContextManager()
        # Per-run loop-safety state (reset at the start of each run()).
        self._turn_count = 0
        self._recent_actions: list[str] = []

    async def run(self, user_input: str) -> str:
        """Run the agent loop for a user input. Returns final response."""
        self._turn_count = 0
        self._recent_actions = []
        self.ctx.add_message(Role.USER, user_input)

        while self._turn_count < MAX_TURNS:
            self._turn_count += 1

            # --- Get LLM response (streaming) ---
            # Buffer keyed by the stream's tool-call index; fragments of the
            # arguments JSON arrive across many chunks and are concatenated.
            tool_calls_buffer: dict[int, dict[str, Any]] = {}
            content_parts: list[str] = []

            tool_defs = self.tools.list_definitions()
            messages = self.ctx.get_messages()

            async for chunk in self.llm.chat(messages, tools=tool_defs or None):
                if chunk.delta is None:
                    continue

                # Collect streaming content
                if chunk.delta.content:
                    content_parts.append(chunk.delta.content)
                    console.print(chunk.delta.content, end="", highlight=False)

                # Collect tool calls — use index from stream chunk, not buffer length
                if chunk.delta.tool_calls:
                    for tc in chunk.delta.tool_calls:
                        # DeepSeek streaming sends "index" in every tool_call chunk
                        tc_index = tc.get("index")
                        if tc_index is None:
                            tc_index = len(tool_calls_buffer)
                        if tc.get("id"):
                            # First chunk of a call carries the id + function name.
                            # NOTE(review): dict(tc) is a shallow copy — the nested
                            # "function" dict is shared with the chunk object, and the
                            # += below assumes it already has an "arguments" key.
                            tool_calls_buffer[tc_index] = dict(tc)
                        elif tc_index in tool_calls_buffer:
                            # Append streaming arguments fragment
                            tc_args = tc.get("function", {}).get("arguments", "")
                            tool_calls_buffer[tc_index]["function"]["arguments"] += tc_args

                if chunk.finish_reason:
                    break

            console.print()  # newline after streaming

            response_text = "".join(content_parts)

            # --- No tool calls: return final response ---
            if not tool_calls_buffer:
                self.ctx.add_message(Role.ASSISTANT, response_text)
                return response_text

            # --- Execute tool calls ---
            tool_results: list[TypedToolResult] = []

            for tc in tool_calls_buffer.values():
                tool_name = tc["function"]["name"]
                tool_id = tc["id"]

                # Parse arguments (malformed JSON degrades to an empty dict)
                args_str = tc["function"]["arguments"]
                try:
                    arguments = json.loads(args_str) if args_str else {}
                except json.JSONDecodeError:
                    arguments = {}

                # Safety: loop detection — 3 identical consecutive calls aborts the run
                action_key = f"{tool_name}:{json.dumps(arguments, sort_keys=True)}"
                self._recent_actions.append(action_key)
                if self._recent_actions[-3:].count(action_key) == 3:
                    # 3 consecutive identical calls → force stop
                    summary = "⚠️ Detected repetitive tool calls. Stopping to avoid loop."
                    self.ctx.add_message(Role.ASSISTANT, summary)
                    return summary
                # Keep only last 10
                self._recent_actions = self._recent_actions[-10:]

                # Execute
                # NOTE(review): here the args dict is passed positionally, but
                # _run_plan_execute below calls execute(name, **args) — confirm
                # which signature ToolRegistry.execute actually has; one of the
                # two call styles is likely wrong.
                args_str = json.dumps(arguments, ensure_ascii=False)
                console.print(f" 🔧 {tool_name}({args_str})", style="dim")
                result = await self.tools.execute(tool_name, arguments)
                tool_results.append(TypedToolResult(
                    tool_call_id=tool_id,
                    content=result.content,
                    is_error=result.is_error,
                ))

            # --- Add assistant message with tool calls ---
            assistant_msg = ChatMessage(
                role="assistant",
                content=response_text or None,
                tool_calls=[
                    {
                        "id": tc["id"],
                        "type": "function",
                        "function": {
                            "name": tc["function"]["name"],
                            "arguments": tc["function"]["arguments"],
                        },
                    }
                    for tc in tool_calls_buffer.values()
                ],
            )
            # NOTE(review): bypasses ContextManager.add_message (which cannot
            # carry tool_calls) — this also skips the compression check.
            self.ctx._messages.append(assistant_msg)

            # --- Add tool results ---
            for tr in tool_results:
                self.ctx.add_message(
                    Role.TOOL,
                    tr.content,
                    tool_call_id=tr.tool_call_id,
                )

            # Continue loop — LLM will process tool results

        # Max turns reached
        summary = f"\n⚠️ Reached max turns ({MAX_TURNS}). Summarizing based on current state."
        return summary

    async def close(self) -> None:
        """Release the underlying LLM HTTP client."""
        await self.llm.close()

    # ------------------------------------------------------------------
    # Advanced modes
    # ------------------------------------------------------------------

    async def run_with_mode(
        self, user_input: str, mode: AgentMode = AgentMode.AUTO
    ) -> str:
        """Run the agent loop, auto-selecting or using a specific mode.

        Args:
            user_input: The user's message.
            mode: AgentMode to use (AUTO = auto-detect).

        Returns:
            Final response text.
        """
        selected = ModeSelector.select(user_input, prefer=mode)

        console.print(f" 🧠 模式: {selected.value}", style="cyan")

        if selected == AgentMode.PLAN_EXECUTE:
            return await self._run_plan_execute(user_input)
        elif selected == AgentMode.SUBAGENT:
            return await self._run_subagent(user_input)
        else:
            return await self.run(user_input)

    async def _run_plan_execute(self, user_input: str) -> str:
        """Plan-and-Execute: generate a plan, then execute step by step."""
        self._turn_count = 0
        self.ctx.add_message(Role.USER, user_input)

        # Step 1: Generate plan
        planner = Planner(self.llm)
        plan = await planner.generate_plan(user_input, self.tools)
        console.print(f" 📋 计划: {plan.goal}")
        for i, s in enumerate(plan.steps, 1):
            console.print(f" {i}. {s.description}", style="dim")

        # Step 2: Execute each step
        corrector = SelfCorrector(llm=self.llm)
        try:
            while not plan.completed and self._turn_count < MAX_TURNS:
                self._turn_count += 1
                next_s = plan.next_step()
                if next_s is None:
                    break

                # Local import avoids a module-level cycle with planner.
                from deepcraft.agent.planner import StepStatus
                next_s.status = StepStatus.RUNNING

                # Pure reasoning step (no tool)
                if not next_s.tool_name:
                    next_s.status = StepStatus.DONE
                    next_s.result = next_s.description
                    continue

                # Execute tool
                # NOTE(review): args are splatted here (**kwargs) but run()
                # passes the dict positionally — verify against ToolRegistry.
                try:
                    result = await self.tools.execute(next_s.tool_name, **next_s.tool_args)
                    if result.is_error:
                        # Try self-correction
                        correction = await corrector.analyze_error(
                            next_s.tool_name, next_s.tool_args, result.content
                        )
                        if correction.retry and correction.corrected_args:
                            desc = correction.fix_description
                            console.print(f" 🔄 纠错: {desc}", style="yellow")
                            result = await self.tools.execute(
                                next_s.tool_name, **correction.corrected_args
                            )

                        if result.is_error:
                            next_s.status = StepStatus.FAILED
                            next_s.error = result.content
                        else:
                            next_s.status = StepStatus.DONE
                            next_s.result = result.content
                    else:
                        next_s.status = StepStatus.DONE
                        next_s.result = result.content
                except Exception as e:
                    next_s.status = StepStatus.FAILED
                    next_s.error = str(e)

            # Step 3: Synthesize results
            plan_context = plan.format_for_context()
            self.ctx.add_message(Role.USER, f"执行计划完成。结果:\n{plan_context}\n请总结。")

            # Final summary
            return await self.run("总结以上执行结果")

        finally:
            # NOTE(review): corrector and planner were built on self.llm; if
            # their close() closes that shared client, this Agent's llm is
            # unusable for subsequent calls — verify ownership semantics.
            await corrector.close()
            await planner.close()

    async def _run_subagent(self, user_input: str) -> str:
        """Sub-agent mode: decompose and delegate to parallel sub-agents."""
        self._turn_count = 0
        self.ctx.add_message(Role.USER, user_input)

        # Step 1: Use planner to generate subtasks
        planner = Planner(self.llm)
        plan = await planner.generate_plan(user_input, self.tools)

        # Step 2: Convert plan steps to subtasks
        subtasks = [
            SubTask(
                description=step.description,
                context=f"目标: {plan.goal}",
            )
            for step in plan.steps
        ]

        console.print(f" 🚀 委派 {len(subtasks)} 个子任务...", style="cyan")

        # Step 3: Execute in parallel
        delegator = Delegator(llm=self.llm, tools=self.tools)
        try:
            results = await delegator.delegate(subtasks)

            # Step 4: Synthesize — one status line per sub-agent, output truncated
            summary_parts = ["## 子代理执行结果\n"]
            for r in results:
                status = "✅" if r.success else "❌"
                summary_parts.append(f"{status} **{r.task}**\n{r.output[:500]}\n")

            summary = "\n".join(summary_parts)
            self.ctx.add_message(Role.USER, f"子代理执行完成:\n{summary}\n请总结。")

            return await self.run("总结以上子代理的执行结果")

        finally:
            # NOTE(review): same shared-llm close() concern as _run_plan_execute.
            await delegator.close()
            await planner.close()
|