klaude-code 1.2.15__py3-none-any.whl → 1.2.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- klaude_code/cli/main.py +66 -42
- klaude_code/cli/runtime.py +34 -13
- klaude_code/command/__init__.py +3 -0
- klaude_code/command/export_cmd.py +2 -2
- klaude_code/command/export_online_cmd.py +149 -0
- klaude_code/command/prompt-handoff.md +33 -0
- klaude_code/command/thinking_cmd.py +5 -1
- klaude_code/config/config.py +20 -21
- klaude_code/config/list_model.py +1 -1
- klaude_code/const/__init__.py +3 -0
- klaude_code/core/executor.py +2 -2
- klaude_code/core/manager/llm_clients_builder.py +1 -1
- klaude_code/core/manager/sub_agent_manager.py +30 -6
- klaude_code/core/prompt.py +15 -13
- klaude_code/core/prompts/{prompt-subagent-explore.md → prompt-sub-agent-explore.md} +0 -1
- klaude_code/core/prompts/{prompt-subagent-oracle.md → prompt-sub-agent-oracle.md} +1 -2
- klaude_code/core/prompts/prompt-sub-agent-web.md +48 -0
- klaude_code/core/reminders.py +75 -32
- klaude_code/core/task.py +18 -22
- klaude_code/core/tool/__init__.py +4 -0
- klaude_code/core/tool/report_back_tool.py +84 -0
- klaude_code/core/tool/sub_agent_tool.py +6 -0
- klaude_code/core/tool/tool_runner.py +9 -1
- klaude_code/core/tool/web/web_search_tool.md +23 -0
- klaude_code/core/tool/web/web_search_tool.py +126 -0
- klaude_code/core/turn.py +45 -4
- klaude_code/llm/anthropic/input.py +14 -5
- klaude_code/llm/openrouter/input.py +14 -3
- klaude_code/llm/responses/input.py +19 -0
- klaude_code/protocol/commands.py +1 -0
- klaude_code/protocol/events.py +9 -0
- klaude_code/protocol/model.py +24 -14
- klaude_code/protocol/sub_agent/__init__.py +117 -0
- klaude_code/protocol/sub_agent/explore.py +63 -0
- klaude_code/protocol/sub_agent/oracle.py +91 -0
- klaude_code/protocol/sub_agent/task.py +61 -0
- klaude_code/protocol/sub_agent/web.py +78 -0
- klaude_code/protocol/tools.py +2 -0
- klaude_code/session/export.py +12 -6
- klaude_code/session/session.py +12 -2
- klaude_code/session/templates/export_session.html +111 -36
- klaude_code/ui/modes/repl/completers.py +1 -1
- klaude_code/ui/modes/repl/event_handler.py +65 -8
- klaude_code/ui/modes/repl/renderer.py +11 -9
- klaude_code/ui/renderers/developer.py +18 -7
- klaude_code/ui/renderers/metadata.py +24 -12
- klaude_code/ui/renderers/sub_agent.py +63 -3
- klaude_code/ui/renderers/thinking.py +1 -1
- klaude_code/ui/renderers/tools.py +24 -37
- klaude_code/ui/rich/markdown.py +20 -48
- klaude_code/ui/rich/status.py +61 -17
- klaude_code/ui/rich/theme.py +8 -7
- {klaude_code-1.2.15.dist-info → klaude_code-1.2.17.dist-info}/METADATA +114 -22
- {klaude_code-1.2.15.dist-info → klaude_code-1.2.17.dist-info}/RECORD +57 -48
- klaude_code/core/prompts/prompt-subagent-webfetch.md +0 -46
- klaude_code/protocol/sub_agent.py +0 -354
- /klaude_code/core/prompts/{prompt-subagent.md → prompt-sub-agent.md} +0 -0
- {klaude_code-1.2.15.dist-info → klaude_code-1.2.17.dist-info}/WHEEL +0 -0
- {klaude_code-1.2.15.dist-info → klaude_code-1.2.17.dist-info}/entry_points.txt +0 -0
klaude_code/core/manager/sub_agent_manager.py
CHANGED

@@ -9,8 +9,9 @@ from __future__ import annotations
 
 import asyncio
 
-from klaude_code.core.agent import Agent, ModelProfileProvider
+from klaude_code.core.agent import Agent, AgentProfile, ModelProfileProvider
 from klaude_code.core.manager.llm_clients import LLMClients
+from klaude_code.core.tool import ReportBackTool
 from klaude_code.protocol import events, model
 from klaude_code.protocol.sub_agent import SubAgentResult
 from klaude_code.session.session import Session

@@ -35,7 +36,7 @@ class SubAgentManager:
 
         await self._event_queue.put(event)
 
-    async def
+    async def run_sub_agent(self, parent_agent: Agent, state: model.SubAgentState) -> SubAgentResult:
         """Run a nested sub-agent task and return its result."""
 
         # Create a child session under the same workdir

@@ -47,6 +48,25 @@
             self._llm_clients.get_client(state.sub_agent_type),
             state.sub_agent_type,
         )
+
+        # Inject report_back tool if output_schema is provided
+        if state.output_schema:
+            report_back_tool_class = ReportBackTool.for_schema(state.output_schema)
+            report_back_prompt = """\
+
+# Structured Output
+You have a `report_back` tool available. When you complete the task,\
+you MUST call `report_back` with the structured result matching the required schema.\
+Only the content passed to `report_back` will be returned to user.\
+"""
+            base_prompt = child_profile.system_prompt or ""
+            child_profile = AgentProfile(
+                llm_client=child_profile.llm_client,
+                system_prompt=base_prompt + report_back_prompt,
+                tools=[*child_profile.tools, report_back_tool_class.schema()],
+                reminders=child_profile.reminders,
+            )
+
         child_agent = Agent(session=child_session, profile=child_profile)
 
         log_debug(

@@ -68,23 +88,27 @@
                 elif isinstance(event, events.TaskMetadataEvent):
                     task_metadata = event.metadata.main
                 await self.emit_event(event)
-            return SubAgentResult(
+            return SubAgentResult(
+                task_result=result,
+                session_id=child_session.id,
+                task_metadata=task_metadata,
+            )
         except asyncio.CancelledError:
             # Propagate cancellation so tooling can treat it as user interrupt
             log_debug(
-                f"
+                f"Sub-agent task for {state.sub_agent_type} was cancelled",
                 style="yellow",
                 debug_type=DebugType.EXECUTION,
             )
             raise
         except Exception as exc:  # pragma: no cover - defensive logging
             log_debug(
-                f"
+                f"Sub-agent task failed: [{exc.__class__.__name__}] {exc!s}",
                 style="red",
                 debug_type=DebugType.EXECUTION,
             )
             return SubAgentResult(
-                task_result=f"
+                task_result=f"Sub-agent task failed: [{exc.__class__.__name__}] {exc!s}",
                 session_id="",
                 error=True,
             )
klaude_code/core/prompt.py
CHANGED
@@ -5,6 +5,7 @@ from importlib.resources import files
 from pathlib import Path
 
 from klaude_code.protocol import llm_param
+from klaude_code.protocol.sub_agent import get_sub_agent_profile
 
 COMMAND_DESCRIPTIONS: dict[str, str] = {
     "rg": "ripgrep - fast text search",

@@ -19,15 +20,15 @@ PROMPT_FILES: dict[str, str] = {
     "main_gpt_5_1_codex_max": "prompts/prompt-codex-gpt-5-1-codex-max.md",
     "main": "prompts/prompt-claude-code.md",
     "main_gemini": "prompts/prompt-gemini.md", # https://ai.google.dev/gemini-api/docs/prompting-strategies?hl=zh-cn#agentic-si-template
-    # Sub-agent prompts keyed by their name
-    "Task": "prompts/prompt-subagent.md",
-    "Oracle": "prompts/prompt-subagent-oracle.md",
-    "Explore": "prompts/prompt-subagent-explore.md",
-    "WebFetchAgent": "prompts/prompt-subagent-webfetch.md",
 }
 
 
 @cache
+def _load_prompt_by_path(prompt_path: str) -> str:
+    """Load and cache prompt content from a file path relative to core package."""
+    return files(__package__).joinpath(prompt_path).read_text(encoding="utf-8").strip()
+
+
 def _load_base_prompt(file_key: str) -> str:
     """Load and cache the base prompt content from file."""
     try:

@@ -35,14 +36,11 @@ def _load_base_prompt(file_key: str) -> str:
     except KeyError as exc:
         raise ValueError(f"Unknown prompt key: {file_key}") from exc
 
-    return
+    return _load_prompt_by_path(prompt_path)
 
 
-def _get_file_key(model_name: str, protocol: llm_param.LLMClientProtocol
-    """Determine which prompt file to use based on model
-    if sub_agent_type is not None:
-        return sub_agent_type
-
+def _get_file_key(model_name: str, protocol: llm_param.LLMClientProtocol) -> str:
+    """Determine which prompt file to use based on model."""
     match model_name:
         case name if "gpt-5.1-codex-max" in name:
             return "main_gpt_5_1_codex_max"

@@ -90,8 +88,12 @@ def load_system_prompt(
     model_name: str, protocol: llm_param.LLMClientProtocol, sub_agent_type: str | None = None
 ) -> str:
     """Get system prompt content for the given model and sub-agent type."""
-
-
+    if sub_agent_type is not None:
+        profile = get_sub_agent_profile(sub_agent_type)
+        base_prompt = _load_prompt_by_path(profile.prompt_file)
+    else:
+        file_key = _get_file_key(model_name, protocol)
+        base_prompt = _load_base_prompt(file_key)
 
     if protocol == llm_param.LLMClientProtocol.CODEX:
         # Do not append environment info for Codex protocol
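A minimal sketch of the resulting dispatch, using only names that appear in the hunks above ("Explore", "gpt-5.1-codex-max", and the CODEX protocol member); whether these exact arguments are sensible together is an assumption of the sketch.

```python
from klaude_code.core.prompt import load_system_prompt
from klaude_code.protocol import llm_param

# Main agent: resolved via _get_file_key() and PROMPT_FILES ("main_gpt_5_1_codex_max" here).
main_prompt = load_system_prompt("gpt-5.1-codex-max", llm_param.LLMClientProtocol.CODEX)

# Sub-agent: resolved via get_sub_agent_profile("Explore").prompt_file,
# which now bypasses PROMPT_FILES entirely.
explore_prompt = load_system_prompt(
    "gpt-5.1-codex-max", llm_param.LLMClientProtocol.CODEX, sub_agent_type="Explore"
)
```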
klaude_code/core/prompts/{prompt-subagent-explore.md → prompt-sub-agent-explore.md}
RENAMED

@@ -22,7 +22,6 @@ Guidelines:
 
 Complete the user's search request efficiently and report your findings clearly.
 
-
 Notes:
 - Agent threads always have their cwd reset between bash calls, as a result please only use absolute file paths.
 - In your final response always share relevant file names and code snippets. Any file paths you return in your response MUST be absolute. Do NOT use relative paths.
klaude_code/core/prompts/{prompt-subagent-oracle.md → prompt-sub-agent-oracle.md}
RENAMED

@@ -1,8 +1,7 @@
 You are the Oracle - an expert AI advisor with advanced reasoning capabilities
 
 Your role is to provide high-quality technical guidance, code reviews, architectural advice, and strategic planning for software engineering tasks.
-You are running inside an AI coding system in which you act as a
-
+You are running inside an AI coding system in which you act as a sub-agent that's used when the main agent needs a smarter, more capable model to help out.
 
 Key responsibilities:
 - Analyze code and architecture patterns
klaude_code/core/prompts/prompt-sub-agent-web.md
ADDED

@@ -0,0 +1,48 @@
+You are a web research agent that searches and fetches web content to provide up-to-date information.
+
+## Available Tools
+
+**WebSearch**: Search the web via DuckDuckGo
+- Returns: title, URL, and snippet for each result
+- Parameter `max_results`: control result count (default: 10, max: 20)
+- Snippets are brief summaries - use WebFetch for full content
+
+**WebFetch**: Fetch and process web page content
+- HTML pages are automatically converted to Markdown
+- JSON responses are auto-formatted with indentation
+- Other text content returned as-is
+
+## Tool Usage Strategy
+
+Scale tool calls to query complexity:
+- Simple facts: 1-2 calls
+- Medium research: 3-5 calls
+- Deep research/comparisons: 5-10 calls
+
+Balance efficiency with thoroughness. For open-ended questions (e.g., "recommendations for video games" or "recent developments in RL"), use more calls for comprehensive answers.
+
+## Search Guidelines
+
+- Keep queries concise (1-6 words). Start broad, then narrow if needed
+- Avoid repeating similar queries - they won't yield new results
+- NEVER use '-', 'site:', or quotes unless explicitly asked
+- Include year/date for time-sensitive queries (check "Today's date" in <env>)
+- Use WebFetch to get full content - search snippets are often insufficient
+- Follow relevant links on pages with WebFetch
+- If truncated results are saved to local files, use grep/read to explore
+
+## Response Guidelines
+
+- Only your last message is returned to the main agent
+- Be succinct - include only relevant information
+- Lead with the most recent info for evolving topics
+- Favor original sources (company blogs, papers, gov sites) over aggregators
+- Note conflicting sources when they exist
+
+## Sources (REQUIRED)
+
+You MUST end every response with a "Sources:" section listing all URLs as markdown links:
+
+Sources:
+- [Source Title](https://example.com)
+- [Another Source](https://example.com/page) (saved: /path/to/file)
klaude_code/core/reminders.py
CHANGED
@@ -14,7 +14,8 @@ from klaude_code.session import Session
 type Reminder = Callable[[Session], Awaitable[model.DeveloperMessageItem | None]]
 
 
-
+# Match @ preceded by whitespace, start of line, or → (ReadTool line number arrow)
+AT_FILE_PATTERN = re.compile(r'(?:(?<!\S)|(?<=\u2192))@("(?P<quoted>[^\"]+)"|(?P<plain>\S+))')
 
 
 def get_last_new_user_input(session: Session) -> str | None:

@@ -31,10 +32,74 @@
     return "\n\n".join(result)
 
 
+async def _load_at_file_recursive(
+    session: Session,
+    pattern: str,
+    at_files: dict[str, model.AtPatternParseResult],
+    collected_images: list[model.ImageURLPart],
+    visited: set[str],
+    base_dir: Path | None = None,
+    mentioned_in: str | None = None,
+) -> None:
+    """Recursively load @ file references."""
+    path = (base_dir / pattern).resolve() if base_dir else Path(pattern).resolve()
+    path_str = str(path)
+
+    if path_str in visited:
+        return
+    visited.add(path_str)
+
+    context_token = set_tool_context_from_session(session)
+    try:
+        if path.exists() and path.is_file():
+            args = ReadTool.ReadArguments(file_path=path_str)
+            tool_result = await ReadTool.call_with_args(args)
+            at_files[path_str] = model.AtPatternParseResult(
+                path=path_str,
+                tool_name=tools.READ,
+                result=tool_result.output or "",
+                tool_args=args.model_dump_json(exclude_none=True),
+                operation="Read",
+                images=tool_result.images,
+                mentioned_in=mentioned_in,
+            )
+            if tool_result.images:
+                collected_images.extend(tool_result.images)
+
+            # Recursively parse @ references from ReadTool output
+            output = tool_result.output or ""
+            if "@" in output:
+                for match in AT_FILE_PATTERN.finditer(output):
+                    nested = match.group("quoted") or match.group("plain")
+                    if nested:
+                        await _load_at_file_recursive(
+                            session,
+                            nested,
+                            at_files,
+                            collected_images,
+                            visited,
+                            base_dir=path.parent,
+                            mentioned_in=path_str,
+                        )
+        elif path.exists() and path.is_dir():
+            quoted_path = shlex.quote(path_str)
+            args = BashTool.BashArguments(command=f"ls {quoted_path}")
+            tool_result = await BashTool.call_with_args(args)
+            at_files[path_str] = model.AtPatternParseResult(
+                path=path_str + "/",
+                tool_name=tools.BASH,
+                result=tool_result.output or "",
+                tool_args=args.model_dump_json(exclude_none=True),
+                operation="List",
+            )
+    finally:
+        reset_tool_context(context_token)
+
+
 async def at_file_reader_reminder(
     session: Session,
 ) -> model.DeveloperMessageItem | None:
-    """Parse @foo/bar to read"""
+    """Parse @foo/bar to read, with recursive loading of nested @ references"""
     last_user_input = get_last_new_user_input(session)
     if not last_user_input or "@" not in last_user_input:
         return None

@@ -53,38 +118,16 @@ async def at_file_reader_reminder(
 
     at_files: dict[str, model.AtPatternParseResult] = {} # path -> content
     collected_images: list[model.ImageURLPart] = []
+    visited: set[str] = set()
 
     for pattern in at_patterns:
-
-
-
-
-
-
-
-                    path=str(path),
-                    tool_name=tools.READ,
-                    result=tool_result.output or "",
-                    tool_args=args.model_dump_json(exclude_none=True),
-                    operation="Read",
-                    images=tool_result.images,
-                )
-                at_files[str(path)] = at_result
-                if tool_result.images:
-                    collected_images.extend(tool_result.images)
-            elif path.exists() and path.is_dir():
-                quoted_path = shlex.quote(str(path))
-                args = BashTool.BashArguments(command=f"ls {quoted_path}")
-                tool_result = await BashTool.call_with_args(args)
-                at_files[str(path)] = model.AtPatternParseResult(
-                    path=str(path) + "/",
-                    tool_name=tools.BASH,
-                    result=tool_result.output or "",
-                    tool_args=args.model_dump_json(exclude_none=True),
-                    operation="List",
-                )
-            finally:
-                reset_tool_context(context_token)
+        await _load_at_file_recursive(
+            session,
+            pattern,
+            at_files,
+            collected_images,
+            visited,
+        )
 
     if len(at_files) == 0:
         return None
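To make the new matching rule concrete, here is a small self-contained sketch of what `AT_FILE_PATTERN` accepts; the regex is copied verbatim from the hunk above, while the sample strings are invented for illustration.

```python
import re

# Copied from klaude_code/core/reminders.py above: @ must be preceded by whitespace,
# start of line, or the "→" arrow that ReadTool uses for line numbers.
AT_FILE_PATTERN = re.compile(r'(?:(?<!\S)|(?<=\u2192))@("(?P<quoted>[^\"]+)"|(?P<plain>\S+))')

samples = [
    'please read @src/main.py and @"docs/design notes.md"',
    "user@example.com should not match",
    "12→@nested/include.md mentioned inside ReadTool output",
]
for text in samples:
    for m in AT_FILE_PATTERN.finditer(text):
        print(m.group("quoted") or m.group("plain"))
# -> src/main.py, docs/design notes.md, nested/include.md
```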
klaude_code/core/task.py
CHANGED
@@ -41,10 +41,8 @@ class MetadataAccumulator:
         if main.usage is None:
             main.usage = model.Usage()
         acc_usage = main.usage
-
-        acc_usage
-        acc_usage.reasoning_tokens += usage.reasoning_tokens
-        acc_usage.output_tokens += usage.output_tokens
+
+        model.TaskMetadata.merge_usage(acc_usage, usage)
         acc_usage.currency = usage.currency
 
         if usage.context_size is not None:

@@ -67,13 +65,6 @@
             self._throughput_weighted_sum += usage.throughput_tps * current_output
             self._throughput_tracked_tokens += current_output
 
-        if usage.input_cost is not None:
-            acc_usage.input_cost = (acc_usage.input_cost or 0.0) + usage.input_cost
-        if usage.output_cost is not None:
-            acc_usage.output_cost = (acc_usage.output_cost or 0.0) + usage.output_cost
-        if usage.cache_read_cost is not None:
-            acc_usage.cache_read_cost = (acc_usage.cache_read_cost or 0.0) + usage.cache_read_cost
-
         if turn_metadata.provider is not None:
             main.provider = turn_metadata.provider
         if turn_metadata.model_name:

@@ -191,6 +182,14 @@ class TaskExecutor:
                         yield am
                 case events.ResponseMetadataEvent() as e:
                     metadata_accumulator.add(e.metadata)
+                    # Emit context usage event if available
+                    if e.metadata.usage is not None:
+                        context_percent = e.metadata.usage.context_usage_percent
+                        if context_percent is not None:
+                            yield events.ContextUsageEvent(
+                                session_id=session_ctx.session_id,
+                                context_percent=context_percent,
+                            )
                 case events.ToolResultEvent() as e:
                     # Collect sub-agent task metadata from tool results
                     if e.task_metadata is not None:

@@ -225,7 +224,7 @@
                 yield events.ErrorEvent(error_message=final_error, can_retry=False)
                 return
 
-            if turn is None or
+            if turn is None or turn.task_finished:
                 break
 
         # Finalize metadata

@@ -234,21 +233,18 @@
 
         yield events.TaskMetadataEvent(metadata=accumulated, session_id=session_ctx.session_id)
         session_ctx.append_history([accumulated])
+
+        # Get task result from turn
+        task_result = turn.task_result if turn is not None else ""
+        has_structured_output = turn.has_structured_output if turn is not None else False
+
         yield events.TaskFinishEvent(
             session_id=session_ctx.session_id,
-            task_result=
+            task_result=task_result,
+            has_structured_output=has_structured_output,
         )
 
 
-def _get_last_assistant_message(history: list[model.ConversationItem]) -> str | None:
-    """Return the content of the most recent assistant message in history."""
-
-    for item in reversed(history):
-        if isinstance(item, model.AssistantMessageItem):
-            return item.content or ""
-    return None
-
-
 def _retry_delay_seconds(attempt: int) -> float:
     """Compute exponential backoff delay for the given attempt count."""
     capped_attempt = max(1, attempt)
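As a hedged illustration of consuming the new event, a renderer might format it like this; the event path and field names come from the hunks above, while the function name and the assumption that `context_percent` is a 0-100 float are illustrative only.

```python
from klaude_code.protocol import events


def render_context_usage(event: events.ContextUsageEvent) -> str:
    # session_id and context_percent are the fields emitted by TaskExecutor above;
    # treating context_percent as a 0-100 float is an assumption of this sketch.
    return f"[session {event.session_id}] context window used: {event.context_percent:.1f}%"
```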
klaude_code/core/tool/__init__.py
CHANGED

@@ -7,6 +7,7 @@ from .file.write_tool import WriteTool
 from .memory.memory_tool import MEMORY_DIR_NAME, MemoryTool
 from .memory.skill_loader import Skill, SkillLoader
 from .memory.skill_tool import SkillTool
+from .report_back_tool import ReportBackTool
 from .shell.bash_tool import BashTool
 from .shell.command_safety import SafetyCheckResult, is_safe_command
 from .sub_agent_tool import SubAgentTool

@@ -27,6 +28,7 @@ from .tool_runner import run_tool
 from .truncation import SimpleTruncationStrategy, TruncationStrategy, get_truncation_strategy, set_truncation_strategy
 from .web.mermaid_tool import MermaidTool
 from .web.web_fetch_tool import WebFetchTool
+from .web.web_search_tool import WebSearchTool
 
 __all__ = [
     "MEMORY_DIR_NAME",

@@ -38,6 +40,7 @@ __all__ = [
     "MermaidTool",
     "MultiEditTool",
     "ReadTool",
+    "ReportBackTool",
     "SafetyCheckResult",
     "SimpleTruncationStrategy",
     "Skill",

@@ -51,6 +54,7 @@
     "TruncationStrategy",
     "UpdatePlanTool",
     "WebFetchTool",
+    "WebSearchTool",
     "WriteTool",
     "build_todo_context",
     "current_run_subtask_callback",
klaude_code/core/tool/report_back_tool.py
ADDED

@@ -0,0 +1,84 @@
+"""ReportBackTool for sub-agents to return structured output."""
+
+from typing import Any, ClassVar, cast
+
+from klaude_code.protocol import llm_param, model, tools
+
+
+def _normalize_schema_types(schema: dict[str, Any]) -> dict[str, Any]:
+    """Recursively normalize JSON schema type values to lowercase.
+
+    Some LLMs (e.g., Gemini 3) generate type values in uppercase like "OBJECT", "STRING".
+    Standard JSON Schema requires lowercase type values.
+    """
+    result: dict[str, Any] = {}
+    for key, value in schema.items():
+        if key == "type" and isinstance(value, str):
+            result[key] = value.lower()
+        elif isinstance(value, dict):
+            result[key] = _normalize_schema_types(cast(dict[str, Any], value))
+        elif isinstance(value, list):
+            normalized_list: list[Any] = []
+            for item in cast(list[Any], value):
+                if isinstance(item, dict):
+                    normalized_list.append(_normalize_schema_types(cast(dict[str, Any], item)))
+                else:
+                    normalized_list.append(item)
+            result[key] = normalized_list
+        else:
+            result[key] = value
+    return result
+
+
+class ReportBackTool:
+    """Special tool for sub-agents to return structured output and end the task.
+
+    This tool is dynamically injected when a parent agent calls a sub-agent with
+    an output_schema. The schema for this tool's parameters is defined by the
+    parent agent, allowing structured data to be returned.
+
+    Note: This class does not inherit from ToolABC because it's not registered
+    in the global tool registry. Instead, it's handled specially by the
+    TurnExecutor and SubAgentManager.
+    """
+
+    _schema: ClassVar[dict[str, Any]] = {}
+
+    @classmethod
+    def for_schema(cls, schema: dict[str, Any]) -> type["ReportBackTool"]:
+        """Create a tool class with the specified output schema.
+
+        Args:
+            schema: JSON Schema defining the expected structure of the report_back arguments.
+
+        Returns:
+            A new class with the schema set as a class variable.
+        """
+        normalized = _normalize_schema_types(schema)
+        return type("ReportBackTool", (ReportBackTool,), {"_schema": normalized})
+
+    @classmethod
+    def schema(cls) -> llm_param.ToolSchema:
+        """Generate the tool schema for this report_back tool."""
+        return llm_param.ToolSchema(
+            name=tools.REPORT_BACK,
+            type="function",
+            description=(
+                "Report the final structured result back to the parent agent. "
+                "Call this when you have completed the task and want to return structured data. "
+                "The task will end after this tool is called."
+            ),
+            parameters=cls._schema,
+        )
+
+    @classmethod
+    async def call(cls, arguments: str) -> model.ToolResultItem:
+        """Execute the report_back tool.
+
+        The actual handling of report_back results is done by TurnExecutor.
+        This method just returns a success status to maintain the tool call flow.
+        """
+        return model.ToolResultItem(
+            status="success",
+            output="Result reported successfully. Task will end.",
+        )
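A hedged usage sketch of the new tool class, assuming the klaude_code modules imported above are available; the example schema itself is invented for illustration.

```python
from klaude_code.core.tool import ReportBackTool

# A parent agent might hand over a schema with uppercase type names (as some models emit);
# for_schema() normalizes them to lowercase JSON Schema types before exposing the tool.
raw_schema = {
    "type": "OBJECT",
    "properties": {
        "summary": {"type": "STRING"},
        "files": {"type": "ARRAY", "items": {"type": "STRING"}},
    },
    "required": ["summary"],
}

tool_cls = ReportBackTool.for_schema(raw_schema)
tool_schema = tool_cls.schema()
print(tool_schema.name)                # the tools.REPORT_BACK constant
print(tool_schema.parameters["type"])  # "object" (normalized)
```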
klaude_code/core/tool/sub_agent_tool.py
CHANGED

@@ -63,12 +63,18 @@ class SubAgentTool(ToolABC):
         prompt = profile.prompt_builder(args)
         description = args.get("description", "")
 
+        # Extract output_schema if configured
+        output_schema = None
+        if profile.output_schema_arg:
+            output_schema = args.get(profile.output_schema_arg)
+
         try:
             result = await runner(
                 model.SubAgentState(
                     sub_agent_type=profile.name,
                     sub_agent_desc=description,
                     sub_agent_prompt=prompt,
+                    output_schema=output_schema,
                 )
             )
         except asyncio.CancelledError:
klaude_code/core/tool/tool_runner.py
CHANGED

@@ -3,9 +3,10 @@ from collections.abc import AsyncGenerator, Callable, Iterable, Sequence
 from dataclasses import dataclass
 
 from klaude_code import const
+from klaude_code.core.tool.report_back_tool import ReportBackTool
 from klaude_code.core.tool.tool_abc import ToolABC
 from klaude_code.core.tool.truncation import truncate_tool_output
-from klaude_code.protocol import model
+from klaude_code.protocol import model, tools
 from klaude_code.protocol.sub_agent import is_sub_agent_tool
 
 

@@ -19,6 +20,13 @@ async def run_tool(tool_call: model.ToolCallItem, registry: dict[str, type[ToolA
     Returns:
         The result of the tool execution.
     """
+    # Special handling for report_back tool (not registered in global registry)
+    if tool_call.name == tools.REPORT_BACK:
+        tool_result = await ReportBackTool.call(tool_call.arguments)
+        tool_result.call_id = tool_call.call_id
+        tool_result.tool_name = tool_call.name
+        return tool_result
+
     if tool_call.name not in registry:
         return model.ToolResultItem(
             call_id=tool_call.call_id,
klaude_code/core/tool/web/web_search_tool.md
ADDED

@@ -0,0 +1,23 @@
+- Search the web and use the results to inform responses
+- Provides up-to-date information for current events and recent data
+- Returns search result information formatted as search result blocks, including links as markdown hyperlinks
+- Use this tool for accessing information beyond your knowledge cutoff
+- Searches are performed automatically within a single API call
+
+CRITICAL REQUIREMENT - You MUST follow this:
+- After answering the user's question, you MUST include a "Sources:" section at the end of your response
+- In the Sources section, list all relevant URLs from the search results as markdown hyperlinks: [Title](URL)
+- This is MANDATORY - never skip including sources in your response
+- Example format:
+
+[Your answer here]
+
+Sources:
+- [Source Title 1](https://example.com/1)
+- [Source Title 2](https://example.com/2)
+
+Usage notes:
+- Domain filtering is supported to include or block specific websites
+- Web search is only available in the US
+- Account for "Today's date" in <env>. For example, if <env> says "Today's date: 2025-07-01", and the user wants the latest docs, do not use 2024 in the search query. Use 2025.
+