klaude-code 1.2.15__py3-none-any.whl → 1.2.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (59) hide show
  1. klaude_code/cli/main.py +66 -42
  2. klaude_code/cli/runtime.py +34 -13
  3. klaude_code/command/__init__.py +3 -0
  4. klaude_code/command/export_cmd.py +2 -2
  5. klaude_code/command/export_online_cmd.py +149 -0
  6. klaude_code/command/prompt-handoff.md +33 -0
  7. klaude_code/command/thinking_cmd.py +5 -1
  8. klaude_code/config/config.py +20 -21
  9. klaude_code/config/list_model.py +1 -1
  10. klaude_code/const/__init__.py +3 -0
  11. klaude_code/core/executor.py +2 -2
  12. klaude_code/core/manager/llm_clients_builder.py +1 -1
  13. klaude_code/core/manager/sub_agent_manager.py +30 -6
  14. klaude_code/core/prompt.py +15 -13
  15. klaude_code/core/prompts/{prompt-subagent-explore.md → prompt-sub-agent-explore.md} +0 -1
  16. klaude_code/core/prompts/{prompt-subagent-oracle.md → prompt-sub-agent-oracle.md} +1 -2
  17. klaude_code/core/prompts/prompt-sub-agent-web.md +48 -0
  18. klaude_code/core/reminders.py +75 -32
  19. klaude_code/core/task.py +18 -22
  20. klaude_code/core/tool/__init__.py +4 -0
  21. klaude_code/core/tool/report_back_tool.py +84 -0
  22. klaude_code/core/tool/sub_agent_tool.py +6 -0
  23. klaude_code/core/tool/tool_runner.py +9 -1
  24. klaude_code/core/tool/web/web_search_tool.md +23 -0
  25. klaude_code/core/tool/web/web_search_tool.py +126 -0
  26. klaude_code/core/turn.py +45 -4
  27. klaude_code/llm/anthropic/input.py +14 -5
  28. klaude_code/llm/openrouter/input.py +14 -3
  29. klaude_code/llm/responses/input.py +19 -0
  30. klaude_code/protocol/commands.py +1 -0
  31. klaude_code/protocol/events.py +9 -0
  32. klaude_code/protocol/model.py +24 -14
  33. klaude_code/protocol/sub_agent/__init__.py +117 -0
  34. klaude_code/protocol/sub_agent/explore.py +63 -0
  35. klaude_code/protocol/sub_agent/oracle.py +91 -0
  36. klaude_code/protocol/sub_agent/task.py +61 -0
  37. klaude_code/protocol/sub_agent/web.py +78 -0
  38. klaude_code/protocol/tools.py +2 -0
  39. klaude_code/session/export.py +12 -6
  40. klaude_code/session/session.py +12 -2
  41. klaude_code/session/templates/export_session.html +111 -36
  42. klaude_code/ui/modes/repl/completers.py +1 -1
  43. klaude_code/ui/modes/repl/event_handler.py +65 -8
  44. klaude_code/ui/modes/repl/renderer.py +11 -9
  45. klaude_code/ui/renderers/developer.py +18 -7
  46. klaude_code/ui/renderers/metadata.py +24 -12
  47. klaude_code/ui/renderers/sub_agent.py +63 -3
  48. klaude_code/ui/renderers/thinking.py +1 -1
  49. klaude_code/ui/renderers/tools.py +24 -37
  50. klaude_code/ui/rich/markdown.py +20 -48
  51. klaude_code/ui/rich/status.py +61 -17
  52. klaude_code/ui/rich/theme.py +8 -7
  53. {klaude_code-1.2.15.dist-info → klaude_code-1.2.17.dist-info}/METADATA +114 -22
  54. {klaude_code-1.2.15.dist-info → klaude_code-1.2.17.dist-info}/RECORD +57 -48
  55. klaude_code/core/prompts/prompt-subagent-webfetch.md +0 -46
  56. klaude_code/protocol/sub_agent.py +0 -354
  57. /klaude_code/core/prompts/{prompt-subagent.md → prompt-sub-agent.md} +0 -0
  58. {klaude_code-1.2.15.dist-info → klaude_code-1.2.17.dist-info}/WHEEL +0 -0
  59. {klaude_code-1.2.15.dist-info → klaude_code-1.2.17.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,126 @@
1
+ import asyncio
2
+ from dataclasses import dataclass
3
+ from pathlib import Path
4
+
5
+ from pydantic import BaseModel
6
+
7
+ from klaude_code.core.tool.tool_abc import ToolABC, load_desc
8
+ from klaude_code.core.tool.tool_registry import register
9
+ from klaude_code.protocol import llm_param, model, tools
10
+
11
# Number of results returned when the caller omits max_results.
DEFAULT_MAX_RESULTS = 10
# Hard upper bound; call_with_args clamps user-supplied max_results to this.
MAX_RESULTS_LIMIT = 20
13
+
14
+
15
@dataclass
class SearchResult:
    """A single search result from DuckDuckGo."""

    title: str  # result page title (ddgs "title" field, "" if absent)
    url: str  # result link (ddgs "href" field, "" if absent)
    snippet: str  # short body text (ddgs "body" field, "" if absent)
    position: int  # 1-based rank within the returned result list
23
+
24
+
25
def _search_duckduckgo(query: str, max_results: int) -> list[SearchResult]:
    """Perform a web search using ddgs library."""
    from ddgs import DDGS  # type: ignore

    # Materialize the raw hits while the client is open, then map them
    # onto SearchResult records with a 1-based rank.
    with DDGS() as ddgs:
        raw_hits = list(ddgs.text(query, max_results=max_results))

    return [
        SearchResult(
            title=hit.get("title", ""),
            url=hit.get("href", ""),
            snippet=hit.get("body", ""),
            position=rank,
        )
        for rank, hit in enumerate(raw_hits, start=1)
    ]
43
+
44
+
45
+ def _format_results(results: list[SearchResult]) -> str:
46
+ """Format search results for LLM consumption."""
47
+ if not results:
48
+ return (
49
+ "No results were found for your search query. "
50
+ "Please try rephrasing your search or using different keywords."
51
+ )
52
+
53
+ lines = [f"Found {len(results)} search results:\n"]
54
+
55
+ for result in results:
56
+ lines.append(f"{result.position}. {result.title}")
57
+ lines.append(f" URL: {result.url}")
58
+ lines.append(f" Summary: {result.snippet}\n")
59
+
60
+ return "\n".join(lines)
61
+
62
+
63
@register(tools.WEB_SEARCH)
class WebSearchTool(ToolABC):
    """Web-search tool backed by DuckDuckGo (via the ddgs library)."""

    @classmethod
    def schema(cls) -> llm_param.ToolSchema:
        """Build the tool schema advertised to the model.

        The long-form description is loaded from web_search_tool.md
        located next to this module.
        """
        return llm_param.ToolSchema(
            name=tools.WEB_SEARCH,
            type="function",
            description=load_desc(Path(__file__).parent / "web_search_tool.md"),
            parameters={
                "type": "object",
                "properties": {
                    "query": {
                        "type": "string",
                        "description": "The search query to use",
                    },
                    "max_results": {
                        "type": "integer",
                        "description": f"Maximum number of results to return (default: {DEFAULT_MAX_RESULTS}, max: {MAX_RESULTS_LIMIT})",
                    },
                },
                "required": ["query"],
            },
        )

    class WebSearchArguments(BaseModel):
        # Pydantic model used to validate the raw JSON arguments from the model.
        query: str
        max_results: int = DEFAULT_MAX_RESULTS

    @classmethod
    async def call(cls, arguments: str) -> model.ToolResultItem:
        """Validate the raw JSON `arguments` string and dispatch to call_with_args.

        Validation failures are reported as an error ToolResultItem rather
        than raised (pydantic's ValidationError subclasses ValueError).
        """
        try:
            args = WebSearchTool.WebSearchArguments.model_validate_json(arguments)
        except ValueError as e:
            return model.ToolResultItem(
                status="error",
                output=f"Invalid arguments: {e}",
            )
        return await cls.call_with_args(args)

    @classmethod
    async def call_with_args(cls, args: WebSearchArguments) -> model.ToolResultItem:
        """Run the search with validated arguments and format the outcome."""
        query = args.query.strip()
        if not query:
            return model.ToolResultItem(
                status="error",
                output="Query cannot be empty",
            )

        # Clamp to [1, MAX_RESULTS_LIMIT] regardless of what was requested.
        max_results = min(max(args.max_results, 1), MAX_RESULTS_LIMIT)

        try:
            # ddgs performs blocking network I/O; keep it off the event loop.
            results = await asyncio.to_thread(_search_duckduckgo, query, max_results)
            formatted = _format_results(results)

            return model.ToolResultItem(
                status="success",
                output=formatted,
            )

        except Exception as e:
            # Best effort: surface any search failure to the model as an error result.
            return model.ToolResultItem(
                status="error",
                output=f"Search failed: {e}",
            )
klaude_code/core/turn.py CHANGED
@@ -1,7 +1,7 @@
1
1
  from __future__ import annotations
2
2
 
3
3
  from collections.abc import AsyncGenerator
4
- from dataclasses import dataclass
4
+ from dataclasses import dataclass, field
5
5
  from typing import TYPE_CHECKING
6
6
 
7
7
  from klaude_code.core.tool import ToolABC, tool_context
@@ -17,7 +17,7 @@ from klaude_code.core.tool.tool_runner import (
17
17
  ToolExecutorEvent,
18
18
  )
19
19
  from klaude_code.llm import LLMClientABC
20
- from klaude_code.protocol import events, llm_param, model
20
+ from klaude_code.protocol import events, llm_param, model, tools
21
21
  from klaude_code.trace import DebugType, log_debug
22
22
 
23
23
 
@@ -46,6 +46,7 @@ class TurnResult:
46
46
  assistant_message: model.AssistantMessageItem | None
47
47
  tool_calls: list[model.ToolCallItem]
48
48
  stream_error: model.StreamErrorItem | None
49
+ report_back_result: str | None = field(default=None)
49
50
 
50
51
 
51
52
  def build_events_from_tool_executor_event(session_id: str, event: ToolExecutorEvent) -> list[events.Event]:
@@ -101,8 +102,38 @@ class TurnExecutor:
101
102
  self._turn_result: TurnResult | None = None
102
103
 
103
104
  @property
104
- def has_tool_call(self) -> bool:
105
- return bool(self._turn_result and self._turn_result.tool_calls)
105
+ def report_back_result(self) -> str | None:
106
+ return self._turn_result.report_back_result if self._turn_result else None
107
+
108
+ @property
109
+ def task_finished(self) -> bool:
110
+ """Check if this turn indicates the task should end.
111
+
112
+ Task ends when there are no tool calls or report_back was called.
113
+ """
114
+ if self._turn_result is None:
115
+ return True
116
+ if not self._turn_result.tool_calls:
117
+ return True
118
+ return self._turn_result.report_back_result is not None
119
+
120
+ @property
121
+ def task_result(self) -> str:
122
+ """Get the task result from this turn.
123
+
124
+ Returns report_back result if available, otherwise returns
125
+ the assistant message content.
126
+ """
127
+ if self._turn_result is not None and self._turn_result.report_back_result is not None:
128
+ return self._turn_result.report_back_result
129
+ if self._turn_result is not None and self._turn_result.assistant_message is not None:
130
+ return self._turn_result.assistant_message.content or ""
131
+ return ""
132
+
133
+ @property
134
+ def has_structured_output(self) -> bool:
135
+ """Check if the task result is structured output from report_back."""
136
+ return bool(self._turn_result and self._turn_result.report_back_result)
106
137
 
107
138
  def cancel(self) -> list[events.Event]:
108
139
  """Cancel running tools and return any resulting events."""
@@ -143,11 +174,21 @@ class TurnExecutor:
143
174
  self._append_success_history(self._turn_result)
144
175
 
145
176
  if self._turn_result.tool_calls:
177
+ # Check for report_back before running tools
178
+ self._detect_report_back(self._turn_result)
179
+
146
180
  async for ui_event in self._run_tool_executor(self._turn_result.tool_calls):
147
181
  yield ui_event
148
182
 
149
183
  yield events.TurnEndEvent(session_id=session_ctx.session_id)
150
184
 
185
+ def _detect_report_back(self, turn_result: TurnResult) -> None:
186
+ """Detect report_back tool call and store its arguments as JSON string."""
187
+ for tool_call in turn_result.tool_calls:
188
+ if tool_call.name == tools.REPORT_BACK:
189
+ turn_result.report_back_result = tool_call.arguments
190
+ break
191
+
151
192
  async def _consume_llm_stream(self, turn_result: TurnResult) -> AsyncGenerator[events.Event]:
152
193
  """Stream events from LLM and update turn_result in place."""
153
194
 
@@ -104,18 +104,22 @@ def _tool_groups_to_message(groups: list[ToolGroup]) -> BetaMessageParam:
104
104
  def _assistant_group_to_message(group: AssistantGroup, model_name: str | None) -> BetaMessageParam:
105
105
  content: list[dict[str, object]] = []
106
106
  current_reasoning_content: str | None = None
107
+ degraded_thinking_texts: list[str] = []
107
108
 
108
109
  # Process reasoning items in original order so that text and
109
110
  # encrypted parts are paired correctly for the given model.
111
+ # For cross-model scenarios, degrade thinking to plain text.
110
112
  for item in group.reasoning_items:
111
113
  if isinstance(item, model.ReasoningTextItem):
112
114
  if model_name != item.model:
113
- continue
114
- current_reasoning_content = item.content
115
+ # Cross-model: collect thinking text for degradation
116
+ if item.content:
117
+ degraded_thinking_texts.append(item.content)
118
+ else:
119
+ current_reasoning_content = item.content
115
120
  else:
116
- if model_name != item.model:
117
- continue
118
- if item.encrypted_content and len(item.encrypted_content) > 0:
121
+ # Same model: preserve signature
122
+ if model_name == item.model and item.encrypted_content and len(item.encrypted_content) > 0:
119
123
  content.append(
120
124
  {
121
125
  "type": "thinking",
@@ -131,6 +135,11 @@ def _assistant_group_to_message(group: AssistantGroup, model_name: str | None) -
131
135
  if len(current_reasoning_content or "") > 0:
132
136
  content.insert(0, {"type": "thinking", "thinking": current_reasoning_content})
133
137
 
138
+ # Cross-model: degrade thinking to plain text with <thinking> tags
139
+ if degraded_thinking_texts:
140
+ degraded_text = "<thinking>\n" + "\n".join(degraded_thinking_texts) + "\n</thinking>"
141
+ content.insert(0, {"type": "text", "text": degraded_text})
142
+
134
143
  if group.text_content:
135
144
  content.append({"type": "text", "text": group.text_content})
136
145
 
@@ -28,9 +28,6 @@ def is_gemini_model(model_name: str | None) -> bool:
28
28
  def _assistant_group_to_message(group: AssistantGroup, model_name: str | None) -> chat.ChatCompletionMessageParam:
29
29
  assistant_message: dict[str, object] = {"role": "assistant"}
30
30
 
31
- if group.text_content:
32
- assistant_message["content"] = group.text_content
33
-
34
31
  if group.tool_calls:
35
32
  assistant_message["tool_calls"] = [
36
33
  {
@@ -48,9 +45,14 @@ def _assistant_group_to_message(group: AssistantGroup, model_name: str | None) -
48
45
  # The order of items in reasoning_details must match the original
49
46
  # stream order from the provider, so we iterate reasoning_items
50
47
  # instead of the separated reasoning_text / reasoning_encrypted lists.
48
+ # For cross-model scenarios, degrade thinking to plain text.
51
49
  reasoning_details: list[dict[str, object]] = []
50
+ degraded_thinking_texts: list[str] = []
52
51
  for item in group.reasoning_items:
53
52
  if model_name != item.model:
53
+ # Cross-model: collect thinking text for degradation
54
+ if isinstance(item, model.ReasoningTextItem) and item.content:
55
+ degraded_thinking_texts.append(item.content)
54
56
  continue
55
57
  if isinstance(item, model.ReasoningEncryptedItem):
56
58
  if item.encrypted_content and len(item.encrypted_content) > 0:
@@ -75,6 +77,15 @@ def _assistant_group_to_message(group: AssistantGroup, model_name: str | None) -
75
77
  if reasoning_details:
76
78
  assistant_message["reasoning_details"] = reasoning_details
77
79
 
80
+ # Build content with optional degraded thinking prefix
81
+ content_parts: list[str] = []
82
+ if degraded_thinking_texts:
83
+ content_parts.append("<thinking>\n" + "\n".join(degraded_thinking_texts) + "\n</thinking>")
84
+ if group.text_content:
85
+ content_parts.append(group.text_content)
86
+ if content_parts:
87
+ assistant_message["content"] = "\n".join(content_parts)
88
+
78
89
  return assistant_message
79
90
 
80
91
 
@@ -1,5 +1,6 @@
1
1
  # pyright: reportReturnType=false
2
2
  # pyright: reportArgumentType=false
3
+ # pyright: reportAssignmentType=false
3
4
 
4
5
  from typing import Any
5
6
 
@@ -51,6 +52,7 @@ def convert_history_to_input(
51
52
  items: list[responses.ResponseInputItemParam] = []
52
53
 
53
54
  pending_reasoning_text: str | None = None
55
+ degraded_thinking_texts: list[str] = []
54
56
 
55
57
  for item in history:
56
58
  match item:
@@ -60,6 +62,9 @@ def convert_history_to_input(
60
62
  # or we can choose to output it if the next item is NOT reasoning?
61
63
  # For now, based on instructions, we pair them.
62
64
  if model_name != item.model:
65
+ # Cross-model: collect thinking text for degradation
66
+ if item.content:
67
+ degraded_thinking_texts.append(item.content)
63
68
  continue
64
69
  pending_reasoning_text = item.content
65
70
 
@@ -130,6 +135,20 @@ def convert_history_to_input(
130
135
  # Other items may be Metadata
131
136
  continue
132
137
 
138
+ # Cross-model: degrade thinking to plain text with <thinking> tags
139
+ if degraded_thinking_texts:
140
+ degraded_item: responses.ResponseInputItemParam = {
141
+ "type": "message",
142
+ "role": "assistant",
143
+ "content": [
144
+ {
145
+ "type": "output_text",
146
+ "text": "<thinking>\n" + "\n".join(degraded_thinking_texts) + "\n</thinking>",
147
+ }
148
+ ],
149
+ }
150
+ items.insert(0, degraded_item)
151
+
133
152
  return items
134
153
 
135
154
 
@@ -11,6 +11,7 @@ class CommandName(str, Enum):
11
11
  CLEAR = "clear"
12
12
  TERMINAL_SETUP = "terminal-setup"
13
13
  EXPORT = "export"
14
+ EXPORT_ONLINE = "export-online"
14
15
  STATUS = "status"
15
16
  RELEASE_NOTES = "release-notes"
16
17
  THINKING = "thinking"
@@ -26,6 +26,7 @@ class TaskStartEvent(BaseModel):
26
26
  class TaskFinishEvent(BaseModel):
27
27
  session_id: str
28
28
  task_result: str
29
+ has_structured_output: bool = False
29
30
 
30
31
 
31
32
  class TurnStartEvent(BaseModel):
@@ -132,6 +133,13 @@ class TodoChangeEvent(BaseModel):
132
133
  todos: list[model.TodoItem]
133
134
 
134
135
 
136
+ class ContextUsageEvent(BaseModel):
137
+ """Real-time context usage update during task execution."""
138
+
139
+ session_id: str
140
+ context_percent: float # Context usage percentage (0-100)
141
+
142
+
135
143
  HistoryItemEvent = (
136
144
  ThinkingEvent
137
145
  | TaskStartEvent
@@ -177,4 +185,5 @@ Event = (
177
185
  | TurnStartEvent
178
186
  | TurnEndEvent
179
187
  | TurnToolCallStartEvent
188
+ | ContextUsageEvent
180
189
  )
@@ -1,6 +1,6 @@
1
1
  from datetime import datetime
2
2
  from enum import Enum
3
- from typing import Annotated, Literal
3
+ from typing import Annotated, Any, Literal
4
4
 
5
5
  from pydantic import BaseModel, ConfigDict, Field, computed_field
6
6
 
@@ -132,6 +132,7 @@ class AtPatternParseResult(BaseModel):
132
132
  tool_args: str
133
133
  operation: Literal["Read", "List"]
134
134
  images: list["ImageURLPart"] | None = None
135
+ mentioned_in: str | None = None # Parent file that referenced this file
135
136
 
136
137
 
137
138
  class CommandOutput(BaseModel):
@@ -144,6 +145,7 @@ class SubAgentState(BaseModel):
144
145
  sub_agent_type: SubAgentType
145
146
  sub_agent_desc: str
146
147
  sub_agent_prompt: str
148
+ output_schema: dict[str, Any] | None = None
147
149
 
148
150
 
149
151
  """
@@ -327,6 +329,26 @@ class TaskMetadata(BaseModel):
327
329
  task_duration_s: float | None = None
328
330
  turn_count: int = 0
329
331
 
332
    @staticmethod
    def merge_usage(dst: Usage, src: Usage) -> None:
        """Merge src usage into dst usage (in-place).

        Accumulates token counts and cost components. Does not handle
        special fields like throughput_tps, first_token_latency_ms,
        context_size, or context_limit - those require custom logic.
        """
        # Token counts: plain accumulation (total_tokens is a computed field).
        dst.input_tokens += src.input_tokens
        dst.cached_tokens += src.cached_tokens
        dst.reasoning_tokens += src.reasoning_tokens
        dst.output_tokens += src.output_tokens

        # Costs are optional: only touch dst when src carries a value, so an
        # unknown (None) cost is not silently coerced to 0.0 by the merge.
        if src.input_cost is not None:
            dst.input_cost = (dst.input_cost or 0.0) + src.input_cost
        if src.output_cost is not None:
            dst.output_cost = (dst.output_cost or 0.0) + src.output_cost
        if src.cache_read_cost is not None:
            dst.cache_read_cost = (dst.cache_read_cost or 0.0) + src.cache_read_cost
351
+
330
352
  @staticmethod
331
353
  def aggregate_by_model(metadata_list: list["TaskMetadata"]) -> list["TaskMetadata"]:
332
354
  """Aggregate multiple TaskMetadata by (model_name, provider).
@@ -356,19 +378,7 @@ class TaskMetadata(BaseModel):
356
378
  if agg.usage is None:
357
379
  continue
358
380
 
359
- # Accumulate primary token fields (total_tokens is computed)
360
- agg.usage.input_tokens += usage.input_tokens
361
- agg.usage.cached_tokens += usage.cached_tokens
362
- agg.usage.reasoning_tokens += usage.reasoning_tokens
363
- agg.usage.output_tokens += usage.output_tokens
364
-
365
- # Accumulate cost components (total_cost is computed)
366
- if usage.input_cost is not None:
367
- agg.usage.input_cost = (agg.usage.input_cost or 0.0) + usage.input_cost
368
- if usage.output_cost is not None:
369
- agg.usage.output_cost = (agg.usage.output_cost or 0.0) + usage.output_cost
370
- if usage.cache_read_cost is not None:
371
- agg.usage.cache_read_cost = (agg.usage.cache_read_cost or 0.0) + usage.cache_read_cost
381
+ TaskMetadata.merge_usage(agg.usage, usage)
372
382
 
373
383
  # Sort by total_cost descending
374
384
  return sorted(
@@ -0,0 +1,117 @@
1
+ from __future__ import annotations
2
+
3
+ from collections.abc import Callable
4
+ from dataclasses import dataclass, field
5
+ from typing import TYPE_CHECKING, Any
6
+
7
+ from klaude_code.protocol import tools
8
+
9
+ if TYPE_CHECKING:
10
+ from klaude_code.protocol import model
11
+
12
+ AvailabilityPredicate = Callable[[str], bool]
13
+ PromptBuilder = Callable[[dict[str, Any]], str]
14
+
15
+
16
@dataclass
class SubAgentResult:
    """Outcome of a completed sub agent run."""

    task_result: str  # final text (or structured report) produced by the sub agent
    session_id: str  # session the sub agent ran in
    error: bool = False  # True when the run failed
    task_metadata: model.TaskMetadata | None = None  # usage/cost info, if collected
22
+
23
+
24
+ def _default_prompt_builder(args: dict[str, Any]) -> str:
25
+ """Default prompt builder that just returns the 'prompt' field."""
26
+ return args.get("prompt", "")
27
+
28
+
29
@dataclass(frozen=True)
class SubAgentProfile:
    """Metadata describing a sub agent and how it integrates with the system.

    This dataclass contains all the information needed to:
    1. Register the sub agent with the system
    2. Generate the tool schema for the main agent
    3. Build the prompt for the sub agent
    """

    # Identity - single name used for type, tool_name, config_key, and prompt_key
    name: str  # e.g., "Task", "Oracle", "Explore"

    # Tool schema
    description: str  # Tool description shown to the main agent
    parameters: dict[str, Any] = field(
        default_factory=lambda: dict[str, Any](), hash=False
    )  # JSON Schema for tool parameters

    # System prompt
    prompt_file: str = ""  # Resource file path relative to core package (e.g., "prompts/prompt-sub-agent.md")

    # Sub agent configuration
    tool_set: tuple[str, ...] = ()  # Tools available to this sub agent
    prompt_builder: PromptBuilder = _default_prompt_builder  # Builds the sub agent prompt from tool arguments

    # UI display
    active_form: str = ""  # Active form for spinner status (e.g., "Tasking", "Exploring")

    # Availability
    enabled_by_default: bool = True  # profile can be disabled outright
    show_in_main_agent: bool = True  # hidden profiles are registered but not listed as tools
    target_model_filter: AvailabilityPredicate | None = None  # per-model gate, None = no restriction

    # Structured output support: specifies which parameter in the tool schema contains the output schema
    output_schema_arg: str | None = None

    def enabled_for_model(self, model_name: str | None) -> bool:
        """Return True when this profile should be offered for `model_name`.

        A disabled profile is never offered; an unknown model or a missing
        filter imposes no restriction.
        """
        if not self.enabled_by_default:
            return False
        if model_name is None or self.target_model_filter is None:
            return True
        return self.target_model_filter(model_name)
72
+
73
+
74
# Global registry mapping profile name -> SubAgentProfile, populated at import time.
_PROFILES: dict[str, SubAgentProfile] = {}
75
+
76
+
77
def register_sub_agent(profile: SubAgentProfile) -> None:
    """Add `profile` to the global registry, rejecting duplicate names."""
    name = profile.name
    if name in _PROFILES:
        raise ValueError(f"Duplicate sub agent profile: {name}")
    _PROFILES[name] = profile
81
+
82
+
83
def get_sub_agent_profile(sub_agent_type: tools.SubAgentType) -> SubAgentProfile:
    """Return the registered profile for `sub_agent_type`.

    Raises KeyError (chained to the lookup failure) with a descriptive
    message when no such profile has been registered.
    """
    try:
        profile = _PROFILES[sub_agent_type]
    except KeyError as lookup_error:
        raise KeyError(f"Unknown sub agent type: {sub_agent_type}") from lookup_error
    return profile
88
+
89
+
90
def iter_sub_agent_profiles(enabled_only: bool = False, model_name: str | None = None) -> list[SubAgentProfile]:
    """List registered profiles, optionally restricted to those enabled for `model_name`."""
    if enabled_only:
        return [profile for profile in _PROFILES.values() if profile.enabled_for_model(model_name)]
    return list(_PROFILES.values())
95
+
96
+
97
def get_sub_agent_profile_by_tool(tool_name: str) -> SubAgentProfile | None:
    """Return the profile registered under `tool_name`, or None when absent."""
    try:
        return _PROFILES[tool_name]
    except KeyError:
        return None
99
+
100
+
101
def is_sub_agent_tool(tool_name: str) -> bool:
    """True when `tool_name` names a registered sub agent profile."""
    return _PROFILES.get(tool_name) is not None
103
+
104
+
105
def sub_agent_tool_names(enabled_only: bool = False, model_name: str | None = None) -> list[str]:
    """Names of sub agent tools that should be exposed to the main agent."""
    names: list[str] = []
    for profile in iter_sub_agent_profiles(enabled_only=enabled_only, model_name=model_name):
        # Hidden profiles stay registered but are not surfaced as tools.
        if profile.show_in_main_agent:
            names.append(profile.name)
    return names
111
+
112
+
113
+ # Import sub-agent modules to trigger registration
114
+ from klaude_code.protocol.sub_agent import explore as explore # noqa: E402
115
+ from klaude_code.protocol.sub_agent import oracle as oracle # noqa: E402
116
+ from klaude_code.protocol.sub_agent import task as task # noqa: E402
117
+ from klaude_code.protocol.sub_agent import web as web # noqa: E402
@@ -0,0 +1,63 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import Any
4
+
5
+ from klaude_code.protocol import tools
6
+ from klaude_code.protocol.sub_agent import SubAgentProfile, register_sub_agent
7
+
8
# Tool description shown to the main agent for the Explore sub agent.
# NOTE: the trailing backslash after `work?")` previously glued that sentence
# directly onto "When calling..." with no separator; it now ends the line so a
# newline separates the two sentences.
EXPLORE_DESCRIPTION = """\
Spin up a fast agent specialized for exploring codebases. Use this when you need to quickly find files by patterns (eg. "src/components/**/*.tsx"), \
search code for keywords (eg. "API endpoints"), or answer questions about the codebase (eg. "how do API endpoints work?")
When calling this agent, specify the desired thoroughness level: "quick" for basic searches, "medium" for moderate exploration, or "very thorough" for comprehensive analysis across multiple locations and naming conventions.
Always spawn multiple search agents in parallel to maximise speed.

Structured output:
- Provide an `output_format` (JSON Schema) parameter for structured data back from the sub-agent
- Example: `output_format={"type": "object", "properties": {"files": {"type": "array", "items": {"type": "string"}, "description": "List of file paths that match the search criteria, e.g. ['src/main.py', 'src/utils/helper.py']"}}, "required": ["files"]}`\
"""
18
+
19
# JSON Schema for the Explore tool's arguments; `description` and `prompt` are
# required, `thoroughness` and `output_format` are optional.
EXPLORE_PARAMETERS = {
    "type": "object",
    "properties": {
        "description": {
            "type": "string",
            "description": "Short (3-5 words) label for the exploration goal",
        },
        "prompt": {
            "type": "string",
            "description": "The task for the agent to perform",
        },
        "thoroughness": {
            "type": "string",
            "enum": ["quick", "medium", "very thorough"],
            "description": "Controls how deep the sub-agent should search the repo",
        },
        "output_format": {
            "type": "object",
            "description": "Optional JSON Schema for sub-agent structured output",
        },
    },
    "required": ["description", "prompt"],
    "additionalProperties": False,
}
43
+
44
+
45
+ def _explore_prompt_builder(args: dict[str, Any]) -> str:
46
+ """Build the Explore prompt from tool arguments."""
47
+ prompt = args.get("prompt", "").strip()
48
+ thoroughness = args.get("thoroughness", "medium")
49
+ return f"{prompt}\nthoroughness: {thoroughness}"
50
+
51
+
52
# Register the Explore profile at import time so the main agent can offer it
# as a tool (registration happens when this module is imported by the
# sub_agent package __init__).
register_sub_agent(
    SubAgentProfile(
        name="Explore",
        description=EXPLORE_DESCRIPTION,
        parameters=EXPLORE_PARAMETERS,
        prompt_file="prompts/prompt-sub-agent-explore.md",
        tool_set=(tools.BASH, tools.READ),
        prompt_builder=_explore_prompt_builder,
        active_form="Exploring",
        output_schema_arg="output_format",
    )
)