klaude-code 1.2.1__py3-none-any.whl → 1.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (140)
  1. klaude_code/cli/main.py +9 -4
  2. klaude_code/cli/runtime.py +42 -43
  3. klaude_code/command/__init__.py +7 -5
  4. klaude_code/command/clear_cmd.py +6 -29
  5. klaude_code/command/command_abc.py +44 -8
  6. klaude_code/command/diff_cmd.py +33 -27
  7. klaude_code/command/export_cmd.py +18 -26
  8. klaude_code/command/help_cmd.py +10 -8
  9. klaude_code/command/model_cmd.py +11 -40
  10. klaude_code/command/{prompt-update-dev-doc.md → prompt-dev-docs-update.md} +3 -2
  11. klaude_code/command/{prompt-dev-doc.md → prompt-dev-docs.md} +3 -2
  12. klaude_code/command/prompt-init.md +2 -5
  13. klaude_code/command/prompt_command.py +6 -6
  14. klaude_code/command/refresh_cmd.py +4 -5
  15. klaude_code/command/registry.py +16 -19
  16. klaude_code/command/terminal_setup_cmd.py +12 -11
  17. klaude_code/config/__init__.py +4 -0
  18. klaude_code/config/config.py +25 -26
  19. klaude_code/config/list_model.py +8 -3
  20. klaude_code/config/select_model.py +1 -1
  21. klaude_code/const/__init__.py +1 -1
  22. klaude_code/core/__init__.py +0 -3
  23. klaude_code/core/agent.py +25 -50
  24. klaude_code/core/executor.py +268 -101
  25. klaude_code/core/prompt.py +12 -12
  26. klaude_code/core/{prompt → prompts}/prompt-gemini.md +1 -1
  27. klaude_code/core/reminders.py +76 -95
  28. klaude_code/core/task.py +21 -14
  29. klaude_code/core/tool/__init__.py +45 -11
  30. klaude_code/core/tool/file/apply_patch.py +5 -1
  31. klaude_code/core/tool/file/apply_patch_tool.py +11 -13
  32. klaude_code/core/tool/file/edit_tool.py +27 -23
  33. klaude_code/core/tool/file/multi_edit_tool.py +15 -17
  34. klaude_code/core/tool/file/read_tool.py +41 -36
  35. klaude_code/core/tool/file/write_tool.py +13 -15
  36. klaude_code/core/tool/memory/memory_tool.py +85 -68
  37. klaude_code/core/tool/memory/skill_tool.py +10 -12
  38. klaude_code/core/tool/shell/bash_tool.py +24 -22
  39. klaude_code/core/tool/shell/command_safety.py +12 -1
  40. klaude_code/core/tool/sub_agent_tool.py +11 -12
  41. klaude_code/core/tool/todo/todo_write_tool.py +21 -28
  42. klaude_code/core/tool/todo/update_plan_tool.py +14 -24
  43. klaude_code/core/tool/tool_abc.py +3 -4
  44. klaude_code/core/tool/tool_context.py +7 -7
  45. klaude_code/core/tool/tool_registry.py +30 -47
  46. klaude_code/core/tool/tool_runner.py +35 -43
  47. klaude_code/core/tool/truncation.py +14 -20
  48. klaude_code/core/tool/web/mermaid_tool.py +12 -14
  49. klaude_code/core/tool/web/web_fetch_tool.py +15 -17
  50. klaude_code/core/turn.py +19 -7
  51. klaude_code/llm/__init__.py +3 -4
  52. klaude_code/llm/anthropic/client.py +30 -46
  53. klaude_code/llm/anthropic/input.py +4 -11
  54. klaude_code/llm/client.py +29 -8
  55. klaude_code/llm/input_common.py +66 -36
  56. klaude_code/llm/openai_compatible/client.py +42 -84
  57. klaude_code/llm/openai_compatible/input.py +11 -16
  58. klaude_code/llm/openai_compatible/tool_call_accumulator.py +2 -2
  59. klaude_code/llm/openrouter/client.py +40 -289
  60. klaude_code/llm/openrouter/input.py +13 -35
  61. klaude_code/llm/openrouter/reasoning_handler.py +209 -0
  62. klaude_code/llm/registry.py +5 -75
  63. klaude_code/llm/responses/client.py +34 -55
  64. klaude_code/llm/responses/input.py +24 -26
  65. klaude_code/llm/usage.py +109 -0
  66. klaude_code/protocol/__init__.py +4 -0
  67. klaude_code/protocol/events.py +3 -2
  68. klaude_code/protocol/{llm_parameter.py → llm_param.py} +12 -32
  69. klaude_code/protocol/model.py +49 -4
  70. klaude_code/protocol/op.py +18 -16
  71. klaude_code/protocol/op_handler.py +28 -0
  72. klaude_code/{core → protocol}/sub_agent.py +7 -0
  73. klaude_code/session/export.py +150 -70
  74. klaude_code/session/session.py +28 -14
  75. klaude_code/session/templates/export_session.html +180 -42
  76. klaude_code/trace/__init__.py +2 -2
  77. klaude_code/trace/log.py +11 -5
  78. klaude_code/ui/__init__.py +91 -8
  79. klaude_code/ui/core/__init__.py +1 -0
  80. klaude_code/ui/core/display.py +103 -0
  81. klaude_code/ui/core/input.py +71 -0
  82. klaude_code/ui/modes/__init__.py +1 -0
  83. klaude_code/ui/modes/debug/__init__.py +1 -0
  84. klaude_code/ui/{base/debug_event_display.py → modes/debug/display.py} +9 -5
  85. klaude_code/ui/modes/exec/__init__.py +1 -0
  86. klaude_code/ui/{base/exec_display.py → modes/exec/display.py} +28 -2
  87. klaude_code/ui/{repl → modes/repl}/__init__.py +5 -6
  88. klaude_code/ui/modes/repl/clipboard.py +152 -0
  89. klaude_code/ui/modes/repl/completers.py +429 -0
  90. klaude_code/ui/modes/repl/display.py +60 -0
  91. klaude_code/ui/modes/repl/event_handler.py +375 -0
  92. klaude_code/ui/modes/repl/input_prompt_toolkit.py +198 -0
  93. klaude_code/ui/modes/repl/key_bindings.py +170 -0
  94. klaude_code/ui/{repl → modes/repl}/renderer.py +109 -132
  95. klaude_code/ui/renderers/assistant.py +21 -0
  96. klaude_code/ui/renderers/common.py +0 -16
  97. klaude_code/ui/renderers/developer.py +18 -18
  98. klaude_code/ui/renderers/diffs.py +36 -14
  99. klaude_code/ui/renderers/errors.py +1 -1
  100. klaude_code/ui/renderers/metadata.py +50 -27
  101. klaude_code/ui/renderers/sub_agent.py +43 -9
  102. klaude_code/ui/renderers/thinking.py +33 -1
  103. klaude_code/ui/renderers/tools.py +212 -20
  104. klaude_code/ui/renderers/user_input.py +19 -23
  105. klaude_code/ui/rich/__init__.py +1 -0
  106. klaude_code/ui/{rich_ext → rich}/searchable_text.py +3 -1
  107. klaude_code/ui/{renderers → rich}/status.py +29 -18
  108. klaude_code/ui/{base → rich}/theme.py +8 -2
  109. klaude_code/ui/terminal/__init__.py +1 -0
  110. klaude_code/ui/{base/terminal_color.py → terminal/color.py} +4 -1
  111. klaude_code/ui/{base/terminal_control.py → terminal/control.py} +1 -0
  112. klaude_code/ui/{base/terminal_notifier.py → terminal/notifier.py} +5 -2
  113. klaude_code/ui/utils/__init__.py +1 -0
  114. klaude_code/ui/{base/utils.py → utils/common.py} +35 -3
  115. {klaude_code-1.2.1.dist-info → klaude_code-1.2.3.dist-info}/METADATA +1 -1
  116. klaude_code-1.2.3.dist-info/RECORD +161 -0
  117. klaude_code/core/clipboard_manifest.py +0 -124
  118. klaude_code/llm/openrouter/tool_call_accumulator.py +0 -80
  119. klaude_code/ui/base/__init__.py +0 -1
  120. klaude_code/ui/base/display_abc.py +0 -36
  121. klaude_code/ui/base/input_abc.py +0 -20
  122. klaude_code/ui/repl/display.py +0 -36
  123. klaude_code/ui/repl/event_handler.py +0 -247
  124. klaude_code/ui/repl/input.py +0 -773
  125. klaude_code/ui/rich_ext/__init__.py +0 -1
  126. klaude_code-1.2.1.dist-info/RECORD +0 -151
  127. /klaude_code/core/{prompt → prompts}/prompt-claude-code.md +0 -0
  128. /klaude_code/core/{prompt → prompts}/prompt-codex.md +0 -0
  129. /klaude_code/core/{prompt → prompts}/prompt-subagent-explore.md +0 -0
  130. /klaude_code/core/{prompt → prompts}/prompt-subagent-oracle.md +0 -0
  131. /klaude_code/core/{prompt → prompts}/prompt-subagent-webfetch.md +0 -0
  132. /klaude_code/core/{prompt → prompts}/prompt-subagent.md +0 -0
  133. /klaude_code/ui/{base → core}/stage_manager.py +0 -0
  134. /klaude_code/ui/{rich_ext → rich}/live.py +0 -0
  135. /klaude_code/ui/{rich_ext → rich}/markdown.py +0 -0
  136. /klaude_code/ui/{rich_ext → rich}/quote.py +0 -0
  137. /klaude_code/ui/{base → terminal}/progress_bar.py +0 -0
  138. /klaude_code/ui/{base → utils}/debouncer.py +0 -0
  139. {klaude_code-1.2.1.dist-info → klaude_code-1.2.3.dist-info}/WHEEL +0 -0
  140. {klaude_code-1.2.1.dist-info → klaude_code-1.2.3.dist-info}/entry_points.txt +0 -0
klaude_code/llm/usage.py
@@ -0,0 +1,109 @@
+ import time
+
+ import openai.types
+
+ from klaude_code.protocol import llm_param, model
+
+
+ def calculate_cost(usage: model.Usage, cost_config: llm_param.Cost | None) -> None:
+     """Calculate and set cost fields on usage based on cost configuration.
+
+     Note: input_tokens includes cached_tokens, so we need to subtract cached_tokens
+     to get the actual non-cached input tokens for cost calculation.
+     """
+     if cost_config is None:
+         return
+
+     # Non-cached input tokens cost
+     non_cached_input = usage.input_tokens - usage.cached_tokens
+     usage.input_cost = (non_cached_input / 1_000_000) * cost_config.input
+
+     # Output tokens cost (includes reasoning tokens)
+     usage.output_cost = (usage.output_tokens / 1_000_000) * cost_config.output
+
+     # Cache read cost
+     usage.cache_read_cost = (usage.cached_tokens / 1_000_000) * cost_config.cache_read
+
+     # Total cost
+     usage.total_cost = usage.input_cost + usage.output_cost + usage.cache_read_cost
+
+
+ class MetadataTracker:
+     """Tracks timing and metadata for LLM responses."""
+
+     def __init__(self, cost_config: llm_param.Cost | None = None) -> None:
+         self._request_start_time: float = time.time()
+         self._first_token_time: float | None = None
+         self._last_token_time: float | None = None
+         self._metadata_item = model.ResponseMetadataItem()
+         self._cost_config = cost_config
+
+     @property
+     def metadata_item(self) -> model.ResponseMetadataItem:
+         return self._metadata_item
+
+     @property
+     def first_token_time(self) -> float | None:
+         return self._first_token_time
+
+     @property
+     def last_token_time(self) -> float | None:
+         return self._last_token_time
+
+     def record_token(self) -> None:
+         """Record a token arrival, updating first/last token times."""
+         now = time.time()
+         if self._first_token_time is None:
+             self._first_token_time = now
+         self._last_token_time = now
+
+     def set_usage(self, usage: model.Usage) -> None:
+         """Set the usage information."""
+         self._metadata_item.usage = usage
+
+     def set_model_name(self, model_name: str) -> None:
+         """Set the model name."""
+         self._metadata_item.model_name = model_name
+
+     def set_provider(self, provider: str) -> None:
+         """Set the provider name."""
+         self._metadata_item.provider = provider
+
+     def set_response_id(self, response_id: str | None) -> None:
+         """Set the response ID."""
+         self._metadata_item.response_id = response_id
+
+     def finalize(self) -> model.ResponseMetadataItem:
+         """Finalize and return the metadata item with calculated performance metrics."""
+         if self._metadata_item.usage and self._first_token_time is not None:
+             self._metadata_item.usage.first_token_latency_ms = (
+                 self._first_token_time - self._request_start_time
+             ) * 1000
+
+             if self._last_token_time is not None and self._metadata_item.usage.output_tokens > 0:
+                 time_duration = self._last_token_time - self._first_token_time
+                 if time_duration >= 0.15:
+                     self._metadata_item.usage.throughput_tps = self._metadata_item.usage.output_tokens / time_duration
+
+         # Calculate cost if config is available
+         if self._metadata_item.usage:
+             calculate_cost(self._metadata_item.usage, self._cost_config)
+
+         return self._metadata_item
+
+
+ def convert_usage(usage: openai.types.CompletionUsage, context_limit: int | None = None) -> model.Usage:
+     """Convert OpenAI CompletionUsage to internal Usage model."""
+     total_tokens = usage.total_tokens
+     context_usage_percent = (total_tokens / context_limit) * 100 if context_limit else None
+     return model.Usage(
+         input_tokens=usage.prompt_tokens,
+         cached_tokens=(usage.prompt_tokens_details.cached_tokens if usage.prompt_tokens_details else 0) or 0,
+         reasoning_tokens=(usage.completion_tokens_details.reasoning_tokens if usage.completion_tokens_details else 0)
+         or 0,
+         output_tokens=usage.completion_tokens,
+         total_tokens=total_tokens,
+         context_usage_percent=context_usage_percent,
+         throughput_tps=None,
+         first_token_latency_ms=None,
+     )
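
For orientation, a minimal sketch of how this new module might be driven by a client. The import path follows the file list above; the call sequence and placeholder values are assumptions for illustration, not code taken from the wheel.

import openai.types

from klaude_code.llm import usage as usage_mod

# One tracker per request: record_token() as chunks stream in, attach the converted
# usage, then finalize() to fill first_token_latency_ms, throughput_tps and costs.
tracker = usage_mod.MetadataTracker(cost_config=None)  # pass a llm_param.Cost to enable cost calculation
tracker.set_model_name("some-model")  # placeholder name
tracker.record_token()

raw = openai.types.CompletionUsage(prompt_tokens=1200, completion_tokens=300, total_tokens=1500)
tracker.set_usage(usage_mod.convert_usage(raw, context_limit=200_000))
metadata = tracker.finalize()
print(metadata.usage.total_tokens, metadata.usage.context_usage_percent)  # 1500, 0.75
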
klaude_code/protocol/__init__.py
@@ -0,0 +1,4 @@
+ from klaude_code.protocol import commands as commands
+ from klaude_code.protocol import events as events
+ from klaude_code.protocol import model as model
+ from klaude_code.protocol import op as op
klaude_code/protocol/events.py
@@ -2,7 +2,7 @@ from typing import Literal
 
  from pydantic import BaseModel
 
- from klaude_code.protocol import llm_parameter, model
+ from klaude_code.protocol import llm_param, model
 
  """
  Event is how Agent Executor and UI Display communicate.
@@ -103,11 +103,12 @@ class ResponseMetadataEvent(BaseModel):
  class UserMessageEvent(BaseModel):
      session_id: str
      content: str
+     images: list[model.ImageURLPart] | None = None
 
 
  class WelcomeEvent(BaseModel):
      work_dir: str
-     llm_config: llm_parameter.LLMConfigParameter
+     llm_config: llm_param.LLMConfigParameter
 
 
  class InterruptEvent(BaseModel):
klaude_code/protocol/{llm_parameter.py → llm_param.py}
@@ -4,7 +4,6 @@ from typing import Any, Literal
  from pydantic import BaseModel
  from pydantic.json_schema import JsonSchemaValue
 
- from klaude_code.const import DEFAULT_ANTHROPIC_THINKING_BUDGET_TOKENS, DEFAULT_MAX_TOKENS, DEFAULT_TEMPERATURE
 
  from klaude_code.protocol.model import ConversationItem
 
@@ -36,6 +35,15 @@ class Thinking(BaseModel):
      budget_tokens: int | None = None
 
 
+ class Cost(BaseModel):
+     """Cost configuration per million tokens (USD)."""
+
+     input: float  # Input token price per million tokens
+     output: float  # Output token price per million tokens
+     cache_read: float = 0.0  # Cache read price per million tokens
+     cache_write: float = 0.0  # Cache write price per million tokens (ignored in calculation for now)
+
+
  class OpenRouterProviderRouting(BaseModel):
      """
      https://openrouter.ai/docs/features/provider-routing#json-schema-for-provider-preferences
@@ -100,6 +108,9 @@ class LLMConfigModelParameter(BaseModel):
      # OpenRouter Provider Routing Preferences
      provider_routing: OpenRouterProviderRouting | None = None
 
+     # Cost configuration (USD per million tokens)
+     cost: Cost | None = None
+
 
  class LLMConfigParameter(LLMConfigProviderParameter, LLMConfigModelParameter):
      """
@@ -134,34 +145,3 @@ class LLMCallParameter(LLMConfigModelParameter):
      previous_response_id: str | None = None
 
      session_id: str | None = None
-
-
- def apply_config_defaults(param: LLMCallParameter, config: LLMConfigParameter) -> LLMCallParameter:
-     if param.model is None:
-         param.model = config.model
-     if param.temperature is None:
-         param.temperature = config.temperature
-     if param.max_tokens is None:
-         param.max_tokens = config.max_tokens
-     if param.context_limit is None:
-         param.context_limit = config.context_limit
-     if param.verbosity is None:
-         param.verbosity = config.verbosity
-     if param.thinking is None:
-         param.thinking = config.thinking
-     if param.provider_routing is None:
-         param.provider_routing = config.provider_routing
-
-     if param.model is None:
-         raise ValueError("Model is required")
-     if param.max_tokens is None:
-         param.max_tokens = DEFAULT_MAX_TOKENS
-     if param.temperature is None:
-         param.temperature = DEFAULT_TEMPERATURE
-     if param.thinking is not None and param.thinking.type == "enabled" and param.thinking.budget_tokens is None:
-         param.thinking.budget_tokens = DEFAULT_ANTHROPIC_THINKING_BUDGET_TOKENS
-
-     if param.model and "gpt-5" in param.model:
-         param.temperature = 1.0  # Required for GPT-5
-
-     return param
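
A short worked example of the new Cost model, using made-up prices (USD per million tokens), to make the arithmetic in calculate_cost concrete:

from klaude_code.protocol import llm_param

# Hypothetical prices: $3.00 input / $15.00 output / $0.30 cache read per million tokens.
cost = llm_param.Cost(input=3.0, output=15.0, cache_read=0.3)

# For 12,000 input tokens (2,000 of them served from cache) and 1,500 output tokens:
#   input_cost      = (12_000 - 2_000) / 1_000_000 * 3.00  = 0.0300
#   output_cost     =   1_500          / 1_000_000 * 15.00 = 0.0225
#   cache_read_cost =   2_000          / 1_000_000 * 0.30  = 0.0006
#   total_cost      = 0.0531
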
klaude_code/protocol/model.py
@@ -1,7 +1,8 @@
+ from datetime import datetime
  from enum import Enum
  from typing import Literal
 
- from pydantic import BaseModel
+ from pydantic import BaseModel, Field
 
  from klaude_code.protocol.commands import CommandName
  from klaude_code.protocol.tools import SubAgentType
@@ -20,6 +21,12 @@ class Usage(BaseModel):
      throughput_tps: float | None = None
      first_token_latency_ms: float | None = None
 
+     # Cost in USD (calculated from token counts and cost config)
+     input_cost: float | None = None  # Cost for non-cached input tokens
+     output_cost: float | None = None  # Cost for output tokens (including reasoning)
+     cache_read_cost: float | None = None  # Cost for cached tokens
+     total_cost: float | None = None  # Total cost (input + output + cache_read)
+
 
  class TodoItem(BaseModel):
      content: str
@@ -113,16 +120,18 @@ When adding a new item, please also modify the following:
 
  class StartItem(BaseModel):
      response_id: str
+     created_at: datetime = Field(default_factory=datetime.now)
 
 
  class InterruptItem(BaseModel):
-     pass
+     created_at: datetime = Field(default_factory=datetime.now)
 
 
  class SystemMessageItem(BaseModel):
      id: str | None = None
      role: RoleType = "system"
      content: str | None = None
+     created_at: datetime = Field(default_factory=datetime.now)
 
 
  class DeveloperMessageItem(BaseModel):
@@ -130,6 +139,7 @@ class DeveloperMessageItem(BaseModel):
      role: RoleType = "developer"
      content: str | None = None  # For LLM input
      images: list["ImageURLPart"] | None = None
+     created_at: datetime = Field(default_factory=datetime.now)
 
      # Special fields for reminders UI
      memory_paths: list[str] | None = None
@@ -137,7 +147,7 @@ class DeveloperMessageItem(BaseModel):
      todo_use: bool | None = None
      at_files: list[AtPatternParseResult] | None = None
      command_output: CommandOutput | None = None
-     clipboard_images: list[str] | None = None
+     user_image_count: int | None = None
 
 
  class ImageURLPart(BaseModel):
@@ -148,11 +158,23 @@ class ImageURLPart(BaseModel):
      image_url: ImageURL
 
 
+ class UserInputPayload(BaseModel):
+     """Structured payload for user input containing text and optional images.
+
+     This is the unified data structure for user input across the entire
+     UI -> CLI -> Executor -> Agent -> Task chain.
+     """
+
+     text: str
+     images: list["ImageURLPart"] | None = None
+
+
  class UserMessageItem(BaseModel):
      id: str | None = None
      role: RoleType = "user"
      content: str | None = None
      images: list[ImageURLPart] | None = None
+     created_at: datetime = Field(default_factory=datetime.now)
 
 
  class AssistantMessageItem(BaseModel):
@@ -160,6 +182,7 @@ class AssistantMessageItem(BaseModel):
      role: RoleType = "assistant"
      content: str | None = None
      response_id: str | None = None
+     created_at: datetime = Field(default_factory=datetime.now)
 
 
  class ReasoningTextItem(BaseModel):
@@ -167,6 +190,7 @@ class ReasoningTextItem(BaseModel):
      response_id: str | None = None
      content: str
      model: str | None = None
+     created_at: datetime = Field(default_factory=datetime.now)
 
 
  class ReasoningEncryptedItem(BaseModel):
@@ -175,6 +199,20 @@ class ReasoningEncryptedItem(BaseModel):
      encrypted_content: str  # OpenAI encrypted content or Anthropic thinking signature
      format: str | None = None
      model: str | None
+     created_at: datetime = Field(default_factory=datetime.now)
+
+
+ class ToolCallStartItem(BaseModel):
+     """Transient streaming signal when LLM starts a tool call.
+
+     This is NOT persisted to conversation history. Used only for
+     real-time UI feedback (e.g., "Calling Bash ...").
+     """
+
+     response_id: str | None = None
+     call_id: str
+     name: str
+     created_at: datetime = Field(default_factory=datetime.now)
 
 
  class ToolCallItem(BaseModel):
@@ -183,6 +221,7 @@ class ToolCallItem(BaseModel):
      call_id: str
      name: str
      arguments: str
+     created_at: datetime = Field(default_factory=datetime.now)
 
 
  class ToolResultItem(BaseModel):
@@ -193,15 +232,18 @@ class ToolResultItem(BaseModel):
      ui_extra: ToolResultUIExtra | None = None  # Extra data for UI display, e.g. diff render
      images: list[ImageURLPart] | None = None
      side_effects: list[ToolSideEffect] | None = None
+     created_at: datetime = Field(default_factory=datetime.now)
 
 
  class AssistantMessageDelta(BaseModel):
      response_id: str | None = None
      content: str
+     created_at: datetime = Field(default_factory=datetime.now)
 
 
  class StreamErrorItem(BaseModel):
      error: str
+     created_at: datetime = Field(default_factory=datetime.now)
 
 
  class ResponseMetadataItem(BaseModel):
@@ -212,6 +254,7 @@ class ResponseMetadataItem(BaseModel):
      task_duration_s: float | None = None
      status: str | None = None
      error_reason: str | None = None
+     created_at: datetime = Field(default_factory=datetime.now)
 
 
  MessageItem = (
@@ -228,7 +271,9 @@ MessageItem = (
 
  StreamItem = AssistantMessageDelta
 
- ConversationItem = StartItem | InterruptItem | StreamErrorItem | StreamItem | MessageItem | ResponseMetadataItem
+ ConversationItem = (
+     StartItem | InterruptItem | StreamErrorItem | StreamItem | MessageItem | ResponseMetadataItem | ToolCallStartItem
+ )
 
 
  def todo_list_str(todos: list[TodoItem]) -> str:
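
For illustration, a small sketch of the new UserInputPayload and the created_at stamps. The call sites are assumed, not taken from the wheel; images are left out because the ImageURL fields are not shown in these hunks.

from klaude_code.protocol import model

payload = model.UserInputPayload(text="summarize the latest changes")  # images default to None
item = model.UserMessageItem(content=payload.text, images=payload.images)
print(item.created_at)  # every conversation item now records when it was created
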
klaude_code/protocol/op.py
@@ -5,15 +5,18 @@ This module defines the operation types and submission structure
  that the executor uses to handle different types of requests.
  """
 
- from abc import ABC, abstractmethod
+ from __future__ import annotations
+
  from enum import Enum
  from typing import TYPE_CHECKING
  from uuid import uuid4
 
  from pydantic import BaseModel, Field
 
+ from klaude_code.protocol.model import UserInputPayload
+
  if TYPE_CHECKING:
-     from klaude_code.core.executor import ExecutorContext
+     from klaude_code.protocol.op_handler import OperationHandler
 
 
  class OperationType(Enum):
@@ -25,28 +28,27 @@ class OperationType(Enum):
      END = "end"
 
 
- class Operation(BaseModel, ABC):
+ class Operation(BaseModel):
      """Base class for all operations that can be submitted to the executor."""
 
      type: OperationType
      id: str = Field(default_factory=lambda: uuid4().hex)
 
-     @abstractmethod
-     async def execute(self, context: "ExecutorContext") -> None:
-         """Execute this operation within the given executor context."""
-         pass
+     async def execute(self, handler: OperationHandler) -> None:
+         """Execute this operation using the given handler."""
+         raise NotImplementedError("Subclasses must implement execute()")
 
 
  class UserInputOperation(Operation):
-     """Operation for handling user text input that should be processed by an agent."""
+     """Operation for handling user input (text and optional images) that should be processed by an agent."""
 
      type: OperationType = OperationType.USER_INPUT
-     content: str
+     input: UserInputPayload
      session_id: str | None = None
 
-     async def execute(self, context: "ExecutorContext") -> None:
+     async def execute(self, handler: OperationHandler) -> None:
          """Execute user input by running it through an agent."""
-         await context.handle_user_input(self)
+         await handler.handle_user_input(self)
 
 
  class InterruptOperation(Operation):
@@ -55,9 +57,9 @@ class InterruptOperation(Operation):
      type: OperationType = OperationType.INTERRUPT
      target_session_id: str | None = None  # If None, interrupt all sessions
 
-     async def execute(self, context: "ExecutorContext") -> None:
+     async def execute(self, handler: OperationHandler) -> None:
          """Execute interrupt by cancelling active tasks."""
-         await context.handle_interrupt(self)
+         await handler.handle_interrupt(self)
 
 
  class InitAgentOperation(Operation):
@@ -66,8 +68,8 @@ class InitAgentOperation(Operation):
      type: OperationType = OperationType.INIT_AGENT
      session_id: str | None = None
 
-     async def execute(self, context: "ExecutorContext") -> None:
-         await context.handle_init_agent(self)
+     async def execute(self, handler: OperationHandler) -> None:
+         await handler.handle_init_agent(self)
 
 
  class EndOperation(Operation):
@@ -75,7 +77,7 @@ class EndOperation(Operation):
 
      type: OperationType = OperationType.END
 
-     async def execute(self, context: "ExecutorContext") -> None:
+     async def execute(self, handler: OperationHandler) -> None:
          """Execute end operation - this is a no-op, just signals the executor to stop."""
          pass
 
klaude_code/protocol/op_handler.py
@@ -0,0 +1,28 @@
+ """
+ Operation handler protocol for the executor system.
+
+ This module defines the protocol that operation handlers must implement.
+ """
+
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING, Protocol
+
+ if TYPE_CHECKING:
+     from klaude_code.protocol.op import InitAgentOperation, InterruptOperation, UserInputOperation
+
+
+ class OperationHandler(Protocol):
+     """Protocol defining the interface for handling operations."""
+
+     async def handle_user_input(self, operation: UserInputOperation) -> None:
+         """Handle a user input operation."""
+         ...
+
+     async def handle_interrupt(self, operation: InterruptOperation) -> None:
+         """Handle an interrupt operation."""
+         ...
+
+     async def handle_init_agent(self, operation: InitAgentOperation) -> None:
+         """Handle an init agent operation."""
+         ...
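
Taken together with op.py above, this is a small double dispatch: operations call back into whatever object structurally satisfies OperationHandler, so op.py no longer imports the executor. A minimal sketch, with the handler bodies and driving code invented purely for illustration:

import asyncio

from klaude_code.protocol import model, op


class EchoHandler:
    """Structurally satisfies the OperationHandler protocol (illustrative only)."""

    async def handle_user_input(self, operation: op.UserInputOperation) -> None:
        print(f"user input for session {operation.session_id}: {operation.input.text}")

    async def handle_interrupt(self, operation: op.InterruptOperation) -> None:
        print(f"interrupt: {operation.target_session_id or 'all sessions'}")

    async def handle_init_agent(self, operation: op.InitAgentOperation) -> None:
        print(f"init agent for session: {operation.session_id}")


async def main() -> None:
    handler = EchoHandler()
    operation = op.UserInputOperation(input=model.UserInputPayload(text="hello"), session_id="s1")
    await operation.execute(handler)  # dispatches to handler.handle_user_input


asyncio.run(main())
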
klaude_code/{core → protocol}/sub_agent.py
@@ -44,6 +44,9 @@ class SubAgentProfile:
      tool_set: tuple[str, ...] = ()  # Tools available to this sub agent
      prompt_builder: PromptBuilder = _default_prompt_builder  # Builds the sub agent prompt from tool arguments
 
+     # UI display
+     active_form: str = ""  # Active form for spinner status (e.g., "Tasking", "Exploring")
+
      # Availability
      enabled_by_default: bool = True
      show_in_main_agent: bool = True
@@ -141,6 +144,7 @@ register_sub_agent(
          description=TASK_DESCRIPTION,
          parameters=TASK_PARAMETERS,
          tool_set=(tools.BASH, tools.READ, tools.EDIT, tools.WRITE),
+         active_form="Tasking",
      )
  )
 
@@ -228,6 +232,7 @@ register_sub_agent(
          parameters=ORACLE_PARAMETERS,
          tool_set=(tools.READ, tools.BASH),
          prompt_builder=_oracle_prompt_builder,
+         active_form="Consulting Oracle",
          target_model_filter=lambda model: ("gpt-5" not in model) and ("gemini-3" not in model),
      )
  )
@@ -279,6 +284,7 @@ register_sub_agent(
          parameters=EXPLORE_PARAMETERS,
          tool_set=(tools.BASH, tools.READ),
          prompt_builder=_explore_prompt_builder,
+         active_form="Exploring",
      )
  )
 
@@ -337,5 +343,6 @@ register_sub_agent(
          parameters=WEB_FETCH_AGENT_PARAMETERS,
          tool_set=(tools.BASH, tools.READ, tools.WEB_FETCH),
          prompt_builder=_web_fetch_prompt_builder,
+         active_form="Fetching Web",
      )
  )
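
The new active_form strings feed the spinner status in the UI. A trivial sketch of the kind of consumer code this enables (assumed, not from the wheel):

def spinner_label(active_form: str) -> str:
    # e.g. "Exploring...", "Consulting Oracle...", with a generic fallback
    return f"{active_form or 'Working'}..."
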