klaude-code 1.2.11-py3-none-any.whl → 1.2.13-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- klaude_code/auth/codex/oauth.py +3 -3
- klaude_code/cli/main.py +5 -5
- klaude_code/cli/runtime.py +19 -27
- klaude_code/cli/session_cmd.py +6 -8
- klaude_code/command/__init__.py +31 -28
- klaude_code/command/clear_cmd.py +0 -2
- klaude_code/command/diff_cmd.py +0 -2
- klaude_code/command/export_cmd.py +3 -5
- klaude_code/command/help_cmd.py +0 -2
- klaude_code/command/model_cmd.py +0 -2
- klaude_code/command/refresh_cmd.py +0 -2
- klaude_code/command/registry.py +5 -9
- klaude_code/command/release_notes_cmd.py +0 -2
- klaude_code/command/status_cmd.py +2 -4
- klaude_code/command/terminal_setup_cmd.py +2 -4
- klaude_code/command/thinking_cmd.py +229 -0
- klaude_code/config/__init__.py +1 -1
- klaude_code/config/list_model.py +1 -1
- klaude_code/config/select_model.py +5 -15
- klaude_code/const/__init__.py +1 -1
- klaude_code/core/agent.py +14 -69
- klaude_code/core/executor.py +11 -10
- klaude_code/core/manager/agent_manager.py +4 -4
- klaude_code/core/manager/llm_clients.py +10 -49
- klaude_code/core/manager/llm_clients_builder.py +8 -21
- klaude_code/core/manager/sub_agent_manager.py +3 -3
- klaude_code/core/prompt.py +3 -3
- klaude_code/core/reminders.py +1 -1
- klaude_code/core/task.py +4 -5
- klaude_code/core/tool/__init__.py +16 -25
- klaude_code/core/tool/file/_utils.py +1 -1
- klaude_code/core/tool/file/apply_patch.py +17 -25
- klaude_code/core/tool/file/apply_patch_tool.py +4 -7
- klaude_code/core/tool/file/edit_tool.py +4 -11
- klaude_code/core/tool/file/multi_edit_tool.py +2 -3
- klaude_code/core/tool/file/read_tool.py +3 -4
- klaude_code/core/tool/file/write_tool.py +2 -3
- klaude_code/core/tool/memory/memory_tool.py +2 -8
- klaude_code/core/tool/memory/skill_loader.py +3 -2
- klaude_code/core/tool/shell/command_safety.py +0 -1
- klaude_code/core/tool/tool_context.py +1 -3
- klaude_code/core/tool/tool_registry.py +2 -1
- klaude_code/core/tool/tool_runner.py +1 -1
- klaude_code/core/tool/truncation.py +2 -5
- klaude_code/core/turn.py +9 -4
- klaude_code/llm/anthropic/client.py +62 -49
- klaude_code/llm/client.py +2 -20
- klaude_code/llm/codex/client.py +51 -32
- klaude_code/llm/input_common.py +2 -2
- klaude_code/llm/openai_compatible/client.py +60 -39
- klaude_code/llm/openai_compatible/stream_processor.py +2 -1
- klaude_code/llm/openrouter/client.py +79 -45
- klaude_code/llm/openrouter/reasoning_handler.py +19 -132
- klaude_code/llm/registry.py +6 -5
- klaude_code/llm/responses/client.py +65 -43
- klaude_code/llm/usage.py +1 -49
- klaude_code/protocol/commands.py +1 -0
- klaude_code/protocol/events.py +7 -0
- klaude_code/protocol/llm_param.py +1 -9
- klaude_code/protocol/model.py +10 -6
- klaude_code/protocol/sub_agent.py +2 -1
- klaude_code/session/export.py +1 -8
- klaude_code/session/selector.py +12 -7
- klaude_code/session/session.py +2 -4
- klaude_code/trace/__init__.py +1 -1
- klaude_code/trace/log.py +1 -1
- klaude_code/ui/__init__.py +4 -9
- klaude_code/ui/core/stage_manager.py +7 -4
- klaude_code/ui/modes/repl/__init__.py +1 -1
- klaude_code/ui/modes/repl/completers.py +6 -7
- klaude_code/ui/modes/repl/display.py +3 -4
- klaude_code/ui/modes/repl/event_handler.py +63 -5
- klaude_code/ui/modes/repl/key_bindings.py +2 -3
- klaude_code/ui/modes/repl/renderer.py +2 -1
- klaude_code/ui/renderers/diffs.py +1 -4
- klaude_code/ui/renderers/metadata.py +1 -12
- klaude_code/ui/rich/markdown.py +3 -3
- klaude_code/ui/rich/searchable_text.py +6 -6
- klaude_code/ui/rich/status.py +3 -4
- klaude_code/ui/rich/theme.py +1 -4
- klaude_code/ui/terminal/control.py +7 -16
- klaude_code/ui/terminal/notifier.py +2 -4
- klaude_code/ui/utils/common.py +1 -1
- klaude_code/ui/utils/debouncer.py +2 -2
- {klaude_code-1.2.11.dist-info → klaude_code-1.2.13.dist-info}/METADATA +1 -1
- {klaude_code-1.2.11.dist-info → klaude_code-1.2.13.dist-info}/RECORD +88 -87
- {klaude_code-1.2.11.dist-info → klaude_code-1.2.13.dist-info}/WHEEL +0 -0
- {klaude_code-1.2.11.dist-info → klaude_code-1.2.13.dist-info}/entry_points.txt +0 -0
klaude_code/protocol/llm_param.py
CHANGED
@@ -28,7 +28,7 @@ class Thinking(BaseModel):
     """
 
     # OpenAI Reasoning Style
-    reasoning_effort: Literal["high", "medium", "low", "minimal", "none"] | None = None
+    reasoning_effort: Literal["high", "medium", "low", "minimal", "none", "xhigh"] | None = None
     reasoning_summary: Literal["auto", "concise", "detailed"] | None = None
 
     # Claude/Gemini Thinking Style
@@ -138,12 +138,4 @@ class LLMCallParameter(LLMConfigModelParameter):
     input: list[ConversationItem]
     system: str | None = None
     tools: list[ToolSchema] | None = None
-
-    stream: Literal[True] = True  # Always True
-
-    # OpenAI Responses
-    include: list[str] | None = None
-    store: bool = True
-    previous_response_id: str | None = None
-
     session_id: str | None = None
klaude_code/protocol/model.py
CHANGED
@@ -20,9 +20,7 @@ class Usage(BaseModel):
     output_tokens: int = 0
 
     # Context window tracking
-
-    context_delta: int | None = None  # Context growth since last task (for cache ratio calculation)
-    last_turn_output_token: int | None = None  # Context growth since last task (for cache ratio calculation)
+    context_size: int | None = None  # Peak total_tokens seen (for context usage display)
     context_limit: int | None = None  # Model's context limit
     max_tokens: int | None = None  # Max output tokens for this request
 
@@ -55,12 +53,12 @@ class Usage(BaseModel):
         """Context usage percentage computed from context_token / (context_limit - max_tokens)."""
         if self.context_limit is None or self.context_limit <= 0:
             return None
-        if self.
+        if self.context_size is None:
             return None
         effective_limit = self.context_limit - (self.max_tokens or const.DEFAULT_MAX_TOKENS)
         if effective_limit <= 0:
             return None
-        return (self.
+        return (self.context_size / effective_limit) * 100
 
 
 class TodoItem(BaseModel):
@@ -298,6 +296,12 @@ class AssistantMessageDelta(BaseModel):
     created_at: datetime = Field(default_factory=datetime.now)
 
 
+class ReasoningTextDelta(BaseModel):
+    response_id: str | None = None
+    content: str
+    created_at: datetime = Field(default_factory=datetime.now)
+
+
 class StreamErrorItem(BaseModel):
     error: str
     created_at: datetime = Field(default_factory=datetime.now)
@@ -394,7 +398,7 @@ MessageItem = (
 )
 
 
-StreamItem = AssistantMessageDelta
+StreamItem = AssistantMessageDelta | ReasoningTextDelta
 
 ConversationItem = (
     StartItem
klaude_code/session/export.py
CHANGED
@@ -194,18 +194,11 @@ def _render_single_metadata(
         input_stat += f"({_format_cost(u.input_cost, u.currency)})"
     parts.append(f'<span class="metadata-stat">{input_stat}</span>')
 
-    # Cached with cost
+    # Cached with cost
     if u.cached_tokens > 0:
         cached_stat = f"cached: {_format_token_count(u.cached_tokens)}"
         if u.cache_read_cost is not None:
             cached_stat += f"({_format_cost(u.cache_read_cost, u.currency)})"
-        # Cache ratio: (cached + context_delta - last_turn_output) / input tokens
-        # Shows how much of the input was cached (not new context growth)
-        if u.input_tokens > 0:
-            context_delta = u.context_delta or 0
-            last_turn_output_token = u.last_turn_output_token or 0
-            cache_ratio = (u.cached_tokens + context_delta - last_turn_output_token) / u.input_tokens * 100
-            cached_stat += f"[{cache_ratio:.0f}%]"
         parts.append(f'<span class="metadata-stat">{cached_stat}</span>')
 
     # Output with cost
klaude_code/session/selector.py
CHANGED
@@ -10,6 +10,11 @@ from .session import Session
 
 
 def resume_select_session() -> str | None:
+    # Column widths
+    UPDATED_AT_WIDTH = 16
+    MSG_COUNT_WIDTH = 3
+    MODEL_WIDTH = 25
+    FIRST_MESSAGE_WIDTH = 50
     sessions = Session.list_sessions()
     if not sessions:
         log("No sessions found for this project.")
@@ -31,20 +36,20 @@ def resume_select_session() -> str | None:
         model_display = s.model_name or "N/A"
 
         title = [
-            ("class:d", f"{_fmt(s.updated_at):<16} "),
-            ("class:b", f"{msg_count_display:>3} "),
+            ("class:d", f"{_fmt(s.updated_at):<{UPDATED_AT_WIDTH}} "),
+            ("class:b", f"{msg_count_display:>{MSG_COUNT_WIDTH}} "),
             (
                 "class:t",
-                f"{model_display[:24] + '…' if len(model_display) > 25 else model_display:<25} ",
+                f"{model_display[: MODEL_WIDTH - 1] + '…' if len(model_display) > MODEL_WIDTH else model_display:<{MODEL_WIDTH}} ",
             ),
             (
                 "class:t",
-                f"{first_user_message.strip().replace('\n', ' ↩ '):<50}",
+                f"{first_user_message.strip().replace('\n', ' ↩ '):<{FIRST_MESSAGE_WIDTH}}",
             ),
         ]
         choices.append(questionary.Choice(title=title, value=s.id))
     return questionary.select(
-        message=f"{' Updated at':<17} {'Msg':>3} {'Model':<25} {'First message':<50}",
+        message=f"{' Updated at':<{UPDATED_AT_WIDTH + 1}} {'Msg':>{MSG_COUNT_WIDTH}} {'Model':<{MODEL_WIDTH}} {'First message':<{FIRST_MESSAGE_WIDTH}}",
         choices=choices,
         pointer="→",
         instruction="↑↓ to move",
@@ -63,8 +68,8 @@ def resume_select_session() -> str | None:
         msg_count_display = "N/A" if s.messages_count == -1 else str(s.messages_count)
         model_display = s.model_name or "N/A"
         print(
-            f"{i}. {_fmt(s.updated_at)} {msg_count_display:>3} "
-            f"{model_display[:24] + '…' if len(model_display) > 25 else model_display:<25} {s.id} {s.work_dir}"
+            f"{i}. {_fmt(s.updated_at)} {msg_count_display:>{MSG_COUNT_WIDTH}} "
+            f"{model_display[: MODEL_WIDTH - 1] + '…' if len(model_display) > MODEL_WIDTH else model_display:<{MODEL_WIDTH}} {s.id} {s.work_dir}"
         )
     try:
        raw = input("Select a session number: ").strip()
klaude_code/session/session.py
CHANGED
@@ -261,12 +261,10 @@ class Session(BaseModel):
             return False
         if prev_item is None:
             return True
-        if isinstance(
+        return isinstance(
             prev_item,
             model.UserMessageItem | model.ToolResultItem | model.DeveloperMessageItem,
-        ):
-            return True
-        return False
+        )
 
     def get_history_item(self) -> Iterable[events.HistoryItemEvent]:
         prev_item: model.ConversationItem | None = None
klaude_code/trace/__init__.py
CHANGED
@@ -1,3 +1,3 @@
 from .log import DebugType, is_debug_enabled, log, log_debug, logger, set_debug_logging
 
-__all__ = ["
+__all__ = ["DebugType", "is_debug_enabled", "log", "log_debug", "logger", "set_debug_logging"]
klaude_code/trace/log.py
CHANGED
klaude_code/ui/__init__.py
CHANGED
@@ -73,19 +73,14 @@ def create_exec_display(debug: bool = False, stream_json: bool = False) -> DisplayABC:
 
 
 __all__ = [
-
+    "DebugEventDisplay",
     "DisplayABC",
+    "ExecDisplay",
     "InputProviderABC",
-
+    "PromptToolkitInput",
     "REPLDisplay",
-    "ExecDisplay",
     "StreamJsonDisplay",
-    "DebugEventDisplay",
-    # Input implementations
-    "PromptToolkitInput",
-    # Factory functions
+    "TerminalNotifier",
     "create_default_display",
     "create_exec_display",
-    # Supporting types
-    "TerminalNotifier",
 ]
klaude_code/ui/core/stage_manager.py
CHANGED
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
+from collections.abc import Awaitable, Callable
 from enum import Enum
-from typing import Awaitable, Callable
 
 
 class Stage(Enum):
@@ -19,10 +19,12 @@ class StageManager:
         self,
         *,
         finish_assistant: Callable[[], Awaitable[None]],
+        finish_thinking: Callable[[], Awaitable[None]],
         on_enter_thinking: Callable[[], None],
     ):
         self._stage = Stage.WAITING
         self._finish_assistant = finish_assistant
+        self._finish_thinking = finish_thinking
         self._on_enter_thinking = on_enter_thinking
 
     @property
@@ -49,7 +51,8 @@ class StageManager:
         self._stage = Stage.WAITING
 
     async def _leave_current_stage(self) -> None:
-        if self._stage == Stage.ASSISTANT:
+        if self._stage == Stage.THINKING:
+            await self._finish_thinking()
+        elif self._stage == Stage.ASSISTANT:
             await self.finish_assistant()
-
-        self._stage = Stage.WAITING
+        self._stage = Stage.WAITING
klaude_code/ui/modes/repl/__init__.py
CHANGED
@@ -9,7 +9,7 @@ if TYPE_CHECKING:
     from klaude_code.core.agent import Agent
 
 
-def build_repl_status_snapshot(agent:
+def build_repl_status_snapshot(agent: Agent | None, update_message: str | None) -> REPLStatusSnapshot:
     """Build a status snapshot for the REPL bottom toolbar.
 
     Aggregates model name, context usage, and basic call counts from the
klaude_code/ui/modes/repl/completers.py
CHANGED
@@ -81,9 +81,9 @@ class _SlashCommandCompleter(Completer):
         # Get available commands
         commands = get_commands()
 
-        # Filter commands that match the fragment
+        # Filter commands that match the fragment (preserve registration order)
         matched: list[tuple[str, object, str]] = []
-        for cmd_name, cmd_obj in sorted(commands.items()):
+        for cmd_name, cmd_obj in commands.items():
             if cmd_name.startswith(frag):
                 hint = " [args]" if cmd_obj.support_addition_params else ""
                 matched.append((cmd_name, cmd_obj, hint))
@@ -103,7 +103,7 @@ class _SlashCommandCompleter(Completer):
 
             # Using HTML for formatting: bold command name, normal hint, gray summary
             display_text = HTML(
-                f"<b>{cmd_name}</b>{hint}{padding}<style color='ansibrightblack'
+                f"<b>{cmd_name}</b>{hint}{padding}<style color='ansibrightblack'>{cmd_obj.summary}</style>"  # pyright: ignore[reportUnknownMemberType, reportAttributeAccessIssue]
             )
             completion_text = f"/{cmd_name} "
             yield Completion(
@@ -133,10 +133,9 @@ class _ComboCompleter(Completer):
         complete_event,  # type: ignore[override]
     ) -> Iterable[Completion]:
         # Try slash command completion first (only on first line)
-        if document.cursor_position_row == 0:
-            if self._slash_completer.is_slash_command_context(document):
-                yield from self._slash_completer.get_completions(document, complete_event)
-                return
+        if document.cursor_position_row == 0 and self._slash_completer.is_slash_command_context(document):
+            yield from self._slash_completer.get_completions(document, complete_event)
+            return
 
         # Fall back to @ file completion
         yield from self._at_completer.get_completions(document, complete_event)
klaude_code/ui/modes/repl/display.py
CHANGED
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import contextlib
 from typing import override
 
 from klaude_code.protocol import events
@@ -53,8 +54,6 @@ class REPLDisplay(DisplayABC):
     async def stop(self) -> None:
         await self.event_handler.stop()
         # Ensure any active spinner is stopped so Rich restores the cursor.
-        try:
+        # Spinner may already be stopped or not started; ignore.
+        with contextlib.suppress(Exception):
             self.renderer.spinner_stop()
-        except Exception:
-            # Spinner may already be stopped or not started; ignore.
-            pass
klaude_code/ui/modes/repl/event_handler.py
CHANGED
@@ -1,7 +1,7 @@
 from __future__ import annotations
 
+from collections.abc import Awaitable, Callable
 from dataclasses import dataclass
-from typing import Awaitable, Callable
 
 from rich.text import Text
 
@@ -10,6 +10,7 @@ from klaude_code.protocol import events
 from klaude_code.ui.core.stage_manager import Stage, StageManager
 from klaude_code.ui.modes.repl.renderer import REPLRenderer
 from klaude_code.ui.rich.markdown import MarkdownStream
+from klaude_code.ui.rich.theme import ThemeKey
 from klaude_code.ui.terminal.notifier import Notification, NotificationType, TerminalNotifier
 from klaude_code.ui.terminal.progress_bar import OSC94States, emit_osc94
 from klaude_code.ui.utils.debouncer import Debouncer
@@ -41,7 +42,7 @@ class StreamState:
     This design ensures buffer and mdstream are always in sync.
     """
 
-    def __init__(self, interval: float, flush_handler: Callable[[
+    def __init__(self, interval: float, flush_handler: Callable[[StreamState], Awaitable[None]]):
         self._active: ActiveStream | None = None
         self._flush_handler = flush_handler
         self.debouncer = Debouncer(interval=interval, callback=self._debounced_flush)
@@ -199,10 +200,14 @@ class DisplayEventHandler:
         self.assistant_stream = StreamState(
             interval=1 / const.UI_REFRESH_RATE_FPS, flush_handler=self._flush_assistant_buffer
         )
+        self.thinking_stream = StreamState(
+            interval=1 / const.UI_REFRESH_RATE_FPS, flush_handler=self._flush_thinking_buffer
+        )
         self.spinner_status = SpinnerStatusState()
 
         self.stage_manager = StageManager(
             finish_assistant=self._finish_assistant_stream,
+            finish_thinking=self._finish_thinking_stream,
             on_enter_thinking=self._print_thinking_prefix,
         )
@@ -222,6 +227,8 @@ class DisplayEventHandler:
                 self._on_turn_start(e)
             case events.ThinkingEvent() as e:
                 await self._on_thinking(e)
+            case events.ThinkingDeltaEvent() as e:
+                await self._on_thinking_delta(e)
             case events.AssistantMessageDeltaEvent() as e:
                 await self._on_assistant_delta(e)
             case events.AssistantMessageEvent() as e:
@@ -252,6 +259,8 @@ class DisplayEventHandler:
     async def stop(self) -> None:
         await self.assistant_stream.debouncer.flush()
         self.assistant_stream.debouncer.cancel()
+        await self.thinking_stream.debouncer.flush()
+        self.thinking_stream.debouncer.cancel()
 
     # ─────────────────────────────────────────────────────────────────────────────
     # Private event handlers
@@ -285,8 +294,41 @@ class DisplayEventHandler:
     async def _on_thinking(self, event: events.ThinkingEvent) -> None:
         if self.renderer.is_sub_agent_session(event.session_id):
             return
+        # If streaming was active, finalize it
+        if self.thinking_stream.is_active:
+            await self._finish_thinking_stream()
+        else:
+            # Non-streaming path (history replay or models without delta support)
+            await self.stage_manager.enter_thinking_stage()
+            self.renderer.display_thinking(event.content)
+
+    async def _on_thinking_delta(self, event: events.ThinkingDeltaEvent) -> None:
+        if self.renderer.is_sub_agent_session(event.session_id):
+            return
+
+        first_delta = not self.thinking_stream.is_active
+        if first_delta:
+            self.renderer.console.push_theme(self.renderer.themes.thinking_markdown_theme)
+            mdstream = MarkdownStream(
+                mdargs={
+                    "code_theme": self.renderer.themes.code_theme,
+                    "style": self.renderer.console.get_style(ThemeKey.THINKING),
+                },
+                theme=self.renderer.themes.thinking_markdown_theme,
+                console=self.renderer.console,
+                spinner=self.renderer.spinner_renderable(),
+                indent=2,
+            )
+            self.thinking_stream.start(mdstream)
+            self.renderer.spinner_stop()
+
+        self.thinking_stream.append(event.content)
+
+        if first_delta and self.thinking_stream.mdstream is not None:
+            self.thinking_stream.mdstream.update(self.thinking_stream.buffer)
+
         await self.stage_manager.enter_thinking_stage()
-        self.renderer.display_thinking(event.content)
+        self.thinking_stream.debouncer.schedule()
 
     async def _on_assistant_delta(self, event: events.AssistantMessageDeltaEvent) -> None:
         if self.renderer.is_sub_agent_session(event.session_id):
@@ -419,6 +461,22 @@ class DisplayEventHandler:
         assert mdstream is not None
         mdstream.update(state.buffer)
 
+    async def _flush_thinking_buffer(self, state: StreamState) -> None:
+        if state.is_active:
+            mdstream = state.mdstream
+            assert mdstream is not None
+            mdstream.update(state.buffer)
+
+    async def _finish_thinking_stream(self) -> None:
+        if self.thinking_stream.is_active:
+            self.thinking_stream.debouncer.cancel()
+            mdstream = self.thinking_stream.mdstream
+            assert mdstream is not None
+            mdstream.update(self.thinking_stream.buffer, final=True)
+            self.thinking_stream.finish()
+            self.renderer.console.pop_theme()
+            self.renderer.spinner_start()
+
     def _maybe_notify_task_finish(self, event: events.TaskFinishEvent) -> None:
         if self.notifier is None:
             return
@@ -453,10 +511,10 @@ class DisplayEventHandler:
         if len(todo.content) > 0:
             status_text = todo.content
             status_text = status_text.replace("\n", "")
-            return self._truncate_status_text(status_text, max_length=
+            return self._truncate_status_text(status_text, max_length=50)
 
     def _truncate_status_text(self, text: str, max_length: int) -> str:
         if len(text) <= max_length:
             return text
         truncated = text[:max_length]
-        return truncated + "
+        return truncated + "…"
klaude_code/ui/modes/repl/key_bindings.py
CHANGED
@@ -6,6 +6,7 @@ with dependencies injected to avoid circular imports.
 
 from __future__ import annotations
 
+import contextlib
 import re
 from collections.abc import Callable
 from typing import cast
@@ -35,10 +36,8 @@ def create_key_bindings(
         """Paste image from clipboard as [Image #N]."""
         tag = capture_clipboard_tag()
         if tag:
-            try:
+            with contextlib.suppress(Exception):
                 event.current_buffer.insert_text(tag)  # pyright: ignore[reportUnknownMemberType]
-            except Exception:
-                pass
 
     @kb.add("enter")
     def _(event):  # type: ignore
klaude_code/ui/modes/repl/renderer.py
CHANGED
@@ -1,8 +1,9 @@
 from __future__ import annotations
 
+from collections.abc import Iterator
 from contextlib import contextmanager
 from dataclasses import dataclass
-from typing import Any, Iterator
+from typing import Any
 
 from rich import box
 from rich.box import Box
klaude_code/ui/renderers/diffs.py
CHANGED
@@ -73,10 +73,7 @@ def render_diff(diff_text: str, show_file_name: bool = False) -> RenderableType:
         if line.startswith("--- "):
             raw = line[4:].strip()
             if raw != "/dev/null":
-                if raw.startswith(("a/", "b/")):
-                    from_file_name = raw[2:]
-                else:
-                    from_file_name = raw
+                from_file_name = raw[2:] if raw.startswith(("a/", "b/")) else raw
             continue
 
         # Parse file name from diff headers
klaude_code/ui/renderers/metadata.py
CHANGED
@@ -77,17 +77,6 @@ def _render_task_metadata_block(
         ]
         if metadata.usage.cache_read_cost is not None:
             cached_parts.append((f"({currency_symbol}{metadata.usage.cache_read_cost:.4f})", ThemeKey.METADATA_DIM))
-        # Cache ratio: (content + cached - last turn output) / input tokens, this might caclulate over 100% if system prompt is cached in first turn
-        # Shows how much of the input was cached (not new context growth)
-        if show_context_and_time and metadata.usage.input_tokens > 0:
-            context_delta = metadata.usage.context_delta or 0
-            last_turn_output_token = metadata.usage.last_turn_output_token or 0
-            cache_ratio = (
-                (metadata.usage.cached_tokens + context_delta - last_turn_output_token)
-                / metadata.usage.input_tokens
-                * 100
-            )
-            cached_parts.append((f"[{cache_ratio:.0f}%]", ThemeKey.METADATA_DIM))
         parts2.append(Text.assemble(*cached_parts))
 
     # Output
@@ -129,7 +118,7 @@ def _render_task_metadata_block(
     if metadata.usage is not None:
         # Context (only for main agent)
         if show_context_and_time and metadata.usage.context_usage_percent is not None:
-            context_size = format_number(metadata.usage.
+            context_size = format_number(metadata.usage.context_size or 0)
             parts3.append(
                 Text.assemble(
                     ("context", ThemeKey.METADATA_DIM),
klaude_code/ui/rich/markdown.py
CHANGED
@@ -1,6 +1,7 @@
 # copy from https://github.com/Aider-AI/aider/blob/main/aider/mdstream.py
 from __future__ import annotations
 
+import contextlib
 import io
 import time
 from typing import Any, ClassVar
@@ -183,10 +184,9 @@ class MarkdownStream:
     def __del__(self) -> None:
         """Destructor to ensure Live display is properly cleaned up."""
         if self.live:
-            try:
+            # Ignore any errors during cleanup
+            with contextlib.suppress(Exception):
                 self.live.stop()
-            except Exception:
-                pass  # Ignore any errors during cleanup
 
     def update(self, text: str, final: bool = False) -> None:
         """Update the displayed markdown content.
klaude_code/ui/rich/searchable_text.py
CHANGED
@@ -1,6 +1,6 @@
 from __future__ import annotations
 
-from typing import Iterable, Sequence, Tuple
+from collections.abc import Iterable, Sequence
 
 
 class SearchableFormattedText:
@@ -16,8 +16,8 @@ class SearchableFormattedText:
     concatenating the text parts of the fragments.
     """
 
-    def __init__(self, fragments: Sequence[Tuple[str, str]], plain: str | None = None):
-        self._fragments: list[Tuple[str, str]] = list(fragments)
+    def __init__(self, fragments: Sequence[tuple[str, str]], plain: str | None = None):
+        self._fragments: list[tuple[str, str]] = list(fragments)
         if plain is None:
             plain = "".join(text for _, text in self._fragments)
         self._plain = plain
@@ -25,7 +25,7 @@ class SearchableFormattedText:
     # Recognized by prompt_toolkit's to_formatted_text(value)
     def __pt_formatted_text__(
         self,
-    ) -> Iterable[Tuple[str, str]]:  # pragma: no cover - passthrough
+    ) -> Iterable[tuple[str, str]]:  # pragma: no cover - passthrough
         return self._fragments
 
     # Provide a human-readable representation.
@@ -45,7 +45,7 @@ class SearchableFormattedText:
         return self._plain
 
 
-class SearchableFormattedList(list[Tuple[str, str]]):
+class SearchableFormattedList(list[tuple[str, str]]):
     """
     List variant compatible with questionary's expected ``Choice.title`` type.
 
@@ -54,7 +54,7 @@ class SearchableFormattedList(list[Tuple[str, str]]):
     - Provides ``.lower()``/``.upper()`` returning the plain text for search filtering.
     """
 
-    def __init__(self, fragments: Sequence[Tuple[str, str]], plain: str | None = None):
+    def __init__(self, fragments: Sequence[tuple[str, str]], plain: str | None = None):
         super().__init__(fragments)
         if plain is None:
             plain = "".join(text for _, text in fragments)
klaude_code/ui/rich/status.py
CHANGED
@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import contextlib
 import math
 import time
 
@@ -233,8 +234,6 @@ class BreathingSpinner(RichSpinner):
 
 # Monkey-patch Rich's Status module to use the breathing spinner implementation
 # for the configured spinner name, while preserving default behavior elsewhere.
-try:
+# Best-effort patch; if it fails we silently fall back to default spinner.
+with contextlib.suppress(Exception):
     rich_status.Spinner = BreathingSpinner  # type: ignore[assignment]
-except Exception:
-    # Best-effort patch; if it fails we silently fall back to default spinner.
-    pass
klaude_code/ui/rich/theme.py
CHANGED
@@ -153,10 +153,7 @@ class Themes:
 
 
 def get_theme(theme: str | None = None) -> Themes:
-    if theme == "light":
-        palette = LIGHT_PALETTE
-    else:
-        palette = DARK_PALETTE
+    palette = LIGHT_PALETTE if theme == "light" else DARK_PALETTE
     return Themes(
         app_theme=Theme(
             styles={