klaude-code 1.2.24__py3-none-any.whl → 1.2.25__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- klaude_code/cli/runtime.py +17 -1
- klaude_code/command/thinking_cmd.py +37 -28
- klaude_code/const.py +3 -5
- klaude_code/core/executor.py +45 -2
- klaude_code/protocol/op.py +11 -0
- klaude_code/protocol/op_handler.py +5 -0
- klaude_code/ui/modes/repl/display.py +2 -0
- klaude_code/ui/modes/repl/event_handler.py +3 -11
- klaude_code/ui/modes/repl/input_prompt_toolkit.py +12 -1
- klaude_code/ui/modes/repl/renderer.py +99 -12
- klaude_code/ui/rich/code_panel.py +24 -5
- klaude_code/ui/rich/live.py +17 -0
- klaude_code/ui/rich/markdown.py +167 -104
- klaude_code/ui/rich/status.py +5 -11
- {klaude_code-1.2.24.dist-info → klaude_code-1.2.25.dist-info}/METADATA +2 -1
- {klaude_code-1.2.24.dist-info → klaude_code-1.2.25.dist-info}/RECORD +18 -18
- {klaude_code-1.2.24.dist-info → klaude_code-1.2.25.dist-info}/WHEEL +0 -0
- {klaude_code-1.2.24.dist-info → klaude_code-1.2.25.dist-info}/entry_points.txt +0 -0
klaude_code/cli/runtime.py
CHANGED
@@ -265,7 +265,23 @@ async def run_interactive(init_config: AppInitConfig, session_id: str | None = N
     )
 
     # Set up input provider for interactive mode
-
+    def _stop_rich_bottom_ui() -> None:
+        display = components.display
+        if isinstance(display, ui.REPLDisplay):
+            display.renderer.spinner_stop()
+            display.renderer.stop_bottom_live()
+        elif (
+            isinstance(display, ui.DebugEventDisplay)
+            and display.wrapped_display
+            and isinstance(display.wrapped_display, ui.REPLDisplay)
+        ):
+            display.wrapped_display.renderer.spinner_stop()
+            display.wrapped_display.renderer.stop_bottom_live()
+
+    input_provider: ui.InputProviderABC = ui.PromptToolkitInput(
+        status_provider=_status_provider,
+        pre_prompt=_stop_rich_bottom_ui,
+    )
 
     # --- Custom Ctrl+C handler: double-press within 2s to exit, single press shows toast ---
     def _show_toast_once() -> None:
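
The `pre_prompt` hook added above exists because Rich's `Live` and prompt_toolkit both want to own the terminal: any live region has to be torn down before the prompt is drawn. A minimal, self-contained sketch of that hand-off, using plain `rich.live.Live` and `input()` as stand-ins for klaude-code's `CropAboveLive` and `prompt_async`:

```python
import contextlib
import time

from rich.console import Console
from rich.live import Live
from rich.text import Text

console = Console()
live = Live(Text("Thinking …"), console=console, transient=True)
live.start()
time.sleep(0.5)  # pretend some streaming output is happening


def stop_bottom_ui() -> None:
    """Tear down the live region; suppressed so a UI error never blocks input."""
    with contextlib.suppress(Exception):
        live.stop()


stop_bottom_ui()      # the pre-prompt hook runs right before reading input
answer = input("> ")  # stand-in for PromptSession.prompt_async()
console.print(f"got: {answer!r}")
```
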
klaude_code/command/thinking_cmd.py
CHANGED

@@ -56,6 +56,14 @@ def _is_gemini_flash_model(model_name: str | None) -> bool:
     return "gemini-3-flash" in model_name.lower()
 
 
+def should_auto_trigger_thinking(model_name: str | None) -> bool:
+    """Check if model should auto-trigger thinking selection on switch."""
+    if not model_name:
+        return False
+    model_lower = model_name.lower()
+    return "gpt-5" in model_lower or "gemini-3" in model_lower or "opus" in model_lower
+
+
 def _get_levels_for_responses(model_name: str | None) -> list[str]:
     """Get thinking levels for responses protocol."""
     if _is_codex_max_model(model_name):
@@ -69,7 +77,7 @@ def _get_levels_for_responses(model_name: str | None) -> list[str]:
     return RESPONSES_LEVELS
 
 
-def
+def format_current_thinking(config: llm_param.LLMConfigParameter) -> str:
     """Format the current thinking configuration for display."""
     thinking = config.thinking
     if not thinking:
@@ -164,6 +172,31 @@ def _select_anthropic_thinking_sync() -> llm_param.Thinking | None:
     return None
 
 
+async def select_thinking_for_protocol(config: llm_param.LLMConfigParameter) -> llm_param.Thinking | None:
+    """Select thinking configuration based on the LLM protocol.
+
+    Returns the selected Thinking config, or None if user cancelled.
+    """
+    protocol = config.protocol
+    model_name = config.model
+
+    if protocol in (llm_param.LLMClientProtocol.RESPONSES, llm_param.LLMClientProtocol.CODEX):
+        return await asyncio.to_thread(_select_responses_thinking_sync, model_name)
+
+    if protocol == llm_param.LLMClientProtocol.ANTHROPIC:
+        return await asyncio.to_thread(_select_anthropic_thinking_sync)
+
+    if protocol == llm_param.LLMClientProtocol.OPENROUTER:
+        if _is_openrouter_model_with_reasoning_effort(model_name):
+            return await asyncio.to_thread(_select_responses_thinking_sync, model_name)
+        return await asyncio.to_thread(_select_anthropic_thinking_sync)
+
+    if protocol == llm_param.LLMClientProtocol.OPENAI:
+        return await asyncio.to_thread(_select_anthropic_thinking_sync)
+
+    return None
+
+
 class ThinkingCommand(CommandABC):
     """Configure model thinking/reasoning level."""
 
@@ -185,40 +218,16 @@ class ThinkingCommand(CommandABC):
             return self._no_change_result(agent, "No profile configured")
 
         config = agent.profile.llm_client.get_llm_config()
-
-        model_name = config.model
-
-        current = _format_current_thinking(config)
-
-        # Select new thinking configuration based on protocol
-        new_thinking: llm_param.Thinking | None = None
-
-        if protocol in (llm_param.LLMClientProtocol.RESPONSES, llm_param.LLMClientProtocol.CODEX):
-            new_thinking = await asyncio.to_thread(_select_responses_thinking_sync, model_name)
-
-        elif protocol == llm_param.LLMClientProtocol.ANTHROPIC:
-            new_thinking = await asyncio.to_thread(_select_anthropic_thinking_sync)
-
-        elif protocol == llm_param.LLMClientProtocol.OPENROUTER:
-            if _is_openrouter_model_with_reasoning_effort(model_name):
-                new_thinking = await asyncio.to_thread(_select_responses_thinking_sync, model_name)
-            else:
-                new_thinking = await asyncio.to_thread(_select_anthropic_thinking_sync)
-
-        elif protocol == llm_param.LLMClientProtocol.OPENAI:
-            # openai_compatible uses anthropic style
-            new_thinking = await asyncio.to_thread(_select_anthropic_thinking_sync)
-
-        else:
-            return self._no_change_result(agent, f"Unsupported protocol: {protocol}")
+        current = format_current_thinking(config)
 
+        new_thinking = await select_thinking_for_protocol(config)
         if new_thinking is None:
             return self._no_change_result(agent, "(no change)")
 
         # Apply the new thinking configuration
        config.thinking = new_thinking
        agent.session.model_thinking = new_thinking
-        new_status =
+        new_status = format_current_thinking(config)
 
         return CommandResult(
             events=[
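
The new `should_auto_trigger_thinking` gate is a plain substring check on the model name, so it can be probed directly once klaude-code 1.2.25 is installed. The model names below are arbitrary examples, not a supported list:

```python
from klaude_code.command.thinking_cmd import should_auto_trigger_thinking

# Expected results follow from the substring checks shown in the diff above:
# "gpt-5", "gemini-3" and "opus" trigger the selection prompt, anything else does not.
for name in ("gpt-5.1-codex", "gemini-3-flash", "claude-opus-4-5", "gpt-4o", None):
    print(f"{name!r}: {should_auto_trigger_thinking(name)}")
```
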
klaude_code/const.py
CHANGED
@@ -117,16 +117,14 @@ STATUS_DEFAULT_TEXT = "Thinking …"
 # Status shimmer animation
 # Horizontal padding used when computing shimmer band position
 STATUS_SHIMMER_PADDING = 10
-# Duration in seconds for one full shimmer sweep across the text
-STATUS_SHIMMER_SWEEP_SECONDS = 2
 # Half-width of the shimmer band in characters
 STATUS_SHIMMER_BAND_HALF_WIDTH = 5.0
 # Scale factor applied to shimmer intensity when blending colors
 STATUS_SHIMMER_ALPHA_SCALE = 0.7
 
-# Spinner breathing animation
-# Duration in seconds for one full breathe-in + breathe-out cycle
-#
+# Spinner breathing and shimmer animation period
+# Duration in seconds for one full breathe-in + breathe-out cycle (breathing)
+# and one full shimmer sweep across the text (shimmer)
 SPINNER_BREATH_PERIOD_SECONDS: float = 2.0
 
 
klaude_code/core/executor.py
CHANGED
@@ -14,6 +14,11 @@ from dataclasses import dataclass
 from pathlib import Path
 
 from klaude_code.command import dispatch_command
+from klaude_code.command.thinking_cmd import (
+    format_current_thinking,
+    select_thinking_for_protocol,
+    should_auto_trigger_thinking,
+)
 from klaude_code.config import load_config
 from klaude_code.core.agent import Agent, DefaultModelProfileProvider, ModelProfileProvider
 from klaude_code.core.manager import LLMClients, SubAgentManager
@@ -235,17 +240,55 @@ class ExecutorContext:
         agent.session.model_thinking = llm_config.thinking
 
         developer_item = model.DeveloperMessageItem(
-            content=f"
+            content=f"Switched to: {llm_config.model}",
             command_output=model.CommandOutput(command_name=commands.CommandName.MODEL),
         )
         agent.session.append_history([developer_item])
 
         await self.emit_event(events.DeveloperMessageEvent(session_id=agent.session.id, item=developer_item))
-        await self.emit_event(events.WelcomeEvent(llm_config=llm_config, work_dir=str(agent.session.work_dir)))
 
         if self._on_model_change is not None:
             self._on_model_change(llm_client.model_name)
 
+        if should_auto_trigger_thinking(llm_config.model):
+            thinking_op = op.ChangeThinkingOperation(session_id=operation.session_id)
+            await thinking_op.execute(handler=self)
+            # WelcomeEvent is already handled by the thinking change
+        else:
+            await self.emit_event(events.WelcomeEvent(llm_config=llm_config, work_dir=str(agent.session.work_dir)))
+
+    async def handle_change_thinking(self, operation: op.ChangeThinkingOperation) -> None:
+        """Handle a change thinking operation by prompting user to select thinking level."""
+        agent = await self._ensure_agent(operation.session_id)
+        if not agent.profile:
+            return
+
+        config = agent.profile.llm_client.get_llm_config()
+        current = format_current_thinking(config)
+
+        new_thinking = await select_thinking_for_protocol(config)
+
+        if new_thinking is None:
+            developer_item = model.DeveloperMessageItem(
+                content="(thinking unchanged)",
+                command_output=model.CommandOutput(command_name=commands.CommandName.THINKING),
+            )
+            await self.emit_event(events.DeveloperMessageEvent(session_id=agent.session.id, item=developer_item))
+            return
+
+        config.thinking = new_thinking
+        agent.session.model_thinking = new_thinking
+        new_status = format_current_thinking(config)
+
+        developer_item = model.DeveloperMessageItem(
+            content=f"Thinking changed: {current} -> {new_status}",
+            command_output=model.CommandOutput(command_name=commands.CommandName.THINKING),
+        )
+        agent.session.append_history([developer_item])
+
+        await self.emit_event(events.DeveloperMessageEvent(session_id=agent.session.id, item=developer_item))
+        await self.emit_event(events.WelcomeEvent(work_dir=str(agent.session.work_dir), llm_config=config))
+
     async def handle_clear_session(self, operation: op.ClearSessionOperation) -> None:
         agent = await self._ensure_agent(operation.session_id)
         new_session = Session.create(work_dir=agent.session.work_dir)
klaude_code/protocol/op.py
CHANGED
@@ -25,6 +25,7 @@ class OperationType(Enum):
     USER_INPUT = "user_input"
     RUN_AGENT = "run_agent"
     CHANGE_MODEL = "change_model"
+    CHANGE_THINKING = "change_thinking"
     CLEAR_SESSION = "clear_session"
     EXPORT_SESSION = "export_session"
     INTERRUPT = "interrupt"
@@ -77,6 +78,16 @@ class ChangeModelOperation(Operation):
         await handler.handle_change_model(self)
 
 
+class ChangeThinkingOperation(Operation):
+    """Operation for changing the thinking/reasoning configuration."""
+
+    type: OperationType = OperationType.CHANGE_THINKING
+    session_id: str
+
+    async def execute(self, handler: OperationHandler) -> None:
+        await handler.handle_change_thinking(self)
+
+
 class ClearSessionOperation(Operation):
     """Operation for clearing the active session and starting a new one."""
 
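
`ChangeThinkingOperation` follows the operation/handler double-dispatch pattern already used by `ChangeModelOperation`: the operation object carries its payload and forwards itself to the matching handler method. A stripped-down, self-contained illustration of that pattern (not the package's real base classes):

```python
import asyncio
from dataclasses import dataclass
from typing import Protocol


class OperationHandler(Protocol):
    async def handle_change_thinking(self, operation: "ChangeThinkingOperation") -> None: ...


@dataclass
class ChangeThinkingOperation:
    session_id: str

    async def execute(self, handler: OperationHandler) -> None:
        # Double dispatch: the operation selects the handler method for its own type.
        await handler.handle_change_thinking(self)


class PrintingHandler:
    async def handle_change_thinking(self, operation: ChangeThinkingOperation) -> None:
        print(f"change thinking for session {operation.session_id}")


asyncio.run(ChangeThinkingOperation(session_id="abc").execute(handler=PrintingHandler()))
```
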
klaude_code/protocol/op_handler.py
CHANGED

@@ -11,6 +11,7 @@ from typing import TYPE_CHECKING, Protocol
 if TYPE_CHECKING:
     from klaude_code.protocol.op import (
         ChangeModelOperation,
+        ChangeThinkingOperation,
         ClearSessionOperation,
         ExportSessionOperation,
         InitAgentOperation,
@@ -35,6 +36,10 @@ class OperationHandler(Protocol):
         """Handle a change model operation."""
         ...
 
+    async def handle_change_thinking(self, operation: ChangeThinkingOperation) -> None:
+        """Handle a change thinking operation."""
+        ...
+
     async def handle_clear_session(self, operation: ClearSessionOperation) -> None:
         """Handle a clear session operation."""
         ...
klaude_code/ui/modes/repl/event_handler.py
CHANGED

@@ -368,22 +368,20 @@ class DisplayEventHandler:
 
         first_delta = not self.thinking_stream.is_active
         if first_delta:
-            self.renderer.console.push_theme(self.renderer.themes.thinking_markdown_theme)
             mdstream = MarkdownStream(
                 mdargs={
                     "code_theme": self.renderer.themes.code_theme,
-                    "style":
+                    "style": ThemeKey.THINKING,
                 },
                 theme=self.renderer.themes.thinking_markdown_theme,
                 console=self.renderer.console,
-
+                live_sink=self.renderer.set_stream_renderable,
                 mark=THINKING_MESSAGE_MARK,
                 mark_style=ThemeKey.THINKING,
                 left_margin=const.MARKDOWN_LEFT_MARGIN,
                 markdown_class=ThinkingMarkdown,
             )
             self.thinking_stream.start(mdstream)
-            self.renderer.spinner_stop()
 
         self.thinking_stream.append(event.content)
 
@@ -414,17 +412,13 @@ class DisplayEventHandler:
                 mdargs={"code_theme": self.renderer.themes.code_theme},
                 theme=self.renderer.themes.markdown_theme,
                 console=self.renderer.console,
-
+                live_sink=self.renderer.set_stream_renderable,
                 mark=ASSISTANT_MESSAGE_MARK,
                 left_margin=const.MARKDOWN_LEFT_MARGIN,
             )
             self.assistant_stream.start(mdstream)
         self.assistant_stream.append(event.content)
         if first_delta and self.assistant_stream.mdstream is not None:
-            # Stop spinner and immediately start MarkdownStream's Live
-            # to avoid flicker. The update() call starts the Live with
-            # the spinner embedded, providing seamless transition.
-            self.renderer.spinner_stop()
             self.assistant_stream.mdstream.update(self.assistant_stream.buffer)
         await self.stage_manager.transition_to(Stage.ASSISTANT)
         await self._flush_assistant_buffer(self.assistant_stream)
@@ -488,7 +482,6 @@ class DisplayEventHandler:
         self.spinner_status.reset()
         self.renderer.spinner_stop()
         self.renderer.console.print(Rule(characters="-", style=ThemeKey.LINES))
-        self.renderer.print()
         await self.stage_manager.transition_to(Stage.WAITING)
         self._maybe_notify_task_finish(event)
 
@@ -552,7 +545,6 @@ class DisplayEventHandler:
         assert mdstream is not None
         mdstream.update(normalize_thinking_content(self.thinking_stream.buffer), final=True)
         self.thinking_stream.finish()
-        self.renderer.console.pop_theme()
         self.renderer.print()
         self.renderer.spinner_start()
 
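
With the `live_sink` parameter wired in here, `MarkdownStream` no longer owns a Rich `Live`: it pushes its in-progress tail to a callback (and sends `None` when the stream finishes), and the renderer decides where that renderable is painted. A trivial sink showing the contract assumed from the signature in this diff:

```python
from rich.console import Console, RenderableType

console = Console()


def live_sink(renderable: RenderableType | None) -> None:
    """Receive the in-progress tail of a stream, or None when streaming ends."""
    if renderable is None:
        console.print("stream finished")
    else:
        # klaude-code routes this into the bottom Live region; here we just print it.
        console.print(renderable)


live_sink(None)
```
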
klaude_code/ui/modes/repl/input_prompt_toolkit.py
CHANGED

@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import contextlib
 import shutil
 from collections.abc import AsyncIterator, Callable
 from pathlib import Path
@@ -51,8 +52,12 @@ class PromptToolkitInput(InputProviderABC):
         self,
         prompt: str = USER_MESSAGE_MARK,
         status_provider: Callable[[], REPLStatusSnapshot] | None = None,
+        pre_prompt: Callable[[], None] | None = None,
+        post_prompt: Callable[[], None] | None = None,
     ):  # ▌
         self._status_provider = status_provider
+        self._pre_prompt = pre_prompt
+        self._post_prompt = post_prompt
         self._is_light_terminal_background = is_light_terminal_background(timeout=0.2)
 
         project = str(Path.cwd()).strip("/").replace("/", "-")
@@ -80,7 +85,7 @@ class PromptToolkitInput(InputProviderABC):
             [(INPUT_PROMPT_STYLE, prompt)],
             history=FileHistory(str(history_path)),
             multiline=True,
-            cursor=CursorShape.
+            cursor=CursorShape.BLINKING_BEAM,
             prompt_continuation=[(INPUT_PROMPT_STYLE, " ")],
             key_bindings=kb,
             completer=ThreadedCompleter(create_repl_completer()),
@@ -202,8 +207,14 @@ class PromptToolkitInput(InputProviderABC):
     @override
     async def iter_inputs(self) -> AsyncIterator[UserInputPayload]:
         while True:
+            if self._pre_prompt is not None:
+                with contextlib.suppress(Exception):
+                    self._pre_prompt()
             with patch_stdout():
                 line: str = await self._session.prompt_async(placeholder=self._render_input_placeholder())
+            if self._post_prompt is not None:
+                with contextlib.suppress(Exception):
+                    self._post_prompt()
 
             # Extract images referenced in the input text
             images = extract_images_from_text(line)
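
The `pre_prompt`/`post_prompt` parameters are optional callbacks wrapped in `contextlib.suppress(Exception)`, so a misbehaving UI hook can never break the input loop. A minimal sketch of the same pattern, with `input()` standing in for `prompt_async`:

```python
import contextlib
from collections.abc import Callable


def read_one_line(
    pre_prompt: Callable[[], None] | None = None,
    post_prompt: Callable[[], None] | None = None,
) -> str:
    if pre_prompt is not None:
        with contextlib.suppress(Exception):
            pre_prompt()   # e.g. stop the spinner / bottom Live before prompting
    line = input("> ")     # stand-in for PromptSession.prompt_async()
    if post_prompt is not None:
        with contextlib.suppress(Exception):
            post_prompt()  # e.g. resume the bottom UI once input is read
    return line


print(read_one_line(pre_prompt=lambda: print("ui paused"), post_prompt=lambda: print("ui resumed")))
```
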
klaude_code/ui/modes/repl/renderer.py
CHANGED

@@ -1,13 +1,14 @@
 from __future__ import annotations
 
+import contextlib
 from collections.abc import Iterator
 from contextlib import contextmanager
 from dataclasses import dataclass
 from typing import Any
 
-from rich.console import Console
+from rich.console import Console, Group, RenderableType
+from rich.padding import Padding
 from rich.spinner import Spinner
-from rich.status import Status
 from rich.style import Style, StyleType
 from rich.text import Text
 
@@ -23,8 +24,9 @@ from klaude_code.ui.renderers import tools as r_tools
 from klaude_code.ui.renderers import user_input as r_user_input
 from klaude_code.ui.renderers.common import truncate_display
 from klaude_code.ui.rich import status as r_status
+from klaude_code.ui.rich.live import CropAboveLive, SingleLine
 from klaude_code.ui.rich.quote import Quote
-from klaude_code.ui.rich.status import ShimmerStatusText
+from klaude_code.ui.rich.status import BreathingSpinner, ShimmerStatusText
 from klaude_code.ui.rich.theme import ThemeKey, get_theme
 
 
@@ -42,10 +44,18 @@ class REPLRenderer:
         self.themes = get_theme(theme)
         self.console: Console = Console(theme=self.themes.app_theme)
         self.console.push_theme(self.themes.markdown_theme)
-        self.
-
-
-
+        self._bottom_live: CropAboveLive | None = None
+        self._stream_renderable: RenderableType | None = None
+        self._stream_max_height: int = 0
+        self._stream_last_height: int = 0
+        self._stream_last_width: int = 0
+        self._spinner_visible: bool = False
+
+        self._status_text: ShimmerStatusText = ShimmerStatusText(const.STATUS_DEFAULT_TEXT)
+        self._status_spinner: Spinner = BreathingSpinner(
+            r_status.spinner_name(),
+            text=SingleLine(self._status_text),
+            style=ThemeKey.STATUS_SPINNER,
         )
 
         self.session_map: dict[str, SessionStatus] = {}
@@ -235,7 +245,11 @@ class REPLRenderer:
     def display_task_finish(self, event: events.TaskFinishEvent) -> None:
         if self.is_sub_agent_session(event.session_id):
             session_status = self.session_map.get(event.session_id)
-            description =
+            description = (
+                session_status.sub_agent_state.sub_agent_desc
+                if session_status and session_status.sub_agent_state
+                else None
+            )
             panel_style = self.get_session_sub_agent_background(event.session_id)
             with self.session_print_context(event.session_id):
                 self.print(
@@ -265,16 +279,89 @@ class REPLRenderer:
 
     def spinner_start(self) -> None:
         """Start the spinner animation."""
-        self.
+        self._spinner_visible = True
+        self._ensure_bottom_live_started()
+        self._refresh_bottom_live()
 
     def spinner_stop(self) -> None:
         """Stop the spinner animation."""
-        self.
+        self._spinner_visible = False
+        self._refresh_bottom_live()
 
     def spinner_update(self, status_text: str | Text, right_text: Text | None = None) -> None:
         """Update the spinner status text with optional right-aligned text."""
-        self.
+        self._status_text = ShimmerStatusText(status_text, right_text)
+        self._status_spinner.update(text=SingleLine(self._status_text), style=ThemeKey.STATUS_SPINNER)
+        self._refresh_bottom_live()
 
     def spinner_renderable(self) -> Spinner:
         """Return the spinner's renderable for embedding in other components."""
-        return self.
+        return self._status_spinner
+
+    def set_stream_renderable(self, renderable: RenderableType | None) -> None:
+        """Set the current streaming renderable displayed above the status line."""
+
+        if renderable is None:
+            self._stream_renderable = None
+            self._stream_max_height = 0
+            self._stream_last_height = 0
+            self._stream_last_width = 0
+            self._refresh_bottom_live()
+            return
+
+        self._ensure_bottom_live_started()
+        self._stream_renderable = renderable
+
+        height = len(self.console.render_lines(renderable, self.console.options, pad=False))
+        self._stream_last_height = height
+        self._stream_last_width = self.console.size.width
+        self._stream_max_height = max(self._stream_max_height, height)
+        self._refresh_bottom_live()
+
+    def _ensure_bottom_live_started(self) -> None:
+        if self._bottom_live is not None:
+            return
+        self._bottom_live = CropAboveLive(
+            Text(""),
+            console=self.console,
+            refresh_per_second=30,
+            transient=True,
+            redirect_stdout=False,
+            redirect_stderr=False,
+        )
+        self._bottom_live.start()
+
+    def _bottom_renderable(self) -> RenderableType:
+        stream = self._stream_renderable
+        if stream is not None:
+            current_width = self.console.size.width
+            if self._stream_last_width != current_width:
+                height = len(self.console.render_lines(stream, self.console.options, pad=False))
+                self._stream_last_height = height
+                self._stream_last_width = current_width
+                self._stream_max_height = max(self._stream_max_height, height)
+            else:
+                height = self._stream_last_height
+
+            pad_lines = max(self._stream_max_height - height, 0)
+            if pad_lines:
+                stream = Padding(stream, (0, 0, pad_lines, 0))
+
+        stream_part: RenderableType = stream if stream is not None else Group()
+        gap_part: RenderableType = Text("") if self._spinner_visible else Group()
+        status_part: RenderableType = SingleLine(self._status_spinner) if self._spinner_visible else Group()
+        return Group(stream_part, gap_part, status_part)
+
+    def _refresh_bottom_live(self) -> None:
+        if self._bottom_live is None:
+            return
+        self._bottom_live.update(self._bottom_renderable(), refresh=True)
+
+    def stop_bottom_live(self) -> None:
+        if self._bottom_live is None:
+            return
+        with contextlib.suppress(Exception):
+            # Avoid cursor restore when stopping right before prompt_toolkit.
+            self._bottom_live.transient = False
+            self._bottom_live.stop()
+        self._bottom_live = None
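
The renderer now funnels everything below the scrollback into one bottom `Live` region: the streaming tail, a one-line gap, and the spinner/status line, composed with `Group`. A rough stand-alone approximation of that layout, using plain `rich.live.Live` in place of `CropAboveLive`:

```python
import time

from rich.console import Console, Group
from rich.live import Live
from rich.spinner import Spinner
from rich.text import Text

console = Console()
status = Spinner("dots", text=Text("Thinking …"))

with Live(Group(Text(""), status), console=console, refresh_per_second=30, transient=True) as live:
    stream = Text()
    for word in "streamed tokens land above the status line".split():
        stream.append(word + " ")
        # Completed content could be console.print()-ed to scrollback; only the
        # tail + gap + status line are repainted in place here.
        live.update(Group(stream, Text(""), status))
        time.sleep(0.2)
```
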
klaude_code/ui/rich/code_panel.py
CHANGED

@@ -4,9 +4,10 @@ from __future__ import annotations
 
 from typing import TYPE_CHECKING
 
+from rich.cells import cell_len
 from rich.console import ConsoleRenderable, RichCast
 from rich.jupyter import JupyterMixin
-from rich.measure import Measurement
+from rich.measure import Measurement
 from rich.segment import Segment
 from rich.style import StyleType
 
@@ -58,17 +59,29 @@ class CodePanel(JupyterMixin):
         self.expand = expand
         self.padding = padding
 
+    @staticmethod
+    def _measure_max_line_cells(lines: list[list[Segment]]) -> int:
+        max_cells = 0
+        for line in lines:
+            plain = "".join(segment.text for segment in line).rstrip()
+            max_cells = max(max_cells, cell_len(plain))
+        return max_cells
+
     def __rich_console__(self, console: Console, options: ConsoleOptions) -> RenderResult:
         border_style = console.get_style(self.border_style)
         max_width = options.max_width
         pad = self.padding
 
+        max_content_width = max(max_width - pad * 2, 1)
+
         # Measure the content width (account for padding)
         if self.expand:
-            content_width =
+            content_width = max_content_width
         else:
-
-
+            probe_options = options.update(width=max_content_width)
+            probe_lines = console.render_lines(self.renderable, probe_options, pad=False)
+            content_width = self._measure_max_line_cells(probe_lines)
+            content_width = max(1, min(content_width, max_content_width))
 
         # Render content lines
         child_options = options.update(width=content_width)
@@ -108,5 +121,11 @@ class CodePanel(JupyterMixin):
     def __rich_measure__(self, console: Console, options: ConsoleOptions) -> Measurement:
         if self.expand:
             return Measurement(options.max_width, options.max_width)
-
+        max_width = options.max_width
+        max_content_width = max(max_width - self.padding * 2, 1)
+        probe_options = options.update(width=max_content_width)
+        probe_lines = console.render_lines(self.renderable, probe_options, pad=False)
+        content_width = self._measure_max_line_cells(probe_lines)
+        content_width = max(1, min(content_width, max_content_width))
+        width = content_width + self.padding * 2
         return Measurement(width, width)
klaude_code/ui/rich/live.py
CHANGED
@@ -63,3 +63,20 @@ class CropAboveLive(Live):
 
     def update(self, renderable: RenderableType, refresh: bool = True) -> None:  # type: ignore[override]
         super().update(CropAbove(renderable, style=self._crop_style), refresh=refresh)
+
+
+class SingleLine:
+    """Render only the first line of a renderable.
+
+    This is used to ensure dynamic UI elements (spinners / status) never wrap
+    to multiple lines, which would appear as a vertical "jump".
+    """
+
+    def __init__(self, renderable: RenderableType) -> None:
+        self.renderable = renderable
+
+    def __rich_console__(self, console: Console, options: ConsoleOptions) -> RenderResult:
+        line_options = options.update(no_wrap=True, overflow="ellipsis", height=1)
+        lines = console.render_lines(self.renderable, line_options, pad=False)
+        if lines:
+            yield from lines[0]
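
Usage sketch for `SingleLine` (assuming klaude-code 1.2.25 is installed): a long status renderable is clamped to a single terminal row with ellipsis overflow, so the bottom live region never changes height mid-stream.

```python
from rich.console import Console
from rich.text import Text

from klaude_code.ui.rich.live import SingleLine

console = Console(width=40)
status = Text("Thinking … " + "with a very long right-aligned hint " * 3)

console.print(SingleLine(status))  # exactly one row, overflow ellipsized
console.print(status)              # for comparison: wraps across several rows
```
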
klaude_code/ui/rich/markdown.py
CHANGED
@@ -7,11 +7,11 @@ import time
 from collections.abc import Callable
 from typing import Any, ClassVar
 
-from
-from
+from markdown_it import MarkdownIt
+from markdown_it.token import Token
+from rich.console import Console, ConsoleOptions, RenderableType, RenderResult
 from rich.markdown import CodeBlock, Heading, Markdown, MarkdownElement
 from rich.rule import Rule
-from rich.spinner import Spinner
 from rich.style import Style, StyleType
 from rich.syntax import Syntax
 from rich.text import Text
@@ -94,19 +94,23 @@ class ThinkingMarkdown(Markdown):
 
 
 class MarkdownStream:
-    """
+    """Block-based streaming Markdown renderer.
 
-
-
-
+    This renderer is optimized for terminal UX:
+
+    - Stable area: only prints *completed* Markdown blocks to scrollback (append-only).
+    - Live area: continuously repaints only the final *possibly incomplete* block.
+
+    Block boundaries are computed with `MarkdownIt("commonmark")` (token maps / top-level tokens).
+    Rendering is done with Rich Markdown (customizable via `markdown_class`).
     """
 
     def __init__(
         self,
+        console: Console,
         mdargs: dict[str, Any] | None = None,
         theme: Theme | None = None,
-
-        spinner: Spinner | None = None,
+        live_sink: Callable[[RenderableType | None], None] | None = None,
         mark: str | None = None,
         mark_style: StyleType | None = None,
         left_margin: int = 0,
@@ -125,24 +129,23 @@ class MarkdownStream:
             right_margin (int, optional): Number of columns to reserve on the right side
             markdown_class: Markdown class to use for rendering (defaults to NoInsetMarkdown)
         """
-        self.
+        self._stable_rendered_lines: list[str] = []
+        self._stable_source_line_count: int = 0
 
         if mdargs:
             self.mdargs: dict[str, Any] = mdargs
         else:
             self.mdargs = {}
 
-
-        self.live: Live | None = None
+        self._live_sink = live_sink
 
         # Streaming control
         self.when: float = 0.0  # Timestamp of last update
         self.min_delay: float = 1.0 / 20  # Minimum time between updates (20fps)
-        self.
+        self._parser: MarkdownIt = MarkdownIt("commonmark")
 
         self.theme = theme
         self.console = console
-        self.spinner: Spinner | None = spinner
         self.mark: str | None = mark
         self.mark_style: StyleType | None = mark_style
 
@@ -154,9 +157,117 @@ class MarkdownStream:
     @property
     def _live_started(self) -> bool:
         """Check if Live display has been started (derived from self.live)."""
-        return self.
+        return self._live_sink is not None
+
+    def _get_base_width(self) -> int:
+        return self.console.options.max_width
+
+    def compute_candidate_stable_line(self, text: str) -> int:
+        """Return the start line of the last top-level block, or 0.
+
+        This value is not monotonic; callers should clamp it (e.g. with the
+        previous stable line) before using it to advance state.
+        """
+
+        try:
+            tokens = self._parser.parse(text)
+        except Exception:
+            return 0
+
+        top_level: list[Token] = [token for token in tokens if token.level == 0 and token.map is not None]
+        if len(top_level) < 2:
+            return 0
+
+        last = top_level[-1]
+        assert last.map is not None
+        start_line = last.map[0]
+        return max(start_line, 0)
+
+    def split_blocks(self, text: str, *, min_stable_line: int = 0, final: bool = False) -> tuple[str, str, int]:
+        """Split full markdown into stable and live sources.
+
+        Returns:
+            stable_source: Completed blocks (append-only)
+            live_source: Last (possibly incomplete) block
+            stable_line: Line index where live starts
+        """
+
+        lines = text.splitlines(keepends=True)
+        line_count = len(lines)
+
+        stable_line = line_count if final else self.compute_candidate_stable_line(text)
+
+        stable_line = min(stable_line, line_count)
+        stable_line = max(stable_line, min_stable_line)
+
+        stable_source = "".join(lines[:stable_line])
+        live_source = "".join(lines[stable_line:])
+        return stable_source, live_source, stable_line
+
+    def render_ansi(self, text: str, *, apply_mark: bool) -> str:
+        """Render markdown source to an ANSI string.
+
+        This is primarily intended for internal debugging and tests.
+        """
+
+        return "".join(self._render_markdown_to_lines(text, apply_mark=apply_mark))
+
+    def render_stable_ansi(self, stable_source: str, *, has_live_suffix: bool, final: bool) -> str:
+        """Render stable prefix to ANSI, preserving inter-block spacing."""
+
+        if not stable_source:
+            return ""
+
+        render_source = stable_source
+        if not final and has_live_suffix:
+            render_source = self._append_nonfinal_sentinel(stable_source)
+
+        return self.render_ansi(render_source, apply_mark=True)
+
+    @staticmethod
+    def normalize_live_ansi_for_boundary(*, stable_ansi: str, live_ansi: str) -> str:
+        """Normalize whitespace at the stable/live boundary.
+
+        Some Rich Markdown blocks (e.g. lists) render with a leading blank line.
+        If the stable prefix already renders a trailing blank line, rendering the
+        live suffix separately may introduce an extra blank line that wouldn't
+        appear when rendering the full document.
 
-
+        This function removes leading blank lines from the live ANSI when the
+        stable ANSI already ends with a blank line.
+        """
+
+        stable_lines = stable_ansi.splitlines(keepends=True)
+        stable_ends_blank = bool(stable_lines) and not stable_lines[-1].strip()
+        if not stable_ends_blank:
+            return live_ansi
+
+        live_lines = live_ansi.splitlines(keepends=True)
+        while live_lines and not live_lines[0].strip():
+            live_lines.pop(0)
+        return "".join(live_lines)
+
+    def _append_nonfinal_sentinel(self, stable_source: str) -> str:
+        """Make Rich render stable content as if it isn't the last block.
+
+        Rich Markdown may omit trailing spacing for the last block in a document.
+        When we render only the stable prefix (without the live suffix), we still
+        need the *inter-block* spacing to match the full document.
+
+        A harmless HTML comment block causes Rich Markdown to emit the expected
+        spacing while rendering no visible content.
+        """
+
+        if not stable_source:
+            return stable_source
+
+        if stable_source.endswith("\n\n"):
+            return stable_source + "<!-- -->"
+        if stable_source.endswith("\n"):
+            return stable_source + "\n<!-- -->"
+        return stable_source + "\n\n<!-- -->"
+
+    def _render_markdown_to_lines(self, text: str, *, apply_mark: bool) -> list[str]:
         """Render markdown text to a list of lines.
 
         Args:
@@ -168,13 +279,8 @@ class MarkdownStream:
         # Render the markdown to a string buffer
         string_io = io.StringIO()
 
-        #
-
-        if self.console is not None:
-            base_width = self.console.options.max_width
-        else:
-            probe_console = Console(theme=self.theme)
-            base_width = probe_console.options.max_width
+        # Keep width stable across frames to prevent reflow/jitter.
+        base_width = self._get_base_width()
 
         effective_width = max(base_width - self.left_margin - self.right_margin, 1)
 
@@ -195,7 +301,7 @@ class MarkdownStream:
         indent_prefix = " " * self.left_margin if self.left_margin > 0 else ""
         processed_lines: list[str] = []
         mark_applied = False
-        use_mark = bool(self.mark) and self.left_margin >= 2
+        use_mark = apply_mark and bool(self.mark) and self.left_margin >= 2
 
         # Pre-render styled mark if needed
         styled_mark: str | None = None
@@ -227,102 +333,59 @@ class MarkdownStream:
 
     def __del__(self) -> None:
         """Destructor to ensure Live display is properly cleaned up."""
-        if self.
-
-
-
+        if self._live_sink is None:
+            return
+        with contextlib.suppress(Exception):
+            self._live_sink(None)
 
     def update(self, text: str, final: bool = False) -> None:
-        """Update the
-
-        Args:
-            text (str): The markdown text received so far
-            final (bool): If True, this is the final update and we should clean up
+        """Update the display with the latest full markdown buffer."""
 
-
-        which aren't considered stable. They may shift around as new chunks
-        are appended to the markdown text.
-
-        The stable lines emit to the console above the Live window.
-        The unstable lines emit into the Live window so they can be repainted.
-
-        Markdown going to the console works better in terminal scrollback buffers.
-        The live window doesn't play nice with terminal scrollback.
-        """
-        if not self._live_started:
-            initial_content = self._live_renderable(Text(""), final=False)
-            # transient=False keeps final frame on screen after stop()
-            self.live = Live(
-                initial_content,
-                refresh_per_second=1.0 / self.min_delay,
-                console=self.console,
-            )
-            self.live.start()
-
-        if self.live is None:
+        if self._live_sink is None:
             return
 
         now = time.time()
-        # Throttle updates to maintain smooth rendering
         if not final and now - self.when < self.min_delay:
             return
         self.when = now
 
-
-        start = time.time()
-        lines = self._render_markdown_to_lines(text)
-        render_time = time.time() - start
-
-        # Set min_delay to render time plus a small buffer
-        self.min_delay = min(max(render_time * 10, 1.0 / 20), 2)
-
-        num_lines = len(lines)
-
-        # Reserve last live_window lines for Live area to keep height stable
-        num_lines = max(num_lines - self.live_window, 0)
+        previous_stable_line = self._stable_source_line_count
 
-
-
-
-
+        stable_source, live_source, stable_line = self.split_blocks(
+            text,
+            min_stable_line=previous_stable_line,
+            final=final,
+        )
 
-
-        append_chunk = lines[num_printed:num_lines]
-        append_chunk_text = Text.from_ansi("".join(append_chunk))
-        live = self.live
-        assert live is not None
-        live.console.print(append_chunk_text)
-        self.printed = lines[:num_lines]
+        start = time.time()
 
-
+        stable_changed = final or stable_line > self._stable_source_line_count
+        if stable_changed and stable_source:
+            stable_ansi = self.render_stable_ansi(stable_source, has_live_suffix=bool(live_source), final=final)
+            stable_lines = stable_ansi.splitlines(keepends=True)
+            new_lines = stable_lines[len(self._stable_rendered_lines) :]
+            if new_lines:
+                stable_chunk = "".join(new_lines)
+                self.console.print(Text.from_ansi(stable_chunk), end="\n")
+            self._stable_rendered_lines = stable_lines
+            self._stable_source_line_count = stable_line
+        elif final and not stable_source:
+            self._stable_rendered_lines = []
+            self._stable_source_line_count = stable_line
 
-        # Final: render remaining lines without spinner, then stop Live
         if final:
-
-            assert live is not None
-            rest = "".join(rest_lines)
-            rest_text = Text.from_ansi(rest)
-            final_renderable = self._live_renderable(rest_text, final=True)
-            live.update(final_renderable)
-            live.stop()
-            self.live = None
+            self._live_sink(None)
             return
 
-
-
-        live = self.live
-        assert live is not None
-        live_renderable = self._live_renderable(rest, final)
-        live.update(live_renderable)
+        apply_mark_live = self._stable_source_line_count == 0
+        live_lines = self._render_markdown_to_lines(live_source, apply_mark=apply_mark_live)
 
-
-
-
-        else:
-            return Group(rest, Text(), self.spinner)
+        if self._stable_rendered_lines and not self._stable_rendered_lines[-1].strip():
+            while live_lines and not live_lines[0].strip():
+                live_lines.pop(0)
 
-
-
-
-
-
+        live_text = Text.from_ansi("".join(live_lines))
+        self._live_sink(live_text)
+
+        elapsed = time.time() - start
+        self.min_delay = min(max(elapsed * 6, 1.0 / 30), 0.5)
klaude_code/ui/rich/status.py
CHANGED
@@ -22,7 +22,7 @@ BREATHING_SPINNER_NAME = "dots"
 
 # Alternating glyphs for the breathing spinner - switches at each "transparent" point
 _BREATHING_SPINNER_GLYPHS_BASE = [
-    "
+    "✦",
 ]
 
 # Shuffle glyphs on module load for variety across sessions
@@ -56,18 +56,12 @@ def _shimmer_profile(main_text: str) -> list[tuple[str, float]]:
     char_count = len(chars)
     period = char_count + padding * 2
 
-    #
-
-    # baseline text length and the configured sweep duration.
-    # The baseline is chosen to be close to the default
-    # "Thinking … (esc to interrupt)" status line.
-    baseline_chars = 30
-    base_period = baseline_chars + padding * 2
-    sweep_seconds = const.STATUS_SHIMMER_SWEEP_SECONDS
-    char_speed = base_period / sweep_seconds if sweep_seconds > 0 else base_period
+    # Use same period as breathing spinner for visual consistency
+    sweep_seconds = max(const.SPINNER_BREATH_PERIOD_SECONDS, 0.1)
 
     elapsed = _elapsed_since_start()
-
+    # Complete one full sweep in sweep_seconds, regardless of text length
+    pos_f = (elapsed / sweep_seconds % 1.0) * period
     pos = int(pos_f)
     band_half_width = const.STATUS_SHIMMER_BAND_HALF_WIDTH
 
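
After this change the shimmer position is a pure function of elapsed time and one shared period constant, so any status text completes a sweep in the same wall-clock time. A tiny standalone version of the phase computation (constants copied from `const.py` above):

```python
SPINNER_BREATH_PERIOD_SECONDS = 2.0  # the constant the shimmer now reuses
STATUS_SHIMMER_PADDING = 10


def shimmer_position(elapsed: float, text: str) -> int:
    """Return the shimmer band centre for a given elapsed time and status text."""
    period = len(text) + STATUS_SHIMMER_PADDING * 2
    sweep_seconds = max(SPINNER_BREATH_PERIOD_SECONDS, 0.1)
    # One full sweep per sweep_seconds, regardless of how long the text is.
    return int((elapsed / sweep_seconds % 1.0) * period)


for t in (0.0, 0.5, 1.0, 1.5, 1.99):
    print(t, shimmer_position(t, "Thinking …"))
```
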
{klaude_code-1.2.24.dist-info → klaude_code-1.2.25.dist-info}/METADATA
CHANGED

@@ -1,11 +1,12 @@
 Metadata-Version: 2.3
 Name: klaude-code
-Version: 1.2.
+Version: 1.2.25
 Summary: Add your description here
 Requires-Dist: anthropic>=0.66.0
 Requires-Dist: chardet>=5.2.0
 Requires-Dist: ddgs>=9.9.3
 Requires-Dist: diff-match-patch>=20241021
+Requires-Dist: markdown-it-py>=4.0.0
 Requires-Dist: openai>=1.102.0
 Requires-Dist: pillow>=12.0.0
 Requires-Dist: prompt-toolkit>=3.0.52
{klaude_code-1.2.24.dist-info → klaude_code-1.2.25.dist-info}/RECORD
CHANGED

@@ -11,7 +11,7 @@ klaude_code/cli/config_cmd.py,sha256=SBFmBnHvkf5IJtpsDDuHsHWQCmYd2i4PtIMBOKpxmOM
 klaude_code/cli/debug.py,sha256=vizBXc3648vBZQonreMqvv_b5UdRgcQoOIT-iEIx1G4,2318
 klaude_code/cli/list_model.py,sha256=9YOxhWE0J59NaY-SrgPA9_jA1A8rlOGwWmzK0TRuos4,8011
 klaude_code/cli/main.py,sha256=pyU2W2X3lg7Z-4adiOzA9_2l-5QSejYm68HrhAiu470,10469
-klaude_code/cli/runtime.py,sha256=
+klaude_code/cli/runtime.py,sha256=XqF1d53UnIgnmnlqKGR3YBdBNlBFuoyc7BvUfr22CGw,14824
 klaude_code/cli/self_update.py,sha256=fekLNRm3ivZ-Xbc-79rcgDBXbq-Zb-BkSQOGMRLeTAs,7986
 klaude_code/cli/session_cmd.py,sha256=jAopkqq_DGgoDIcGxT-RSzn9R4yqBC8NCaNgK1GLqnQ,2634
 klaude_code/command/__init__.py,sha256=B39fxrrvxb51B6qeQJoh3lXWCsPoI81BJvdSLb-8CYg,3117
@@ -30,14 +30,14 @@ klaude_code/command/registry.py,sha256=avTjsoyLv11SsLsY_qb3OpsRjsSyxIlu7uwJI0Nq6
 klaude_code/command/release_notes_cmd.py,sha256=FIrBRfKTlXEp8mBh15buNjgOrl_GMX7FeeMWxYYBn1o,2674
 klaude_code/command/status_cmd.py,sha256=sYmzfex7RVhgrBCjRyD8fsZ6ioZvjVzQ_-FvmcsA7fo,5365
 klaude_code/command/terminal_setup_cmd.py,sha256=SivM1gX_anGY_8DCQNFZ5VblFqt4sVgCMEWPRlo6K5w,10911
-klaude_code/command/thinking_cmd.py,sha256=
+klaude_code/command/thinking_cmd.py,sha256=8EdSN6huXihM5NHJEryZLA7CkgRT7mZgMVTJsT1-x8U,9108
 klaude_code/config/__init__.py,sha256=Qrqvi8nizkj6N77h2vDj0r4rbgCiqxvz2HLBPFuWulA,120
 klaude_code/config/config.py,sha256=2jvM6a8zoC-TdRFaLIw3OW5paxxeXC6l-o05ds4RysA,7263
 klaude_code/config/select_model.py,sha256=KCdFjaoHXyO9QidNna_OGdDrvlEXtRUXKfG-F8kdNLk,5188
-klaude_code/const.py,sha256=
+klaude_code/const.py,sha256=Hj7tBQLk51eELV-_nnE8nFoAvB_UKTeTkcr929f4hEI,4399
 klaude_code/core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 klaude_code/core/agent.py,sha256=bWm-UFX_0-KAy5j_YHH8X8o3MJT4-40Ni2EaDP2SL5k,5819
-klaude_code/core/executor.py,sha256=
+klaude_code/core/executor.py,sha256=MkfKPOZWknjyQzSUl_qCGRxsyYY9zetQVnerO_dWqx4,26827
 klaude_code/core/manager/__init__.py,sha256=hdIbpnYj6i18byiWjtJIm5l7NYYDQMvafw8fePVPydc,562
 klaude_code/core/manager/llm_clients.py,sha256=X2oMFWgJcP0tK8GEtMMDYR3HyR6_H8FuyCqpzWF5x2k,871
 klaude_code/core/manager/llm_clients_builder.py,sha256=pPZ_xBh-_ipV66L-9a1fnwNos4iik82Zkq0E0y3WrfI,1521
@@ -123,8 +123,8 @@ klaude_code/protocol/commands.py,sha256=GN6GX9fo7YYtfumrBTpOmOvZofsnzZN2SAxP2X0B
 klaude_code/protocol/events.py,sha256=KUMf1rLNdHQO9cZiQ9Pa1VsKkP1PTMbUkp18bu_jGy8,3935
 klaude_code/protocol/llm_param.py,sha256=cb4ubLq21PIsMOC8WJb0aid12z_sT1b7FsbNJMr-jLg,4255
 klaude_code/protocol/model.py,sha256=aJUavwtGWY-XiDF6qk2ZV6FwEfjTqOnXZeznB7_zc_4,13606
-klaude_code/protocol/op.py,sha256=
-klaude_code/protocol/op_handler.py,sha256=
+klaude_code/protocol/op.py,sha256=zG8AGFcTx1vIZFN0lNZjIjucjmDYM4eVOR7tRiLofF4,4589
+klaude_code/protocol/op_handler.py,sha256=feTMdrz2QBwnjdv6ndizTinbBA9HFeH4oiBDeQBRKoY,1749
 klaude_code/protocol/sub_agent/__init__.py,sha256=Abap5lPLgnSCQsVD3axfeqnj2UtxOcDLGX8e9HugfSU,3964
 klaude_code/protocol/sub_agent/explore.py,sha256=Z4M7i98XBLew38ClXiW-hJteSYjMUu2b548rkR7JW3A,2579
 klaude_code/protocol/sub_agent/oracle.py,sha256=0cbuutKQcvwaM--Q15mbkCdbpZMF4YjxDN1jkuGVKp4,3344
@@ -162,11 +162,11 @@ klaude_code/ui/modes/exec/display.py,sha256=m2kkgaUoGD9rEVUmcm7Vs_PyAI2iruKCJYRh
 klaude_code/ui/modes/repl/__init__.py,sha256=35a6SUiL1SDi2i43X2VjHQw97rR7yhbLBzkGI5aC6Bc,1526
 klaude_code/ui/modes/repl/clipboard.py,sha256=ZCpk7kRSXGhh0Q_BWtUUuSYT7ZOqRjAoRcg9T9n48Wo,5137
 klaude_code/ui/modes/repl/completers.py,sha256=GIvUS9TAFMMPDpoXLuIupEccoqIMEpSEw4IZmKjVo4c,28560
-klaude_code/ui/modes/repl/display.py,sha256=
-klaude_code/ui/modes/repl/event_handler.py,sha256=
-klaude_code/ui/modes/repl/input_prompt_toolkit.py,sha256=
+klaude_code/ui/modes/repl/display.py,sha256=06wawOHWO2ItEA9EIEh97p3GDID7TJhAtpaA03nPQXs,2335
+klaude_code/ui/modes/repl/event_handler.py,sha256=r-e12QmWz2_lGg0PilqFG0WB-BmsB0ygqZClkRA63CA,24672
+klaude_code/ui/modes/repl/input_prompt_toolkit.py,sha256=F1p4JZp-KjDvTEZVR6bC0nb4ayd2VaEYsLmEA0KJOUM,9054
 klaude_code/ui/modes/repl/key_bindings.py,sha256=Fxz9Ey2SnOHvfleMeSYVduxuofY0Yo-97hMRs-OMe-o,7800
-klaude_code/ui/modes/repl/renderer.py,sha256=
+klaude_code/ui/modes/repl/renderer.py,sha256=CsNMAf1GzGMyEfNsA0I68PQBfnI1KWpYewoVxEegv7w,15838
 klaude_code/ui/renderers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 klaude_code/ui/renderers/assistant.py,sha256=nO_QNJ2e9TwtU2IlojO9lCMWNFUNmcE9Ezz-WW748_w,743
 klaude_code/ui/renderers/common.py,sha256=5RdXC3ngtlhfmxRlbwOPtHtbXhAoWwbHoGX78tZcaTc,2284
@@ -180,12 +180,12 @@ klaude_code/ui/renderers/tools.py,sha256=W24deCM4GhZT69gzo3FwZ0QVYbQ-x5Bo2-y5nFk
 klaude_code/ui/renderers/user_input.py,sha256=e2hZS7UUnzQuQ6UqzSKRDkFJMkKTLUoub1JclHMX40g,3941
 klaude_code/ui/rich/__init__.py,sha256=zEZjnHR3Fnv_sFMxwIMjoJfwDoC4GRGv3lHJzAGRq_o,236
 klaude_code/ui/rich/cjk_wrap.py,sha256=ncmifgTwF6q95iayHQyazGbntt7BRQb_Ed7aXc8JU6Y,7551
-klaude_code/ui/rich/code_panel.py,sha256=
-klaude_code/ui/rich/live.py,sha256=
-klaude_code/ui/rich/markdown.py,sha256=
+klaude_code/ui/rich/code_panel.py,sha256=ZKuJHh-kh-hIkBXSGLERLaDbJ7I9hvtvmYKocJn39_w,4744
+klaude_code/ui/rich/live.py,sha256=qiBLPSE4KW_Dpemy5MZ5BKhkFWEN2fjXBiQHmhJrPSM,2722
+klaude_code/ui/rich/markdown.py,sha256=LoI47hzyXi3vsLJ69Kfj2qlZhWah7bscQ7O-CjYV9rs,14564
 klaude_code/ui/rich/quote.py,sha256=tZcxN73SfDBHF_qk0Jkh9gWBqPBn8VLp9RF36YRdKEM,1123
 klaude_code/ui/rich/searchable_text.py,sha256=DCVZgEFv7_ergAvT2v7XrfQAUXUzhmAwuVAchlIx8RY,2448
-klaude_code/ui/rich/status.py,sha256=
+klaude_code/ui/rich/status.py,sha256=QHg4oWmPSQH19H81vOFpImEqWyDtAbIXjuCGsuDjBPA,9278
 klaude_code/ui/rich/theme.py,sha256=GpPd_BD7rkCpmWDjdOYoW65UgJSMxAjA28Sgv5GbUNg,13291
 klaude_code/ui/terminal/__init__.py,sha256=GIMnsEcIAGT_vBHvTlWEdyNmAEpruyscUA6M_j3GQZU,1412
 klaude_code/ui/terminal/color.py,sha256=M-i09DVlLAhAyhQjfeAi7OipoGi1p_OVkaZxeRfykY0,7135
@@ -194,7 +194,7 @@ klaude_code/ui/terminal/notifier.py,sha256=wkRM66d98Oh6PujnN4bB7NiQxIYEHqQXverMK
 klaude_code/ui/terminal/progress_bar.py,sha256=MDnhPbqCnN4GDgLOlxxOEVZPDwVC_XL2NM5sl1MFNcQ,2133
 klaude_code/ui/utils/__init__.py,sha256=YEsCLjbCPaPza-UXTPUMTJTrc9BmNBUP5CbFWlshyOQ,15
 klaude_code/ui/utils/common.py,sha256=tqHqwgLtAyP805kwRFyoAL4EgMutcNb3Y-GAXJ4IeuM,2263
-klaude_code-1.2.
-klaude_code-1.2.
-klaude_code-1.2.
-klaude_code-1.2.
+klaude_code-1.2.25.dist-info/WHEEL,sha256=eh7sammvW2TypMMMGKgsM83HyA_3qQ5Lgg3ynoecH3M,79
+klaude_code-1.2.25.dist-info/entry_points.txt,sha256=7CWKjolvs6dZiYHpelhA_FRJ-sVDh43eu3iWuOhKc_w,53
+klaude_code-1.2.25.dist-info/METADATA,sha256=kgFYc3ithDjpNaEC_p6PQNa6E1eaYQZ2bi13r7PQQMs,7762
+klaude_code-1.2.25.dist-info/RECORD,,

{klaude_code-1.2.24.dist-info → klaude_code-1.2.25.dist-info}/WHEEL
File without changes

{klaude_code-1.2.24.dist-info → klaude_code-1.2.25.dist-info}/entry_points.txt
File without changes