klaude-code 2.8.1__py3-none-any.whl → 2.9.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- klaude_code/app/runtime.py +2 -1
- klaude_code/auth/antigravity/oauth.py +33 -38
- klaude_code/auth/antigravity/token_manager.py +0 -18
- klaude_code/auth/base.py +53 -0
- klaude_code/auth/claude/oauth.py +34 -49
- klaude_code/auth/codex/exceptions.py +0 -4
- klaude_code/auth/codex/oauth.py +32 -28
- klaude_code/auth/codex/token_manager.py +0 -18
- klaude_code/cli/cost_cmd.py +128 -39
- klaude_code/cli/list_model.py +27 -10
- klaude_code/cli/main.py +14 -3
- klaude_code/config/assets/builtin_config.yaml +25 -24
- klaude_code/config/config.py +47 -25
- klaude_code/config/sub_agent_model_helper.py +18 -13
- klaude_code/config/thinking.py +0 -8
- klaude_code/const.py +1 -1
- klaude_code/core/agent_profile.py +11 -56
- klaude_code/core/compaction/overflow.py +0 -4
- klaude_code/core/executor.py +33 -5
- klaude_code/core/manager/llm_clients.py +9 -1
- klaude_code/core/prompts/prompt-claude-code.md +4 -4
- klaude_code/core/reminders.py +21 -23
- klaude_code/core/task.py +1 -5
- klaude_code/core/tool/__init__.py +3 -2
- klaude_code/core/tool/file/apply_patch.py +0 -27
- klaude_code/core/tool/file/read_tool.md +3 -2
- klaude_code/core/tool/file/read_tool.py +27 -3
- klaude_code/core/tool/offload.py +0 -35
- klaude_code/core/tool/shell/bash_tool.py +1 -1
- klaude_code/core/tool/sub_agent/__init__.py +6 -0
- klaude_code/core/tool/sub_agent/image_gen.md +16 -0
- klaude_code/core/tool/sub_agent/image_gen.py +146 -0
- klaude_code/core/tool/sub_agent/task.md +20 -0
- klaude_code/core/tool/sub_agent/task.py +205 -0
- klaude_code/core/tool/tool_registry.py +0 -16
- klaude_code/core/turn.py +1 -1
- klaude_code/llm/anthropic/input.py +6 -5
- klaude_code/llm/antigravity/input.py +14 -7
- klaude_code/llm/bedrock_anthropic/__init__.py +3 -0
- klaude_code/llm/google/client.py +8 -6
- klaude_code/llm/google/input.py +20 -12
- klaude_code/llm/image.py +18 -11
- klaude_code/llm/input_common.py +32 -6
- klaude_code/llm/json_stable.py +37 -0
- klaude_code/llm/{codex → openai_codex}/__init__.py +1 -1
- klaude_code/llm/{codex → openai_codex}/client.py +24 -2
- klaude_code/llm/openai_codex/prompt_sync.py +237 -0
- klaude_code/llm/openai_compatible/client.py +3 -1
- klaude_code/llm/openai_compatible/input.py +0 -10
- klaude_code/llm/openai_compatible/stream.py +35 -10
- klaude_code/llm/{responses → openai_responses}/client.py +1 -1
- klaude_code/llm/{responses → openai_responses}/input.py +15 -5
- klaude_code/llm/registry.py +3 -8
- klaude_code/llm/stream_parts.py +3 -1
- klaude_code/llm/usage.py +1 -9
- klaude_code/protocol/events.py +2 -2
- klaude_code/protocol/message.py +3 -2
- klaude_code/protocol/model.py +34 -2
- klaude_code/protocol/op.py +13 -0
- klaude_code/protocol/op_handler.py +5 -0
- klaude_code/protocol/sub_agent/AGENTS.md +5 -5
- klaude_code/protocol/sub_agent/__init__.py +13 -34
- klaude_code/protocol/sub_agent/explore.py +7 -34
- klaude_code/protocol/sub_agent/image_gen.py +3 -74
- klaude_code/protocol/sub_agent/task.py +3 -47
- klaude_code/protocol/sub_agent/web.py +8 -52
- klaude_code/protocol/tools.py +2 -0
- klaude_code/session/session.py +80 -22
- klaude_code/session/store.py +0 -4
- klaude_code/skill/assets/deslop/SKILL.md +9 -0
- klaude_code/skill/system_skills.py +0 -20
- klaude_code/tui/command/fork_session_cmd.py +5 -2
- klaude_code/tui/command/resume_cmd.py +9 -2
- klaude_code/tui/command/sub_agent_model_cmd.py +85 -18
- klaude_code/tui/components/assistant.py +0 -26
- klaude_code/tui/components/bash_syntax.py +4 -0
- klaude_code/tui/components/command_output.py +3 -1
- klaude_code/tui/components/developer.py +3 -0
- klaude_code/tui/components/diffs.py +4 -209
- klaude_code/tui/components/errors.py +4 -0
- klaude_code/tui/components/mermaid_viewer.py +2 -2
- klaude_code/tui/components/metadata.py +0 -3
- klaude_code/tui/components/rich/markdown.py +120 -87
- klaude_code/tui/components/rich/status.py +2 -2
- klaude_code/tui/components/rich/theme.py +11 -6
- klaude_code/tui/components/sub_agent.py +2 -46
- klaude_code/tui/components/thinking.py +0 -33
- klaude_code/tui/components/tools.py +65 -21
- klaude_code/tui/components/user_input.py +2 -0
- klaude_code/tui/input/images.py +21 -18
- klaude_code/tui/input/key_bindings.py +2 -2
- klaude_code/tui/input/prompt_toolkit.py +49 -49
- klaude_code/tui/machine.py +29 -47
- klaude_code/tui/renderer.py +48 -33
- klaude_code/tui/runner.py +2 -1
- klaude_code/tui/terminal/image.py +27 -34
- klaude_code/ui/common.py +0 -70
- {klaude_code-2.8.1.dist-info → klaude_code-2.9.1.dist-info}/METADATA +3 -6
- {klaude_code-2.8.1.dist-info → klaude_code-2.9.1.dist-info}/RECORD +103 -99
- klaude_code/core/tool/sub_agent_tool.py +0 -126
- klaude_code/llm/bedrock/__init__.py +0 -3
- klaude_code/llm/openai_compatible/tool_call_accumulator.py +0 -108
- klaude_code/tui/components/rich/searchable_text.py +0 -68
- /klaude_code/llm/{bedrock → bedrock_anthropic}/client.py +0 -0
- /klaude_code/llm/{responses → openai_responses}/__init__.py +0 -0
- {klaude_code-2.8.1.dist-info → klaude_code-2.9.1.dist-info}/WHEEL +0 -0
- {klaude_code-2.8.1.dist-info → klaude_code-2.9.1.dist-info}/entry_points.txt +0 -0
klaude_code/tui/machine.py
CHANGED
@@ -32,7 +32,6 @@ from klaude_code.tui.commands import (
     RenderTaskFinish,
     RenderTaskMetadata,
     RenderTaskStart,
-    RenderThinkingHeader,
     RenderToolCall,
     RenderToolResult,
     RenderTurnStart,
@@ -49,7 +48,7 @@ from klaude_code.tui.commands import (
 from klaude_code.tui.components.rich import status as r_status
 from klaude_code.tui.components.rich.theme import ThemeKey
 from klaude_code.tui.components.thinking import extract_last_bold_header, normalize_thinking_content
-from klaude_code.tui.components.tools import get_tool_active_form, is_sub_agent_tool
+from klaude_code.tui.components.tools import get_task_active_form, get_tool_active_form, is_sub_agent_tool
 
 # Tools that complete quickly and don't benefit from streaming activity display.
 # For models without fine-grained tool JSON streaming (e.g., Gemini), showing these
@@ -68,25 +67,6 @@ FAST_TOOLS: frozenset[str] = frozenset(
 )
 
 
-@dataclass
-class SubAgentThinkingHeaderState:
-    buffer: str = ""
-    last_header: str | None = None
-
-    def append_and_extract_new_header(self, content: str) -> str | None:
-        self.buffer += content
-
-        max_chars = 8192
-        if len(self.buffer) > max_chars:
-            self.buffer = self.buffer[-max_chars:]
-
-        header = extract_last_bold_header(normalize_thinking_content(self.buffer))
-        if header and header != self.last_header:
-            self.last_header = header
-            return header
-        return None
-
-
 class ActivityState:
     """Tracks composing/tool activity for spinner display."""
 
@@ -97,10 +77,6 @@ class ActivityState:
         self._sub_agent_tool_calls: dict[str, int] = {}
         self._sub_agent_tool_calls_by_id: dict[str, str] = {}
 
-    @property
-    def is_composing(self) -> bool:
-        return self._composing and not self._tool_calls and not self._sub_agent_tool_calls
-
     def set_composing(self, composing: bool) -> None:
         self._composing = composing
         if not composing:
@@ -114,7 +90,10 @@ class ActivityState:
 
     def add_sub_agent_tool_call(self, tool_call_id: str, tool_name: str) -> None:
         if tool_call_id in self._sub_agent_tool_calls_by_id:
-
+            old_tool_name = self._sub_agent_tool_calls_by_id[tool_call_id]
+            self._sub_agent_tool_calls[old_tool_name] = self._sub_agent_tool_calls.get(old_tool_name, 0) - 1
+            if self._sub_agent_tool_calls[old_tool_name] <= 0:
+                self._sub_agent_tool_calls.pop(old_tool_name, None)
         self._sub_agent_tool_calls_by_id[tool_call_id] = tool_name
         self._sub_agent_tool_calls[tool_name] = self._sub_agent_tool_calls.get(tool_name, 0) + 1
 
@@ -307,7 +286,6 @@ class SpinnerStatusState:
 class _SessionState:
     session_id: str
     sub_agent_state: model.SubAgentState | None = None
-    sub_agent_thinking_header: SubAgentThinkingHeaderState | None = None
     model_id: str | None = None
     assistant_stream_active: bool = False
     thinking_stream_active: bool = False
@@ -321,7 +299,7 @@ class _SessionState:
 
     @property
     def should_show_sub_agent_thinking_header(self) -> bool:
-        return bool(self.sub_agent_state and self.sub_agent_state.sub_agent_type ==
+        return bool(self.sub_agent_state and self.sub_agent_state.sub_agent_type == tools.IMAGE_GEN)
 
     @property
     def should_extract_reasoning_header(self) -> bool:
@@ -422,8 +400,6 @@ class DisplayStateMachine:
             self._set_primary_if_needed(e.session_id)
             if not is_replay:
                 cmds.append(TaskClockStart())
-            else:
-                s.sub_agent_thinking_header = SubAgentThinkingHeaderState()
 
             if not is_replay:
                 cmds.append(SpinnerStart())
@@ -469,7 +445,11 @@ class DisplayStateMachine:
 
         case events.ThinkingStartEvent() as e:
             if s.is_sub_agent:
-
+                if not s.should_show_sub_agent_thinking_header:
+                    return []
+                s.thinking_stream_active = True
+                cmds.append(StartThinkingStream(session_id=e.session_id))
+                return cmds
             if not self._is_primary(e.session_id):
                 return []
             s.thinking_stream_active = True
@@ -487,11 +467,7 @@ class DisplayStateMachine:
             if s.is_sub_agent:
                 if not s.should_show_sub_agent_thinking_header:
                     return []
-
-                s.sub_agent_thinking_header = SubAgentThinkingHeaderState()
-                header = s.sub_agent_thinking_header.append_and_extract_new_header(e.content)
-                if header:
-                    cmds.append(RenderThinkingHeader(session_id=e.session_id, header=header))
+                cmds.append(AppendThinking(session_id=e.session_id, content=e.content))
                 return cmds
 
             if not self._is_primary(e.session_id):
@@ -511,7 +487,11 @@ class DisplayStateMachine:
 
         case events.ThinkingEndEvent() as e:
             if s.is_sub_agent:
-
+                if not s.should_show_sub_agent_thinking_header:
+                    return []
+                s.thinking_stream_active = False
+                cmds.append(EndThinkingStream(session_id=e.session_id))
+                return cmds
             if not self._is_primary(e.session_id):
                 return []
             s.thinking_stream_active = False
@@ -629,12 +609,17 @@ class DisplayStateMachine:
                 primary.thinking_stream_active = False
                 cmds.append(EndThinkingStream(session_id=primary.session_id))
 
+            if not is_replay and e.tool_name == tools.TASK and not s.should_skip_tool_activity(e.tool_name):
+                tool_active_form = get_task_active_form(e.arguments)
+                self._spinner.add_sub_agent_tool_call(e.tool_call_id, tool_active_form)
+                cmds.extend(self._spinner_update_commands())
+
             cmds.append(RenderToolCall(e))
             return cmds
 
         case events.ToolResultEvent() as e:
             if not is_replay and is_sub_agent_tool(e.tool_name):
-                self._spinner.finish_sub_agent_tool_call(e.tool_call_id
+                self._spinner.finish_sub_agent_tool_call(e.tool_call_id)
                 cmds.extend(self._spinner_update_commands())
 
             if s.is_sub_agent and not e.is_error:
@@ -675,15 +660,12 @@ class DisplayStateMachine:
         case events.TaskFinishEvent() as e:
             s.task_active = False
             cmds.append(RenderTaskFinish(e))
-            if not s.is_sub_agent:
-
-
-
-
-
-                cmds.append(EmitTmuxSignal())
-            else:
-                s.sub_agent_thinking_header = None
+            if not s.is_sub_agent and not is_replay:
+                cmds.append(TaskClockClear())
+                self._spinner.reset()
+                cmds.append(SpinnerStop())
+                cmds.append(PrintRuleLine())
+                cmds.append(EmitTmuxSignal())
             return cmds
 
         case events.InterruptEvent() as e:
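The `add_sub_agent_tool_call` change above releases the previously recorded tool name when a `tool_call_id` is re-registered, so the spinner's per-tool counters cannot drift. Below is a minimal standalone sketch of that reference-counting pattern; the class, method names, and the `finish` counterpart are illustrative assumptions, not the actual `ActivityState` API.

```python
class ToolCallCounter:
    """Sketch of reference-counted tracking for sub-agent tool calls."""

    def __init__(self) -> None:
        self._counts: dict[str, int] = {}   # active calls per tool name
        self._by_id: dict[str, str] = {}    # tool_call_id -> tool name

    def add(self, tool_call_id: str, tool_name: str) -> None:
        # Re-registering an id first releases the previously recorded tool,
        # mirroring the fix shown in the diff above.
        if tool_call_id in self._by_id:
            old = self._by_id[tool_call_id]
            self._counts[old] = self._counts.get(old, 0) - 1
            if self._counts[old] <= 0:
                self._counts.pop(old, None)
        self._by_id[tool_call_id] = tool_name
        self._counts[tool_name] = self._counts.get(tool_name, 0) + 1

    def finish(self, tool_call_id: str) -> None:
        # Assumed counterpart; the real finish_sub_agent_tool_call body is not in this diff.
        tool_name = self._by_id.pop(tool_call_id, None)
        if tool_name is None:
            return
        self._counts[tool_name] = self._counts.get(tool_name, 0) - 1
        if self._counts[tool_name] <= 0:
            self._counts.pop(tool_name, None)

    def active(self) -> dict[str, int]:
        return dict(self._counts)


counter = ToolCallCounter()
counter.add("call-1", "Exploring codebase")
counter.add("call-1", "Running task")   # same id, new label: old count is released
counter.finish("call-1")
assert counter.active() == {}
```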
klaude_code/tui/renderer.py
CHANGED
@@ -67,7 +67,7 @@ from klaude_code.tui.components import thinking as c_thinking
 from klaude_code.tui.components import tools as c_tools
 from klaude_code.tui.components import user_input as c_user_input
 from klaude_code.tui.components import welcome as c_welcome
-from klaude_code.tui.components.common import truncate_head
+from klaude_code.tui.components.common import create_grid, truncate_head
 from klaude_code.tui.components.rich import status as r_status
 from klaude_code.tui.components.rich.live import CropAboveLive, SingleLine
 from klaude_code.tui.components.rich.markdown import MarkdownStream, NoInsetMarkdown, ThinkingMarkdown
@@ -168,6 +168,7 @@ class TUICommandRenderer:
         self._sessions: dict[str, _SessionStatus] = {}
         self._current_sub_agent_color: Style | None = None
         self._sub_agent_color_index = 0
+        self._sub_agent_thinking_buffers: dict[str, str] = {}
 
     # ---------------------------------------------------------------------
     # Session helpers
@@ -184,13 +185,6 @@ class TUICommandRenderer:
     def is_sub_agent_session(self, session_id: str) -> bool:
         return session_id in self._sessions and self._sessions[session_id].sub_agent_state is not None
 
-    def _should_display_sub_agent_thinking_header(self, session_id: str) -> bool:
-        # Hardcoded: only show sub-agent thinking headers for ImageGen.
-        st = self._sessions.get(session_id)
-        if st is None or st.sub_agent_state is None:
-            return False
-        return st.sub_agent_state.sub_agent_type == "ImageGen"
-
     def _advance_sub_agent_color_index(self) -> None:
         palette_size = len(self.themes.sub_agent_colors)
         if palette_size == 0:
@@ -332,8 +326,7 @@ class TUICommandRenderer:
         if pad_lines:
             stream = Padding(stream, (0, 0, pad_lines, 0))
         stream_part = stream
-
-        gap_part = Text("") if self._spinner_visible else Group()
+        gap_part = Text("")
 
         status_part: RenderableType = SingleLine(self._status_spinner) if self._spinner_visible else Group()
         return Group(stream_part, gap_part, status_part)
@@ -388,6 +381,19 @@ class TUICommandRenderer:
     def _flush_assistant(self) -> None:
         self._assistant_stream.render()
 
+    def _render_sub_agent_thinking(self, content: str) -> None:
+        """Render sub-agent thinking content as a single block."""
+        normalized = c_thinking.normalize_thinking_content(content)
+        if not normalized.strip():
+            return
+        md = ThinkingMarkdown(normalized, code_theme=self.themes.code_theme, style=ThemeKey.THINKING)
+        self.console.push_theme(self.themes.thinking_markdown_theme)
+        grid = create_grid()
+        grid.add_row(Text(c_thinking.THINKING_MESSAGE_MARK, style=ThemeKey.THINKING), md)
+        self.print(grid)
+        self.console.pop_theme()
+        self.print()
+
     # ---------------------------------------------------------------------
     # Event-specific rendering helpers
     # ---------------------------------------------------------------------
@@ -417,21 +423,12 @@
         if image_path is not None:
             self.display_image(str(image_path))
 
-
-
-            self.print(renderable)
+        if not is_sub_agent and isinstance(e.ui_extra, model.ImageUIExtra):
+            self.display_image(e.ui_extra.file_path)
 
-
-        renderable = c_thinking.render_thinking(
-            content,
-            code_theme=self.themes.code_theme,
-            style=ThemeKey.THINKING,
-        )
+        renderable = c_tools.render_tool_result(e, code_theme=self.themes.code_theme, session_id=e.session_id)
         if renderable is not None:
-            self.console.push_theme(theme=self.themes.thinking_markdown_theme)
             self.print(renderable)
-            self.console.pop_theme()
-            self.print()
 
     def display_thinking_header(self, header: str) -> None:
         stripped = header.strip()
@@ -451,6 +448,13 @@
         with self.session_print_context(e.session_id):
             self.print(c_developer.render_developer_message(e))
 
+        # Display images from @ file references and user attachments
+        if e.item.ui_extra:
+            for ui_item in e.item.ui_extra.items:
+                if isinstance(ui_item, (model.AtFileImagesUIItem, model.UserImagesUIItem)):
+                    for image_path in ui_item.paths:
+                        self.display_image(image_path)
+
     def display_command_output(self, e: events.CommandOutputEvent) -> None:
         with self.session_print_context(e.session_id):
             self.print(c_command_output.render_command_output(e))
@@ -543,9 +547,9 @@
         )
         self.console.print(
             Rule(
-                Text("Context
+                Text("Context Compacted", style=ThemeKey.COMPACTION_SUMMARY),
                 characters="=",
-                style=ThemeKey.
+                style=ThemeKey.LINES,
             )
         )
         self.print()
@@ -631,20 +635,31 @@
                 self.display_command_output(event)
             case RenderTurnStart(event=event):
                 self.display_turn_start(event)
-            case StartThinkingStream():
-                if
+            case StartThinkingStream(session_id=session_id):
+                if self.is_sub_agent_session(session_id):
+                    self._sub_agent_thinking_buffers[session_id] = ""
+                elif not self._thinking_stream.is_active:
                     self._thinking_stream.start(self._new_thinking_mdstream())
-            case AppendThinking(content=content):
-                if self.
+            case AppendThinking(session_id=session_id, content=content):
+                if self.is_sub_agent_session(session_id):
+                    if session_id in self._sub_agent_thinking_buffers:
+                        self._sub_agent_thinking_buffers[session_id] += content
+                elif self._thinking_stream.is_active:
                     first_delta = self._thinking_stream.buffer == ""
                     self._thinking_stream.append(content)
                     if first_delta:
                         self._thinking_stream.render(transform=c_thinking.normalize_thinking_content)
                     self._flush_thinking()
-            case EndThinkingStream():
-
-
-
+            case EndThinkingStream(session_id=session_id):
+                if self.is_sub_agent_session(session_id):
+                    buf = self._sub_agent_thinking_buffers.pop(session_id, "")
+                    if buf.strip():
+                        with self.session_print_context(session_id):
+                            self._render_sub_agent_thinking(buf)
+                else:
+                    finalized = self._thinking_stream.finalize(transform=c_thinking.normalize_thinking_content)
+                    if finalized:
+                        self.print()
            case StartAssistantStream():
                if not self._assistant_stream.is_active:
                    self._assistant_stream.start(self._new_assistant_mdstream())
@@ -690,7 +705,7 @@
             case PrintBlankLine():
                 self.print()
             case PrintRuleLine():
-                self.console.print(Rule(characters="─", style=ThemeKey.
+                self.console.print(Rule(characters="─", style=ThemeKey.LINES_DIM))
             case EmitOsc94Error():
                 emit_osc94(OSC94States.ERROR)
             case EmitTmuxSignal():
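The renderer changes replace the old streaming sub-agent thinking headers with a buffer-then-flush approach: deltas for a sub-agent session accumulate in `_sub_agent_thinking_buffers` and are rendered as one block when the thinking stream ends. A minimal sketch of that pattern follows; the class and callback names are simplified assumptions, not the real `TUICommandRenderer` API.

```python
from collections.abc import Callable


class SubAgentThinkingBuffers:
    """Sketch of per-session buffering: collect deltas, render once at stream end."""

    def __init__(self, render_block: Callable[[str, str], None]) -> None:
        self._buffers: dict[str, str] = {}
        self._render_block = render_block   # called with (session_id, full_text)

    def start(self, session_id: str) -> None:
        self._buffers[session_id] = ""

    def append(self, session_id: str, content: str) -> None:
        # Deltas are only kept for sessions whose stream has been started.
        if session_id in self._buffers:
            self._buffers[session_id] += content

    def end(self, session_id: str) -> None:
        buf = self._buffers.pop(session_id, "")
        if buf.strip():
            self._render_block(session_id, buf)


blocks: list[tuple[str, str]] = []
buffers = SubAgentThinkingBuffers(lambda sid, text: blocks.append((sid, text)))
buffers.start("sub-1")
buffers.append("sub-1", "**Plan** ")
buffers.append("sub-1", "generate the image")
buffers.end("sub-1")   # rendered only now, as a single block
assert blocks == [("sub-1", "**Plan** generate the image")]
```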
klaude_code/tui/runner.py
CHANGED
@@ -327,5 +327,6 @@ async def run_interactive(init_config: AppInitConfig, session_id: str | None = N
     if not exit_hint_printed:
         active_session_id = components.executor.context.current_session_id()
         if active_session_id and Session.exists(active_session_id):
+            short_id = Session.shortest_unique_prefix(active_session_id)
             log(f"Session ID: {active_session_id}")
-            log(f"Resume with: klaude
+            log(f"Resume with: klaude -r {short_id}")
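`Session.shortest_unique_prefix` itself is not part of this diff, so its exact behavior is an assumption; the sketch below shows one plausible way to compute the shortest prefix of a session id that is unambiguous among stored sessions, which is what the new resume hint relies on.

```python
def shortest_unique_prefix(target: str, candidates: list[str]) -> str:
    """Return the shortest prefix of `target` shared by no other candidate.

    Standalone sketch only; the real Session.shortest_unique_prefix may differ
    (e.g. enforce a minimum length or read ids from the session store).
    """
    others = [c for c in candidates if c != target]
    for length in range(1, len(target) + 1):
        prefix = target[:length]
        if not any(o.startswith(prefix) for o in others):
            return prefix
    return target


sessions = ["a1b2c3", "a1f9e0", "7d4c21"]
assert shortest_unique_prefix("a1b2c3", sessions) == "a1b"   # "a1" still collides
assert shortest_unique_prefix("7d4c21", sessions) == "7"
```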
klaude_code/tui/terminal/image.py
CHANGED
@@ -2,7 +2,6 @@ from __future__ import annotations
 
 import base64
 import shutil
-import struct
 import subprocess
 import sys
 import tempfile
@@ -12,12 +11,23 @@ from typing import IO
 # Kitty graphics protocol chunk size (4096 is the recommended max)
 _CHUNK_SIZE = 4096
 
-# Max columns for
-_MAX_COLS =
+# Max columns for image display
+_MAX_COLS = 80
 
 # Image formats that need conversion to PNG
 _NEEDS_CONVERSION = {".jpg", ".jpeg", ".gif", ".bmp", ".webp", ".tiff", ".tif"}
 
+# Approximate pixels per terminal column (typical for most terminals)
+_PIXELS_PER_COL = 9
+
+
+def _get_png_width(data: bytes) -> int | None:
+    """Extract width from PNG header (IHDR chunk)."""
+    # PNG signature (8 bytes) + IHDR length (4 bytes) + "IHDR" (4 bytes) + width (4 bytes)
+    if len(data) < 24 or data[:8] != b"\x89PNG\r\n\x1a\n":
+        return None
+    return int.from_bytes(data[16:20], "big")
+
 
 def _convert_to_png(path: Path) -> bytes | None:
     """Convert image to PNG using sips (macOS) or convert (ImageMagick)."""
@@ -40,26 +50,10 @@ def _convert_to_png(path: Path) -> bytes | None:
     return None
 
 
-def _get_png_dimensions(data: bytes) -> tuple[int, int] | None:
-    """Extract width and height from PNG file header."""
-    # PNG: 8-byte signature + IHDR chunk (4 len + 4 type + 4 width + 4 height)
-    if len(data) < 24 or data[:8] != b"\x89PNG\r\n\x1a\n":
-        return None
-    width, height = struct.unpack(">II", data[16:24])
-    return width, height
-
-
 def print_kitty_image(file_path: str | Path, *, file: IO[str] | None = None) -> None:
     """Print an image to the terminal using Kitty graphics protocol.
 
-
-    with raw escape sequences. Image size adapts based on aspect ratio:
-    - Landscape images: fill terminal width
-    - Portrait images: limit height to avoid oversized display
-
-    Args:
-        file_path: Path to the image file (PNG recommended).
-        file: Output file stream. Defaults to stdout.
+    Only specifies column width; Kitty auto-scales height to preserve aspect ratio.
     """
     path = Path(file_path) if isinstance(file_path, str) else file_path
     if not path.exists():
@@ -80,20 +74,18 @@ def print_kitty_image(file_path: str | Path, *, file: IO[str] | None = None) ->
     out = file or sys.stdout
 
     term_size = shutil.get_terminal_size()
-
-
-    #
-
-
-
-
-
-
-    # Other images: limit width to 80% of terminal
-    size_param = f"c={min(_MAX_COLS, term_size.columns * 4 // 5)}"
+    target_cols = min(_MAX_COLS, term_size.columns)
+
+    # Only set column width if image is wider than target, to avoid upscaling small images
+    size_param = ""
+    img_width = _get_png_width(data)
+    if img_width is not None:
+        img_cols = img_width // _PIXELS_PER_COL
+        if img_cols > target_cols:
+            size_param = f"c={target_cols}"
     else:
-        # Fallback:
-        size_param = f"c={
+        # Fallback: always constrain if we can't determine image size
+        size_param = f"c={target_cols}"
     print("", file=out)
     _write_kitty_graphics(out, encoded, size_param=size_param)
     print("", file=out)
@@ -120,7 +112,8 @@ def _write_kitty_graphics(out: IO[str], encoded_data: str, *, size_param: str) -
 
         if i == 0:
             # First chunk: include control parameters
-
+            base_ctrl = f"a=T,f=100,{size_param}" if size_param else "a=T,f=100"
+            ctrl = f"{base_ctrl},m={0 if is_last else 1}"
             out.write(f"\033_G{ctrl};{chunk}\033\\")
         else:
             # Subsequent chunks: only m parameter needed
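The new sizing logic reads the image width straight from the PNG IHDR chunk and only emits a `c=<columns>` parameter when the image would overflow the target width, letting Kitty render smaller images at native size. A self-contained sketch assembled from the added lines above; the `kitty_size_param` helper and the tiny PNG fixture are illustrative, not part of the package.

```python
def get_png_width(data: bytes) -> int | None:
    """Read the image width from a PNG's IHDR chunk.

    Layout: 8-byte signature, then the IHDR chunk: 4-byte length, 4-byte
    "IHDR" type, then the 4-byte big-endian width at offsets 16..20.
    """
    if len(data) < 24 or data[:8] != b"\x89PNG\r\n\x1a\n":
        return None
    return int.from_bytes(data[16:20], "big")


def kitty_size_param(data: bytes, terminal_cols: int,
                     max_cols: int = 80, pixels_per_col: int = 9) -> str:
    """Mirror the sizing decision shown in the print_kitty_image diff (a sketch).

    Wide images are constrained to c=<columns>; small images get no size
    parameter so Kitty keeps them at native size instead of upscaling.
    """
    target_cols = min(max_cols, terminal_cols)
    width = get_png_width(data)
    if width is None:
        return f"c={target_cols}"          # unknown size: always constrain
    if width // pixels_per_col > target_cols:
        return f"c={target_cols}"
    return ""                              # small enough: let Kitty decide


# A minimal PNG header (1x1) is enough to exercise the width parsing.
tiny_png = (b"\x89PNG\r\n\x1a\n" + (13).to_bytes(4, "big") + b"IHDR"
            + (1).to_bytes(4, "big") + (1).to_bytes(4, "big") + bytes(5))
assert get_png_width(tiny_png) == 1
assert kitty_size_param(tiny_png, terminal_cols=120) == ""
```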
klaude_code/ui/common.py
CHANGED
@@ -1,17 +1,8 @@
-import re
-import subprocess
-from pathlib import Path
 from typing import TYPE_CHECKING
 
 if TYPE_CHECKING:
     from klaude_code.protocol.llm_param import LLMConfigModelParameter, OpenRouterProviderRouting
 
-LEADING_NEWLINES_REGEX = re.compile(r"^\n{2,}")
-
-
-def remove_leading_newlines(text: str) -> str:
-    return text.lstrip("\n")
-
 
 def format_number(tokens: int) -> str:
     if tokens < 1000:
@@ -33,67 +24,6 @@ def format_number(tokens: int) -> str:
     return f"{m}M{remaining}k"
 
 
-def get_current_git_branch(path: Path | None = None) -> str | None:
-    """Get current git branch name, return None if not in a git repository"""
-    if path is None:
-        path = Path.cwd()
-
-    try:
-        # Check if in git repository
-        git_dir = subprocess.run(
-            ["git", "rev-parse", "--git-dir"],
-            cwd=path,
-            capture_output=True,
-            text=True,
-            timeout=2,
-        )
-
-        if git_dir.returncode != 0:
-            return None
-
-        # Get current branch name
-        result = subprocess.run(
-            ["git", "branch", "--show-current"],
-            cwd=path,
-            capture_output=True,
-            text=True,
-            timeout=2,
-        )
-
-        if result.returncode == 0:
-            branch = result.stdout.strip()
-            return branch if branch else None
-
-        # Fallback: get HEAD reference
-        head_file = subprocess.run(
-            ["git", "rev-parse", "--abbrev-ref", "HEAD"],
-            cwd=path,
-            capture_output=True,
-            text=True,
-            timeout=2,
-        )
-
-        if head_file.returncode == 0:
-            branch = head_file.stdout.strip()
-            return branch if branch and branch != "HEAD" else None
-
-    except (subprocess.TimeoutExpired, subprocess.SubprocessError, FileNotFoundError):
-        pass
-
-    return None
-
-
-def show_path_with_tilde(path: Path | None = None):
-    if path is None:
-        path = Path.cwd()
-
-    try:
-        relative_path = path.relative_to(Path.home())
-        return f"~/{relative_path}"
-    except ValueError:
-        return str(path)
-
-
 def format_model_params(model_params: "LLMConfigModelParameter") -> list[str]:
     """Format model parameters in a concise style.
 
{klaude_code-2.8.1.dist-info → klaude_code-2.9.1.dist-info}/METADATA
CHANGED
@@ -1,11 +1,12 @@
 Metadata-Version: 2.3
 Name: klaude-code
-Version: 2.
+Version: 2.9.1
 Summary: Minimal code agent CLI
 Requires-Dist: anthropic>=0.66.0
 Requires-Dist: chardet>=5.2.0
 Requires-Dist: ddgs>=9.9.3
 Requires-Dist: diff-match-patch>=20241021
+Requires-Dist: filelock>=3.20.3
 Requires-Dist: google-genai>=1.56.0
 Requires-Dist: markdown-it-py>=4.0.0
 Requires-Dist: openai>=1.102.0
@@ -23,7 +24,7 @@ Description-Content-Type: text/markdown
 Minimal code agent CLI.
 
 ## Features
-- **Multi-provider**: Anthropic Message API, OpenAI Responses API, OpenRouter,
+- **Multi-provider**: Anthropic Message API, OpenAI Responses API, OpenRouter, ChatGPT Codex OAuth etc.
 - **Keep reasoning item in context**: Interleaved thinking support
 - **Model-aware tools**: Claude Code tool set for Opus, `apply_patch` for GPT-5/Codex
 - **Reminders**: Cooldown-based todo tracking, instruction reinforcement and external file change reminder
@@ -107,7 +108,6 @@ On first run, you'll be prompted to select a model. Your choice is saved as `mai
 | Provider | Env Variable | Models |
 |-------------|-----------------------|-------------------------------------------------------------------------------|
 | anthropic | `ANTHROPIC_API_KEY` | sonnet, opus |
-| claude | N/A (OAuth) | sonnet@claude, opus@claude (requires Claude Pro/Max subscription) |
 | openai | `OPENAI_API_KEY` | gpt-5.2 |
 | openrouter | `OPENROUTER_API_KEY` | gpt-5.2, gpt-5.2-fast, gpt-5.1-codex-max, sonnet, opus, haiku, kimi, gemini-* |
 | deepseek | `DEEPSEEK_API_KEY` | deepseek |
@@ -139,7 +139,6 @@ klaude auth login deepseek # Set DEEPSEEK_API_KEY
 klaude auth login moonshot # Set MOONSHOT_API_KEY
 
 # OAuth login for subscription-based providers
-klaude auth login claude # Claude Pro/Max subscription
 klaude auth login codex # ChatGPT Pro subscription
 ```
 
@@ -148,7 +147,6 @@ API keys are stored in `~/.klaude/klaude-auth.json` and used as fallback when en
 To logout from OAuth providers:
 
 ```bash
-klaude auth logout claude
 klaude auth logout codex
 ```
 
@@ -201,7 +199,6 @@ provider_list:
 ##### Supported Protocols
 
 - `anthropic` - Anthropic Messages API
-- `claude_oauth` - Claude OAuth (for Claude Pro/Max subscribers)
 - `openai` - OpenAI Chat Completion API
 - `responses` - OpenAI Responses API (for o-series, GPT-5, Codex)
 - `codex_oauth` - OpenAI Codex CLI (OAuth-based, for ChatGPT Pro subscribers)