klaude-code 2.5.1-py3-none-any.whl → 2.5.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. klaude_code/.DS_Store +0 -0
  2. klaude_code/cli/auth_cmd.py +2 -13
  3. klaude_code/cli/cost_cmd.py +10 -10
  4. klaude_code/cli/list_model.py +8 -0
  5. klaude_code/cli/main.py +41 -8
  6. klaude_code/cli/session_cmd.py +2 -11
  7. klaude_code/config/assets/builtin_config.yaml +45 -26
  8. klaude_code/config/config.py +30 -7
  9. klaude_code/config/model_matcher.py +3 -3
  10. klaude_code/config/sub_agent_model_helper.py +1 -1
  11. klaude_code/const.py +2 -1
  12. klaude_code/core/agent_profile.py +1 -0
  13. klaude_code/core/executor.py +4 -0
  14. klaude_code/core/loaded_skills.py +36 -0
  15. klaude_code/core/tool/context.py +1 -3
  16. klaude_code/core/tool/file/edit_tool.py +1 -1
  17. klaude_code/core/tool/file/read_tool.py +2 -2
  18. klaude_code/core/tool/file/write_tool.py +1 -1
  19. klaude_code/core/turn.py +19 -7
  20. klaude_code/llm/anthropic/client.py +97 -60
  21. klaude_code/llm/anthropic/input.py +20 -9
  22. klaude_code/llm/google/client.py +223 -148
  23. klaude_code/llm/google/input.py +44 -36
  24. klaude_code/llm/openai_compatible/stream.py +109 -99
  25. klaude_code/llm/openrouter/reasoning.py +4 -29
  26. klaude_code/llm/partial_message.py +2 -32
  27. klaude_code/llm/responses/client.py +99 -81
  28. klaude_code/llm/responses/input.py +11 -25
  29. klaude_code/llm/stream_parts.py +94 -0
  30. klaude_code/log.py +57 -0
  31. klaude_code/protocol/events/system.py +3 -0
  32. klaude_code/protocol/llm_param.py +1 -0
  33. klaude_code/session/export.py +259 -91
  34. klaude_code/session/templates/export_session.html +141 -59
  35. klaude_code/skill/.DS_Store +0 -0
  36. klaude_code/skill/assets/.DS_Store +0 -0
  37. klaude_code/skill/loader.py +1 -0
  38. klaude_code/tui/command/fork_session_cmd.py +14 -23
  39. klaude_code/tui/command/model_picker.py +2 -17
  40. klaude_code/tui/command/refresh_cmd.py +2 -0
  41. klaude_code/tui/command/resume_cmd.py +2 -18
  42. klaude_code/tui/command/sub_agent_model_cmd.py +5 -19
  43. klaude_code/tui/command/thinking_cmd.py +2 -14
  44. klaude_code/tui/components/common.py +1 -1
  45. klaude_code/tui/components/metadata.py +22 -21
  46. klaude_code/tui/components/rich/markdown.py +8 -0
  47. klaude_code/tui/components/rich/quote.py +36 -8
  48. klaude_code/tui/components/rich/theme.py +2 -0
  49. klaude_code/tui/components/welcome.py +32 -0
  50. klaude_code/tui/input/prompt_toolkit.py +3 -1
  51. klaude_code/tui/machine.py +19 -1
  52. klaude_code/tui/renderer.py +3 -4
  53. klaude_code/tui/terminal/selector.py +174 -31
  54. {klaude_code-2.5.1.dist-info → klaude_code-2.5.3.dist-info}/METADATA +1 -1
  55. {klaude_code-2.5.1.dist-info → klaude_code-2.5.3.dist-info}/RECORD +57 -53
  56. klaude_code/skill/assets/jj-workspace/SKILL.md +0 -20
  57. {klaude_code-2.5.1.dist-info → klaude_code-2.5.3.dist-info}/WHEEL +0 -0
  58. {klaude_code-2.5.1.dist-info → klaude_code-2.5.3.dist-info}/entry_points.txt +0 -0
klaude_code/tui/command/fork_session_cmd.py

@@ -3,26 +3,23 @@ import sys
 from dataclasses import dataclass
 from typing import Literal
 
-from prompt_toolkit.styles import Style
+from prompt_toolkit.styles import Style, merge_styles
 
 from klaude_code.protocol import commands, events, message, model
 from klaude_code.tui.input.clipboard import copy_to_clipboard
-from klaude_code.tui.terminal.selector import SelectItem, select_one
+from klaude_code.tui.terminal.selector import DEFAULT_PICKER_STYLE, SelectItem, select_one
 
 from .command_abc import Agent, CommandABC, CommandResult
 
-FORK_SELECT_STYLE = Style(
+FORK_SELECT_STYLE = merge_styles(
     [
-        ("msg", ""),
-        ("meta", "fg:ansibrightblack"),
-        ("separator", "fg:ansibrightblack"),
-        ("assistant", "fg:ansiblue"),
-        ("pointer", "bold fg:ansigreen"),
-        ("search_prefix", "fg:ansibrightblack"),
-        ("search_success", "noinherit fg:ansigreen"),
-        ("search_none", "noinherit fg:ansired"),
-        ("question", "bold"),
-        ("text", ""),
+        DEFAULT_PICKER_STYLE,
+        Style(
+            [
+                ("separator", "fg:ansibrightblack"),
+                ("assistant", "fg:ansiblue"),
+            ]
+        ),
     ]
 )
 
@@ -144,6 +141,7 @@ def _build_select_items(fork_points: list[ForkPoint]) -> list[SelectItem[int]]:
                 title=title_parts,
                 value=fp.history_index,
                 search_text=fp.user_message if not is_last else "fork entire conversation",
+                selectable=not is_first,
             )
         )
 
@@ -163,6 +161,9 @@ def _select_fork_point_sync(fork_points: list[ForkPoint]) -> int | Literal["canc
 
     # Default to the last option (fork entire conversation)
     last_value = items[-1].value
+    if last_value is None:
+        # Should not happen as we populate all items with int values
+        return -1
 
     # Non-interactive environments default to forking entire conversation
     if not sys.stdin.isatty() or not sys.stdout.isatty():
@@ -241,16 +242,6 @@ class ForkSessionCommand(CommandABC):
             )
             return CommandResult(events=[event])
 
-        # First option (empty session) is just for UI display, not a valid fork point
-        if selected == fork_points[0].history_index:
-            event = events.CommandOutputEvent(
-                session_id=agent.session.id,
-                command_name=self.name,
-                content="(cannot fork to empty session)",
-                is_error=True,
-            )
-            return CommandResult(events=[event])
-
         # Perform the fork
         new_session = agent.session.fork(until_index=selected)
         await new_session.wait_for_flush()
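
The FORK_SELECT_STYLE change above is the pattern repeated throughout this release: each picker keeps only its own class overrides and layers them on a shared DEFAULT_PICKER_STYLE with prompt_toolkit's merge_styles (later styles in the list win on conflicting class names). A minimal sketch of the idea; the contents of DEFAULT_PICKER_STYLE below are assumed for illustration, since its real definition lives in klaude_code/tui/terminal/selector.py and is not shown in this hunk:

from prompt_toolkit.styles import Style, merge_styles

# Hypothetical stand-in for the shared picker style defined in selector.py.
DEFAULT_PICKER_STYLE = Style(
    [
        ("pointer", "bold fg:ansigreen"),
        ("question", "bold"),
        ("search_success", "noinherit fg:ansigreen"),
        ("search_none", "noinherit fg:ansired"),
    ]
)

# Command-specific picker: add or override only the classes it needs.
FORK_SELECT_STYLE = merge_styles(
    [
        DEFAULT_PICKER_STYLE,
        Style(
            [
                ("separator", "fg:ansibrightblack"),
                ("assistant", "fg:ansiblue"),
            ]
        ),
    ]
)
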
klaude_code/tui/command/model_picker.py

@@ -72,9 +72,7 @@ def select_model_interactive(
         return ModelSelectResult(status=ModelSelectStatus.NON_TTY)
 
     # Interactive selection
-    from prompt_toolkit.styles import Style
-
-    from klaude_code.tui.terminal.selector import build_model_select_items, select_one
+    from klaude_code.tui.terminal.selector import DEFAULT_PICKER_STYLE, build_model_select_items, select_one
 
     names = [m.selector for m in result.filtered_models]
 
@@ -100,20 +98,7 @@
         pointer="→",
         use_search_filter=True,
         initial_value=initial_value,
-        style=Style(
-            [
-                ("pointer", "ansigreen"),
-                ("highlighted", "ansigreen"),
-                ("msg", ""),
-                ("meta", "fg:ansibrightblack"),
-                ("text", "ansibrightblack"),
-                ("question", "bold"),
-                ("search_prefix", "ansibrightblack"),
-                # search filter colors at the bottom
-                ("search_success", "noinherit fg:ansigreen"),
-                ("search_none", "noinherit fg:ansired"),
-            ]
-        ),
+        style=DEFAULT_PICKER_STYLE,
     )
     if isinstance(selected, str) and selected in names:
         return ModelSelectResult(status=ModelSelectStatus.SELECTED, model=selected)
klaude_code/tui/command/refresh_cmd.py

@@ -1,3 +1,4 @@
+from klaude_code.core.loaded_skills import get_loaded_skill_names_by_location
 from klaude_code.protocol import commands, events, message
 
 from .command_abc import Agent, CommandABC, CommandResult
@@ -30,6 +31,7 @@ class RefreshTerminalCommand(CommandABC):
                 session_id=agent.session.id,
                 work_dir=str(agent.session.work_dir),
                 llm_config=agent.get_llm_client().get_llm_config(),
+                loaded_skills=get_loaded_skill_names_by_location(),
             ),
             events.ReplayHistoryEvent(
                 session_id=agent.session.id,
klaude_code/tui/command/resume_cmd.py

@@ -1,28 +1,12 @@
 import asyncio
 
-from prompt_toolkit.styles import Style
-
 from klaude_code.log import log
 from klaude_code.protocol import commands, events, message, op
 from klaude_code.session.selector import build_session_select_options, format_user_messages_display
-from klaude_code.tui.terminal.selector import SelectItem, select_one
+from klaude_code.tui.terminal.selector import DEFAULT_PICKER_STYLE, SelectItem, select_one
 
 from .command_abc import Agent, CommandABC, CommandResult
 
-SESSION_SELECT_STYLE = Style(
-    [
-        ("msg", "fg:ansibrightblack"),
-        ("meta", ""),
-        ("pointer", "bold fg:ansigreen"),
-        ("highlighted", "fg:ansigreen"),
-        ("search_prefix", "fg:ansibrightblack"),
-        ("search_success", "noinherit fg:ansigreen"),
-        ("search_none", "noinherit fg:ansired"),
-        ("question", "bold"),
-        ("text", ""),
-    ]
-)
-
 
 def select_session_sync() -> str | None:
     """Interactive session selection (sync version for asyncio.to_thread)."""
@@ -62,7 +46,7 @@ def select_session_sync() -> str | None:
             message="Select a session to resume:",
            items=items,
            pointer="→",
-            style=SESSION_SELECT_STYLE,
+            style=DEFAULT_PICKER_STYLE,
        )
    except KeyboardInterrupt:
        return None
klaude_code/tui/command/sub_agent_model_cmd.py

@@ -4,27 +4,13 @@ from __future__ import annotations
 
 import asyncio
 
-from prompt_toolkit.styles import Style
-
 from klaude_code.config.config import load_config
 from klaude_code.config.sub_agent_model_helper import SubAgentModelHelper, SubAgentModelInfo
 from klaude_code.protocol import commands, events, message, op
-from klaude_code.tui.terminal.selector import SelectItem, build_model_select_items, select_one
+from klaude_code.tui.terminal.selector import DEFAULT_PICKER_STYLE, SelectItem, build_model_select_items, select_one
 
 from .command_abc import Agent, CommandABC, CommandResult
 
-SELECT_STYLE = Style(
-    [
-        ("instruction", "ansibrightblack"),
-        ("pointer", "ansigreen"),
-        ("highlighted", "ansigreen"),
-        ("text", "ansibrightblack"),
-        ("question", "bold"),
-        ("meta", "fg:ansibrightblack"),
-        ("msg", ""),
-    ]
-)
-
 USE_DEFAULT_BEHAVIOR = "__default__"
 
 
@@ -69,8 +55,8 @@ def _select_sub_agent_sync(
     result = select_one(
         message="Select sub-agent to configure:",
         items=items,
-        pointer="->",
-        style=SELECT_STYLE,
+        pointer="",
+        style=DEFAULT_PICKER_STYLE,
         use_search_filter=False,
     )
     return result if isinstance(result, str) else None
@@ -103,8 +89,8 @@ def _select_model_for_sub_agent_sync(
     result = select_one(
         message=f"Select model for {sub_agent_type}:",
         items=all_items,
-        pointer="->",
-        style=SELECT_STYLE,
+        pointer="",
+        style=DEFAULT_PICKER_STYLE,
         use_search_filter=True,
     )
     return result if isinstance(result, str) else None
klaude_code/tui/command/thinking_cmd.py

@@ -1,23 +1,11 @@
 import asyncio
 
-from prompt_toolkit.styles import Style
-
 from klaude_code.config.thinking import get_thinking_picker_data, parse_thinking_value
 from klaude_code.protocol import commands, events, llm_param, message, op
-from klaude_code.tui.terminal.selector import SelectItem, select_one
+from klaude_code.tui.terminal.selector import DEFAULT_PICKER_STYLE, SelectItem, select_one
 
 from .command_abc import Agent, CommandABC, CommandResult
 
-SELECT_STYLE = Style(
-    [
-        ("instruction", "ansibrightblack"),
-        ("pointer", "ansigreen"),
-        ("highlighted", "ansigreen"),
-        ("text", "ansibrightblack"),
-        ("question", "bold"),
-    ]
-)
-
 
 def _select_thinking_sync(config: llm_param.LLMConfigParameter) -> llm_param.Thinking | None:
     """Select thinking level (sync version)."""
@@ -35,7 +23,7 @@ def _select_thinking_sync(config: llm_param.LLMConfigParameter) -> llm_param.Thi
         message=data.message,
         items=items,
         pointer="→",
-        style=SELECT_STYLE,
+        style=DEFAULT_PICKER_STYLE,
         use_search_filter=False,
     )
     if result is None:
klaude_code/tui/components/common.py

@@ -40,7 +40,7 @@ def truncate_middle(
         remaining = max(0, len(truncated_lines))
         return Text(f" … (more {remaining} lines)", style=ThemeKey.TOOL_RESULT_TRUNCATED)
 
-    lines = text.split("\n")
+    lines = [line for line in text.split("\n") if line.strip()]
     truncated_lines = 0
     head_lines: list[str] = []
     tail_lines: list[str] = []
klaude_code/tui/components/metadata.py

@@ -1,5 +1,4 @@
 from rich.console import Group, RenderableType
-from rich.padding import Padding
 from rich.text import Text
 
 from klaude_code.const import DEFAULT_MAX_TOKENS
@@ -12,14 +11,14 @@ from klaude_code.ui.common import format_number
 def _render_task_metadata_block(
     metadata: model.TaskMetadata,
     *,
-    is_sub_agent: bool = False,
+    mark: Text,
     show_context_and_time: bool = True,
 ) -> RenderableType:
     """Render a single TaskMetadata block.
 
     Args:
         metadata: The TaskMetadata to render.
-        is_sub_agent: Whether this is a sub-agent block.
+        mark: The mark to display in the first column.
         show_context_and_time: Whether to show context usage percent and time.
 
     Returns:
@@ -31,9 +30,6 @@
     currency = metadata.usage.currency if metadata.usage else "USD"
     currency_symbol = "¥" if currency == "CNY" else "$"
 
-    # First column: mark only
-    mark = Text("└", style=ThemeKey.METADATA_DIM) if is_sub_agent else Text("◆", style=ThemeKey.METADATA)
-
     # Second column: model@provider description / tokens / cost / …
     content = Text()
     content.append_text(Text(metadata.model_name, style=ThemeKey.METADATA_BOLD))
@@ -43,7 +39,7 @@
     )
     if metadata.description:
         content.append_text(Text(" ", style=ThemeKey.METADATA)).append_text(
-            Text(metadata.description, style=ThemeKey.METADATA_DIM)
+            Text(metadata.description, style=ThemeKey.METADATA_ITALIC)
        )
 
     # All info parts (tokens, cost, context, etc.)
@@ -55,15 +51,15 @@
         token_text.append("↑", style=ThemeKey.METADATA_DIM)
         token_text.append(format_number(metadata.usage.input_tokens), style=ThemeKey.METADATA)
         if metadata.usage.cached_tokens > 0:
-            token_text.append(" ◎", style=ThemeKey.METADATA_DIM)
+            token_text.append(" ◎", style=ThemeKey.METADATA_DIM)
             token_text.append(format_number(metadata.usage.cached_tokens), style=ThemeKey.METADATA)
-        token_text.append(" ↓", style=ThemeKey.METADATA_DIM)
+        token_text.append(" ↓", style=ThemeKey.METADATA_DIM)
         token_text.append(format_number(metadata.usage.output_tokens), style=ThemeKey.METADATA)
         if metadata.usage.reasoning_tokens > 0:
-            token_text.append(" ∿", style=ThemeKey.METADATA_DIM)
+            token_text.append(" ∿", style=ThemeKey.METADATA_DIM)
             token_text.append(format_number(metadata.usage.reasoning_tokens), style=ThemeKey.METADATA)
         if metadata.usage.image_tokens > 0:
-            token_text.append(" ", style=ThemeKey.METADATA_DIM)
+            token_text.append(" ", style=ThemeKey.METADATA_DIM)
             token_text.append(format_number(metadata.usage.image_tokens), style=ThemeKey.METADATA)
         parts.append(token_text)
 
@@ -130,11 +126,11 @@
         )
 
     if parts:
-        content.append_text(Text(" ", style=ThemeKey.METADATA_DIM))
-        content.append_text(Text(" ", style=ThemeKey.METADATA_DIM).join(parts))
+        content.append_text(Text(" ", style=ThemeKey.METADATA_DIM))
+        content.append_text(Text(" ", style=ThemeKey.METADATA_DIM).join(parts))
 
     grid.add_row(mark, content)
-    return grid if not is_sub_agent else Padding(grid, (0, 0, 0, 2))
+    return grid
 
 
 def render_task_metadata(e: events.TaskMetadataEvent) -> RenderableType:
@@ -144,16 +140,20 @@ def render_task_metadata(e: events.TaskMetadataEvent) -> RenderableType:
     if e.cancelled:
         renderables.append(Text())
 
-    renderables.append(
-        _render_task_metadata_block(e.metadata.main_agent, is_sub_agent=False, show_context_and_time=True)
-    )
+    has_sub_agents = len(e.metadata.sub_agent_task_metadata) > 0
+    # Use an extra space for the main agent mark to align with two-character marks (├─, └─)
+    main_mark_text = "✓"
+    main_mark = Text(main_mark_text, style=ThemeKey.METADATA)
+
+    renderables.append(_render_task_metadata_block(e.metadata.main_agent, mark=main_mark, show_context_and_time=True))
 
     # Render each sub-agent metadata block
     for meta in e.metadata.sub_agent_task_metadata:
-        renderables.append(_render_task_metadata_block(meta, is_sub_agent=True, show_context_and_time=True))
+        sub_mark = Text(" └", style=ThemeKey.METADATA_DIM)
+        renderables.append(_render_task_metadata_block(meta, mark=sub_mark, show_context_and_time=True))
 
     # Add total cost line when there are sub-agents
-    if e.metadata.sub_agent_task_metadata:
+    if has_sub_agents:
         total_cost = 0.0
         currency = "USD"
         # Sum up costs from main agent and all sub-agents
@@ -166,12 +166,13 @@
 
         currency_symbol = "¥" if currency == "CNY" else "$"
         total_line = Text.assemble(
-            ("Σ ", ThemeKey.METADATA_DIM),
+            ("", ThemeKey.METADATA_DIM),
+            (" Σ ", ThemeKey.METADATA_DIM),
             ("total ", ThemeKey.METADATA_DIM),
             (currency_symbol, ThemeKey.METADATA_DIM),
             (f"{total_cost:.4f}", ThemeKey.METADATA_DIM),
         )
 
-        renderables.append(Padding(total_line, (0, 0, 0, 2)))
+        renderables.append(total_line)
 
     return Group(*renderables)
klaude_code/tui/components/rich/markdown.py

@@ -290,6 +290,14 @@ class MarkdownStream:
 
         stable_source = "".join(lines[:stable_line])
         live_source = "".join(lines[stable_line:])
+
+        # If the "stable" prefix is only whitespace and we haven't stabilized any
+        # non-whitespace content yet, keep everything in the live buffer.
+        #
+        # This avoids cases where marks/indentation should apply to the first
+        # visible line, but would be suppressed because stable_line > 0.
+        if min_stable_line == 0 and stable_source.strip() == "":
+            return "", text, 0
         return stable_source, live_source, stable_line
 
     def render_ansi(self, text: str, *, apply_mark: bool) -> str:
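
The guard added above keeps the whole text in the live (re-rendered) buffer until some non-whitespace content has actually stabilized. A simplified standalone sketch of that behavior, assuming lines keep their trailing newlines; the real method also derives stable_line and min_stable_line from streaming state not shown here:

def split_stable_live(text: str, stable_line: int, min_stable_line: int) -> tuple[str, str, int]:
    lines = text.splitlines(keepends=True)
    stable_source = "".join(lines[:stable_line])
    live_source = "".join(lines[stable_line:])

    # Whitespace-only "stable" prefix: keep everything live so marks and
    # indentation still apply to the first visible line.
    if min_stable_line == 0 and stable_source.strip() == "":
        return "", text, 0
    return stable_source, live_source, stable_line
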
klaude_code/tui/components/rich/quote.py

@@ -1,6 +1,8 @@
 from typing import TYPE_CHECKING, Any, Self
 
+from rich.cells import cell_len
 from rich.console import Console, ConsoleOptions, RenderResult
+from rich.measure import Measurement
 from rich.segment import Segment
 from rich.style import Style
 
@@ -16,10 +18,20 @@ class Quote:
         self.prefix = prefix
         self.style = style
 
+    def __rich_measure__(self, console: Console, options: ConsoleOptions) -> Measurement:
+        prefix_width = cell_len(self.prefix)
+        available_width = max(1, options.max_width - prefix_width)
+        content_measurement = Measurement.get(console, options.update(width=available_width), self.content)
+
+        minimum = min(options.max_width, content_measurement.minimum + prefix_width)
+        maximum = min(options.max_width, content_measurement.maximum + prefix_width)
+        return Measurement(minimum, maximum)
+
     def __rich_console__(self, console: Console, options: ConsoleOptions) -> RenderResult:
         # Reduce width to leave space for prefix
-        prefix_width = len(self.prefix)
-        render_options = options.update(width=options.max_width - prefix_width)
+        prefix_width = cell_len(self.prefix)
+        available_width = max(1, options.max_width - prefix_width)
+        render_options = options.update(width=available_width)
 
         # Get style
         quote_style = console.get_style(self.style) if isinstance(self.style, str) else self.style
@@ -29,7 +41,9 @@
         new_line = Segment("\n")
 
         # Render content as lines
-        lines = console.render_lines(self.content, render_options)
+        # Avoid padding to full width.
+        # Trailing spaces can cause terminals to reflow wrapped lines on resize.
+        lines = console.render_lines(self.content, render_options, pad=False)
 
         for line in lines:
             yield prefix_segment
@@ -57,6 +71,19 @@ class TreeQuote:
         self.style = style
         self.style_first = style_first
 
+    def __rich_measure__(self, console: Console, options: ConsoleOptions) -> Measurement:
+        prefix_width = max(
+            cell_len(self.prefix_middle),
+            cell_len(self.prefix_last),
+            cell_len(self.prefix_first) if self.prefix_first is not None else 0,
+        )
+        available_width = max(1, options.max_width - prefix_width)
+        content_measurement = Measurement.get(console, options.update(width=available_width), self.content)
+
+        minimum = min(options.max_width, content_measurement.minimum + prefix_width)
+        maximum = min(options.max_width, content_measurement.maximum + prefix_width)
+        return Measurement(minimum, maximum)
+
     @classmethod
     def for_tool_call(cls, content: "RenderableType", *, mark: str, style: str, style_first: str) -> Self:
         """Create a tree quote for tool call display.
@@ -85,17 +112,18 @@
     def __rich_console__(self, console: Console, options: ConsoleOptions) -> RenderResult:
         # Reduce width to leave space for prefix
         prefix_width = max(
-            len(self.prefix_middle),
-            len(self.prefix_last),
-            len(self.prefix_first) if self.prefix_first is not None else 0,
+            cell_len(self.prefix_middle),
+            cell_len(self.prefix_last),
+            cell_len(self.prefix_first) if self.prefix_first is not None else 0,
         )
-        render_options = options.update(width=options.max_width - prefix_width)
+        available_width = max(1, options.max_width - prefix_width)
+        render_options = options.update(width=available_width)
 
         quote_style = console.get_style(self.style) if isinstance(self.style, str) else self.style
         first_style = console.get_style(self.style_first) if isinstance(self.style_first, str) else self.style_first
 
         new_line = Segment("\n")
-        lines = console.render_lines(self.content, render_options)
+        lines = console.render_lines(self.content, render_options, pad=False)
         line_count = len(lines)
 
         for idx, line in enumerate(lines):
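
Two things changed in Quote/TreeQuote: prefixes are now measured in terminal cells (rich.cells.cell_len) rather than code points, and both renderables report their own width through Rich's __rich_measure__ protocol instead of letting containers assume full width. A small self-contained sketch of the same protocol on a toy renderable (not klaude_code code):

from rich.cells import cell_len
from rich.console import Console, ConsoleOptions, RenderResult
from rich.measure import Measurement
from rich.segment import Segment
from rich.text import Text

print(len("宽"), cell_len("宽"))  # 1 vs 2: one code point, but two terminal cells


class Prefixed:
    """Toy renderable that prepends a prefix to every rendered line, like Quote."""

    def __init__(self, content: str, prefix: str = "┃ ") -> None:
        self.content = Text(content)
        self.prefix = prefix

    def __rich_measure__(self, console: Console, options: ConsoleOptions) -> Measurement:
        width = cell_len(self.prefix)
        inner = Measurement.get(console, options.update(width=max(1, options.max_width - width)), self.content)
        return Measurement(
            min(options.max_width, inner.minimum + width),
            min(options.max_width, inner.maximum + width),
        )

    def __rich_console__(self, console: Console, options: ConsoleOptions) -> RenderResult:
        width = cell_len(self.prefix)
        render_options = options.update(width=max(1, options.max_width - width))
        for line in console.render_lines(self.content, render_options, pad=False):
            yield Segment(self.prefix)
            yield from line
            yield Segment("\n")


Console().print(Prefixed("hello world"))
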
klaude_code/tui/components/rich/theme.py

@@ -133,6 +133,7 @@ class ThemeKey(str, Enum):
     METADATA = "metadata"
     METADATA_DIM = "metadata.dim"
     METADATA_BOLD = "metadata.bold"
+    METADATA_ITALIC = "metadata.italic"
     # SPINNER_STATUS
     STATUS_SPINNER = "spinner.status"
     STATUS_TEXT = "spinner.status.text"
@@ -259,6 +260,7 @@ def get_theme(theme: str | None = None) -> Themes:
             ThemeKey.METADATA.value: palette.lavender,
             ThemeKey.METADATA_DIM.value: "dim " + palette.lavender,
             ThemeKey.METADATA_BOLD.value: "bold " + palette.lavender,
+            ThemeKey.METADATA_ITALIC.value: "italic " + palette.lavender,
             # STATUS
             ThemeKey.STATUS_SPINNER.value: palette.blue,
             ThemeKey.STATUS_TEXT.value: palette.blue,
klaude_code/tui/components/welcome.py

@@ -59,6 +59,38 @@ def render_welcome(e: events.WelcomeEvent) -> RenderableType:
         )
     )
 
+    # Loaded skills summary is provided by core via WelcomeEvent to keep TUI decoupled.
+    loaded_skills = e.loaded_skills or {}
+    user_skills = loaded_skills.get("user") or []
+    project_skills = loaded_skills.get("project") or []
+    system_skills = loaded_skills.get("system") or []
+
+    skill_groups: list[tuple[str, list[str]]] = []
+    if user_skills:
+        skill_groups.append(("user", user_skills))
+    if project_skills:
+        skill_groups.append(("project", project_skills))
+    if system_skills:
+        skill_groups.append(("system", system_skills))
+
+    if skill_groups:
+        panel_content.append_text(Text("\n\n", style=ThemeKey.WELCOME_INFO))
+        panel_content.append_text(Text("skills", style=ThemeKey.WELCOME_HIGHLIGHT))
+
+        label_width = len("[project]")
+
+        for i, (group_name, skills) in enumerate(skill_groups):
+            is_last = i == len(skill_groups) - 1
+            prefix = "└─ " if is_last else "├─ "
+            label = f"[{group_name}]"
+            panel_content.append_text(
+                Text.assemble(
+                    ("\n", ThemeKey.WELCOME_INFO),
+                    (prefix, ThemeKey.LINES),
+                    (f"{label.ljust(label_width)} {', '.join(skills)}", ThemeKey.WELCOME_INFO),
+                )
+            )
+
     border_style = ThemeKey.WELCOME_DEBUG_BORDER if debug_mode else ThemeKey.LINES
 
     if e.show_klaude_code_info:
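
For reference, the block added above renders the loaded skills grouped by location as a small tree under a "skills" heading. A plain-string rendition of the same layout, with skill names made up for illustration:

def format_skill_groups(loaded_skills: dict[str, list[str]]) -> str:
    groups = [(name, loaded_skills[name]) for name in ("user", "project", "system") if loaded_skills.get(name)]
    label_width = len("[project]")
    lines = ["skills"]
    for i, (name, skills) in enumerate(groups):
        prefix = "└─ " if i == len(groups) - 1 else "├─ "
        label = f"[{name}]".ljust(label_width)
        lines.append(f"{prefix}{label} {', '.join(skills)}")
    return "\n".join(lines)


print(format_skill_groups({"user": ["commit-helper"], "project": ["jj-workspace"]}))
# skills
# ├─ [user]    commit-helper
# └─ [project] jj-workspace
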
klaude_code/tui/input/prompt_toolkit.py

@@ -315,9 +315,11 @@ class PromptToolkitInput(InputProviderABC):
                 "msg": "",
                 "meta": "fg:ansibrightblack",
                 "frame.border": "fg:ansibrightblack dim",
-                "search_prefix": "fg:ansibrightblack",
+                "search_prefix": "ansibrightblack",
                 "search_placeholder": "fg:ansibrightblack italic",
                 "search_input": "",
+                "search_success": "noinherit fg:ansigreen",
+                "search_none": "noinherit fg:ansired",
                 # Empty bottom-toolbar style
                 "bottom-toolbar": "bg:default fg:default noreverse",
                 "bottom-toolbar.text": "bg:default fg:default noreverse",
klaude_code/tui/machine.py

@@ -243,7 +243,24 @@ class SpinnerStatusState:
             return Text(self._toast_status, style=ThemeKey.STATUS_TOAST)
 
         activity_text = self._activity.get_activity_text()
-        base_status = self._reasoning_status or self._todo_status
+        todo_status = self._todo_status
+        reasoning_status = self._reasoning_status
+
+        if todo_status is not None:
+            base_status = todo_status
+            extra_reasoning = None if reasoning_status in (None, STATUS_THINKING_TEXT) else reasoning_status
+        else:
+            base_status = reasoning_status
+            extra_reasoning = None
+
+        if extra_reasoning is not None:
+            if activity_text is None:
+                activity_text = Text(extra_reasoning, style=ThemeKey.STATUS_TEXT_BOLD_ITALIC)
+            else:
+                prefixed = Text(extra_reasoning, style=ThemeKey.STATUS_TEXT_BOLD_ITALIC)
+                prefixed.append(" , ")
+                prefixed.append_text(activity_text)
+                activity_text = prefixed
 
         if base_status:
             # Default "Thinking ..." uses normal style; custom headers use bold italic
@@ -306,6 +323,7 @@ class _SessionState:
     @property
     def should_extract_reasoning_header(self) -> bool:
         """Gemini and GPT-5 models use markdown bold headers in thinking."""
+        return False  # Temporarily disabled for all models
         if self.model_id is None:
             return False
         model_lower = self.model_id.lower()
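
The spinner change above makes the todo status the base line and demotes a custom reasoning header (anything other than the default thinking text) into the activity suffix. The selection logic, extracted as a pure function over plain strings; the actual value of STATUS_THINKING_TEXT is assumed here:

STATUS_THINKING_TEXT = "Thinking"  # assumed value of the default header constant


def pick_status(todo_status: str | None, reasoning_status: str | None) -> tuple[str | None, str | None]:
    """Return (base_status, extra_reasoning) following the new priority rules."""
    if todo_status is not None:
        # Todo status wins; a non-default reasoning header moves to the activity line.
        extra = None if reasoning_status in (None, STATUS_THINKING_TEXT) else reasoning_status
        return todo_status, extra
    return reasoning_status, None


assert pick_status("2/5 todos", "Planning the refactor") == ("2/5 todos", "Planning the refactor")
assert pick_status("2/5 todos", STATUS_THINKING_TEXT) == ("2/5 todos", None)
assert pick_status(None, "Planning the refactor") == ("Planning the refactor", None)
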
klaude_code/tui/renderer.py

@@ -64,7 +64,7 @@ from klaude_code.tui.components import thinking as c_thinking
 from klaude_code.tui.components import tools as c_tools
 from klaude_code.tui.components import user_input as c_user_input
 from klaude_code.tui.components import welcome as c_welcome
-from klaude_code.tui.components.common import truncate_head, truncate_middle
+from klaude_code.tui.components.common import truncate_head
 from klaude_code.tui.components.rich import status as r_status
 from klaude_code.tui.components.rich.live import CropAboveLive, SingleLine
 from klaude_code.tui.components.rich.markdown import MarkdownStream, ThinkingMarkdown
@@ -488,7 +488,6 @@ class TUICommandRenderer:
                     continue
                 self.display_tool_call_result(e)
             case events.TaskMetadataEvent() as e:
-                self.print()
                 self.print(c_metadata.render_task_metadata(e))
                 self.print()
             case events.InterruptEvent():
@@ -579,9 +578,9 @@
     def display_error(self, event: events.ErrorEvent) -> None:
         if event.session_id:
             with self.session_print_context(event.session_id):
-                self.print(c_errors.render_error(truncate_middle(event.error_message)))
+                self.print(c_errors.render_error(Text(event.error_message)))
         else:
-            self.print(c_errors.render_error(truncate_middle(event.error_message)))
+            self.print(c_errors.render_error(Text(event.error_message)))
 
     # ---------------------------------------------------------------------
     # Notifications