klaude-code 1.2.8__py3-none-any.whl → 1.2.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82)
  1. klaude_code/auth/codex/__init__.py +1 -1
  2. klaude_code/cli/main.py +12 -1
  3. klaude_code/cli/runtime.py +7 -11
  4. klaude_code/command/__init__.py +68 -21
  5. klaude_code/command/clear_cmd.py +6 -2
  6. klaude_code/command/command_abc.py +5 -2
  7. klaude_code/command/diff_cmd.py +5 -2
  8. klaude_code/command/export_cmd.py +7 -4
  9. klaude_code/command/help_cmd.py +6 -2
  10. klaude_code/command/model_cmd.py +5 -2
  11. klaude_code/command/prompt-deslop.md +14 -0
  12. klaude_code/command/prompt_command.py +8 -3
  13. klaude_code/command/refresh_cmd.py +6 -2
  14. klaude_code/command/registry.py +17 -5
  15. klaude_code/command/release_notes_cmd.py +89 -0
  16. klaude_code/command/status_cmd.py +98 -56
  17. klaude_code/command/terminal_setup_cmd.py +7 -4
  18. klaude_code/const/__init__.py +1 -1
  19. klaude_code/core/agent.py +66 -26
  20. klaude_code/core/executor.py +2 -2
  21. klaude_code/core/manager/agent_manager.py +6 -7
  22. klaude_code/core/manager/llm_clients.py +47 -22
  23. klaude_code/core/manager/llm_clients_builder.py +19 -7
  24. klaude_code/core/manager/sub_agent_manager.py +6 -2
  25. klaude_code/core/prompt.py +38 -28
  26. klaude_code/core/reminders.py +4 -7
  27. klaude_code/core/task.py +59 -40
  28. klaude_code/core/tool/__init__.py +2 -0
  29. klaude_code/core/tool/file/_utils.py +30 -0
  30. klaude_code/core/tool/file/apply_patch_tool.py +1 -1
  31. klaude_code/core/tool/file/edit_tool.py +6 -31
  32. klaude_code/core/tool/file/multi_edit_tool.py +7 -32
  33. klaude_code/core/tool/file/read_tool.py +6 -18
  34. klaude_code/core/tool/file/write_tool.py +6 -31
  35. klaude_code/core/tool/memory/__init__.py +5 -0
  36. klaude_code/core/tool/memory/memory_tool.py +2 -2
  37. klaude_code/core/tool/memory/skill_loader.py +2 -1
  38. klaude_code/core/tool/memory/skill_tool.py +13 -0
  39. klaude_code/core/tool/sub_agent_tool.py +2 -1
  40. klaude_code/core/tool/todo/todo_write_tool.py +1 -1
  41. klaude_code/core/tool/todo/update_plan_tool.py +1 -1
  42. klaude_code/core/tool/tool_context.py +21 -4
  43. klaude_code/core/tool/tool_runner.py +5 -8
  44. klaude_code/core/tool/web/mermaid_tool.py +1 -4
  45. klaude_code/core/turn.py +40 -37
  46. klaude_code/llm/__init__.py +2 -12
  47. klaude_code/llm/anthropic/client.py +14 -44
  48. klaude_code/llm/client.py +2 -2
  49. klaude_code/llm/codex/client.py +4 -3
  50. klaude_code/llm/input_common.py +0 -6
  51. klaude_code/llm/openai_compatible/client.py +31 -74
  52. klaude_code/llm/openai_compatible/input.py +6 -4
  53. klaude_code/llm/openai_compatible/stream_processor.py +82 -0
  54. klaude_code/llm/openrouter/client.py +32 -62
  55. klaude_code/llm/openrouter/input.py +4 -27
  56. klaude_code/llm/registry.py +33 -7
  57. klaude_code/llm/responses/client.py +16 -48
  58. klaude_code/llm/responses/input.py +1 -1
  59. klaude_code/llm/usage.py +61 -11
  60. klaude_code/protocol/commands.py +1 -0
  61. klaude_code/protocol/events.py +11 -2
  62. klaude_code/protocol/model.py +147 -24
  63. klaude_code/protocol/op.py +1 -0
  64. klaude_code/protocol/sub_agent.py +5 -1
  65. klaude_code/session/export.py +56 -32
  66. klaude_code/session/session.py +43 -21
  67. klaude_code/session/templates/export_session.html +4 -1
  68. klaude_code/ui/core/input.py +1 -1
  69. klaude_code/ui/modes/repl/__init__.py +1 -5
  70. klaude_code/ui/modes/repl/clipboard.py +5 -5
  71. klaude_code/ui/modes/repl/event_handler.py +153 -54
  72. klaude_code/ui/modes/repl/renderer.py +4 -4
  73. klaude_code/ui/renderers/developer.py +35 -25
  74. klaude_code/ui/renderers/metadata.py +68 -30
  75. klaude_code/ui/renderers/tools.py +53 -87
  76. klaude_code/ui/rich/markdown.py +5 -5
  77. klaude_code/ui/terminal/control.py +2 -2
  78. klaude_code/version.py +3 -3
  79. {klaude_code-1.2.8.dist-info → klaude_code-1.2.10.dist-info}/METADATA +1 -1
  80. {klaude_code-1.2.8.dist-info → klaude_code-1.2.10.dist-info}/RECORD +82 -78
  81. {klaude_code-1.2.8.dist-info → klaude_code-1.2.10.dist-info}/WHEEL +0 -0
  82. {klaude_code-1.2.8.dist-info → klaude_code-1.2.10.dist-info}/entry_points.txt +0 -0
klaude_code/ui/modes/repl/event_handler.py

@@ -1,5 +1,6 @@
  from __future__ import annotations

+ from dataclasses import dataclass
  from typing import Awaitable, Callable

  from rich.text import Text
@@ -14,34 +15,130 @@ from klaude_code.ui.terminal.progress_bar import OSC94States, emit_osc94
  from klaude_code.ui.utils.debouncer import Debouncer


+ @dataclass
+ class ActiveStream:
+     """Active streaming state containing buffer and markdown renderer.
+
+     This represents an active streaming session where content is being
+     accumulated in a buffer and rendered via MarkdownStream.
+     When streaming ends, this object is replaced with None.
+     """
+
+     buffer: str
+     mdstream: MarkdownStream
+
+     def append(self, content: str) -> None:
+         self.buffer += content
+
+
  class StreamState:
+     """Manages assistant message streaming state.
+
+     The streaming state is either:
+     - None: No active stream
+     - ActiveStream: Active streaming with buffer and markdown renderer
+
+     This design ensures buffer and mdstream are always in sync.
+     """
+
      def __init__(self, interval: float, flush_handler: Callable[["StreamState"], Awaitable[None]]):
-         self.buffer: str = ""
-         self.mdstream: MarkdownStream | None = None
+         self._active: ActiveStream | None = None
          self._flush_handler = flush_handler
          self.debouncer = Debouncer(interval=interval, callback=self._debounced_flush)

      async def _debounced_flush(self) -> None:
          await self._flush_handler(self)

+     @property
+     def is_active(self) -> bool:
+         return self._active is not None
+
+     @property
+     def buffer(self) -> str:
+         return self._active.buffer if self._active else ""
+
+     @property
+     def mdstream(self) -> MarkdownStream | None:
+         return self._active.mdstream if self._active else None
+
+     def start(self, mdstream: MarkdownStream) -> None:
+         """Start a new streaming session."""
+         self._active = ActiveStream(buffer="", mdstream=mdstream)
+
      def append(self, content: str) -> None:
-         self.buffer += content
+         """Append content to the buffer."""
+         if self._active:
+             self._active.append(content)
+
+     def finish(self) -> None:
+         """End the current streaming session."""
+         self._active = None
+
+
+ class ActivityState:
+     """Represents the current activity state for spinner display.
+
+     This is a discriminated union where the state is either:
+     - None (thinking/idle)
+     - Composing (assistant is streaming text)
+     - ToolCalls (one or more tool calls in progress)
+
+     Composing and ToolCalls are mutually exclusive - when tool calls start,
+     composing state is automatically cleared.
+     """
+
+     def __init__(self) -> None:
+         self._composing: bool = False
+         self._tool_calls: dict[str, int] = {}
+
+     @property
+     def is_composing(self) -> bool:
+         return self._composing and not self._tool_calls
+
+     @property
+     def has_tool_calls(self) -> bool:
+         return bool(self._tool_calls)
+
+     def set_composing(self, composing: bool) -> None:
+         self._composing = composing
+
+     def add_tool_call(self, tool_name: str) -> None:
+         self._tool_calls[tool_name] = self._tool_calls.get(tool_name, 0) + 1

-     def clear(self) -> None:
-         self.buffer = ""
+     def clear_tool_calls(self) -> None:
+         self._tool_calls = {}
+
+     def reset(self) -> None:
+         self._composing = False
+         self._tool_calls = {}
+
+     def get_activity_text(self) -> Text | None:
+         """Get activity text for display. Returns None if idle/thinking."""
+         if self._tool_calls:
+             activity_text = Text()
+             first = True
+             for name, count in self._tool_calls.items():
+                 if not first:
+                     activity_text.append(", ")
+                 activity_text.append(name, style="bold")
+                 if count > 1:
+                     activity_text.append(f" x {count}")
+                 first = False
+             return activity_text
+         if self._composing:
+             return Text("Composing")
+         return None


  class SpinnerStatusState:
      """Multi-layer spinner status state management.

-     Layers (from low to high priority):
+     Composed of two independent layers:
      - base_status: Set by TodoChange, persistent within a turn
-     - composing: True when assistant is streaming text
-     - tool_calls: Accumulated from ToolCallStart, cleared at turn start
+     - activity: Current activity (composing or tool_calls), mutually exclusive

      Display logic:
-     - If tool_calls: show base + tool_calls (composing is hidden)
-     - Elif composing: show base + "Composing"
+     - If activity: show base + activity (if base exists) or activity + "..."
      - Elif base_status: show base_status
      - Else: show "Thinking …"
      """
@@ -50,14 +147,12 @@ class SpinnerStatusState:

      def __init__(self) -> None:
          self._base_status: str | None = None
-         self._composing: bool = False
-         self._tool_calls: dict[str, int] = {}
+         self._activity = ActivityState()

      def reset(self) -> None:
          """Reset all layers."""
          self._base_status = None
-         self._composing = False
-         self._tool_calls = {}
+         self._activity.reset()

      def set_base_status(self, status: str | None) -> None:
          """Set base status from TodoChange."""
@@ -65,37 +160,23 @@ class SpinnerStatusState:

      def set_composing(self, composing: bool) -> None:
          """Set composing state when assistant is streaming."""
-         self._composing = composing
+         self._activity.set_composing(composing)

      def add_tool_call(self, tool_name: str) -> None:
          """Add a tool call to the accumulator."""
-         self._tool_calls[tool_name] = self._tool_calls.get(tool_name, 0) + 1
+         self._activity.add_tool_call(tool_name)

      def clear_tool_calls(self) -> None:
-         """Clear tool calls and composing state."""
-         self._tool_calls = {}
+         """Clear tool calls."""
+         self._activity.clear_tool_calls()

      def clear_for_new_turn(self) -> None:
-         """Clear tool calls and composing state for a new turn."""
-         self._tool_calls = {}
-         self._composing = False
+         """Clear activity state for a new turn."""
+         self._activity.reset()

      def get_status(self) -> Text:
          """Get current spinner status as rich Text."""
-         # Build activity text (tool_calls or composing)
-         activity_text: Text | None = None
-         if self._tool_calls:
-             activity_text = Text()
-             first = True
-             for name, count in self._tool_calls.items():
-                 if not first:
-                     activity_text.append(", ")
-                 activity_text.append(name, style="bold")
-                 if count > 1:
-                     activity_text.append(f" × {count}")
-                 first = False
-         elif self._composing:
-             activity_text = Text("Composing")
+         activity_text = self._activity.get_activity_text()

          if self._base_status:
              result = Text(self._base_status)
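
For orientation, a minimal standalone sketch of how the new ActivityState feeds the spinner label. This is a trimmed copy of the class added above, for illustration only; the packaged get_activity_text styles each tool name individually rather than joining one string.

```python
from rich.text import Text


class ActivityState:
    """Trimmed copy of the class introduced above, for illustration only."""

    def __init__(self) -> None:
        self._composing = False
        self._tool_calls: dict[str, int] = {}

    def set_composing(self, composing: bool) -> None:
        self._composing = composing

    def add_tool_call(self, tool_name: str) -> None:
        self._tool_calls[tool_name] = self._tool_calls.get(tool_name, 0) + 1

    def get_activity_text(self) -> Text | None:
        # Tool calls take priority over composing; None means idle/thinking.
        if self._tool_calls:
            label = ", ".join(
                f"{name} x {count}" if count > 1 else name
                for name, count in self._tool_calls.items()
            )
            return Text(label, style="bold")
        if self._composing:
            return Text("Composing")
        return None


activity = ActivityState()
print(activity.get_activity_text())  # None -> spinner falls back to "Thinking …"
activity.set_composing(True)
print(activity.get_activity_text())  # Composing
activity.add_tool_call("Read")
activity.add_tool_call("Read")
activity.add_tool_call("Edit")
print(activity.get_activity_text())  # Read x 2, Edit  (tool calls hide "Composing")
```

SpinnerStatusState.get_status then prefixes this activity text with the persistent base status from TodoChange, or falls back to "Thinking …" when both layers are empty.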
@@ -151,12 +232,14 @@ class DisplayEventHandler:
                  await self._on_tool_call(e)
              case events.ToolResultEvent() as e:
                  await self._on_tool_result(e)
-             case events.ResponseMetadataEvent() as e:
-                 self._on_response_metadata(e)
+             case events.TaskMetadataEvent() as e:
+                 self._on_task_metadata(e)
              case events.TodoChangeEvent() as e:
                  self._on_todo_change(e)
              case events.TurnEndEvent():
                  pass
+             case events.ResponseMetadataEvent():
+                 pass  # Internal event, not displayed
              case events.TaskFinishEvent() as e:
                  await self._on_task_finish(e)
              case events.InterruptEvent() as e:
@@ -212,12 +295,12 @@
              return
          if len(event.content.strip()) == 0 and self.stage_manager.current_stage != Stage.ASSISTANT:
              return
-         first_delta = self.assistant_stream.mdstream is None
+         first_delta = not self.assistant_stream.is_active
          if first_delta:
              self.spinner_status.set_composing(True)
              self.spinner_status.clear_tool_calls()
              self._update_spinner()
-         self.assistant_stream.mdstream = MarkdownStream(
+         mdstream = MarkdownStream(
              mdargs={"code_theme": self.renderer.themes.code_theme},
              theme=self.renderer.themes.markdown_theme,
              console=self.renderer.console,
@@ -225,6 +308,7 @@
              mark="➤",
              indent=2,
          )
+         self.assistant_stream.start(mdstream)
          self.assistant_stream.append(event.content)
          if first_delta and self.assistant_stream.mdstream is not None:
              # Stop spinner and immediately start MarkdownStream's Live
@@ -239,13 +323,14 @@
          if self.renderer.is_sub_agent_session(event.session_id):
              return
          await self.stage_manager.transition_to(Stage.ASSISTANT)
-         if self.assistant_stream.mdstream is not None:
+         if self.assistant_stream.is_active:
              self.assistant_stream.debouncer.cancel()
-             self.assistant_stream.mdstream.update(event.content.strip(), final=True)
+             mdstream = self.assistant_stream.mdstream
+             assert mdstream is not None
+             mdstream.update(event.content.strip(), final=True)
          else:
              self.renderer.display_assistant_message(event.content)
-         self.assistant_stream.clear()
-         self.assistant_stream.mdstream = None
+         self.assistant_stream.finish()
          self.spinner_status.set_composing(False)
          self._update_spinner()
          await self.stage_manager.transition_to(Stage.WAITING)
@@ -269,8 +354,8 @@
          await self.stage_manager.transition_to(Stage.TOOL_RESULT)
          self.renderer.display_tool_call_result(event)

-     def _on_response_metadata(self, event: events.ResponseMetadataEvent) -> None:
-         self.renderer.display_response_metadata(event)
+     def _on_task_metadata(self, event: events.TaskMetadataEvent) -> None:
+         self.renderer.display_task_metadata(event)

      def _on_todo_change(self, event: events.TodoChangeEvent) -> None:
          active_form_status_text = self._extract_active_form_text(event)
@@ -314,11 +399,12 @@
      # ─────────────────────────────────────────────────────────────────────────────

      async def _finish_assistant_stream(self) -> None:
-         if self.assistant_stream.mdstream is not None:
+         if self.assistant_stream.is_active:
              self.assistant_stream.debouncer.cancel()
-             self.assistant_stream.mdstream.update(self.assistant_stream.buffer, final=True)
-             self.assistant_stream.mdstream = None
-             self.assistant_stream.clear()
+             mdstream = self.assistant_stream.mdstream
+             assert mdstream is not None
+             mdstream.update(self.assistant_stream.buffer, final=True)
+             self.assistant_stream.finish()

      def _print_thinking_prefix(self) -> None:
          self.renderer.display_thinking_prefix()
@@ -328,8 +414,10 @@
          self.renderer.spinner_update(self.spinner_status.get_status())

      async def _flush_assistant_buffer(self, state: StreamState) -> None:
-         if state.mdstream is not None:
-             state.mdstream.update(state.buffer)
+         if state.is_active:
+             mdstream = state.mdstream
+             assert mdstream is not None
+             mdstream.update(state.buffer)

      def _maybe_notify_task_finish(self, event: events.TaskFinishEvent) -> None:
          if self.notifier is None:
@@ -360,8 +448,19 @@
          status_text = ""
          for todo in todo_event.todos:
              if todo.status == "in_progress":
-                 if len(todo.activeForm) > 0:
-                     status_text = todo.activeForm
+                 if len(todo.active_form) > 0:
+                     status_text = todo.active_form
                  if len(todo.content) > 0:
                      status_text = todo.content
-         return status_text.replace("\n", "")
+         status_text = status_text.replace("\n", "")
+         return self._truncate_status_text(status_text, max_length=30)
+
+     def _truncate_status_text(self, text: str, max_length: int) -> str:
+         """Truncate text to max_length while preserving complete words."""
+         if len(text) <= max_length:
+             return text
+         truncated = text[:max_length]
+         last_space = truncated.rfind(" ")
+         if last_space > 0:
+             return truncated[:last_space] + "..."
+         return truncated + "..."
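The spinner status text derived from the active todo is now capped at 30 characters with word-preserving truncation. The helper added above, shown standalone with example output:

```python
# Standalone copy of the _truncate_status_text helper added above, for illustration.
def truncate_status_text(text: str, max_length: int = 30) -> str:
    if len(text) <= max_length:
        return text
    truncated = text[:max_length]
    last_space = truncated.rfind(" ")
    if last_space > 0:
        # Cut back to the last complete word before the limit.
        return truncated[:last_space] + "..."
    return truncated + "..."


print(truncate_status_text("Updating the session export template"))  # Updating the session export...
print(truncate_status_text("Reindexing"))                            # short enough, returned unchanged
```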
klaude_code/ui/modes/repl/renderer.py

@@ -184,8 +184,8 @@ class REPLRenderer:
                      self.display_tool_call(tool_call_event)
                      tool_call_dict.pop(tool_result_event.tool_call_id, None)
                      self.display_tool_call_result(tool_result_event)
-                 case events.ResponseMetadataEvent() as metadata_event:
-                     self.print(r_metadata.render_response_metadata(metadata_event))
+                 case events.TaskMetadataEvent() as metadata_event:
+                     self.print(r_metadata.render_task_metadata(metadata_event))
                      self.print()
                  case events.InterruptEvent():
                      self.print()
@@ -233,9 +233,9 @@
              self.print(renderable)
              self.print()

-     def display_response_metadata(self, event: events.ResponseMetadataEvent) -> None:
+     def display_task_metadata(self, event: events.TaskMetadataEvent) -> None:
          with self.session_print_context(event.session_id):
-             self.print(r_metadata.render_response_metadata(event))
+             self.print(r_metadata.render_task_metadata(event))
              self.print()

      def display_task_finish(self, event: events.TaskFinishEvent) -> None:
klaude_code/ui/renderers/developer.py

@@ -7,6 +7,7 @@ from klaude_code.protocol import commands, events, model
  from klaude_code.ui.renderers import diffs as r_diffs
  from klaude_code.ui.renderers.common import create_grid
  from klaude_code.ui.renderers.tools import render_path
+ from klaude_code.ui.rich.markdown import NoInsetMarkdown
  from klaude_code.ui.rich.theme import ThemeKey
  from klaude_code.ui.utils.common import truncate_display

@@ -100,6 +101,8 @@ def render_command_output(e: events.DeveloperMessageEvent) -> RenderableType:
              return Padding.indent(Text.from_markup(e.item.content or ""), level=2)
          case commands.CommandName.STATUS:
              return _render_status_output(e.item.command_output)
+         case commands.CommandName.RELEASE_NOTES:
+             return Padding.indent(NoInsetMarkdown(e.item.content or ""), level=2)
          case _:
              content = e.item.content or "(no content)"
              style = ThemeKey.TOOL_RESULT if not e.item.command_output.is_error else ThemeKey.ERROR
@@ -126,34 +129,41 @@ def _format_cost(cost: float | None, currency: str = "USD") -> str:


  def _render_status_output(command_output: model.CommandOutput) -> RenderableType:
-     """Render session status as a two-column table with sections."""
-     if not command_output.ui_extra or not command_output.ui_extra.session_status:
-         return Text("(no status data)", style=ThemeKey.TOOL_RESULT)
+     """Render session status with total cost and per-model breakdown."""
+     if not isinstance(command_output.ui_extra, model.SessionStatusUIExtra):
+         return Text("(no status data)", style=ThemeKey.METADATA)

-     status = command_output.ui_extra.session_status
+     status = command_output.ui_extra
      usage = status.usage

      table = Table.grid(padding=(0, 2))
-     table.add_column(style=ThemeKey.TOOL_RESULT, no_wrap=True)
-     table.add_column(style=ThemeKey.TOOL_RESULT, no_wrap=True)
-     # Token Usage section
-     table.add_row(Text("Token Usage", style="bold"), "")
-     table.add_row("Input Tokens", _format_tokens(usage.input_tokens))
-     if usage.cached_tokens > 0:
-         table.add_row("Cached Tokens", _format_tokens(usage.cached_tokens))
-     if usage.reasoning_tokens > 0:
-         table.add_row("Reasoning Tokens", _format_tokens(usage.reasoning_tokens))
-     table.add_row("Output Tokens", _format_tokens(usage.output_tokens))
-     table.add_row("Total Tokens", _format_tokens(usage.total_tokens))
-
-     # Cost section
-     if usage.total_cost is not None:
-         table.add_row("", "")  # Empty line
-         table.add_row(Text("Cost", style="bold"), "")
-         table.add_row("Input Cost", _format_cost(usage.input_cost, usage.currency))
-         if usage.cache_read_cost is not None and usage.cache_read_cost > 0:
-             table.add_row("Cache Read Cost", _format_cost(usage.cache_read_cost, usage.currency))
-         table.add_row("Output Cost", _format_cost(usage.output_cost, usage.currency))
-         table.add_row("Total Cost", _format_cost(usage.total_cost, usage.currency))
+     table.add_column(style=ThemeKey.METADATA, overflow="fold")
+     table.add_column(style=ThemeKey.METADATA, overflow="fold")
+
+     # Total cost line
+     table.add_row(
+         Text("Total cost:", style=ThemeKey.METADATA_BOLD),
+         Text(_format_cost(usage.total_cost, usage.currency), style=ThemeKey.METADATA_BOLD),
+     )
+
+     # Per-model breakdown
+     if status.by_model:
+         table.add_row(Text("Usage by model:", style=ThemeKey.METADATA_BOLD), "")
+         for meta in status.by_model:
+             model_label = meta.model_name
+             if meta.provider:
+                 model_label = f"{meta.model_name} ({meta.provider.lower().replace(' ', '-')})"
+
+             if meta.usage:
+                 usage_detail = (
+                     f"{_format_tokens(meta.usage.input_tokens)} input, "
+                     f"{_format_tokens(meta.usage.output_tokens)} output, "
+                     f"{_format_tokens(meta.usage.cached_tokens)} cache read, "
+                     f"{_format_tokens(meta.usage.reasoning_tokens)} thinking, "
+                     f"({_format_cost(meta.usage.total_cost, meta.usage.currency)})"
+                 )
+             else:
+                 usage_detail = "(no usage data)"
+             table.add_row(f"{model_label}:", usage_detail)

      return Padding.indent(table, level=2)
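Roughly what one /status breakdown row contains after this change. The fmt_tokens and fmt_cost stand-ins below, the model name, and the numbers are illustrative guesses; the real _format_tokens and _format_cost helpers live elsewhere in developer.py and are not part of this hunk.

```python
def fmt_tokens(n: int) -> str:
    # Stand-in for _format_tokens; the packaged formatting may differ.
    return f"{n / 1000:.1f}k" if n >= 1000 else str(n)


def fmt_cost(cost: float | None, currency: str = "USD") -> str:
    # Stand-in for _format_cost; the packaged formatting may differ.
    symbol = "¥" if currency == "CNY" else "$"
    return f"{symbol}{cost:.4f}" if cost is not None else "n/a"


# One breakdown row per entry in status.by_model, mirroring the f-string above:
model_label = "claude-sonnet-4-5 (openrouter)"
usage_detail = (
    f"{fmt_tokens(182_340)} input, {fmt_tokens(9_812)} output, "
    f"{fmt_tokens(160_000)} cache read, {fmt_tokens(2_048)} thinking, "
    f"({fmt_cost(0.7421)})"
)
print(f"{model_label}: {usage_detail}")
# claude-sonnet-4-5 (openrouter): 182.3k input, 9.8k output, 160.0k cache read, 2.0k thinking, ($0.7421)
```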
klaude_code/ui/renderers/metadata.py

@@ -7,7 +7,7 @@ from rich.padding import Padding
  from rich.panel import Panel
  from rich.text import Text

- from klaude_code.protocol import events
+ from klaude_code.protocol import events, model
  from klaude_code.trace import is_debug_enabled
  from klaude_code.ui.rich.theme import ThemeKey
  from klaude_code.ui.utils.common import format_number
@@ -21,18 +21,34 @@ def _get_version() -> str:
      return "unknown"


- def render_response_metadata(e: events.ResponseMetadataEvent) -> RenderableType:
-     metadata = e.metadata
+ def _render_task_metadata_block(
+     metadata: model.TaskMetadata,
+     *,
+     indent: int = 0,
+     show_context_and_time: bool = True,
+ ) -> list[RenderableType]:
+     """Render a single TaskMetadata block.

+     Args:
+         metadata: The TaskMetadata to render.
+         indent: Number of spaces to indent (0 for main, 2 for sub-agents).
+         show_context_and_time: Whether to show context usage percent and time.
+
+     Returns:
+         List of renderables for this metadata block.
+     """
      # Get currency symbol
      currency = metadata.usage.currency if metadata.usage else "USD"
      currency_symbol = "¥" if currency == "CNY" else "$"

      # Line 1: Model and Provider
-     model_text = Text()
-     model_text.append_text(Text("- ", style=ThemeKey.METADATA_BOLD)).append_text(
-         Text(metadata.model_name, style=ThemeKey.METADATA_BOLD)
+     prefix = (
+         Text(" " * indent + "• ", style=ThemeKey.METADATA_BOLD)
+         if indent == 0
+         else Text(" " * indent + "└ ", style=ThemeKey.METADATA_DIM)
      )
+     model_text = Text()
+     model_text.append_text(prefix).append_text(Text(metadata.model_name, style=ThemeKey.METADATA_BOLD))
      if metadata.provider is not None:
          model_text.append_text(Text("@", style=ThemeKey.METADATA)).append_text(
              Text(metadata.provider.lower().replace(" ", "-"), style=ThemeKey.METADATA)
@@ -41,7 +57,7 @@ def render_response_metadata(e: events.ResponseMetadataEvent) -> RenderableType:
          )
      renderables: list[RenderableType] = [model_text]

      # Line 2: Token consumption, Context, TPS, Cost
-     parts: list[Text] = []
+     parts2: list[Text] = []

      if metadata.usage is not None:
@@ -51,7 +67,7 @@ def render_response_metadata(e: events.ResponseMetadataEvent) -> RenderableType:
          ]
          if metadata.usage.input_cost is not None:
              input_parts.append((f"({currency_symbol}{metadata.usage.input_cost:.4f})", ThemeKey.METADATA_DIM))
-         parts.append(Text.assemble(*input_parts))
+         parts2.append(Text.assemble(*input_parts))

          # Cached
          if metadata.usage.cached_tokens > 0:
@@ -61,7 +77,7 @@ def render_response_metadata(e: events.ResponseMetadataEvent) -> RenderableType:
              ]
              if metadata.usage.cache_read_cost is not None:
                  cached_parts.append((f"({currency_symbol}{metadata.usage.cache_read_cost:.4f})", ThemeKey.METADATA_DIM))
-             parts.append(Text.assemble(*cached_parts))
+             parts2.append(Text.assemble(*cached_parts))

          # Output
          output_parts: list[tuple[str, str]] = [
@@ -70,11 +86,11 @@ def render_response_metadata(e: events.ResponseMetadataEvent) -> RenderableType:
          ]
          if metadata.usage.output_cost is not None:
              output_parts.append((f"({currency_symbol}{metadata.usage.output_cost:.4f})", ThemeKey.METADATA_DIM))
-         parts.append(Text.assemble(*output_parts))
+         parts2.append(Text.assemble(*output_parts))

          # Reasoning
          if metadata.usage.reasoning_tokens > 0:
-             parts.append(
+             parts2.append(
                  Text.assemble(
                      ("thinking", ThemeKey.METADATA_DIM),
                      (":", ThemeKey.METADATA_DIM),
@@ -85,14 +101,30 @@ def render_response_metadata(e: events.ResponseMetadataEvent) -> RenderableType:
                  )
              )

-         # Context
-         if metadata.usage.context_usage_percent is not None:
-             parts.append(
+     # Cost
+     if metadata.usage is not None and metadata.usage.total_cost is not None:
+         parts2.append(
+             Text.assemble(
+                 ("cost", ThemeKey.METADATA_DIM),
+                 (":", ThemeKey.METADATA_DIM),
+                 (f"{currency_symbol}{metadata.usage.total_cost:.4f}", ThemeKey.METADATA_DIM),
+             )
+         )
+     if parts2:
+         line2 = Text(" / ", style=ThemeKey.METADATA_DIM).join(parts2)
+         renderables.append(Padding(line2, (0, 0, 0, indent + 2)))
+
+     parts3: list[Text] = []
+     if metadata.usage is not None:
+         # Context (only for main agent)
+         if show_context_and_time and metadata.usage.context_usage_percent is not None:
+             context_size = format_number(metadata.usage.context_token or 0)
+             parts3.append(
                  Text.assemble(
                      ("context", ThemeKey.METADATA_DIM),
                      (":", ThemeKey.METADATA_DIM),
                      (
-                         f"{metadata.usage.context_usage_percent:.1f}%",
+                         f"{context_size}({metadata.usage.context_usage_percent:.1f}%)",
                          ThemeKey.METADATA_DIM,
                      ),
                  )
@@ -100,7 +132,7 @@ def render_response_metadata(e: events.ResponseMetadataEvent) -> RenderableType:

          # TPS
          if metadata.usage.throughput_tps is not None:
-             parts.append(
+             parts3.append(
                  Text.assemble(
                      ("tps", ThemeKey.METADATA_DIM),
                      (":", ThemeKey.METADATA_DIM),
@@ -109,8 +141,8 @@ def render_response_metadata(e: events.ResponseMetadataEvent) -> RenderableType:
                  )
              )

          # Duration
-         if metadata.task_duration_s is not None:
-             parts.append(
+         if show_context_and_time and metadata.task_duration_s is not None:
+             parts3.append(
                  Text.assemble(
                      ("time", ThemeKey.METADATA_DIM),
                      (":", ThemeKey.METADATA_DIM),
@@ -118,19 +150,25 @@ def render_response_metadata(e: events.ResponseMetadataEvent) -> RenderableType:
                  )
              )

-     # Cost
-     if metadata.usage is not None and metadata.usage.total_cost is not None:
-         parts.append(
-             Text.assemble(
-                 ("cost", ThemeKey.METADATA_DIM),
-                 (":", ThemeKey.METADATA_DIM),
-                 (f"{currency_symbol}{metadata.usage.total_cost:.4f}", ThemeKey.METADATA_DIM),
-             )
-         )
+     if parts3:
+         line2 = Text(" / ", style=ThemeKey.METADATA_DIM).join(parts3)
+         renderables.append(Padding(line2, (0, 0, 0, indent + 2)))
+
+     return renderables
+
+
+ def render_task_metadata(e: events.TaskMetadataEvent) -> RenderableType:
+     """Render task metadata including main agent and sub-agents, aggregated by model+provider."""
+     renderables: list[RenderableType] = []
+
+     renderables.extend(_render_task_metadata_block(e.metadata.main, indent=0, show_context_and_time=True))
+
+     # Aggregate by (model_name, provider), sorted by total_cost descending
+     sorted_items = model.TaskMetadata.aggregate_by_model(e.metadata.sub_agent_task_metadata)

-     if parts:
-         line2 = Text("/", style=ThemeKey.METADATA_DIM).join(parts)
-         renderables.append(Padding(line2, (0, 0, 0, 2)))
+     # Render each aggregated model block
+     for meta in sorted_items:
+         renderables.extend(_render_task_metadata_block(meta, indent=2, show_context_and_time=False))

      return Group(*renderables)
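
render_task_metadata relies on TaskMetadata.aggregate_by_model, which is defined in klaude_code/protocol/model.py and not shown in this excerpt. A hypothetical sketch of what the call-site comment describes (merge sub-agent metadata by model and provider, sorted by total cost descending); the dataclass field names beyond model_name, provider, and usage are guesses for illustration only.

```python
from dataclasses import dataclass, field


@dataclass
class Usage:
    input_tokens: int = 0
    output_tokens: int = 0
    total_cost: float = 0.0


@dataclass
class TaskMeta:
    model_name: str
    provider: str | None
    usage: Usage = field(default_factory=Usage)


def aggregate_by_model(items: list[TaskMeta]) -> list[TaskMeta]:
    """Merge per-sub-agent metadata by (model_name, provider), costliest first."""
    buckets: dict[tuple[str, str | None], TaskMeta] = {}
    for item in items:
        key = (item.model_name, item.provider)
        agg = buckets.setdefault(key, TaskMeta(item.model_name, item.provider))
        agg.usage.input_tokens += item.usage.input_tokens
        agg.usage.output_tokens += item.usage.output_tokens
        agg.usage.total_cost += item.usage.total_cost
    return sorted(buckets.values(), key=lambda m: m.usage.total_cost, reverse=True)
```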