klaude-code 2.9.0__py3-none-any.whl → 2.10.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (69)
  1. klaude_code/app/runtime.py +1 -1
  2. klaude_code/auth/antigravity/oauth.py +33 -29
  3. klaude_code/auth/claude/oauth.py +34 -49
  4. klaude_code/cli/cost_cmd.py +4 -4
  5. klaude_code/cli/list_model.py +1 -2
  6. klaude_code/config/assets/builtin_config.yaml +17 -0
  7. klaude_code/const.py +4 -3
  8. klaude_code/core/agent_profile.py +2 -5
  9. klaude_code/core/bash_mode.py +276 -0
  10. klaude_code/core/executor.py +40 -7
  11. klaude_code/core/manager/llm_clients.py +1 -0
  12. klaude_code/core/manager/llm_clients_builder.py +2 -2
  13. klaude_code/core/memory.py +140 -0
  14. klaude_code/core/reminders.py +17 -89
  15. klaude_code/core/task.py +1 -1
  16. klaude_code/core/tool/file/read_tool.py +13 -2
  17. klaude_code/core/tool/shell/bash_tool.py +1 -1
  18. klaude_code/core/turn.py +10 -4
  19. klaude_code/llm/bedrock_anthropic/__init__.py +3 -0
  20. klaude_code/llm/input_common.py +18 -0
  21. klaude_code/llm/{codex → openai_codex}/__init__.py +1 -1
  22. klaude_code/llm/{codex → openai_codex}/client.py +3 -3
  23. klaude_code/llm/openai_compatible/client.py +3 -1
  24. klaude_code/llm/openai_compatible/stream.py +19 -9
  25. klaude_code/llm/{responses → openai_responses}/client.py +1 -1
  26. klaude_code/llm/registry.py +3 -3
  27. klaude_code/llm/stream_parts.py +3 -1
  28. klaude_code/llm/usage.py +1 -1
  29. klaude_code/protocol/events.py +17 -1
  30. klaude_code/protocol/message.py +1 -0
  31. klaude_code/protocol/model.py +14 -1
  32. klaude_code/protocol/op.py +12 -0
  33. klaude_code/protocol/op_handler.py +5 -0
  34. klaude_code/session/session.py +22 -1
  35. klaude_code/tui/command/resume_cmd.py +1 -1
  36. klaude_code/tui/commands.py +15 -0
  37. klaude_code/tui/components/bash_syntax.py +4 -0
  38. klaude_code/tui/components/command_output.py +4 -5
  39. klaude_code/tui/components/developer.py +1 -3
  40. klaude_code/tui/components/diffs.py +3 -2
  41. klaude_code/tui/components/metadata.py +23 -26
  42. klaude_code/tui/components/rich/code_panel.py +31 -16
  43. klaude_code/tui/components/rich/markdown.py +44 -28
  44. klaude_code/tui/components/rich/status.py +2 -2
  45. klaude_code/tui/components/rich/theme.py +28 -16
  46. klaude_code/tui/components/tools.py +23 -0
  47. klaude_code/tui/components/user_input.py +49 -58
  48. klaude_code/tui/components/welcome.py +47 -2
  49. klaude_code/tui/display.py +15 -7
  50. klaude_code/tui/input/completers.py +8 -0
  51. klaude_code/tui/input/key_bindings.py +37 -1
  52. klaude_code/tui/input/prompt_toolkit.py +58 -31
  53. klaude_code/tui/machine.py +87 -49
  54. klaude_code/tui/renderer.py +148 -30
  55. klaude_code/tui/runner.py +22 -0
  56. klaude_code/tui/terminal/image.py +24 -3
  57. klaude_code/tui/terminal/notifier.py +11 -12
  58. klaude_code/tui/terminal/selector.py +1 -1
  59. klaude_code/ui/terminal/title.py +4 -2
  60. {klaude_code-2.9.0.dist-info → klaude_code-2.10.0.dist-info}/METADATA +1 -1
  61. {klaude_code-2.9.0.dist-info → klaude_code-2.10.0.dist-info}/RECORD +67 -66
  62. klaude_code/llm/bedrock/__init__.py +0 -3
  63. klaude_code/tui/components/assistant.py +0 -2
  64. /klaude_code/llm/{bedrock → bedrock_anthropic}/client.py +0 -0
  65. /klaude_code/llm/{codex → openai_codex}/prompt_sync.py +0 -0
  66. /klaude_code/llm/{responses → openai_responses}/__init__.py +0 -0
  67. /klaude_code/llm/{responses → openai_responses}/input.py +0 -0
  68. {klaude_code-2.9.0.dist-info → klaude_code-2.10.0.dist-info}/WHEEL +0 -0
  69. {klaude_code-2.9.0.dist-info → klaude_code-2.10.0.dist-info}/entry_points.txt +0 -0

klaude_code/llm/registry.py CHANGED
@@ -15,11 +15,11 @@ _REGISTRY: dict[llm_param.LLMClientProtocol, type["LLMClientABC"]] = {}
 _PROTOCOL_MODULES: dict[llm_param.LLMClientProtocol, str] = {
     llm_param.LLMClientProtocol.ANTHROPIC: "klaude_code.llm.anthropic",
     llm_param.LLMClientProtocol.CLAUDE_OAUTH: "klaude_code.llm.claude",
-    llm_param.LLMClientProtocol.BEDROCK: "klaude_code.llm.bedrock",
-    llm_param.LLMClientProtocol.CODEX_OAUTH: "klaude_code.llm.codex",
+    llm_param.LLMClientProtocol.BEDROCK: "klaude_code.llm.bedrock_anthropic",
+    llm_param.LLMClientProtocol.CODEX_OAUTH: "klaude_code.llm.openai_codex",
     llm_param.LLMClientProtocol.OPENAI: "klaude_code.llm.openai_compatible",
     llm_param.LLMClientProtocol.OPENROUTER: "klaude_code.llm.openrouter",
-    llm_param.LLMClientProtocol.RESPONSES: "klaude_code.llm.responses",
+    llm_param.LLMClientProtocol.RESPONSES: "klaude_code.llm.openai_responses",
     llm_param.LLMClientProtocol.GOOGLE: "klaude_code.llm.google",
     llm_param.LLMClientProtocol.ANTIGRAVITY: "klaude_code.llm.antigravity",
 }
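
The renamed module paths only change the import targets of this mapping; callers that resolve a client through the registry are unaffected. A minimal sketch of how such a protocol-to-module table typically backs a lazy registry (the trimmed-down enum and the `load_client_module` helper are illustrative assumptions, not klaude_code's actual API):

```python
import importlib
from enum import Enum


class LLMClientProtocol(Enum):
    # Trimmed stand-in for llm_param.LLMClientProtocol
    BEDROCK = "bedrock"
    CODEX_OAUTH = "codex_oauth"
    RESPONSES = "responses"


_PROTOCOL_MODULES: dict[LLMClientProtocol, str] = {
    LLMClientProtocol.BEDROCK: "klaude_code.llm.bedrock_anthropic",
    LLMClientProtocol.CODEX_OAUTH: "klaude_code.llm.openai_codex",
    LLMClientProtocol.RESPONSES: "klaude_code.llm.openai_responses",
}


def load_client_module(protocol: LLMClientProtocol):
    """Import the implementation module for a protocol only when it is requested."""
    return importlib.import_module(_PROTOCOL_MODULES[protocol])
```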

klaude_code/llm/stream_parts.py CHANGED
@@ -24,6 +24,7 @@ def append_thinking_text_part(
     text: str,
     *,
     model_id: str,
+    reasoning_field: str | None = None,
     force_new: bool = False,
 ) -> int | None:
     if not text:
@@ -35,10 +36,11 @@ def append_thinking_text_part(
         parts[-1] = message.ThinkingTextPart(
             text=last.text + text,
             model_id=model_id,
+            reasoning_field=reasoning_field or last.reasoning_field,
         )
         return len(parts) - 1

-    parts.append(message.ThinkingTextPart(text=text, model_id=model_id))
+    parts.append(message.ThinkingTextPart(text=text, model_id=model_id, reasoning_field=reasoning_field))
     return len(parts) - 1


klaude_code/llm/usage.py CHANGED
@@ -28,7 +28,7 @@ def calculate_cost(usage: model.Usage, cost_config: llm_param.Cost | None) -> None:
     usage.output_cost = (usage.output_tokens / 1_000_000) * cost_config.output

     # Cache read cost
-    usage.cache_read_cost = (usage.cached_tokens / 1_000_000) * cost_config.cache_read
+    usage.cache_read_cost = (usage.cached_tokens / 1_000_000) * (cost_config.cache_read or cost_config.input)

     # Image generation cost
     usage.image_cost = (usage.image_tokens / 1_000_000) * cost_config.image
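
The changed line makes cache-read pricing fall back to the plain input rate whenever a model's cost config leaves `cache_read` unset. A small illustrative calculation; the `Cost` stand-in below is a simplified assumption, not the real `llm_param.Cost` model:

```python
from dataclasses import dataclass


@dataclass
class Cost:
    """Simplified stand-in for llm_param.Cost (USD per 1M tokens)."""

    input: float
    output: float
    cache_read: float | None = None


def cache_read_cost(cached_tokens: int, cost: Cost) -> float:
    # Fall back to the input rate when no dedicated cache-read rate is configured.
    return (cached_tokens / 1_000_000) * (cost.cache_read or cost.input)


usd = cache_read_cost(200_000, Cost(input=3.0, output=15.0))
print(f"{usd:.2f}")  # 0.60 -- billed at the input rate because cache_read is unset
usd = cache_read_cost(200_000, Cost(input=3.0, output=15.0, cache_read=0.3))
print(f"{usd:.2f}")  # 0.06 -- the dedicated cache-read rate wins when present
```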

klaude_code/protocol/events.py CHANGED
@@ -14,6 +14,9 @@ __all__ = [
     "AssistantTextDeltaEvent",
     "AssistantTextEndEvent",
     "AssistantTextStartEvent",
+    "BashCommandEndEvent",
+    "BashCommandOutputDeltaEvent",
+    "BashCommandStartEvent",
     "CommandOutputEvent",
     "CompactionEndEvent",
     "CompactionStartEvent",
@@ -81,6 +84,19 @@ class CommandOutputEvent(Event):
     is_error: bool = False


+class BashCommandStartEvent(Event):
+    command: str
+
+
+class BashCommandOutputDeltaEvent(Event):
+    content: str
+
+
+class BashCommandEndEvent(Event):
+    exit_code: int | None = None
+    cancelled: bool = False
+
+
 class TaskStartEvent(Event):
     sub_agent_state: model.SubAgentState | None = None
     model_id: str | None = None
@@ -119,7 +135,6 @@ class UsageEvent(ResponseEvent):

 class TaskMetadataEvent(Event):
     metadata: model.TaskMetadataItem
-    cancelled: bool = False


 class ThinkingStartEvent(ResponseEvent):
@@ -167,6 +182,7 @@ class WelcomeEvent(Event):
     llm_config: llm_param.LLMConfigParameter
     show_klaude_code_info: bool = True
     loaded_skills: dict[str, list[str]] = Field(default_factory=dict)
+    loaded_memories: dict[str, list[str]] = Field(default_factory=dict)


 class ErrorEvent(Event):
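
A hedged sketch of how the three new bash-mode events could be emitted around a streamed shell command. The `run_bash_streaming` helper, its `emit` callback, and the `asyncio.create_subprocess_shell` plumbing are assumptions for illustration; only the event classes and their fields come from the diff above (the `session_id` keyword mirrors how other events are constructed elsewhere in this diff and is assumed to live on the `Event` base):

```python
import asyncio
from typing import Awaitable, Callable

from klaude_code.protocol import events


async def run_bash_streaming(
    session_id: str,
    command: str,
    emit: Callable[[events.Event], Awaitable[None]],
) -> None:
    """Run a shell command and translate its lifecycle into bash-mode events."""
    await emit(events.BashCommandStartEvent(session_id=session_id, command=command))
    proc = await asyncio.create_subprocess_shell(
        command,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.STDOUT,
    )
    try:
        assert proc.stdout is not None
        async for chunk in proc.stdout:  # StreamReader yields one line per iteration
            await emit(
                events.BashCommandOutputDeltaEvent(
                    session_id=session_id, content=chunk.decode(errors="replace")
                )
            )
        await emit(events.BashCommandEndEvent(session_id=session_id, exit_code=await proc.wait()))
    except asyncio.CancelledError:
        proc.kill()
        await emit(events.BashCommandEndEvent(session_id=session_id, cancelled=True))
        raise
```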

klaude_code/protocol/message.py CHANGED
@@ -112,6 +112,7 @@ class ThinkingTextPart(BaseModel):
     id: str | None = None
     text: str
     model_id: str | None = None
+    reasoning_field: str | None = None  # Original field name: reasoning_content, reasoning, reasoning_text


 class ThinkingSignaturePart(BaseModel):
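
Together with the `append_thinking_text_part` change above, `reasoning_field` records which raw provider key (`reasoning_content`, `reasoning`, or `reasoning_text`) a thinking delta arrived under, and consecutive deltas keep merging into a single part. A self-contained sketch of that merge rule, using a stand-in dataclass because the full signature of the real helper is not shown in this diff:

```python
from dataclasses import dataclass


@dataclass
class ThinkingTextPart:  # stand-in for message.ThinkingTextPart
    text: str
    model_id: str | None = None
    reasoning_field: str | None = None


def append_thinking_text(
    parts: list[ThinkingTextPart],
    text: str,
    *,
    model_id: str,
    reasoning_field: str | None = None,
) -> None:
    if parts:
        last = parts[-1]
        # Extend the existing part; prefer a newly supplied field name, else keep the stored one.
        parts[-1] = ThinkingTextPart(
            text=last.text + text,
            model_id=model_id,
            reasoning_field=reasoning_field or last.reasoning_field,
        )
    else:
        parts.append(ThinkingTextPart(text=text, model_id=model_id, reasoning_field=reasoning_field))


parts: list[ThinkingTextPart] = []
append_thinking_text(parts, "Let me think", model_id="m", reasoning_field="reasoning_content")
append_thinking_text(parts, " about this.", model_id="m")
assert parts[0].text == "Let me think about this."
assert parts[0].reasoning_field == "reasoning_content"
```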

klaude_code/protocol/model.py CHANGED
@@ -228,6 +228,17 @@ class MarkdownDocUIExtra(BaseModel):
     content: str


+class ReadPreviewLine(BaseModel):
+    line_no: int
+    content: str
+
+
+class ReadPreviewUIExtra(BaseModel):
+    type: Literal["read_preview"] = "read_preview"
+    lines: list[ReadPreviewLine]
+    remaining_lines: int  # lines not shown in preview
+
+
 class SessionStatusUIExtra(BaseModel):
     type: Literal["session_status"] = "session_status"
     usage: "Usage"
@@ -243,6 +254,7 @@ MultiUIExtraItem = (
     | ImageUIExtra
     | MarkdownDocUIExtra
     | SessionStatusUIExtra
+    | ReadPreviewUIExtra
 )


@@ -265,7 +277,8 @@ ToolResultUIExtra = Annotated[
     | ImageUIExtra
     | MarkdownDocUIExtra
     | SessionStatusUIExtra
-    | MultiUIExtra,
+    | MultiUIExtra
+    | ReadPreviewUIExtra,
     Field(discriminator="type"),
 ]
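
The new `read_preview` variant joins both discriminated `ui_extra` unions, so a tool result can carry a numbered preview of a file (matching the Read tool change in this release). A hedged sketch of building one; the `build_read_preview` helper and its five-line budget are assumptions, while the model shapes come from the diff above:

```python
from klaude_code.protocol import model


def build_read_preview(
    lines: list[str], start_line: int = 1, preview_lines: int = 5
) -> model.ReadPreviewUIExtra:
    """Keep the first few lines with their numbers and count what was left out."""
    shown = [
        model.ReadPreviewLine(line_no=start_line + i, content=text)
        for i, text in enumerate(lines[:preview_lines])
    ]
    return model.ReadPreviewUIExtra(lines=shown, remaining_lines=max(0, len(lines) - preview_lines))
```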

klaude_code/protocol/op.py CHANGED
@@ -24,6 +24,7 @@ class OperationType(Enum):
    """Enumeration of supported operation types."""

    RUN_AGENT = "run_agent"
+   RUN_BASH = "run_bash"
    CONTINUE_AGENT = "continue_agent"
    COMPACT_SESSION = "compact_session"
    CHANGE_MODEL = "change_model"
@@ -60,6 +61,17 @@ class RunAgentOperation(Operation):
        await handler.handle_run_agent(self)


+class RunBashOperation(Operation):
+    """Operation for running a user-entered bash-mode command."""
+
+    type: OperationType = OperationType.RUN_BASH
+    session_id: str
+    command: str
+
+    async def execute(self, handler: OperationHandler) -> None:
+        await handler.handle_run_bash(self)
+
+
 class ContinueAgentOperation(Operation):
    """Operation for continuing an agent task without adding a new user message.


klaude_code/protocol/op_handler.py CHANGED
@@ -22,6 +22,7 @@ if TYPE_CHECKING:
        InterruptOperation,
        ResumeSessionOperation,
        RunAgentOperation,
+       RunBashOperation,
    )


@@ -32,6 +33,10 @@ class OperationHandler(Protocol):
        """Handle a run agent operation."""
        ...

+    async def handle_run_bash(self, operation: RunBashOperation) -> None:
+        """Handle a bash-mode command execution operation."""
+        ...
+
    async def handle_continue_agent(self, operation: ContinueAgentOperation) -> None:
        """Handle a continue agent operation (resume without adding user message)."""
        ...
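
A hedged sketch of how the new operation flows through this protocol: the TUI submits a `RunBashOperation`, and an executor implements `handle_run_bash`. The `Executor` class and its `_spawn_shell` placeholder are illustrative assumptions; only `RunBashOperation`, `OperationType.RUN_BASH`, and `handle_run_bash` come from the diff:

```python
from klaude_code.protocol.op import RunBashOperation


class Executor:
    """Partial sketch of an OperationHandler implementation (other methods omitted)."""

    async def handle_run_bash(self, operation: RunBashOperation) -> None:
        # Hand the command to whatever owns the shell for this session.
        await self._spawn_shell(operation.session_id, operation.command)

    async def _spawn_shell(self, session_id: str, command: str) -> None:
        ...  # placeholder for the real bash-mode runner


async def submit_bash_command(handler: Executor, session_id: str, command: str) -> None:
    # Operations double-dispatch to the handler through their own execute() method.
    op = RunBashOperation(session_id=session_id, command=command)
    await op.execute(handler)
```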

klaude_code/session/session.py CHANGED
@@ -316,10 +316,15 @@ class Session(BaseModel):
         prev_item: message.HistoryEvent | None = None
         last_assistant_content: str = ""
         report_back_result: str | None = None
+        pending_tool_calls: dict[str, events.ToolCallEvent] = {}
         history = self.conversation_history
         history_len = len(history)
         yield events.TaskStartEvent(session_id=self.id, sub_agent_state=self.sub_agent_state)
         for idx, it in enumerate(history):
+            # Flush pending tool calls if current item won't consume them
+            if pending_tool_calls and not isinstance(it, message.ToolResultMessage):
+                yield from pending_tool_calls.values()
+                pending_tool_calls.clear()
             if self.need_turn_start(prev_item, it):
                 yield events.TurnStartEvent(session_id=self.id)
             match it:
@@ -331,6 +336,7 @@ class Session(BaseModel):
                     # Reconstruct streaming boundaries from saved parts.
                     # This allows replay to reuse the same TUI state machine as live events.
                     thinking_open = False
+                    thinking_had_content = False
                     assistant_open = False

                     for part in am.parts:
@@ -342,15 +348,23 @@ class Session(BaseModel):
                                thinking_open = True
                                yield events.ThinkingStartEvent(response_id=am.response_id, session_id=self.id)
                            if part.text:
+                                if thinking_had_content:
+                                    yield events.ThinkingDeltaEvent(
+                                        content=" \n \n",
+                                        response_id=am.response_id,
+                                        session_id=self.id,
+                                    )
                                yield events.ThinkingDeltaEvent(
                                    content=part.text,
                                    response_id=am.response_id,
                                    session_id=self.id,
                                )
+                                thinking_had_content = True
                            continue

                        if thinking_open:
                            thinking_open = False
+                            thinking_had_content = False
                            yield events.ThinkingEndEvent(response_id=am.response_id, session_id=self.id)

                        if isinstance(part, message.TextPart):
@@ -380,7 +394,7 @@ class Session(BaseModel):
                            continue
                        if part.tool_name == tools.REPORT_BACK:
                            report_back_result = part.arguments_json
-                        yield events.ToolCallEvent(
+                        pending_tool_calls[part.call_id] = events.ToolCallEvent(
                            tool_call_id=part.call_id,
                            tool_name=part.tool_name,
                            arguments=part.arguments_json,
@@ -390,6 +404,8 @@ class Session(BaseModel):
                    if am.stop_reason == "aborted":
                        yield events.InterruptEvent(session_id=self.id)
                case message.ToolResultMessage() as tr:
+                    if tr.call_id in pending_tool_calls:
+                        yield pending_tool_calls.pop(tr.call_id)
                    status = "success" if tr.status == "success" else "error"
                    # Check if this is the last tool result in the current turn
                    next_item = history[idx + 1] if idx + 1 < history_len else None
@@ -437,6 +453,11 @@ class Session(BaseModel):
                    pass
            prev_item = it

+        # Flush any remaining pending tool calls (e.g., from aborted or incomplete sessions)
+        if pending_tool_calls:
+            yield from pending_tool_calls.values()
+            pending_tool_calls.clear()
+
        has_structured_output = report_back_result is not None
        task_result = report_back_result if has_structured_output else last_assistant_content
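
The replay change buffers each `ToolCallEvent` and emits it either immediately before its matching tool result or as a flush when the stream moves on (or ends). A self-contained sketch of that reordering idea, using stand-in types rather than the real Session/message classes:

```python
from dataclasses import dataclass
from typing import Iterable, Iterator


@dataclass
class ToolCall:
    call_id: str


@dataclass
class ToolResult:
    call_id: str


def replay(history: Iterable[object]) -> Iterator[object]:
    pending: dict[str, ToolCall] = {}
    for item in history:
        if pending and not isinstance(item, ToolResult):
            # The next item cannot consume the buffered calls; emit them now.
            yield from pending.values()
            pending.clear()
        if isinstance(item, ToolCall):
            pending[item.call_id] = item
        elif isinstance(item, ToolResult):
            # Emit the matching call immediately before its result.
            if item.call_id in pending:
                yield pending.pop(item.call_id)
            yield item
        else:
            yield item
    # Aborted or incomplete sessions may leave calls without results.
    yield from pending.values()
```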

klaude_code/tui/command/resume_cmd.py CHANGED
@@ -34,7 +34,7 @@ def select_session_sync(session_ids: list[str] | None = None) -> str | None:
        if msg == "⋮":
            title.append(("class:msg", f" {msg}\n"))
        else:
-            prefix = "└─" if is_last else "├─"
+            prefix = "╰─" if is_last else "├─"
            title.append(("fg:ansibrightblack dim", f" {prefix} "))
            title.append(("class:msg", f"{msg}\n"))
    title.append(("", "\n"))

klaude_code/tui/commands.py CHANGED
@@ -38,6 +38,21 @@ class RenderCommandOutput(RenderCommand):
     event: events.CommandOutputEvent


+@dataclass(frozen=True, slots=True)
+class RenderBashCommandStart(RenderCommand):
+    event: events.BashCommandStartEvent
+
+
+@dataclass(frozen=True, slots=True)
+class AppendBashCommandOutput(RenderCommand):
+    event: events.BashCommandOutputDeltaEvent
+
+
+@dataclass(frozen=True, slots=True)
+class RenderBashCommandEnd(RenderCommand):
+    event: events.BashCommandEndEvent
+
+
 @dataclass(frozen=True, slots=True)
 class RenderTurnStart(RenderCommand):
     event: events.TurnStartEvent
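
A hedged sketch of mapping the new bash events onto these render commands. The `to_render_command` dispatcher is illustrative only; the real wiring lives in klaude_code/tui/renderer.py and machine.py, which are changed in this release but not shown here:

```python
from klaude_code.protocol import events
from klaude_code.tui import commands


def to_render_command(event: events.Event) -> commands.RenderCommand | None:
    match event:
        case events.BashCommandStartEvent():
            return commands.RenderBashCommandStart(event=event)
        case events.BashCommandOutputDeltaEvent():
            return commands.AppendBashCommandOutput(event=event)
        case events.BashCommandEndEvent():
            return commands.RenderBashCommandEnd(event=event)
        case _:
            return None
```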

klaude_code/tui/components/bash_syntax.py CHANGED
@@ -187,6 +187,10 @@ def highlight_bash_command(command: str) -> Text:
            expect_subcommand = False
        elif token_type in (Token.Text.Whitespace,):
            result.append(token_value)
+            # Newline starts a new command context (like ; or &&)
+            if "\n" in token_value:
+                expect_command = True
+                expect_subcommand = False
        elif token_type == Token.Name.Builtin:
            # Built-in commands are always commands
            result.append(token_value, style=ThemeKey.BASH_COMMAND)

klaude_code/tui/components/command_output.py CHANGED
@@ -1,5 +1,4 @@
 from rich.console import RenderableType
-from rich.padding import Padding
 from rich.table import Table
 from rich.text import Text

@@ -19,7 +18,7 @@ def render_command_output(e: events.CommandOutputEvent) -> RenderableType:
        case _:
            content = e.content or "(no content)"
            style = ThemeKey.TOOL_RESULT if not e.is_error else ThemeKey.ERROR
-            return Padding.indent(truncate_middle(content, base_style=style), level=2)
+            return truncate_middle(content, base_style=style)


 def _format_tokens(tokens: int) -> str:
@@ -44,7 +43,7 @@ def _format_cost(cost: float | None, currency: str = "USD") -> str:
 def _render_fork_session_output(e: events.CommandOutputEvent) -> RenderableType:
    """Render fork session output with usage instructions."""
    if not isinstance(e.ui_extra, model.SessionIdUIExtra):
-        return Padding.indent(Text(e.content, style=ThemeKey.TOOL_RESULT), level=2)
+        return Text(e.content, style=ThemeKey.TOOL_RESULT)

    grid = Table.grid(padding=(0, 1))
    session_id = e.ui_extra.session_id
@@ -54,7 +53,7 @@ def _render_fork_session_output(e: events.CommandOutputEvent) -> RenderableType:
    grid.add_row(Text("Session forked. Resume command copied to clipboard:", style=ThemeKey.TOOL_RESULT))
    grid.add_row(Text(f" klaude -r {short_id}", style=ThemeKey.TOOL_RESULT_BOLD))

-    return Padding.indent(grid, level=2)
+    return grid


 def _render_status_output(e: events.CommandOutputEvent) -> RenderableType:
@@ -95,4 +94,4 @@ def _render_status_output(e: events.CommandOutputEvent) -> RenderableType:
        usage_detail = "(no usage data)"
    table.add_row(f"{model_label}:", usage_detail)

-    return Padding.indent(table, level=2)
+    return table

klaude_code/tui/components/developer.py CHANGED
@@ -6,7 +6,7 @@ from klaude_code.tui.components.common import create_grid
 from klaude_code.tui.components.rich.theme import ThemeKey
 from klaude_code.tui.components.tools import render_path

-REMINDER_BULLET = " ⧉"
+REMINDER_BULLET = "⧉"


 def need_render_developer_message(e: events.DeveloperMessageEvent) -> bool:
@@ -56,8 +56,6 @@ def render_developer_message(e: events.DeveloperMessageEvent) -> RenderableType:
            text = "Todo hasn't been updated recently"
        case "empty":
            text = "Todo list is empty"
-        case _:
-            text = "Todo reminder"
    grid = create_grid()
    grid.add_row(
        Text(REMINDER_BULLET, style=ThemeKey.REMINDER),

klaude_code/tui/components/diffs.py CHANGED
@@ -1,7 +1,7 @@
 from rich.console import RenderableType
 from rich.text import Text

-from klaude_code.const import DIFF_PREFIX_WIDTH
+from klaude_code.const import DIFF_PREFIX_WIDTH, TAB_EXPAND_WIDTH
 from klaude_code.protocol import model
 from klaude_code.tui.components.common import create_grid
 from klaude_code.tui.components.rich.theme import ThemeKey
@@ -74,7 +74,8 @@ def _render_structured_line(line: model.DiffLine) -> Text:
        return Text("")
    text = Text()
    for span in line.spans:
-        text.append(span.text, style=_span_style(line.kind, span.op))
+        content = span.text.expandtabs(TAB_EXPAND_WIDTH)
+        text.append(content, style=_span_style(line.kind, span.op))
    return text


klaude_code/tui/components/metadata.py CHANGED
@@ -34,8 +34,8 @@ def _render_task_metadata_block(
    content = Text()
    if metadata.provider is not None:
        content.append_text(Text(metadata.provider.lower().replace(" ", "-"), style=ThemeKey.METADATA))
-        content.append_text(Text("/", style=ThemeKey.METADATA_DIM))
-        content.append_text(Text(metadata.model_name, style=ThemeKey.METADATA_BOLD))
+        content.append_text(Text("/", style=ThemeKey.METADATA))
+        content.append_text(Text(metadata.model_name, style=ThemeKey.METADATA))
    if metadata.description:
        content.append_text(Text(" ", style=ThemeKey.METADATA)).append_text(
            Text(metadata.description, style=ThemeKey.METADATA_ITALIC)
@@ -47,18 +47,18 @@ def _render_task_metadata_block(
    if metadata.usage is not None:
        # Tokens: ↑37k ◎5k ↓907 ∿45k ⌗ 100
        token_text = Text()
-        token_text.append("↑", style=ThemeKey.METADATA_DIM)
+        token_text.append("↑", style=ThemeKey.METADATA)
        token_text.append(format_number(metadata.usage.input_tokens), style=ThemeKey.METADATA)
        if metadata.usage.cached_tokens > 0:
-            token_text.append(" ◎", style=ThemeKey.METADATA_DIM)
+            token_text.append(" ◎", style=ThemeKey.METADATA)
            token_text.append(format_number(metadata.usage.cached_tokens), style=ThemeKey.METADATA)
-        token_text.append(" ↓", style=ThemeKey.METADATA_DIM)
+        token_text.append(" ↓", style=ThemeKey.METADATA)
        token_text.append(format_number(metadata.usage.output_tokens), style=ThemeKey.METADATA)
        if metadata.usage.reasoning_tokens > 0:
-            token_text.append(" ∿", style=ThemeKey.METADATA_DIM)
+            token_text.append(" ∿", style=ThemeKey.METADATA)
            token_text.append(format_number(metadata.usage.reasoning_tokens), style=ThemeKey.METADATA)
        if metadata.usage.image_tokens > 0:
-            token_text.append(" ⊡", style=ThemeKey.METADATA_DIM)
+            token_text.append(" ⊡", style=ThemeKey.METADATA)
            token_text.append(format_number(metadata.usage.image_tokens), style=ThemeKey.METADATA)
        parts.append(token_text)

@@ -66,7 +66,7 @@ def _render_task_metadata_block(
    if metadata.usage is not None and metadata.usage.total_cost is not None:
        parts.append(
            Text.assemble(
-                (currency_symbol, ThemeKey.METADATA_DIM),
+                (currency_symbol, ThemeKey.METADATA),
                (f"{metadata.usage.total_cost:.4f}", ThemeKey.METADATA),
            )
        )
@@ -79,9 +79,9 @@ def _render_task_metadata_block(
        parts.append(
            Text.assemble(
                (context_size, ThemeKey.METADATA),
-                ("/", ThemeKey.METADATA_DIM),
+                ("/", ThemeKey.METADATA),
                (effective_limit_str, ThemeKey.METADATA),
-                (f"({metadata.usage.context_usage_percent:.1f}%)", ThemeKey.METADATA_DIM),
+                (f"({metadata.usage.context_usage_percent:.1f}%)", ThemeKey.METADATA),
            )
        )

@@ -90,7 +90,7 @@ def _render_task_metadata_block(
        parts.append(
            Text.assemble(
                (f"{metadata.usage.throughput_tps:.1f}", ThemeKey.METADATA),
-                ("tps", ThemeKey.METADATA_DIM),
+                ("tps", ThemeKey.METADATA),
            )
        )

@@ -101,7 +101,7 @@ def _render_task_metadata_block(
        parts.append(
            Text.assemble(
                (ftl_str, ThemeKey.METADATA),
-                ("-ftl", ThemeKey.METADATA_DIM),
+                ("-ftl", ThemeKey.METADATA),
            )
        )

@@ -110,7 +110,7 @@ def _render_task_metadata_block(
        parts.append(
            Text.assemble(
                (f"{metadata.task_duration_s:.1f}", ThemeKey.METADATA),
-                ("s", ThemeKey.METADATA_DIM),
+                ("s", ThemeKey.METADATA),
            )
        )

@@ -120,13 +120,13 @@ def _render_task_metadata_block(
        parts.append(
            Text.assemble(
                (str(metadata.turn_count), ThemeKey.METADATA),
-                (suffix, ThemeKey.METADATA_DIM),
+                (suffix, ThemeKey.METADATA),
            )
        )

    if parts:
-        content.append_text(Text(" ", style=ThemeKey.METADATA_DIM))
-        content.append_text(Text(" ", style=ThemeKey.METADATA_DIM).join(parts))
+        content.append_text(Text(" ", style=ThemeKey.METADATA))
+        content.append_text(Text(" ", style=ThemeKey.METADATA).join(parts))

    grid.add_row(mark, content)
    return grid
@@ -136,19 +136,16 @@ def render_task_metadata(e: events.TaskMetadataEvent) -> RenderableType:
    """Render task metadata including main agent and sub-agents."""
    renderables: list[RenderableType] = []

-    if e.cancelled:
-        renderables.append(Text())
-
    has_sub_agents = len(e.metadata.sub_agent_task_metadata) > 0
    # Use an extra space for the main agent mark to align with two-character marks (├─, └─)
-    main_mark_text = ""
+    main_mark_text = ""
    main_mark = Text(main_mark_text, style=ThemeKey.METADATA)

    renderables.append(_render_task_metadata_block(e.metadata.main_agent, mark=main_mark, show_context_and_time=True))

    # Render each sub-agent metadata block
    for meta in e.metadata.sub_agent_task_metadata:
-        sub_mark = Text(" └", style=ThemeKey.METADATA_DIM)
+        sub_mark = Text(" └", style=ThemeKey.METADATA)
        renderables.append(_render_task_metadata_block(meta, mark=sub_mark, show_context_and_time=True))

    # Add total cost line when there are sub-agents
@@ -165,11 +162,11 @@ def render_task_metadata(e: events.TaskMetadataEvent) -> RenderableType:

    currency_symbol = "¥" if currency == "CNY" else "$"
    total_line = Text.assemble(
-        (" └", ThemeKey.METADATA_DIM),
-        (" Σ ", ThemeKey.METADATA_DIM),
-        ("total ", ThemeKey.METADATA_DIM),
-        (currency_symbol, ThemeKey.METADATA_DIM),
-        (f"{total_cost:.4f}", ThemeKey.METADATA_DIM),
+        (" └", ThemeKey.METADATA),
+        (" Σ ", ThemeKey.METADATA),
+        ("total ", ThemeKey.METADATA),
+        (currency_symbol, ThemeKey.METADATA),
+        (f"{total_cost:.4f}", ThemeKey.METADATA),
    )

    renderables.append(total_line)

klaude_code/tui/components/rich/code_panel.py CHANGED
@@ -14,12 +14,12 @@ from rich.style import StyleType
 if TYPE_CHECKING:
     from rich.console import Console, ConsoleOptions, RenderResult

-# Box drawing characters
-TOP_LEFT = "┌"  # ┌
-TOP_RIGHT = "┐"  # ┐
-BOTTOM_LEFT = "└"  # └
-BOTTOM_RIGHT = "┘"  # ┘
-HORIZONTAL = "─"  # ─
+# Box drawing characters (rounded corners)
+TOP_LEFT = "╭"
+TOP_RIGHT = "╮"
+BOTTOM_LEFT = "╰"
+BOTTOM_RIGHT = "╯"
+HORIZONTAL = "─"


 class CodePanel(JupyterMixin):
@@ -32,10 +32,10 @@ class CodePanel(JupyterMixin):
    >>> console.print(CodePanel(Syntax(code, "python")))

    Renders as:
-    ┌──────────────────────────┐
+    ╭──────────────────────────╮
     code line 1
     code line 2
-    └──────────────────────────┘
+    ╰──────────────────────────╯
    """

    def __init__(
@@ -44,7 +44,9 @@
        *,
        border_style: StyleType = "none",
        expand: bool = False,
-        padding: int = 1,
+        padding: int = 0,
+        title: str | None = None,
+        title_style: StyleType = "none",
    ) -> None:
        """Initialize the CodePanel.

@@ -52,12 +54,16 @@
            renderable: A console renderable object.
            border_style: The style of the border. Defaults to "none".
            expand: If True, expand to fill available width. Defaults to False.
-            padding: Left/right padding for content. Defaults to 1.
+            padding: Left/right padding for content. Defaults to 0.
+            title: Optional title to display in the top border. Defaults to None.
+            title_style: The style of the title. Defaults to "none".
        """
        self.renderable = renderable
        self.border_style = border_style
        self.expand = expand
        self.padding = padding
+        self.title = title
+        self.title_style = title_style

    @staticmethod
    def _measure_max_line_cells(lines: list[list[Segment]]) -> int:
@@ -93,11 +99,20 @@
        new_line = Segment.line()
        pad_segment = Segment(" " * pad) if pad > 0 else None

-        # Top border: ┌───...───┐
-        top_border = (
-            TOP_LEFT + (HORIZONTAL * (border_width - 2)) + TOP_RIGHT if border_width >= 2 else HORIZONTAL * border_width
-        )
-        yield Segment(top_border, border_style)
+        # Top border: ╭───...───╮ or ╭ title ───...───╮
+        if self.title and border_width >= len(self.title) + 4:
+            title_part = f" {self.title} "
+            title_style = console.get_style(self.title_style)
+            remaining = border_width - 2 - len(title_part)
+            yield Segment(TOP_LEFT, border_style)
+            yield Segment(title_part, title_style)
+            yield Segment((HORIZONTAL * remaining) + TOP_RIGHT, border_style)
+        elif border_width >= 2:
+            top_border = TOP_LEFT + (HORIZONTAL * (border_width - 2)) + TOP_RIGHT
+            yield Segment(top_border, border_style)
+        else:
+            top_border = HORIZONTAL * border_width
+            yield Segment(top_border, border_style)
        yield new_line

        # Content lines with padding
@@ -109,7 +124,7 @@
            yield pad_segment
        yield new_line

-        # Bottom border: └───...───┘
+        # Bottom border: ╰───...───╯
        bottom_border = (
            BOTTOM_LEFT + (HORIZONTAL * (border_width - 2)) + BOTTOM_RIGHT
            if border_width >= 2
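
For reference, a hedged usage sketch of the new title support in this panel (the module path matches this wheel's layout; how the TUI actually instantiates CodePanel is not shown in the diff):

```python
from rich.console import Console
from rich.syntax import Syntax

from klaude_code.tui.components.rich.code_panel import CodePanel

console = Console()
code = 'print("hello")\n'
# With padding now defaulting to 0, the panel hugs its content; the optional
# title is drawn inside the rounded top border when there is room for it.
console.print(CodePanel(Syntax(code, "python"), title="example.py", title_style="dim"))
```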