klaude-code 1.2.8__py3-none-any.whl → 1.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. klaude_code/auth/codex/__init__.py +1 -1
  2. klaude_code/command/__init__.py +2 -0
  3. klaude_code/command/prompt-deslop.md +14 -0
  4. klaude_code/command/release_notes_cmd.py +86 -0
  5. klaude_code/command/status_cmd.py +92 -54
  6. klaude_code/core/agent.py +13 -19
  7. klaude_code/core/manager/sub_agent_manager.py +5 -1
  8. klaude_code/core/prompt.py +38 -28
  9. klaude_code/core/reminders.py +4 -4
  10. klaude_code/core/task.py +59 -40
  11. klaude_code/core/tool/__init__.py +2 -0
  12. klaude_code/core/tool/file/apply_patch_tool.py +1 -1
  13. klaude_code/core/tool/file/edit_tool.py +1 -1
  14. klaude_code/core/tool/file/multi_edit_tool.py +1 -1
  15. klaude_code/core/tool/file/write_tool.py +1 -1
  16. klaude_code/core/tool/memory/memory_tool.py +2 -2
  17. klaude_code/core/tool/sub_agent_tool.py +2 -1
  18. klaude_code/core/tool/todo/todo_write_tool.py +1 -1
  19. klaude_code/core/tool/todo/update_plan_tool.py +1 -1
  20. klaude_code/core/tool/tool_context.py +21 -4
  21. klaude_code/core/tool/tool_runner.py +5 -8
  22. klaude_code/core/tool/web/mermaid_tool.py +1 -4
  23. klaude_code/core/turn.py +40 -37
  24. klaude_code/llm/anthropic/client.py +13 -44
  25. klaude_code/llm/client.py +1 -1
  26. klaude_code/llm/codex/client.py +4 -3
  27. klaude_code/llm/input_common.py +0 -6
  28. klaude_code/llm/openai_compatible/client.py +28 -72
  29. klaude_code/llm/openai_compatible/input.py +6 -4
  30. klaude_code/llm/openai_compatible/stream_processor.py +82 -0
  31. klaude_code/llm/openrouter/client.py +29 -59
  32. klaude_code/llm/openrouter/input.py +4 -27
  33. klaude_code/llm/responses/client.py +15 -48
  34. klaude_code/llm/usage.py +51 -10
  35. klaude_code/protocol/commands.py +1 -0
  36. klaude_code/protocol/events.py +11 -2
  37. klaude_code/protocol/model.py +142 -24
  38. klaude_code/protocol/sub_agent.py +5 -1
  39. klaude_code/session/export.py +51 -27
  40. klaude_code/session/session.py +28 -16
  41. klaude_code/session/templates/export_session.html +4 -1
  42. klaude_code/ui/modes/repl/__init__.py +1 -5
  43. klaude_code/ui/modes/repl/event_handler.py +153 -54
  44. klaude_code/ui/modes/repl/renderer.py +4 -4
  45. klaude_code/ui/renderers/developer.py +35 -25
  46. klaude_code/ui/renderers/metadata.py +68 -30
  47. klaude_code/ui/renderers/tools.py +53 -87
  48. klaude_code/ui/rich/markdown.py +5 -5
  49. {klaude_code-1.2.8.dist-info → klaude_code-1.2.9.dist-info}/METADATA +1 -1
  50. {klaude_code-1.2.8.dist-info → klaude_code-1.2.9.dist-info}/RECORD +52 -49
  51. {klaude_code-1.2.8.dist-info → klaude_code-1.2.9.dist-info}/WHEEL +0 -0
  52. {klaude_code-1.2.8.dist-info → klaude_code-1.2.9.dist-info}/entry_points.txt +0 -0
@@ -159,20 +159,35 @@ def _format_cost(cost: float, currency: str = "USD") -> str:
159
159
  return f"{symbol}{cost:.4f}"
160
160
 
161
161
 
162
- def _render_metadata_item(item: model.ResponseMetadataItem) -> str:
163
- # Model Name [@ Provider]
162
+ def _render_single_metadata(
163
+ metadata: model.TaskMetadata,
164
+ *,
165
+ indent: int = 0,
166
+ show_context: bool = True,
167
+ ) -> str:
168
+ """Render a single TaskMetadata block as HTML.
169
+
170
+ Args:
171
+ metadata: The TaskMetadata to render.
172
+ indent: Number of spaces to indent (0 for main, 2 for sub-agents).
173
+ show_context: Whether to show context usage percent.
174
+
175
+ Returns:
176
+ HTML string for this metadata block.
177
+ """
164
178
  parts: list[str] = []
165
179
 
166
- model_parts = [f'<span class="metadata-model">{_escape_html(item.model_name)}</span>']
167
- if item.provider:
168
- provider = _escape_html(item.provider.lower().replace(" ", "-"))
180
+ # Model Name [@ Provider]
181
+ model_parts = [f'<span class="metadata-model">{_escape_html(metadata.model_name)}</span>']
182
+ if metadata.provider:
183
+ provider = _escape_html(metadata.provider.lower().replace(" ", "-"))
169
184
  model_parts.append(f'<span class="metadata-provider">@{provider}</span>')
170
185
 
171
186
  parts.append("".join(model_parts))
172
187
 
173
188
  # Stats
174
- if item.usage:
175
- u = item.usage
189
+ if metadata.usage:
190
+ u = metadata.usage
176
191
  # Input with cost
177
192
  input_stat = f"input: {_format_token_count(u.input_tokens)}"
178
193
  if u.input_cost is not None:
@@ -194,22 +209,39 @@ def _render_metadata_item(item: model.ResponseMetadataItem) -> str:
194
209
 
195
210
  if u.reasoning_tokens > 0:
196
211
  parts.append(f'<span class="metadata-stat">thinking: {_format_token_count(u.reasoning_tokens)}</span>')
197
- if u.context_usage_percent is not None:
212
+ if show_context and u.context_usage_percent is not None:
198
213
  parts.append(f'<span class="metadata-stat">context: {u.context_usage_percent:.1f}%</span>')
199
214
  if u.throughput_tps is not None:
200
215
  parts.append(f'<span class="metadata-stat">tps: {u.throughput_tps:.1f}</span>')
201
216
 
202
- if item.task_duration_s is not None:
203
- parts.append(f'<span class="metadata-stat">time: {item.task_duration_s:.1f}s</span>')
217
+ if metadata.task_duration_s is not None:
218
+ parts.append(f'<span class="metadata-stat">time: {metadata.task_duration_s:.1f}s</span>')
204
219
 
205
220
  # Total cost
206
- if item.usage is not None and item.usage.total_cost is not None:
207
- parts.append(f'<span class="metadata-stat">cost: {_format_cost(item.usage.total_cost, item.usage.currency)}</span>')
221
+ if metadata.usage is not None and metadata.usage.total_cost is not None:
222
+ parts.append(
223
+ f'<span class="metadata-stat">cost: {_format_cost(metadata.usage.total_cost, metadata.usage.currency)}</span>'
224
+ )
208
225
 
209
226
  divider = '<span class="metadata-divider">/</span>'
210
227
  joined_html = divider.join(parts)
211
228
 
212
- return f'<div class="response-metadata"><div class="metadata-line">{joined_html}</div></div>'
229
+ indent_style = f' style="padding-left: {indent}em;"' if indent > 0 else ""
230
+ return f'<div class="metadata-line"{indent_style}>{joined_html}</div>'
231
+
232
+
233
+ def _render_metadata_item(item: model.TaskMetadataItem) -> str:
234
+ """Render TaskMetadataItem including main agent and sub-agents."""
235
+ lines: list[str] = []
236
+
237
+ # Main agent metadata
238
+ lines.append(_render_single_metadata(item.main, indent=0, show_context=True))
239
+
240
+ # Sub-agent metadata with indent
241
+ for sub in item.sub_agent_task_metadata:
242
+ lines.append(_render_single_metadata(sub, indent=1, show_context=False))
243
+
244
+ return f'<div class="response-metadata">{"".join(lines)}</div>'
213
245
 
214
246
 
215
247
  def _render_assistant_message(index: int, content: str, timestamp: datetime) -> str:
@@ -336,11 +368,9 @@ def _render_diff_block(diff: str) -> str:
336
368
 
337
369
 
338
370
  def _get_diff_text(ui_extra: model.ToolResultUIExtra | None) -> str | None:
339
- if ui_extra is None:
340
- return None
341
- if ui_extra.type != model.ToolResultUIExtraType.DIFF_TEXT:
342
- return None
343
- return ui_extra.diff_text
371
+ if isinstance(ui_extra, model.DiffTextUIExtra):
372
+ return ui_extra.diff_text
373
+ return None
344
374
 
345
375
 
346
376
  def _get_mermaid_link_html(
@@ -355,9 +385,7 @@ def _get_mermaid_link_html(
355
385
  else:
356
386
  code = ""
357
387
 
358
- if not code and (
359
- ui_extra is None or ui_extra.type != model.ToolResultUIExtraType.MERMAID_LINK or not ui_extra.mermaid_link
360
- ):
388
+ if not code and not isinstance(ui_extra, model.MermaidLinkUIExtra):
361
389
  return None
362
390
 
363
391
  # Prepare code for rendering and copy
@@ -376,11 +404,7 @@ def _get_mermaid_link_html(
376
404
  f'<button type="button" class="copy-mermaid-btn" data-code="{escaped_code}" title="Copy Mermaid Code">Copy Code</button>'
377
405
  )
378
406
 
379
- link = (
380
- ui_extra.mermaid_link.link
381
- if (ui_extra and ui_extra.type == model.ToolResultUIExtraType.MERMAID_LINK and ui_extra.mermaid_link)
382
- else None
383
- )
407
+ link = ui_extra.link if isinstance(ui_extra, model.MermaidLinkUIExtra) else None
384
408
 
385
409
  if link:
386
410
  link_url = _escape_html(link)
@@ -544,7 +568,7 @@ def _build_messages_html(
544
568
  elif isinstance(item, model.AssistantMessageItem):
545
569
  assistant_counter += 1
546
570
  blocks.append(_render_assistant_message(assistant_counter, item.content or "", item.created_at))
547
- elif isinstance(item, model.ResponseMetadataItem):
571
+ elif isinstance(item, model.TaskMetadataItem):
548
572
  blocks.append(_render_metadata_item(item))
549
573
  elif isinstance(item, model.DeveloperMessageItem):
550
574
  content = _escape_html(item.content or "")
@@ -5,7 +5,7 @@ from collections.abc import Iterable, Sequence
5
5
  from pathlib import Path
6
6
  from typing import ClassVar
7
7
 
8
- from pydantic import BaseModel, Field
8
+ from pydantic import BaseModel, Field, PrivateAttr
9
9
 
10
10
  from klaude_code.protocol import events, model
11
11
 
@@ -19,8 +19,6 @@ class Session(BaseModel):
19
19
  file_tracker: dict[str, float] = Field(default_factory=dict)
20
20
  # Todo list for the session
21
21
  todos: list[model.TodoItem] = Field(default_factory=list) # pyright: ignore[reportUnknownVariableType]
22
- # Messages count, redundant state for performance optimization to avoid reading entire jsonl file
23
- messages_count: int = Field(default=0)
24
22
  # Model name used for this session
25
23
  # Used in list method SessionMetaBrief
26
24
  model_name: str | None = None
@@ -33,6 +31,27 @@ class Session(BaseModel):
33
31
  need_todo_empty_cooldown_counter: int = Field(exclude=True, default=0)
34
32
  need_todo_not_used_cooldown_counter: int = Field(exclude=True, default=0)
35
33
 
34
+ # Cached messages count (computed property)
35
+ _messages_count_cache: int | None = PrivateAttr(default=None)
36
+
37
+ @property
38
+ def messages_count(self) -> int:
39
+ """Count of user and assistant messages in conversation history.
40
+
41
+ This is a cached property that is invalidated when append_history is called.
42
+ """
43
+ if self._messages_count_cache is None:
44
+ self._messages_count_cache = sum(
45
+ 1
46
+ for it in self.conversation_history
47
+ if isinstance(it, (model.UserMessageItem, model.AssistantMessageItem))
48
+ )
49
+ return self._messages_count_cache
50
+
51
+ def _invalidate_messages_count_cache(self) -> None:
52
+ """Invalidate the cached messages count."""
53
+ self._messages_count_cache = None
54
+
36
55
  # Internal: mapping for (de)serialization of conversation items
37
56
  _TypeMap: ClassVar[dict[str, type[BaseModel]]] = {
38
57
  # Messages
@@ -50,7 +69,7 @@ class Session(BaseModel):
50
69
  "AssistantMessageDelta": model.AssistantMessageDelta,
51
70
  "StartItem": model.StartItem,
52
71
  "StreamErrorItem": model.StreamErrorItem,
53
- "ResponseMetadataItem": model.ResponseMetadataItem,
72
+ "TaskMetadataItem": model.TaskMetadataItem,
54
73
  "InterruptItem": model.InterruptItem,
55
74
  }
56
75
 
@@ -109,7 +128,6 @@ class Session(BaseModel):
109
128
  loaded_memory = list(raw.get("loaded_memory", []))
110
129
  created_at = float(raw.get("created_at", time.time()))
111
130
  updated_at = float(raw.get("updated_at", created_at))
112
- messages_count = int(raw.get("messages_count", 0))
113
131
  model_name = raw.get("model_name")
114
132
 
115
133
  sess = Session(
@@ -121,7 +139,6 @@ class Session(BaseModel):
121
139
  loaded_memory=loaded_memory,
122
140
  created_at=created_at,
123
141
  updated_at=updated_at,
124
- messages_count=messages_count,
125
142
  model_name=model_name,
126
143
  )
127
144
 
@@ -154,10 +171,7 @@ class Session(BaseModel):
154
171
  # Best-effort load; skip malformed lines
155
172
  continue
156
173
  sess.conversation_history = history
157
- # Update messages count based on loaded history (only UserMessageItem and AssistantMessageItem)
158
- sess.messages_count = sum(
159
- 1 for it in history if isinstance(it, (model.UserMessageItem, model.AssistantMessageItem))
160
- )
174
+ # messages_count is now a computed property, no need to set it
161
175
 
162
176
  return sess
163
177
 
@@ -190,10 +204,8 @@ class Session(BaseModel):
190
204
  def append_history(self, items: Sequence[model.ConversationItem]):
191
205
  # Append to in-memory history
192
206
  self.conversation_history.extend(items)
193
- # Update messages count (only UserMessageItem and AssistantMessageItem)
194
- self.messages_count += sum(
195
- 1 for it in items if isinstance(it, (model.UserMessageItem, model.AssistantMessageItem))
196
- )
207
+ # Invalidate messages count cache
208
+ self._invalidate_messages_count_cache()
197
209
 
198
210
  # Incrementally persist to JSONL under messages directory
199
211
  messages_dir = self._messages_dir()
@@ -295,8 +307,8 @@ class Session(BaseModel):
295
307
  content=ri.content,
296
308
  session_id=self.id,
297
309
  )
298
- case model.ResponseMetadataItem() as mt:
299
- yield events.ResponseMetadataEvent(
310
+ case model.TaskMetadataItem() as mt:
311
+ yield events.TaskMetadataEvent(
300
312
  session_id=self.id,
301
313
  metadata=mt,
302
314
  )
@@ -21,7 +21,7 @@
21
21
  rel="stylesheet"
22
22
  />
23
23
  <link
24
- href="https://fonts.googleapis.com/css2?family=IBM+Plex+Mono:wght@400;500;700&family=IBM+Plex+Sans:wght@400;500;700&display=swap"
24
+ href="https://fonts.googleapis.com/css2?family=IBM+Plex+Mono:ital,wght@0,400;0,500;0,700;1,400;1,500;1,700&family=IBM+Plex+Sans:ital,wght@0,400;0,500;0,700;1,400;1,500;1,700&display=swap"
25
25
  rel="stylesheet"
26
26
  />
27
27
  <style>
@@ -411,6 +411,9 @@
411
411
  font-size: var(--font-size-xs);
412
412
  color: var(--text-dim);
413
413
  border-left: 2px solid transparent;
414
+ display: flex;
415
+ flex-direction: column;
416
+ gap: 8px;
414
417
  }
415
418
  .metadata-line {
416
419
  display: flex;
@@ -22,11 +22,7 @@ def build_repl_status_snapshot(agent: "Agent | None", update_message: str | None
22
22
  tool_calls = 0
23
23
 
24
24
  if agent is not None:
25
- profile = agent.profile
26
- if profile is not None:
27
- model_name = profile.llm_client.model_name or ""
28
- else:
29
- model_name = "N/A"
25
+ model_name = agent.profile.llm_client.model_name or ""
30
26
 
31
27
  history = agent.session.conversation_history
32
28
  for item in history:
@@ -1,5 +1,6 @@
1
1
  from __future__ import annotations
2
2
 
3
+ from dataclasses import dataclass
3
4
  from typing import Awaitable, Callable
4
5
 
5
6
  from rich.text import Text
@@ -14,34 +15,130 @@ from klaude_code.ui.terminal.progress_bar import OSC94States, emit_osc94
14
15
  from klaude_code.ui.utils.debouncer import Debouncer
15
16
 
16
17
 
18
+ @dataclass
19
+ class ActiveStream:
20
+ """Active streaming state containing buffer and markdown renderer.
21
+
22
+ This represents an active streaming session where content is being
23
+ accumulated in a buffer and rendered via MarkdownStream.
24
+ When streaming ends, this object is replaced with None.
25
+ """
26
+
27
+ buffer: str
28
+ mdstream: MarkdownStream
29
+
30
+ def append(self, content: str) -> None:
31
+ self.buffer += content
32
+
33
+
17
34
  class StreamState:
35
+ """Manages assistant message streaming state.
36
+
37
+ The streaming state is either:
38
+ - None: No active stream
39
+ - ActiveStream: Active streaming with buffer and markdown renderer
40
+
41
+ This design ensures buffer and mdstream are always in sync.
42
+ """
43
+
18
44
  def __init__(self, interval: float, flush_handler: Callable[["StreamState"], Awaitable[None]]):
19
- self.buffer: str = ""
20
- self.mdstream: MarkdownStream | None = None
45
+ self._active: ActiveStream | None = None
21
46
  self._flush_handler = flush_handler
22
47
  self.debouncer = Debouncer(interval=interval, callback=self._debounced_flush)
23
48
 
24
49
  async def _debounced_flush(self) -> None:
25
50
  await self._flush_handler(self)
26
51
 
52
+ @property
53
+ def is_active(self) -> bool:
54
+ return self._active is not None
55
+
56
+ @property
57
+ def buffer(self) -> str:
58
+ return self._active.buffer if self._active else ""
59
+
60
+ @property
61
+ def mdstream(self) -> MarkdownStream | None:
62
+ return self._active.mdstream if self._active else None
63
+
64
+ def start(self, mdstream: MarkdownStream) -> None:
65
+ """Start a new streaming session."""
66
+ self._active = ActiveStream(buffer="", mdstream=mdstream)
67
+
27
68
  def append(self, content: str) -> None:
28
- self.buffer += content
69
+ """Append content to the buffer."""
70
+ if self._active:
71
+ self._active.append(content)
72
+
73
+ def finish(self) -> None:
74
+ """End the current streaming session."""
75
+ self._active = None
76
+
77
+
78
+ class ActivityState:
79
+ """Represents the current activity state for spinner display.
80
+
81
+ This is a discriminated union where the state is either:
82
+ - None (thinking/idle)
83
+ - Composing (assistant is streaming text)
84
+ - ToolCalls (one or more tool calls in progress)
85
+
86
+ Composing and ToolCalls are mutually exclusive - when tool calls start,
87
+ composing state is automatically cleared.
88
+ """
89
+
90
+ def __init__(self) -> None:
91
+ self._composing: bool = False
92
+ self._tool_calls: dict[str, int] = {}
93
+
94
+ @property
95
+ def is_composing(self) -> bool:
96
+ return self._composing and not self._tool_calls
97
+
98
+ @property
99
+ def has_tool_calls(self) -> bool:
100
+ return bool(self._tool_calls)
101
+
102
+ def set_composing(self, composing: bool) -> None:
103
+ self._composing = composing
104
+
105
+ def add_tool_call(self, tool_name: str) -> None:
106
+ self._tool_calls[tool_name] = self._tool_calls.get(tool_name, 0) + 1
29
107
 
30
- def clear(self) -> None:
31
- self.buffer = ""
108
+ def clear_tool_calls(self) -> None:
109
+ self._tool_calls = {}
110
+
111
+ def reset(self) -> None:
112
+ self._composing = False
113
+ self._tool_calls = {}
114
+
115
+ def get_activity_text(self) -> Text | None:
116
+ """Get activity text for display. Returns None if idle/thinking."""
117
+ if self._tool_calls:
118
+ activity_text = Text()
119
+ first = True
120
+ for name, count in self._tool_calls.items():
121
+ if not first:
122
+ activity_text.append(", ")
123
+ activity_text.append(name, style="bold")
124
+ if count > 1:
125
+ activity_text.append(f" x {count}")
126
+ first = False
127
+ return activity_text
128
+ if self._composing:
129
+ return Text("Composing")
130
+ return None
32
131
 
33
132
 
34
133
  class SpinnerStatusState:
35
134
  """Multi-layer spinner status state management.
36
135
 
37
- Layers (from low to high priority):
136
+ Composed of two independent layers:
38
137
  - base_status: Set by TodoChange, persistent within a turn
39
- - composing: True when assistant is streaming text
40
- - tool_calls: Accumulated from ToolCallStart, cleared at turn start
138
+ - activity: Current activity (composing or tool_calls), mutually exclusive
41
139
 
42
140
  Display logic:
43
- - If tool_calls: show base + tool_calls (composing is hidden)
44
- - Elif composing: show base + "Composing"
141
+ - If activity: show base + activity (if base exists) or activity + "..."
45
142
  - Elif base_status: show base_status
46
143
  - Else: show "Thinking …"
47
144
  """
@@ -50,14 +147,12 @@ class SpinnerStatusState:
50
147
 
51
148
  def __init__(self) -> None:
52
149
  self._base_status: str | None = None
53
- self._composing: bool = False
54
- self._tool_calls: dict[str, int] = {}
150
+ self._activity = ActivityState()
55
151
 
56
152
  def reset(self) -> None:
57
153
  """Reset all layers."""
58
154
  self._base_status = None
59
- self._composing = False
60
- self._tool_calls = {}
155
+ self._activity.reset()
61
156
 
62
157
  def set_base_status(self, status: str | None) -> None:
63
158
  """Set base status from TodoChange."""
@@ -65,37 +160,23 @@ class SpinnerStatusState:
65
160
 
66
161
  def set_composing(self, composing: bool) -> None:
67
162
  """Set composing state when assistant is streaming."""
68
- self._composing = composing
163
+ self._activity.set_composing(composing)
69
164
 
70
165
  def add_tool_call(self, tool_name: str) -> None:
71
166
  """Add a tool call to the accumulator."""
72
- self._tool_calls[tool_name] = self._tool_calls.get(tool_name, 0) + 1
167
+ self._activity.add_tool_call(tool_name)
73
168
 
74
169
  def clear_tool_calls(self) -> None:
75
- """Clear tool calls and composing state."""
76
- self._tool_calls = {}
170
+ """Clear tool calls."""
171
+ self._activity.clear_tool_calls()
77
172
 
78
173
  def clear_for_new_turn(self) -> None:
79
- """Clear tool calls and composing state for a new turn."""
80
- self._tool_calls = {}
81
- self._composing = False
174
+ """Clear activity state for a new turn."""
175
+ self._activity.reset()
82
176
 
83
177
  def get_status(self) -> Text:
84
178
  """Get current spinner status as rich Text."""
85
- # Build activity text (tool_calls or composing)
86
- activity_text: Text | None = None
87
- if self._tool_calls:
88
- activity_text = Text()
89
- first = True
90
- for name, count in self._tool_calls.items():
91
- if not first:
92
- activity_text.append(", ")
93
- activity_text.append(name, style="bold")
94
- if count > 1:
95
- activity_text.append(f" × {count}")
96
- first = False
97
- elif self._composing:
98
- activity_text = Text("Composing")
179
+ activity_text = self._activity.get_activity_text()
99
180
 
100
181
  if self._base_status:
101
182
  result = Text(self._base_status)
@@ -151,12 +232,14 @@ class DisplayEventHandler:
151
232
  await self._on_tool_call(e)
152
233
  case events.ToolResultEvent() as e:
153
234
  await self._on_tool_result(e)
154
- case events.ResponseMetadataEvent() as e:
155
- self._on_response_metadata(e)
235
+ case events.TaskMetadataEvent() as e:
236
+ self._on_task_metadata(e)
156
237
  case events.TodoChangeEvent() as e:
157
238
  self._on_todo_change(e)
158
239
  case events.TurnEndEvent():
159
240
  pass
241
+ case events.ResponseMetadataEvent():
242
+ pass # Internal event, not displayed
160
243
  case events.TaskFinishEvent() as e:
161
244
  await self._on_task_finish(e)
162
245
  case events.InterruptEvent() as e:
@@ -212,12 +295,12 @@ class DisplayEventHandler:
212
295
  return
213
296
  if len(event.content.strip()) == 0 and self.stage_manager.current_stage != Stage.ASSISTANT:
214
297
  return
215
- first_delta = self.assistant_stream.mdstream is None
298
+ first_delta = not self.assistant_stream.is_active
216
299
  if first_delta:
217
300
  self.spinner_status.set_composing(True)
218
301
  self.spinner_status.clear_tool_calls()
219
302
  self._update_spinner()
220
- self.assistant_stream.mdstream = MarkdownStream(
303
+ mdstream = MarkdownStream(
221
304
  mdargs={"code_theme": self.renderer.themes.code_theme},
222
305
  theme=self.renderer.themes.markdown_theme,
223
306
  console=self.renderer.console,
@@ -225,6 +308,7 @@ class DisplayEventHandler:
225
308
  mark="➤",
226
309
  indent=2,
227
310
  )
311
+ self.assistant_stream.start(mdstream)
228
312
  self.assistant_stream.append(event.content)
229
313
  if first_delta and self.assistant_stream.mdstream is not None:
230
314
  # Stop spinner and immediately start MarkdownStream's Live
@@ -239,13 +323,14 @@ class DisplayEventHandler:
239
323
  if self.renderer.is_sub_agent_session(event.session_id):
240
324
  return
241
325
  await self.stage_manager.transition_to(Stage.ASSISTANT)
242
- if self.assistant_stream.mdstream is not None:
326
+ if self.assistant_stream.is_active:
243
327
  self.assistant_stream.debouncer.cancel()
244
- self.assistant_stream.mdstream.update(event.content.strip(), final=True)
328
+ mdstream = self.assistant_stream.mdstream
329
+ assert mdstream is not None
330
+ mdstream.update(event.content.strip(), final=True)
245
331
  else:
246
332
  self.renderer.display_assistant_message(event.content)
247
- self.assistant_stream.clear()
248
- self.assistant_stream.mdstream = None
333
+ self.assistant_stream.finish()
249
334
  self.spinner_status.set_composing(False)
250
335
  self._update_spinner()
251
336
  await self.stage_manager.transition_to(Stage.WAITING)
@@ -269,8 +354,8 @@ class DisplayEventHandler:
269
354
  await self.stage_manager.transition_to(Stage.TOOL_RESULT)
270
355
  self.renderer.display_tool_call_result(event)
271
356
 
272
- def _on_response_metadata(self, event: events.ResponseMetadataEvent) -> None:
273
- self.renderer.display_response_metadata(event)
357
+ def _on_task_metadata(self, event: events.TaskMetadataEvent) -> None:
358
+ self.renderer.display_task_metadata(event)
274
359
 
275
360
  def _on_todo_change(self, event: events.TodoChangeEvent) -> None:
276
361
  active_form_status_text = self._extract_active_form_text(event)
@@ -314,11 +399,12 @@ class DisplayEventHandler:
314
399
  # ─────────────────────────────────────────────────────────────────────────────
315
400
 
316
401
  async def _finish_assistant_stream(self) -> None:
317
- if self.assistant_stream.mdstream is not None:
402
+ if self.assistant_stream.is_active:
318
403
  self.assistant_stream.debouncer.cancel()
319
- self.assistant_stream.mdstream.update(self.assistant_stream.buffer, final=True)
320
- self.assistant_stream.mdstream = None
321
- self.assistant_stream.clear()
404
+ mdstream = self.assistant_stream.mdstream
405
+ assert mdstream is not None
406
+ mdstream.update(self.assistant_stream.buffer, final=True)
407
+ self.assistant_stream.finish()
322
408
 
323
409
  def _print_thinking_prefix(self) -> None:
324
410
  self.renderer.display_thinking_prefix()
@@ -328,8 +414,10 @@ class DisplayEventHandler:
328
414
  self.renderer.spinner_update(self.spinner_status.get_status())
329
415
 
330
416
  async def _flush_assistant_buffer(self, state: StreamState) -> None:
331
- if state.mdstream is not None:
332
- state.mdstream.update(state.buffer)
417
+ if state.is_active:
418
+ mdstream = state.mdstream
419
+ assert mdstream is not None
420
+ mdstream.update(state.buffer)
333
421
 
334
422
  def _maybe_notify_task_finish(self, event: events.TaskFinishEvent) -> None:
335
423
  if self.notifier is None:
@@ -360,8 +448,19 @@ class DisplayEventHandler:
360
448
  status_text = ""
361
449
  for todo in todo_event.todos:
362
450
  if todo.status == "in_progress":
363
- if len(todo.activeForm) > 0:
364
- status_text = todo.activeForm
451
+ if len(todo.active_form) > 0:
452
+ status_text = todo.active_form
365
453
  if len(todo.content) > 0:
366
454
  status_text = todo.content
367
- return status_text.replace("\n", "")
455
+ status_text = status_text.replace("\n", "")
456
+ return self._truncate_status_text(status_text, max_length=30)
457
+
458
+ def _truncate_status_text(self, text: str, max_length: int) -> str:
459
+ """Truncate text to max_length while preserving complete words."""
460
+ if len(text) <= max_length:
461
+ return text
462
+ truncated = text[:max_length]
463
+ last_space = truncated.rfind(" ")
464
+ if last_space > 0:
465
+ return truncated[:last_space] + "..."
466
+ return truncated + "..."
@@ -184,8 +184,8 @@ class REPLRenderer:
184
184
  self.display_tool_call(tool_call_event)
185
185
  tool_call_dict.pop(tool_result_event.tool_call_id, None)
186
186
  self.display_tool_call_result(tool_result_event)
187
- case events.ResponseMetadataEvent() as metadata_event:
188
- self.print(r_metadata.render_response_metadata(metadata_event))
187
+ case events.TaskMetadataEvent() as metadata_event:
188
+ self.print(r_metadata.render_task_metadata(metadata_event))
189
189
  self.print()
190
190
  case events.InterruptEvent():
191
191
  self.print()
@@ -233,9 +233,9 @@ class REPLRenderer:
233
233
  self.print(renderable)
234
234
  self.print()
235
235
 
236
- def display_response_metadata(self, event: events.ResponseMetadataEvent) -> None:
236
+ def display_task_metadata(self, event: events.TaskMetadataEvent) -> None:
237
237
  with self.session_print_context(event.session_id):
238
- self.print(r_metadata.render_response_metadata(event))
238
+ self.print(r_metadata.render_task_metadata(event))
239
239
  self.print()
240
240
 
241
241
  def display_task_finish(self, event: events.TaskFinishEvent) -> None: