glaip-sdk 0.0.7__py3-none-any.whl → 0.6.5b6__py3-none-any.whl

This diff compares the contents of two publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the packages as they appear in the public registry.
Files changed (161)
  1. glaip_sdk/__init__.py +6 -3
  2. glaip_sdk/_version.py +12 -5
  3. glaip_sdk/agents/__init__.py +27 -0
  4. glaip_sdk/agents/base.py +1126 -0
  5. glaip_sdk/branding.py +79 -15
  6. glaip_sdk/cli/account_store.py +540 -0
  7. glaip_sdk/cli/agent_config.py +2 -6
  8. glaip_sdk/cli/auth.py +699 -0
  9. glaip_sdk/cli/commands/__init__.py +2 -2
  10. glaip_sdk/cli/commands/accounts.py +746 -0
  11. glaip_sdk/cli/commands/agents.py +503 -183
  12. glaip_sdk/cli/commands/common_config.py +101 -0
  13. glaip_sdk/cli/commands/configure.py +774 -137
  14. glaip_sdk/cli/commands/mcps.py +1124 -181
  15. glaip_sdk/cli/commands/models.py +25 -10
  16. glaip_sdk/cli/commands/tools.py +144 -92
  17. glaip_sdk/cli/commands/transcripts.py +755 -0
  18. glaip_sdk/cli/commands/update.py +61 -0
  19. glaip_sdk/cli/config.py +95 -0
  20. glaip_sdk/cli/constants.py +38 -0
  21. glaip_sdk/cli/context.py +150 -0
  22. glaip_sdk/cli/core/__init__.py +79 -0
  23. glaip_sdk/cli/core/context.py +124 -0
  24. glaip_sdk/cli/core/output.py +846 -0
  25. glaip_sdk/cli/core/prompting.py +649 -0
  26. glaip_sdk/cli/core/rendering.py +187 -0
  27. glaip_sdk/cli/display.py +143 -53
  28. glaip_sdk/cli/hints.py +57 -0
  29. glaip_sdk/cli/io.py +24 -18
  30. glaip_sdk/cli/main.py +420 -145
  31. glaip_sdk/cli/masking.py +136 -0
  32. glaip_sdk/cli/mcp_validators.py +287 -0
  33. glaip_sdk/cli/pager.py +266 -0
  34. glaip_sdk/cli/parsers/__init__.py +7 -0
  35. glaip_sdk/cli/parsers/json_input.py +177 -0
  36. glaip_sdk/cli/resolution.py +28 -21
  37. glaip_sdk/cli/rich_helpers.py +27 -0
  38. glaip_sdk/cli/slash/__init__.py +15 -0
  39. glaip_sdk/cli/slash/accounts_controller.py +500 -0
  40. glaip_sdk/cli/slash/accounts_shared.py +75 -0
  41. glaip_sdk/cli/slash/agent_session.py +282 -0
  42. glaip_sdk/cli/slash/prompt.py +245 -0
  43. glaip_sdk/cli/slash/remote_runs_controller.py +566 -0
  44. glaip_sdk/cli/slash/session.py +1679 -0
  45. glaip_sdk/cli/slash/tui/__init__.py +9 -0
  46. glaip_sdk/cli/slash/tui/accounts.tcss +86 -0
  47. glaip_sdk/cli/slash/tui/accounts_app.py +872 -0
  48. glaip_sdk/cli/slash/tui/background_tasks.py +72 -0
  49. glaip_sdk/cli/slash/tui/loading.py +58 -0
  50. glaip_sdk/cli/slash/tui/remote_runs_app.py +628 -0
  51. glaip_sdk/cli/transcript/__init__.py +31 -0
  52. glaip_sdk/cli/transcript/cache.py +536 -0
  53. glaip_sdk/cli/transcript/capture.py +329 -0
  54. glaip_sdk/cli/transcript/export.py +38 -0
  55. glaip_sdk/cli/transcript/history.py +815 -0
  56. glaip_sdk/cli/transcript/launcher.py +77 -0
  57. glaip_sdk/cli/transcript/viewer.py +372 -0
  58. glaip_sdk/cli/update_notifier.py +290 -0
  59. glaip_sdk/cli/utils.py +247 -1238
  60. glaip_sdk/cli/validators.py +16 -18
  61. glaip_sdk/client/__init__.py +2 -1
  62. glaip_sdk/client/_agent_payloads.py +520 -0
  63. glaip_sdk/client/agent_runs.py +147 -0
  64. glaip_sdk/client/agents.py +940 -574
  65. glaip_sdk/client/base.py +163 -48
  66. glaip_sdk/client/main.py +35 -12
  67. glaip_sdk/client/mcps.py +126 -18
  68. glaip_sdk/client/run_rendering.py +415 -0
  69. glaip_sdk/client/shared.py +21 -0
  70. glaip_sdk/client/tools.py +195 -37
  71. glaip_sdk/client/validators.py +20 -48
  72. glaip_sdk/config/constants.py +15 -5
  73. glaip_sdk/exceptions.py +16 -9
  74. glaip_sdk/icons.py +25 -0
  75. glaip_sdk/mcps/__init__.py +21 -0
  76. glaip_sdk/mcps/base.py +345 -0
  77. glaip_sdk/models/__init__.py +90 -0
  78. glaip_sdk/models/agent.py +47 -0
  79. glaip_sdk/models/agent_runs.py +116 -0
  80. glaip_sdk/models/common.py +42 -0
  81. glaip_sdk/models/mcp.py +33 -0
  82. glaip_sdk/models/tool.py +33 -0
  83. glaip_sdk/payload_schemas/__init__.py +7 -0
  84. glaip_sdk/payload_schemas/agent.py +85 -0
  85. glaip_sdk/registry/__init__.py +55 -0
  86. glaip_sdk/registry/agent.py +164 -0
  87. glaip_sdk/registry/base.py +139 -0
  88. glaip_sdk/registry/mcp.py +253 -0
  89. glaip_sdk/registry/tool.py +231 -0
  90. glaip_sdk/rich_components.py +98 -2
  91. glaip_sdk/runner/__init__.py +59 -0
  92. glaip_sdk/runner/base.py +84 -0
  93. glaip_sdk/runner/deps.py +115 -0
  94. glaip_sdk/runner/langgraph.py +597 -0
  95. glaip_sdk/runner/mcp_adapter/__init__.py +13 -0
  96. glaip_sdk/runner/mcp_adapter/base_mcp_adapter.py +43 -0
  97. glaip_sdk/runner/mcp_adapter/langchain_mcp_adapter.py +158 -0
  98. glaip_sdk/runner/mcp_adapter/mcp_config_builder.py +95 -0
  99. glaip_sdk/runner/tool_adapter/__init__.py +18 -0
  100. glaip_sdk/runner/tool_adapter/base_tool_adapter.py +44 -0
  101. glaip_sdk/runner/tool_adapter/langchain_tool_adapter.py +177 -0
  102. glaip_sdk/tools/__init__.py +22 -0
  103. glaip_sdk/tools/base.py +435 -0
  104. glaip_sdk/utils/__init__.py +59 -13
  105. glaip_sdk/utils/a2a/__init__.py +34 -0
  106. glaip_sdk/utils/a2a/event_processor.py +188 -0
  107. glaip_sdk/utils/agent_config.py +53 -40
  108. glaip_sdk/utils/bundler.py +267 -0
  109. glaip_sdk/utils/client.py +111 -0
  110. glaip_sdk/utils/client_utils.py +58 -26
  111. glaip_sdk/utils/datetime_helpers.py +58 -0
  112. glaip_sdk/utils/discovery.py +78 -0
  113. glaip_sdk/utils/display.py +65 -32
  114. glaip_sdk/utils/export.py +143 -0
  115. glaip_sdk/utils/general.py +1 -36
  116. glaip_sdk/utils/import_export.py +20 -25
  117. glaip_sdk/utils/import_resolver.py +492 -0
  118. glaip_sdk/utils/instructions.py +101 -0
  119. glaip_sdk/utils/rendering/__init__.py +115 -1
  120. glaip_sdk/utils/rendering/formatting.py +85 -43
  121. glaip_sdk/utils/rendering/layout/__init__.py +64 -0
  122. glaip_sdk/utils/rendering/{renderer → layout}/panels.py +51 -19
  123. glaip_sdk/utils/rendering/layout/progress.py +202 -0
  124. glaip_sdk/utils/rendering/layout/summary.py +74 -0
  125. glaip_sdk/utils/rendering/layout/transcript.py +606 -0
  126. glaip_sdk/utils/rendering/models.py +39 -7
  127. glaip_sdk/utils/rendering/renderer/__init__.py +9 -51
  128. glaip_sdk/utils/rendering/renderer/base.py +672 -759
  129. glaip_sdk/utils/rendering/renderer/config.py +4 -10
  130. glaip_sdk/utils/rendering/renderer/debug.py +75 -22
  131. glaip_sdk/utils/rendering/renderer/factory.py +138 -0
  132. glaip_sdk/utils/rendering/renderer/stream.py +13 -54
  133. glaip_sdk/utils/rendering/renderer/summary_window.py +79 -0
  134. glaip_sdk/utils/rendering/renderer/thinking.py +273 -0
  135. glaip_sdk/utils/rendering/renderer/toggle.py +182 -0
  136. glaip_sdk/utils/rendering/renderer/tool_panels.py +442 -0
  137. glaip_sdk/utils/rendering/renderer/transcript_mode.py +162 -0
  138. glaip_sdk/utils/rendering/state.py +204 -0
  139. glaip_sdk/utils/rendering/step_tree_state.py +100 -0
  140. glaip_sdk/utils/rendering/steps/__init__.py +34 -0
  141. glaip_sdk/utils/rendering/steps/event_processor.py +778 -0
  142. glaip_sdk/utils/rendering/steps/format.py +176 -0
  143. glaip_sdk/utils/rendering/steps/manager.py +387 -0
  144. glaip_sdk/utils/rendering/timing.py +36 -0
  145. glaip_sdk/utils/rendering/viewer/__init__.py +21 -0
  146. glaip_sdk/utils/rendering/viewer/presenter.py +184 -0
  147. glaip_sdk/utils/resource_refs.py +29 -26
  148. glaip_sdk/utils/runtime_config.py +422 -0
  149. glaip_sdk/utils/serialization.py +184 -51
  150. glaip_sdk/utils/sync.py +142 -0
  151. glaip_sdk/utils/tool_detection.py +33 -0
  152. glaip_sdk/utils/validation.py +21 -30
  153. {glaip_sdk-0.0.7.dist-info → glaip_sdk-0.6.5b6.dist-info}/METADATA +58 -12
  154. glaip_sdk-0.6.5b6.dist-info/RECORD +159 -0
  155. {glaip_sdk-0.0.7.dist-info → glaip_sdk-0.6.5b6.dist-info}/WHEEL +1 -1
  156. glaip_sdk/models.py +0 -250
  157. glaip_sdk/utils/rendering/renderer/progress.py +0 -118
  158. glaip_sdk/utils/rendering/steps.py +0 -232
  159. glaip_sdk/utils/rich_utils.py +0 -29
  160. glaip_sdk-0.0.7.dist-info/RECORD +0 -55
  161. {glaip_sdk-0.0.7.dist-info → glaip_sdk-0.6.5b6.dist-info}/entry_points.txt +0 -0
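The hunks that follow appear to come from glaip_sdk/utils/rendering/renderer/base.py (entry 128, +672 −759). The headline change across the release is structural: the monolithic glaip_sdk/models.py (entry 156, removed) gives way to a glaip_sdk/models/ package (entries 77–82), and the rendering helpers move from renderer/ into a new layout/ package (entries 121–125). A rough migration sketch using only module paths visible in this list; the Agent class name is an assumption, while the create_main_panel and get_spinner_char moves are confirmed by the import hunk below:

# 0.0.7 layout (left-hand side of this diff):
#   from glaip_sdk.models import Agent                                   # assumed export of the old models.py
#   from glaip_sdk.utils.rendering.renderer.panels import create_main_panel
#   from glaip_sdk.utils.rendering.renderer.progress import get_spinner
# 0.6.5b6 layout (right-hand side):
from glaip_sdk.models.agent import Agent  # assumption: class name mirrors the module
from glaip_sdk.utils.rendering.layout.panels import create_main_panel
from glaip_sdk.utils.rendering.layout.progress import get_spinner_char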
@@ -8,7 +8,7 @@ from __future__ import annotations
 
 import json
 import logging
-from dataclasses import dataclass
+from datetime import datetime, timezone
 from time import monotonic
 from typing import Any
 
@@ -16,52 +16,64 @@ from rich.console import Console as RichConsole
 from rich.console import Group
 from rich.live import Live
 from rich.markdown import Markdown
+from rich.spinner import Spinner
 from rich.text import Text
 
+from glaip_sdk.icons import ICON_AGENT, ICON_AGENT_STEP, ICON_DELEGATE, ICON_TOOL_STEP
 from glaip_sdk.rich_components import AIPPanel
 from glaip_sdk.utils.rendering.formatting import (
     format_main_title,
-    get_spinner_char,
     is_step_finished,
+    normalise_display_label,
 )
 from glaip_sdk.utils.rendering.models import RunStats, Step
-from glaip_sdk.utils.rendering.renderer.config import RendererConfig
-from glaip_sdk.utils.rendering.renderer.debug import render_debug_event
-from glaip_sdk.utils.rendering.renderer.panels import (
-    create_final_panel,
-    create_main_panel,
-    create_tool_panel,
-)
-from glaip_sdk.utils.rendering.renderer.progress import (
+from glaip_sdk.utils.rendering.layout.panels import create_main_panel
+from glaip_sdk.utils.rendering.layout.progress import (
+    build_progress_footer,
     format_elapsed_time,
-    format_tool_title,
     format_working_indicator,
-    get_spinner,
+    get_spinner_char,
     is_delegation_tool,
 )
+from glaip_sdk.utils.rendering.layout.summary import render_summary_panels
+from glaip_sdk.utils.rendering.layout.transcript import (
+    DEFAULT_TRANSCRIPT_THEME,
+    TranscriptSnapshot,
+    build_final_panel,
+    build_transcript_snapshot,
+    build_transcript_view,
+    extract_query_from_meta,
+    format_final_panel_title,
+)
+from glaip_sdk.utils.rendering.renderer.config import RendererConfig
+from glaip_sdk.utils.rendering.renderer.debug import render_debug_event
 from glaip_sdk.utils.rendering.renderer.stream import StreamProcessor
-from glaip_sdk.utils.rendering.steps import StepManager
+from glaip_sdk.utils.rendering.renderer.thinking import ThinkingScopeController
+from glaip_sdk.utils.rendering.renderer.tool_panels import ToolPanelController
+from glaip_sdk.utils.rendering.renderer.transcript_mode import TranscriptModeMixin
+from glaip_sdk.utils.rendering.state import (
+    RendererState,
+    TranscriptBuffer,
+    coerce_received_at,
+    truncate_display,
+)
+from glaip_sdk.utils.rendering.steps import (
+    StepManager,
+    format_step_label,
+)
+from glaip_sdk.utils.rendering.timing import coerce_server_time
+
+_NO_STEPS_TEXT = Text("No steps yet", style="dim")
 
 # Configure logger
 logger = logging.getLogger("glaip_sdk.run_renderer")
 
+# Constants
+RUNNING_STATUS_HINTS = {"running", "started", "pending", "working"}
+ARGS_VALUE_MAX_LEN = 160
 
-@dataclass
-class RendererState:
-    """Internal state for the renderer."""
-
-    buffer: list[str] = None
-    final_text: str = ""
-    streaming_started_at: float | None = None
-    printed_final_panel: bool = False
-    finalizing_ui: bool = False
-
-    def __post_init__(self) -> None:
-        if self.buffer is None:
-            self.buffer = []
 
-
-class RichStreamRenderer:
+class RichStreamRenderer(TranscriptModeMixin):
     """Live, modern terminal renderer for agent execution with rich visual output."""
 
     def __init__(
@@ -70,6 +82,8 @@ class RichStreamRenderer:
         *,
         cfg: RendererConfig | None = None,
         verbose: bool = False,
+        transcript_buffer: TranscriptBuffer | None = None,
+        callbacks: dict[str, Any] | None = None,
     ) -> None:
         """Initialize the renderer.
 
@@ -77,7 +91,10 @@ class RichStreamRenderer:
             console: Rich console instance
             cfg: Renderer configuration
             verbose: Whether to enable verbose mode
+            transcript_buffer: Optional transcript buffer for capturing output
+            callbacks: Optional dictionary of callback functions
         """
+        super().__init__()
         self.console = console or RichConsole()
         self.cfg = cfg or RendererConfig()
         self.verbose = verbose
@@ -85,19 +102,36 @@ class RichStreamRenderer:
         # Initialize components
         self.stream_processor = StreamProcessor()
         self.state = RendererState()
+        if transcript_buffer is not None:
+            self.state.buffer = transcript_buffer
+
+        self._callbacks = callbacks or {}
 
         # Initialize step manager and other state
-        self.steps = StepManager()
+        self.steps = StepManager(max_steps=self.cfg.summary_max_steps)
         # Live display instance (single source of truth)
         self.live: Live | None = None
+        self._step_spinners: dict[str, Spinner] = {}
+        self._last_steps_panel_template: Any | None = None
 
-        # Context and tool tracking
-        self.context_order: list[str] = []
-        self.context_parent: dict[str, str] = {}
-        self.tool_order: list[str] = []
-        self.context_panels: dict[str, list[str]] = {}
-        self.context_meta: dict[str, dict[str, Any]] = {}
-        self.tool_panels: dict[str, dict[str, Any]] = {}
+        # Tool tracking and thinking scopes
+        self._step_server_start_times: dict[str, float] = {}
+        self.tool_controller = ToolPanelController(
+            steps=self.steps,
+            stream_processor=self.stream_processor,
+            console=self.console,
+            cfg=self.cfg,
+            step_server_start_times=self._step_server_start_times,
+            output_prefix="**Output:**\n",
+        )
+        self.thinking_controller = ThinkingScopeController(
+            self.steps,
+            step_server_start_times=self._step_server_start_times,
+        )
+        self._root_agent_friendly: str | None = None
+        self._root_agent_step_id: str | None = None
+        self._root_query: str | None = None
+        self._root_query_attached: bool = False
 
         # Timing
         self._started_at: float | None = None
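Taken together, the constructor hunks above add two keyword-only options while keeping the old call shape working. A minimal construction sketch under stated assumptions: the class appears to live in glaip_sdk.utils.rendering.renderer.base (entry 128), and RendererConfig is usable with defaults, as the diff's own `cfg or RendererConfig()` suggests.

from rich.console import Console as RichConsole
from glaip_sdk.utils.rendering.renderer.base import RichStreamRenderer  # assumed module path
from glaip_sdk.utils.rendering.renderer.config import RendererConfig

# transcript_buffer and callbacks are the new keyword-only parameters (both default to None);
# omitting them reproduces the old behaviour, per the __init__ hunks above.
renderer = RichStreamRenderer(
    RichConsole(),
    cfg=RendererConfig(),
    verbose=False,
)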
@@ -105,11 +139,12 @@ class RichStreamRenderer:
         # Header/text
         self.header_text: str = ""
         # Track per-step server start times for accurate elapsed labels
-        self._step_server_start_times: dict[str, float] = {}
-
         # Output formatting constants
         self.OUTPUT_PREFIX: str = "**Output:**\n"
 
+        self._final_transcript_snapshot: TranscriptSnapshot | None = None
+        self._final_transcript_renderables: tuple[list[Any], list[Any]] | None = None
+
     def on_start(self, meta: dict[str, Any]) -> None:
         """Handle renderer start event."""
         if self.cfg.live:
@@ -118,7 +153,24 @@ class RichStreamRenderer:
 
         # Set up initial state
         self._started_at = monotonic()
-        self.stream_processor.streaming_started_at = self._started_at
+        try:
+            self.state.meta = json.loads(json.dumps(meta))
+        except Exception:
+            self.state.meta = dict(meta)
+
+        meta_payload = meta or {}
+        self.steps.set_root_agent(meta_payload.get("agent_id"))
+        self._root_agent_friendly = self._humanize_agent_slug(meta_payload.get("agent_name"))
+        self._root_query = truncate_display(
+            meta_payload.get("input_message")
+            or meta_payload.get("query")
+            or meta_payload.get("message")
+            or (meta_payload.get("meta") or {}).get("input_message")
+            or ""
+        )
+        if not self._root_query:
+            self._root_query = None
+        self._root_query_attached = False
 
         # Print compact header and user request (parity with old renderer)
         self._render_header(meta)
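The `json.loads(json.dumps(meta))` round trip in `on_start` is a cheap deep copy that also rejects non-serialisable values, which is why the hunk keeps a `dict(meta)` fallback. A standalone illustration of the pattern (plain Python, not SDK code):

import json
from time import monotonic  # any non-serialisable object will do

meta = {"agent_name": "demo", "nested": {"query": "hi"}}
copy_ = json.loads(json.dumps(meta))  # deep copy; mutating copy_["nested"] leaves meta untouched
try:
    json.loads(json.dumps({"fn": monotonic}))  # functions are not JSON-serialisable
except TypeError:
    shallow = dict({"fn": monotonic})  # fallback, mirroring `self.state.meta = dict(meta)` above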
@@ -138,7 +190,7 @@ class RichStreamRenderer:
 
     def _build_header_parts(self, meta: dict[str, Any]) -> list[str]:
         """Build header text parts from metadata."""
-        parts: list[str] = ["🤖"]
+        parts: list[str] = [ICON_AGENT]
         agent_name = meta.get("agent_name", "agent")
         if agent_name:
             parts.append(agent_name)
@@ -169,583 +221,535 @@ class RichStreamRenderer:
169
221
  except Exception:
170
222
  logger.exception("Failed to print header fallback")
171
223
 
224
+ def _build_user_query_panel(self, query: str) -> AIPPanel:
225
+ """Create the panel used to display the user request."""
226
+ return AIPPanel(
227
+ Markdown(f"**Query:** {query}"),
228
+ title="User Request",
229
+ border_style="#d97706",
230
+ padding=(0, 1),
231
+ )
232
+
172
233
  def _render_user_query(self, meta: dict[str, Any]) -> None:
173
234
  """Render the user query panel."""
174
- query = meta.get("input_message") or meta.get("query") or meta.get("message")
235
+ query = extract_query_from_meta(meta)
175
236
  if not query:
176
237
  return
238
+ self.console.print(self._build_user_query_panel(query))
239
+
240
+ def _render_summary_static_sections(self) -> None:
241
+ """Re-render header and user query when returning to summary mode."""
242
+ meta = getattr(self.state, "meta", None)
243
+ if meta:
244
+ self._render_header(meta)
245
+ elif self.header_text and not self._render_header_rule():
246
+ self._render_header_fallback()
177
247
 
178
- self.console.print(
179
- AIPPanel(
180
- Markdown(f"**Query:** {query}"),
181
- title="User Request",
182
- border_style="yellow",
183
- padding=(0, 1),
184
- )
185
- )
248
+ query = extract_query_from_meta(meta) or self._root_query
249
+ if query:
250
+ self.console.print(self._build_user_query_panel(query))
251
+
252
+ def _render_summary_after_transcript_toggle(self) -> None:
253
+ """Render the summary panel after leaving transcript mode."""
254
+ if self.state.finalizing_ui:
255
+ self._render_final_summary_panels()
256
+ elif self.live:
257
+ self._refresh_live_panels()
258
+ else:
259
+ self._render_static_summary_panels()
260
+
261
+ def _render_final_summary_panels(self) -> None:
262
+ """Render a static summary and disable live mode for final output."""
263
+ self.cfg.live = False
264
+ self.live = None
265
+ self._render_static_summary_panels()
266
+
267
+ def _render_static_summary_panels(self) -> None:
268
+ """Render the steps and main panels in a static (non-live) layout."""
269
+ summary_window = self._summary_window_size()
270
+ window_arg = summary_window if summary_window > 0 else None
271
+ status_overrides = self._build_step_status_overrides()
272
+ for renderable in render_summary_panels(
273
+ self.state,
274
+ self.steps,
275
+ summary_window=window_arg,
276
+ include_query_panel=False,
277
+ step_status_overrides=status_overrides,
278
+ ):
279
+ self.console.print(renderable)
280
+
281
+ def _ensure_streaming_started_baseline(self, timestamp: float) -> None:
282
+ """Synchronize streaming start state across renderer components."""
283
+ self.state.start_stream_timer(timestamp)
284
+ self.stream_processor.streaming_started_at = timestamp
285
+ self._started_at = timestamp
186
286
 
187
287
  def on_event(self, ev: dict[str, Any]) -> None:
188
288
  """Handle streaming events from the backend."""
189
- # Reset event tracking
289
+ received_at = self._resolve_received_timestamp(ev)
290
+ self._capture_event(ev, received_at)
190
291
  self.stream_processor.reset_event_tracking()
191
292
 
192
- # Track streaming start time
193
- if self.state.streaming_started_at is None:
194
- self.state.streaming_started_at = monotonic()
293
+ self._sync_stream_start(ev, received_at)
195
294
 
196
- # Extract event metadata
197
295
  metadata = self.stream_processor.extract_event_metadata(ev)
198
- kind = metadata["kind"]
199
- context_id = metadata["context_id"]
200
- content = metadata["content"]
201
296
 
202
- # Render debug event panel if verbose mode is enabled
203
- if self.verbose:
204
- render_debug_event(ev, self.console, self.state.streaming_started_at)
297
+ self._maybe_render_debug(ev, received_at)
298
+ try:
299
+ self._dispatch_event(ev, metadata)
300
+ finally:
301
+ self.stream_processor.update_timing(metadata.get("context_id"))
302
+
303
+ def _resolve_received_timestamp(self, ev: dict[str, Any]) -> datetime:
304
+ """Return the timestamp an event was received, normalising inputs."""
305
+ received_at = coerce_received_at(ev.get("received_at"))
306
+ if received_at is None:
307
+ received_at = datetime.now(timezone.utc)
308
+
309
+ if self.state.streaming_started_event_ts is None:
310
+ self.state.streaming_started_event_ts = received_at
311
+
312
+ return received_at
313
+
314
+ def _sync_stream_start(self, ev: dict[str, Any], received_at: datetime | None) -> None:
315
+ """Ensure renderer and stream processor share a streaming baseline."""
316
+ baseline = self.state.streaming_started_at
317
+ if baseline is None:
318
+ baseline = monotonic()
319
+ self._ensure_streaming_started_baseline(baseline)
320
+ elif getattr(self.stream_processor, "streaming_started_at", None) is None:
321
+ self._ensure_streaming_started_baseline(baseline)
322
+
323
+ if ev.get("status") == "streaming_started":
324
+ self.state.streaming_started_event_ts = received_at
325
+ self._ensure_streaming_started_baseline(monotonic())
326
+
327
+ def _maybe_render_debug(
328
+ self, ev: dict[str, Any], received_at: datetime
329
+ ) -> None: # pragma: no cover - guard rails for verbose mode
330
+ """Render debug view when verbose mode is enabled."""
331
+ if not self.verbose:
332
+ return
205
333
 
206
- # Update timing
207
- self.stream_processor.update_timing(context_id)
334
+ self._ensure_transcript_header()
335
+ render_debug_event(
336
+ ev,
337
+ self.console,
338
+ received_ts=received_at,
339
+ baseline_ts=self.state.streaming_started_event_ts,
340
+ )
341
+ self._print_transcript_hint()
208
342
 
209
- # Handle different event types
210
- if kind == "status":
211
- # Status events
212
- status = ev.get("status")
213
- if status == "streaming_started":
214
- self.state.streaming_started_at = monotonic()
215
- return
343
+ def _dispatch_event(self, ev: dict[str, Any], metadata: dict[str, Any]) -> None:
344
+ """Route events to the appropriate renderer handlers."""
345
+ kind = metadata["kind"]
346
+ content = metadata["content"]
216
347
 
348
+ if kind == "status":
349
+ self._handle_status_event(ev)
217
350
  elif kind == "content":
218
- # Content streaming events
219
- if content:
220
- self.state.buffer.append(content)
221
- self._ensure_live()
222
- return
223
-
351
+ self._handle_content_event(content)
224
352
  elif kind == "final_response":
225
- # Final response events
226
- if content:
227
- self.state.buffer.append(content)
228
- self.state.final_text = content
229
- self._ensure_live()
353
+ self._handle_final_response_event(content, metadata)
354
+ elif kind in {"agent_step", "agent_thinking_step"}:
355
+ self._handle_agent_step_event(ev, metadata)
356
+ else:
357
+ self._ensure_live()
230
358
 
231
- # In verbose mode, show the final result in a panel
232
- if self.verbose and content and content.strip():
233
- final_panel = create_final_panel(content, theme=self.cfg.theme)
234
- self.console.print(final_panel)
235
- self.state.printed_final_panel = True
359
+ def _handle_status_event(self, ev: dict[str, Any]) -> None:
360
+ """Handle status events."""
361
+ status = ev.get("status")
362
+ if status == "streaming_started":
236
363
  return
237
364
 
238
- elif kind in {"agent_step", "agent_thinking_step"}:
239
- # Agent step events
240
- # Note: Thinking gaps are primarily a visual aid. Keep minimal here.
241
-
242
- # Extract tool information
243
- (
244
- tool_name,
245
- tool_args,
246
- tool_out,
247
- tool_calls_info,
248
- ) = self.stream_processor.parse_tool_calls(ev)
249
-
250
- # Track tools and sub-agents
251
- self.stream_processor.track_tools_and_agents(
252
- tool_name, tool_calls_info, is_delegation_tool
253
- )
365
+ def _handle_content_event(self, content: str) -> None:
366
+ """Handle content streaming events."""
367
+ if content:
368
+ self.state.append_transcript_text(content)
369
+ self._ensure_live()
370
+
371
+ def _handle_final_response_event(self, content: str, metadata: dict[str, Any]) -> None:
372
+ """Handle final response events."""
373
+ if content:
374
+ self.state.append_transcript_text(content)
375
+ self.state.set_final_output(content)
376
+
377
+ meta_payload = metadata.get("metadata") or {}
378
+ final_time = coerce_server_time(meta_payload.get("time"))
379
+ self._update_final_duration(final_time)
380
+ self.thinking_controller.close_active_scopes(final_time)
381
+ self._finish_running_steps()
382
+ self.tool_controller.finish_all_panels()
383
+ self._normalise_finished_icons()
384
+
385
+ self._ensure_live()
386
+ self._print_final_panel_if_needed()
387
+
388
+ def _normalise_finished_icons(self) -> None:
389
+ """Ensure finished steps release any running spinners."""
390
+ for step in self.steps.by_id.values():
391
+ if getattr(step, "status", None) != "running":
392
+ self._step_spinners.pop(step.step_id, None)
393
+
394
+ def _handle_agent_step_event(self, ev: dict[str, Any], metadata: dict[str, Any]) -> None:
395
+ """Handle agent step events."""
396
+ # Extract tool information using stream processor
397
+ tool_calls_result = self.stream_processor.parse_tool_calls(ev)
398
+ tool_name, tool_args, tool_out, tool_calls_info = tool_calls_result
399
+
400
+ payload = metadata.get("metadata") or {}
254
401
 
255
- # Handle tool execution
256
- self._handle_agent_step(ev, tool_name, tool_args, tool_out, tool_calls_info)
402
+ tracked_step: Step | None = None
403
+ try:
404
+ tracked_step = self.steps.apply_event(ev)
405
+ except ValueError:
406
+ logger.debug("Malformed step event skipped", exc_info=True)
407
+ else:
408
+ self._record_step_server_start(tracked_step, payload)
409
+ self.thinking_controller.update_timeline(
410
+ tracked_step,
411
+ payload,
412
+ enabled=self.cfg.render_thinking,
413
+ )
414
+ self._maybe_override_root_agent_label(tracked_step, payload)
415
+ self._maybe_attach_root_query(tracked_step)
416
+
417
+ # Track tools and sub-agents for transcript/debug context
418
+ self.stream_processor.track_tools_and_agents(tool_name, tool_calls_info, is_delegation_tool)
419
+
420
+ # Handle tool execution
421
+ self.tool_controller.handle_agent_step(
422
+ ev,
423
+ tool_name,
424
+ tool_args,
425
+ tool_out,
426
+ tool_calls_info,
427
+ tracked_step=tracked_step,
428
+ )
257
429
 
258
430
  # Update live display
259
431
  self._ensure_live()
260
432
 
261
- def _finish_running_steps(self) -> None:
262
- """Mark any running steps as finished to avoid lingering spinners."""
263
- for st in list(self.steps.by_id.values()):
264
- if not is_step_finished(st):
265
- st.finish(None)
433
+ def _maybe_attach_root_query(self, step: Step | None) -> None:
434
+ """Attach the user query to the root agent step for display."""
435
+ if not step or self._root_query_attached or not self._root_query or step.kind != "agent" or step.parent_id:
436
+ return
266
437
 
267
- def _finish_tool_panels(self) -> None:
268
- """Mark unfinished tool panels as finished."""
438
+ args = dict(getattr(step, "args", {}) or {})
439
+ args.setdefault("query", self._root_query)
440
+ step.args = args
441
+ self._root_query_attached = True
442
+
443
+ def _record_step_server_start(self, step: Step | None, payload: dict[str, Any]) -> None:
444
+ """Store server-provided start times for elapsed calculations."""
445
+ if not step:
446
+ return
447
+ server_time = payload.get("time")
448
+ if not isinstance(server_time, (int, float)):
449
+ return
450
+ self._step_server_start_times.setdefault(step.step_id, float(server_time))
451
+
452
+ def _maybe_override_root_agent_label(self, step: Step | None, payload: dict[str, Any]) -> None:
453
+ """Ensure the root agent row uses the human-friendly name and shows the ID."""
454
+ if not step or step.kind != "agent" or step.parent_id:
455
+ return
456
+ friendly = self._root_agent_friendly or self._humanize_agent_slug((payload or {}).get("agent_name"))
457
+ if not friendly:
458
+ return
459
+ agent_identifier = step.name or step.step_id
460
+ if not agent_identifier:
461
+ return
462
+ step.display_label = normalise_display_label(f"{ICON_AGENT} {friendly} ({agent_identifier})")
463
+ if not self._root_agent_step_id:
464
+ self._root_agent_step_id = step.step_id
465
+
466
+ # Thinking scope management is handled by ThinkingScopeController.
467
+
468
+ def _apply_root_duration(self, duration_seconds: float | None) -> None:
469
+ """Propagate the final run duration to the root agent step."""
470
+ if duration_seconds is None or not self._root_agent_step_id:
471
+ return
472
+ root_step = self.steps.by_id.get(self._root_agent_step_id)
473
+ if not root_step:
474
+ return
269
475
  try:
270
- items = list(self.tool_panels.items())
271
- except Exception: # pragma: no cover - defensive guard
272
- logger.exception("Failed to iterate tool panels during cleanup")
476
+ duration_ms = max(0, int(round(float(duration_seconds) * 1000)))
477
+ except Exception:
273
478
  return
479
+ root_step.duration_ms = duration_ms
480
+ root_step.duration_source = root_step.duration_source or "run"
481
+ root_step.status = "finished"
482
+
483
+ @staticmethod
484
+ def _humanize_agent_slug(value: Any) -> str | None:
485
+ """Convert a slugified agent name into Title Case."""
486
+ if not isinstance(value, str):
487
+ return None
488
+ cleaned = value.replace("_", " ").replace("-", " ").strip()
489
+ if not cleaned:
490
+ return None
491
+ parts = [part for part in cleaned.split() if part]
492
+ return " ".join(part[:1].upper() + part[1:] for part in parts)
274
493
 
275
- for _sid, meta in items:
276
- if meta.get("status") != "finished":
277
- meta["status"] = "finished"
494
+ def _finish_running_steps(self) -> None:
495
+ """Mark any running steps as finished to avoid lingering spinners."""
496
+ for st in self.steps.by_id.values():
497
+ if not is_step_finished(st):
498
+ self._mark_incomplete_step(st)
499
+
500
+ def _mark_incomplete_step(self, step: Step) -> None:
501
+ """Mark a lingering step as incomplete/warning with unknown duration."""
502
+ step.status = "finished"
503
+ step.duration_unknown = True
504
+ if step.duration_ms is None:
505
+ step.duration_ms = 0
506
+ step.duration_source = step.duration_source or "unknown"
278
507
 
279
508
  def _stop_live_display(self) -> None:
280
509
  """Stop live display and clean up."""
281
510
  self._shutdown_live()
282
511
 
283
512
  def _print_final_panel_if_needed(self) -> None:
284
- """Print final result panel if verbose mode and content available."""
285
- if self.verbose and not self.state.printed_final_panel:
286
- body = ("".join(self.state.buffer) or "").strip()
287
- if body:
288
- final_panel = create_final_panel(body, theme=self.cfg.theme)
289
- self.console.print(final_panel)
290
- self.state.printed_final_panel = True
291
-
292
- def on_complete(self, _stats: RunStats) -> None:
513
+ """Print final result when configuration requires it."""
514
+ if self.state.printed_final_output:
515
+ return
516
+
517
+ body = (self.state.final_text or self.state.buffer.render() or "").strip()
518
+ if not body:
519
+ return
520
+
521
+ if getattr(self, "_transcript_mode_enabled", False):
522
+ return
523
+
524
+ if self.verbose:
525
+ panel = build_final_panel(
526
+ self.state,
527
+ title=self._final_panel_title(),
528
+ )
529
+ if panel is None:
530
+ return
531
+ self.console.print(panel)
532
+ self.state.printed_final_output = True
533
+
534
+ def finalize(self) -> tuple[list[Any], list[Any]]:
535
+ """Compose the final transcript renderables."""
536
+ return self._compose_final_transcript()
537
+
538
+ def _compose_final_transcript(self) -> tuple[list[Any], list[Any]]:
539
+ """Build the transcript snapshot used for final summaries."""
540
+ summary_window = self._summary_window_size()
541
+ summary_window = summary_window if summary_window > 0 else None
542
+ snapshot = build_transcript_snapshot(
543
+ self.state,
544
+ self.steps,
545
+ query_text=extract_query_from_meta(self.state.meta),
546
+ meta=self.state.meta,
547
+ summary_window=summary_window,
548
+ step_status_overrides=self._build_step_status_overrides(),
549
+ )
550
+ header, body = build_transcript_view(snapshot)
551
+ self._final_transcript_snapshot = snapshot
552
+ self._final_transcript_renderables = (header, body)
553
+ return header, body
554
+
555
+ def _render_final_summary(self, header: list[Any], body: list[Any]) -> None:
556
+ """Print the composed transcript summary for non-live renders."""
557
+ renderables = list(header) + list(body)
558
+ for renderable in renderables:
559
+ try:
560
+ self.console.print(renderable)
561
+ self.console.print()
562
+ except Exception:
563
+ pass
564
+
565
+ def on_complete(self, stats: RunStats) -> None:
293
566
  """Handle completion event."""
294
567
  self.state.finalizing_ui = True
295
568
 
569
+ self._handle_stats_duration(stats)
570
+ self.thinking_controller.close_active_scopes(self.state.final_duration_seconds)
571
+ self._cleanup_ui_elements()
572
+ self._finalize_display()
573
+ self._print_completion_message()
574
+
575
+ def _handle_stats_duration(self, stats: RunStats) -> None:
576
+ """Handle stats processing and duration calculation."""
577
+ if not isinstance(stats, RunStats):
578
+ return
579
+
580
+ duration = None
581
+ try:
582
+ if stats.finished_at is not None and stats.started_at is not None:
583
+ duration = max(0.0, float(stats.finished_at) - float(stats.started_at))
584
+ except Exception:
585
+ duration = None
586
+
587
+ if duration is not None:
588
+ self._update_final_duration(duration, overwrite=True)
589
+
590
+ def _cleanup_ui_elements(self) -> None:
591
+ """Clean up running UI elements."""
296
592
  # Mark any running steps as finished to avoid lingering spinners
297
593
  self._finish_running_steps()
298
594
 
299
595
  # Mark unfinished tool panels as finished
300
- self._finish_tool_panels()
596
+ self.tool_controller.finish_all_panels()
301
597
 
598
+ def _finalize_display(self) -> None:
599
+ """Finalize live display and render final output."""
302
600
  # Final refresh
303
601
  self._ensure_live()
304
602
 
603
+ header, body = self.finalize()
604
+
305
605
  # Stop live display
306
606
  self._stop_live_display()
307
607
 
308
- # Print final panel if needed
309
- self._print_final_panel_if_needed()
608
+ # Render final output based on configuration
609
+ if self.cfg.live:
610
+ self._print_final_panel_if_needed()
611
+ else:
612
+ self._render_final_summary(header, body)
310
613
 
311
- def _ensure_live(self) -> None:
312
- """Ensure live display is updated."""
313
- # Lazily create Live if needed
314
- if self.live is None and self.cfg.live:
614
+ def _print_completion_message(self) -> None:
615
+ """Print completion message based on current mode."""
616
+ if self._transcript_mode_enabled:
315
617
  try:
316
- self.live = Live(
317
- console=self.console,
318
- refresh_per_second=1 / self.cfg.refresh_debounce,
319
- transient=not self.cfg.persist_live,
618
+ self.console.print(
619
+ "[dim]Run finished. Press Ctrl+T to return to the summary view or stay here to inspect events. "
620
+ "Use the post-run viewer for export.[/dim]"
320
621
  )
321
- self.live.start()
322
622
  except Exception:
323
- self.live = None
324
-
325
- if self.live:
326
- panels = [self._render_main_panel()]
327
- steps_renderable = self._render_steps_text()
328
- panels.append(
329
- AIPPanel(
330
- steps_renderable,
331
- title="Steps",
332
- border_style="blue",
333
- )
334
- )
335
- panels.extend(self._render_tool_panels())
336
- self.live.update(Group(*panels))
337
-
338
- def _render_main_panel(self) -> Any:
339
- """Render the main content panel."""
340
- body = "".join(self.state.buffer).strip()
341
- # Dynamic title with spinner + elapsed/hints
342
- title = self._format_enhanced_main_title()
343
- return create_main_panel(body, title, self.cfg.theme)
344
-
345
- def _maybe_insert_thinking_gap(
346
- self, task_id: str | None, context_id: str | None
347
- ) -> None:
348
- """Insert thinking gap if needed."""
349
- # Implementation would track thinking states
350
- pass
351
-
352
- def _ensure_tool_panel(
353
- self, name: str, args: Any, task_id: str, context_id: str
354
- ) -> str:
355
- """Ensure a tool panel exists and return its ID."""
356
- formatted_title = format_tool_title(name)
357
- is_delegation = is_delegation_tool(name)
358
- tool_sid = f"tool_{name}_{task_id}_{context_id}"
359
-
360
- if tool_sid not in self.tool_panels:
361
- self.tool_panels[tool_sid] = {
362
- "title": formatted_title,
363
- "status": "running",
364
- "started_at": monotonic(),
365
- "server_started_at": self.stream_processor.server_elapsed_time,
366
- "chunks": [],
367
- "args": args or {},
368
- "output": None,
369
- "is_delegation": is_delegation,
370
- }
371
- # Add Args section once
372
- if args:
373
- try:
374
- args_content = (
375
- "**Args:**\n```json\n"
376
- + json.dumps(args, indent=2)
377
- + "\n```\n\n"
378
- )
379
- except Exception:
380
- args_content = f"**Args:**\n{args}\n\n"
381
- self.tool_panels[tool_sid]["chunks"].append(args_content)
382
- self.tool_order.append(tool_sid)
383
-
384
- return tool_sid
385
-
386
- def _start_tool_step(
387
- self,
388
- task_id: str,
389
- context_id: str,
390
- tool_name: str,
391
- tool_args: Any,
392
- _tool_sid: str,
393
- ) -> Step | None:
394
- """Start or get a step for a tool."""
395
- if is_delegation_tool(tool_name):
396
- st = self.steps.start_or_get(
397
- task_id=task_id,
398
- context_id=context_id,
399
- kind="delegate",
400
- name=tool_name,
401
- args=tool_args,
402
- )
623
+ pass
403
624
  else:
404
- st = self.steps.start_or_get(
405
- task_id=task_id,
406
- context_id=context_id,
407
- kind="tool",
408
- name=tool_name,
409
- args=tool_args,
410
- )
411
-
412
- # Record server start time for this step if available
413
- if st and self.stream_processor.server_elapsed_time is not None:
414
- self._step_server_start_times[st.step_id] = (
415
- self.stream_processor.server_elapsed_time
416
- )
417
-
418
- return st
625
+ # No transcript toggle in summary mode; nothing to print here.
626
+ return
419
627
 
420
- def _process_additional_tool_calls(
421
- self,
422
- tool_calls_info: list[tuple[str, Any, Any]],
423
- tool_name: str,
424
- task_id: str,
425
- context_id: str,
426
- ) -> None:
427
- """Process additional tool calls to avoid duplicates."""
428
- for call_name, call_args, _ in tool_calls_info or []:
429
- if call_name and call_name != tool_name:
430
- self._ensure_tool_panel(call_name, call_args, task_id, context_id)
431
- if is_delegation_tool(call_name):
432
- st2 = self.steps.start_or_get(
433
- task_id=task_id,
434
- context_id=context_id,
435
- kind="delegate",
436
- name=call_name,
437
- args=call_args,
438
- )
439
- else:
440
- st2 = self.steps.start_or_get(
441
- task_id=task_id,
442
- context_id=context_id,
443
- kind="tool",
444
- name=call_name,
445
- args=call_args,
446
- )
447
- if self.stream_processor.server_elapsed_time is not None and st2:
448
- self._step_server_start_times[st2.step_id] = (
449
- self.stream_processor.server_elapsed_time
450
- )
451
-
452
- def _detect_tool_completion(
453
- self, metadata: dict, content: str
454
- ) -> tuple[bool, str | None, Any]:
455
- """Detect if a tool has completed and return completion info."""
456
- tool_info = metadata.get("tool_info", {}) if isinstance(metadata, dict) else {}
457
-
458
- if tool_info.get("status") == "finished" and tool_info.get("name"):
459
- return True, tool_info.get("name"), tool_info.get("output")
460
- elif content and isinstance(content, str) and content.startswith("Completed "):
461
- # content like "Completed google_serper"
462
- tname = content.replace("Completed ", "").strip()
463
- if tname:
464
- output = (
465
- tool_info.get("output") if tool_info.get("name") == tname else None
466
- )
467
- return True, tname, output
468
- elif metadata.get("status") == "finished" and tool_info.get("name"):
469
- return True, tool_info.get("name"), tool_info.get("output")
628
+ def _ensure_live(self) -> None:
629
+ """Ensure live display is updated."""
630
+ if getattr(self, "_transcript_mode_enabled", False):
631
+ return
632
+ if not self._ensure_live_stack():
633
+ return
470
634
 
471
- return False, None, None
635
+ self._start_live_if_needed()
472
636
 
473
- def _get_tool_session_id(
474
- self, finished_tool_name: str, task_id: str, context_id: str
475
- ) -> str:
476
- """Generate tool session ID."""
477
- return f"tool_{finished_tool_name}_{task_id}_{context_id}"
637
+ if self.live:
638
+ self._refresh_live_panels()
639
+ if (
640
+ not self._transcript_mode_enabled
641
+ and not self.state.finalizing_ui
642
+ and not self._summary_hint_printed_once
643
+ ):
644
+ self._print_summary_hint(force=True)
478
645
 
479
- def _calculate_tool_duration(self, meta: dict[str, Any]) -> float | None:
480
- """Calculate tool duration from metadata."""
481
- server_now = self.stream_processor.server_elapsed_time
482
- server_start = meta.get("server_started_at")
483
- dur = None
646
+ def _ensure_live_stack(self) -> bool:
647
+ """Guarantee the console exposes the internal live stack Rich expects."""
648
+ live_stack = getattr(self.console, "_live_stack", None)
649
+ if isinstance(live_stack, list):
650
+ return True
484
651
 
485
652
  try:
486
- if isinstance(server_now, (int, float)) and server_start is not None:
487
- dur = max(0.0, float(server_now) - float(server_start))
488
- else:
489
- started_at = meta.get("started_at")
490
- if started_at is not None:
491
- started_at_float = float(started_at)
492
- dur = max(0.0, float(monotonic()) - started_at_float)
493
- except (TypeError, ValueError):
494
- logger.exception("Failed to calculate tool duration")
495
- return None
496
-
497
- return dur
498
-
499
- def _update_tool_metadata(self, meta: dict[str, Any], dur: float | None) -> None:
500
- """Update tool metadata with duration information."""
501
- if dur is not None:
502
- meta["duration_seconds"] = dur
503
- meta["server_finished_at"] = (
504
- self.stream_processor.server_elapsed_time
505
- if isinstance(self.stream_processor.server_elapsed_time, int | float)
506
- else None
507
- )
508
- meta["finished_at"] = monotonic()
509
-
510
- def _add_tool_output_to_panel(
511
- self, meta: dict[str, Any], finished_tool_output: Any, finished_tool_name: str
512
- ) -> None:
513
- """Add tool output to panel metadata."""
514
- if finished_tool_output is not None:
515
- meta["chunks"].append(
516
- self._format_output_block(finished_tool_output, finished_tool_name)
653
+ self.console._live_stack = [] # type: ignore[attr-defined]
654
+ return True
655
+ except Exception:
656
+ # If the console forbids attribute assignment we simply skip the live
657
+ # update for this cycle and fall back to buffered printing.
658
+ logger.debug(
659
+ "Console missing _live_stack; skipping live UI initialisation",
660
+ exc_info=True,
517
661
  )
518
- meta["output"] = finished_tool_output
519
-
520
- def _mark_panel_as_finished(self, meta: dict[str, Any], tool_sid: str) -> None:
521
- """Mark panel as finished and ensure visibility."""
522
- if meta.get("status") != "finished":
523
- meta["status"] = "finished"
524
-
525
- dur = self._calculate_tool_duration(meta)
526
- self._update_tool_metadata(meta, dur)
527
-
528
- # Ensure this finished panel is visible in this frame
529
- self.stream_processor.current_event_finished_panels.add(tool_sid)
662
+ return False
530
663
 
531
- def _finish_tool_panel(
532
- self,
533
- finished_tool_name: str,
534
- finished_tool_output: Any,
535
- task_id: str,
536
- context_id: str,
537
- ) -> None:
538
- """Finish a tool panel and update its status."""
539
- tool_sid = self._get_tool_session_id(finished_tool_name, task_id, context_id)
540
- if tool_sid not in self.tool_panels:
664
+ def _start_live_if_needed(self) -> None:
665
+ """Create and start a Live instance when configuration allows."""
666
+ if self.live is not None or not self.cfg.live:
541
667
  return
542
668
 
543
- meta = self.tool_panels[tool_sid]
544
- self._mark_panel_as_finished(meta, tool_sid)
545
- self._add_tool_output_to_panel(meta, finished_tool_output, finished_tool_name)
546
-
547
- def _get_step_duration(
548
- self, finished_tool_name: str, task_id: str, context_id: str
549
- ) -> float | None:
550
- """Get step duration from tool panels."""
551
- tool_sid = f"tool_{finished_tool_name}_{task_id}_{context_id}"
552
- return self.tool_panels.get(tool_sid, {}).get("duration_seconds")
553
-
554
- def _finish_delegation_step(
555
- self,
556
- finished_tool_name: str,
557
- finished_tool_output: Any,
558
- task_id: str,
559
- context_id: str,
560
- step_duration: float | None,
561
- ) -> None:
562
- """Finish a delegation step."""
563
- self.steps.finish(
564
- task_id=task_id,
565
- context_id=context_id,
566
- kind="delegate",
567
- name=finished_tool_name,
568
- output=finished_tool_output,
569
- duration_raw=step_duration,
570
- )
571
-
572
- def _finish_tool_step_type(
573
- self,
574
- finished_tool_name: str,
575
- finished_tool_output: Any,
576
- task_id: str,
577
- context_id: str,
578
- step_duration: float | None,
579
- ) -> None:
580
- """Finish a regular tool step."""
581
- self.steps.finish(
582
- task_id=task_id,
583
- context_id=context_id,
584
- kind="tool",
585
- name=finished_tool_name,
586
- output=finished_tool_output,
587
- duration_raw=step_duration,
588
- )
589
-
590
- def _finish_tool_step(
591
- self,
592
- finished_tool_name: str,
593
- finished_tool_output: Any,
594
- task_id: str,
595
- context_id: str,
596
- ) -> None:
597
- """Finish the corresponding step for a completed tool."""
598
- step_duration = self._get_step_duration(finished_tool_name, task_id, context_id)
599
-
600
- if is_delegation_tool(finished_tool_name):
601
- self._finish_delegation_step(
602
- finished_tool_name,
603
- finished_tool_output,
604
- task_id,
605
- context_id,
606
- step_duration,
607
- )
608
- else:
609
- self._finish_tool_step_type(
610
- finished_tool_name,
611
- finished_tool_output,
612
- task_id,
613
- context_id,
614
- step_duration,
669
+ try:
670
+ self.live = Live(
671
+ console=self.console,
672
+ refresh_per_second=1 / self.cfg.refresh_debounce,
673
+ transient=not self.cfg.persist_live,
615
674
  )
675
+ self.live.start()
676
+ except Exception:
677
+ self.live = None
616
678
 
617
- def _should_create_snapshot(self, tool_sid: str) -> bool:
618
- """Check if a snapshot should be created."""
619
- return self.cfg.append_finished_snapshots and not self.tool_panels.get(
620
- tool_sid, {}
621
- ).get("snapshot_printed")
622
-
623
- def _get_snapshot_title(self, meta: dict[str, Any], finished_tool_name: str) -> str:
624
- """Get the title for the snapshot."""
625
- adjusted_title = meta.get("title") or finished_tool_name
626
-
627
- # Add elapsed time to title
628
- dur = meta.get("duration_seconds")
629
- if isinstance(dur, int | float):
630
- elapsed_str = self._format_snapshot_duration(dur)
631
- adjusted_title = f"{adjusted_title} · {elapsed_str}"
632
-
633
- return adjusted_title
679
+ def _refresh_live_panels(self) -> None:
680
+ """Render panels and push them to the active Live display."""
681
+ if not self.live:
682
+ return
634
683
 
635
- def _format_snapshot_duration(self, dur: int | float) -> str:
636
- """Format duration for snapshot title."""
637
- try:
638
- # Handle invalid types
639
- if not isinstance(dur, (int, float)):
640
- return "<1ms"
641
-
642
- if dur >= 1:
643
- return f"{dur:.2f}s"
644
- elif int(dur * 1000) > 0:
645
- return f"{int(dur * 1000)}ms"
646
- else:
647
- return "<1ms"
648
- except (TypeError, ValueError, OverflowError):
649
- return "<1ms"
650
-
651
- def _clamp_snapshot_body(self, body_text: str) -> str:
652
- """Clamp snapshot body to configured limits."""
653
- max_lines = int(self.cfg.snapshot_max_lines or 0) or 60
654
- lines = body_text.splitlines()
655
- if len(lines) > max_lines:
656
- lines = lines[:max_lines] + ["… (truncated)"]
657
- body_text = "\n".join(lines)
658
-
659
- max_chars = int(self.cfg.snapshot_max_chars or 0) or 4000
660
- if len(body_text) > max_chars:
661
- body_text = body_text[: max_chars - 12] + "\n… (truncated)"
662
-
663
- return body_text
664
-
665
- def _create_snapshot_panel(
666
- self, adjusted_title: str, body_text: str, finished_tool_name: str
667
- ) -> Any:
668
- """Create the snapshot panel."""
669
- return create_tool_panel(
670
- title=adjusted_title,
671
- content=body_text or "(no output)",
672
- status="finished",
673
- theme=self.cfg.theme,
674
- is_delegation=is_delegation_tool(finished_tool_name),
684
+ steps_body = self._render_steps_text()
685
+ template_panel = getattr(self, "_last_steps_panel_template", None)
686
+ if template_panel is None:
687
+ template_panel = self._resolve_steps_panel()
688
+ steps_panel = AIPPanel(
689
+ steps_body,
690
+ title=getattr(template_panel, "title", "Steps"),
691
+ border_style=getattr(template_panel, "border_style", "blue"),
692
+ padding=getattr(template_panel, "padding", (0, 1)),
675
693
  )
676
694
 
677
- def _print_and_mark_snapshot(self, tool_sid: str, snapshot_panel: Any) -> None:
678
- """Print snapshot and mark as printed."""
679
- self.console.print(snapshot_panel)
680
- self.tool_panels[tool_sid]["snapshot_printed"] = True
695
+ main_panel = self._render_main_panel()
696
+ panels = self._build_live_panels(main_panel, steps_panel)
681
697
 
682
- def _create_tool_snapshot(
683
- self, finished_tool_name: str, task_id: str, context_id: str
684
- ) -> None:
685
- """Create and print a snapshot for a finished tool."""
686
- tool_sid = f"tool_{finished_tool_name}_{task_id}_{context_id}"
698
+ self.live.update(Group(*panels))
687
699
 
688
- if not self._should_create_snapshot(tool_sid):
689
- return
700
+ def _build_live_panels(
701
+ self,
702
+ main_panel: Any,
703
+ steps_panel: Any,
704
+ ) -> list[Any]:
705
+ """Assemble the panel order for the live display."""
706
+ if self.verbose:
707
+ return [main_panel, steps_panel]
690
708
 
691
- meta = self.tool_panels[tool_sid]
692
- adjusted_title = self._get_snapshot_title(meta, finished_tool_name)
709
+ return [steps_panel, main_panel]
693
710
 
694
- # Compose body from chunks and clamp
695
- body_text = "".join(meta.get("chunks") or [])
696
- body_text = self._clamp_snapshot_body(body_text)
711
+ def _render_main_panel(self) -> Any:
712
+ """Render the main content panel."""
713
+ body = self.state.buffer.render().strip()
714
+ theme = DEFAULT_TRANSCRIPT_THEME
715
+ if not self.verbose:
716
+ panel = build_final_panel(self.state, theme=theme)
717
+ if panel is not None:
718
+ return panel
719
+ # Dynamic title with spinner + elapsed/hints
720
+ title = self._format_enhanced_main_title()
721
+ return create_main_panel(body, title, theme)
697
722
 
698
- snapshot_panel = self._create_snapshot_panel(
699
- adjusted_title, body_text, finished_tool_name
700
- )
723
+ def _final_panel_title(self) -> str:
724
+ """Compose title for the final result panel including duration."""
725
+ return format_final_panel_title(self.state)
701
726
 
702
- self._print_and_mark_snapshot(tool_sid, snapshot_panel)
727
+ def apply_verbosity(self, verbose: bool) -> None:
728
+ """Update verbose behaviour at runtime."""
729
+ if self.verbose == verbose:
730
+ return
703
731
 
704
- def _handle_agent_step(
705
- self,
706
- event: dict[str, Any],
707
- tool_name: str | None,
708
- tool_args: Any,
709
- _tool_out: Any,
710
- tool_calls_info: list[tuple[str, Any, Any]],
711
- ) -> None:
712
- """Handle agent step event."""
713
- metadata = event.get("metadata", {})
714
- task_id = event.get("task_id")
715
- context_id = event.get("context_id")
716
- content = event.get("content", "")
717
-
718
- # Create steps and panels for the primary tool
719
- if tool_name:
720
- tool_sid = self._ensure_tool_panel(
721
- tool_name, tool_args, task_id, context_id
722
- )
723
- self._start_tool_step(task_id, context_id, tool_name, tool_args, tool_sid)
732
+ self.verbose = verbose
733
+ desired_live = not verbose
734
+ if desired_live != self.cfg.live:
735
+ self.cfg.live = desired_live
736
+ if not desired_live:
737
+ self._shutdown_live()
738
+ else:
739
+ self._ensure_live()
724
740
 
725
- # Handle additional tool calls
726
- self._process_additional_tool_calls(
727
- tool_calls_info, tool_name, task_id, context_id
728
- )
741
+ if self.cfg.live:
742
+ self._ensure_live()
729
743
 
730
- # Check for tool completion
731
- (
732
- is_tool_finished,
733
- finished_tool_name,
734
- finished_tool_output,
735
- ) = self._detect_tool_completion(metadata, content)
744
+ # Transcript helper implementations live in TranscriptModeMixin.
736
745
 
737
- if is_tool_finished and finished_tool_name:
738
- self._finish_tool_panel(
739
- finished_tool_name, finished_tool_output, task_id, context_id
740
- )
741
- self._finish_tool_step(
742
- finished_tool_name, finished_tool_output, task_id, context_id
743
- )
744
- self._create_tool_snapshot(finished_tool_name, task_id, context_id)
746
+ def get_aggregated_output(self) -> str:
747
+ """Return the concatenated assistant output collected so far."""
748
+ return self.state.buffer.render().strip()
745
749
 
746
- def _spinner(self) -> str:
747
- """Return spinner character."""
748
- return get_spinner()
750
+ def get_transcript_events(self) -> list[dict[str, Any]]:
751
+ """Return captured SSE events."""
752
+ return list(self.state.events)
749
753
 
750
754
  def _format_working_indicator(self, started_at: float | None) -> str:
751
755
  """Format working indicator."""
@@ -760,6 +764,11 @@ class RichStreamRenderer:
         self._shutdown_live()
 
     def __del__(self) -> None:
+        """Destructor that ensures live rendering is properly shut down.
+
+        This is a safety net to prevent resource leaks if the renderer
+        is not explicitly stopped.
+        """
         # Destructors must never raise
         try:
             self._shutdown_live(reset_attr=False)
@@ -784,9 +793,7 @@ class RichStreamRenderer:
 
     def _get_analysis_progress_info(self) -> dict[str, Any]:
         total_steps = len(self.steps.order)
-        completed_steps = sum(
-            1 for sid in self.steps.order if is_step_finished(self.steps.by_id[sid])
-        )
+        completed_steps = sum(1 for sid in self.steps.order if is_step_finished(self.steps.by_id[sid]))
         current_step = None
         for sid in self.steps.order:
             if not is_step_finished(self.steps.by_id[sid]):
@@ -794,13 +801,11 @@ class RichStreamRenderer:
                 break
         # Prefer server elapsed time when available
         elapsed = 0.0
-        if isinstance(self.stream_processor.server_elapsed_time, int | float):
+        if isinstance(self.stream_processor.server_elapsed_time, (int, float)):
            elapsed = float(self.stream_processor.server_elapsed_time)
         elif self._started_at is not None:
            elapsed = monotonic() - self._started_at
-        progress_percent = (
-            int((completed_steps / total_steps) * 100) if total_steps else 0
-        )
+        progress_percent = int((completed_steps / total_steps) * 100) if total_steps else 0
         return {
            "total_steps": total_steps,
            "completed_steps": completed_steps,
@@ -844,39 +849,52 @@ class RichStreamRenderer:
     def _get_step_icon(self, step_kind: str) -> str:
         """Get icon for step kind."""
         if step_kind == "tool":
-            return "⚙️"
+            return ICON_TOOL_STEP
         elif step_kind == "delegate":
-            return "🤝"
+            return ICON_DELEGATE
         elif step_kind == "agent":
-            return "🧠"
+            return ICON_AGENT_STEP
         return ""

     def _format_step_status(self, step: Step) -> str:
         """Format step status with elapsed time or duration."""
         if is_step_finished(step):
-            if step.duration_ms is None:
-                return "[<1ms]"
-            elif step.duration_ms >= 1000:
-                return f"[{step.duration_ms / 1000:.2f}s]"
-            elif step.duration_ms > 0:
-                return f"[{step.duration_ms}ms]"
-            return "[<1ms]"
+            return self._format_finished_badge(step)
         else:
             # Calculate elapsed time for running steps
             elapsed = self._calculate_step_elapsed_time(step)
-            if elapsed >= 1:
+            if elapsed >= 0.1:
                 return f"[{elapsed:.2f}s]"
-            ms = int(elapsed * 1000)
-            return f"[{ms}ms]" if ms > 0 else "[<1ms]"
+            ms = int(round(elapsed * 1000))
+            if ms <= 0:
+                return ""
+            return f"[{ms}ms]"
+
+    def _format_finished_badge(self, step: Step) -> str:
+        """Compose duration badge for finished steps including source tagging."""
+        if getattr(step, "duration_unknown", False) is True:
+            payload = "??s"
+        else:
+            duration_ms = step.duration_ms
+            if duration_ms is None:
+                payload = "<1ms"
+            elif duration_ms < 0:
+                payload = "<1ms"
+            elif duration_ms >= 100:
+                payload = f"{duration_ms / 1000:.2f}s"
+            elif duration_ms > 0:
+                payload = f"{duration_ms}ms"
+            else:
+                payload = "<1ms"
+
+        return f"[{payload}]"

     def _calculate_step_elapsed_time(self, step: Step) -> float:
         """Calculate elapsed time for a running step."""
         server_elapsed = self.stream_processor.server_elapsed_time
         server_start = self._step_server_start_times.get(step.step_id)

-        if isinstance(server_elapsed, int | float) and isinstance(
-            server_start, int | float
-        ):
+        if isinstance(server_elapsed, (int, float)) and isinstance(server_start, (int, float)):
            return max(0.0, float(server_elapsed) - float(server_start))

         try:
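The `_format_finished_badge` thresholds added above can be restated as a small standalone sketch; the function name is illustrative and only the mapping mirrors the hunk:

```python
def duration_badge(duration_ms: int | None, *, unknown: bool = False) -> str:
    """Illustrative restatement of the finished-step badge thresholds."""
    if unknown:
        return "[??s]"  # duration flagged as unknown
    if duration_ms is None or duration_ms <= 0:
        return "[<1ms]"  # missing, zero, or negative durations collapse to <1ms
    if duration_ms >= 100:
        return f"[{duration_ms / 1000:.2f}s]"
    return f"[{duration_ms}ms]"


assert duration_badge(None) == "[<1ms]"
assert duration_badge(42) == "[42ms]"
assert duration_badge(2500) == "[2.50s]"
assert duration_badge(0, unknown=True) == "[??s]"
```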
@@ -890,6 +908,10 @@ class RichStreamRenderer:
             return step.name
         return "thinking..." if step.kind == "agent" else f"{step.kind} step"

+    def _resolve_step_label(self, step: Step) -> str:
+        """Return the display label for a step with sensible fallbacks."""
+        return format_step_label(step)
+
     def _check_parallel_tools(self) -> dict[tuple[str | None, str | None], list]:
         """Check for parallel running tools."""
         running_by_ctx: dict[tuple[str | None, str | None], list] = {}
@@ -900,212 +922,103 @@ class RichStreamRenderer:
             running_by_ctx.setdefault(key, []).append(st)
         return running_by_ctx

-    def _render_steps_text(self) -> Text:
-        """Render the steps panel content."""
-        if not (self.steps.order or self.steps.children):
-            return Text("No steps yet", style="dim")
-
-        running_by_ctx = self._check_parallel_tools()
-        lines: list[str] = []
-
+    def _is_parallel_tool(
+        self,
+        step: Step,
+        running_by_ctx: dict[tuple[str | None, str | None], list],
+    ) -> bool:
+        """Return True if multiple tools are running in the same context."""
+        key = (step.task_id, step.context_id)
+        return len(running_by_ctx.get(key, [])) > 1
+
+    def _build_step_status_overrides(self) -> dict[str, str]:
+        """Return status text overrides for steps (running duration badges)."""
+        overrides: dict[str, str] = {}
         for sid in self.steps.order:
-            st = self.steps.by_id[sid]
-            status_br = self._format_step_status(st)
-            display_name = self._get_step_display_name(st)
-            tail = " ✓" if is_step_finished(st) else ""
-
-            # Add parallel indicator for running tools
-            if st.kind == "tool" and not is_step_finished(st):
-                key = (st.task_id, st.context_id)
-                if len(running_by_ctx.get(key, [])) > 1:
-                    status_br = status_br.replace("]", " 🔄]")
-
-            icon = self._get_step_icon(st.kind)
-            lines.append(f"{icon} {display_name} {status_br}{tail}")
-
-        return Text("\n".join(lines), style="dim")
-
-    def _should_skip_finished_panel(self, sid: str, status: str) -> bool:
-        """Check if a finished panel should be skipped."""
-        if status != "finished":
-            return False
-
-        if getattr(self.cfg, "append_finished_snapshots", False):
-            return True
-
-        return (
-            not self.state.finalizing_ui
-            and sid not in self.stream_processor.current_event_finished_panels
-        )
-
-    def _calculate_elapsed_time(self, meta: dict[str, Any]) -> str:
-        """Calculate elapsed time string for running tools."""
-        server_elapsed = self.stream_processor.server_elapsed_time
-        server_start = meta.get("server_started_at")
-
-        if isinstance(server_elapsed, int | float) and isinstance(
-            server_start, int | float
-        ):
-            elapsed = max(0.0, float(server_elapsed) - float(server_start))
-        else:
-            elapsed = max(0.0, monotonic() - (meta.get("started_at") or 0.0))
-
-        return self._format_elapsed_time(elapsed)
-
-    def _format_elapsed_time(self, elapsed: float) -> str:
-        """Format elapsed time as a readable string."""
-        if elapsed >= 1:
-            return f"{elapsed:.2f}s"
-        elif int(elapsed * 1000) > 0:
-            return f"{int(elapsed * 1000)}ms"
-        else:
-            return "<1ms"
-
-    def _calculate_finished_duration(self, meta: dict[str, Any]) -> str | None:
-        """Calculate duration string for finished tools."""
-        dur = meta.get("duration_seconds")
-        if isinstance(dur, int | float):
-            return self._format_elapsed_time(dur)
-
-        try:
-            server_now = self.stream_processor.server_elapsed_time
-            server_start = meta.get("server_started_at")
-            if isinstance(server_now, int | float) and isinstance(
-                server_start, int | float
-            ):
-                dur = max(0.0, float(server_now) - float(server_start))
-            elif meta.get("started_at") is not None:
-                dur = max(0.0, float(monotonic() - meta.get("started_at")))
-        except Exception:
-            dur = None
-
-        return self._format_elapsed_time(dur) if isinstance(dur, int | float) else None
-
-    def _process_running_tool_panel(
-        self, title: str, meta: dict[str, Any], body: str
-    ) -> tuple[str, str]:
-        """Process a running tool panel."""
-        elapsed_str = self._calculate_elapsed_time(meta)
-        adjusted_title = f"{title} · {elapsed_str}"
-        chip = f"⏱ {elapsed_str}"
-
-        if not body:
-            body = chip
-        else:
-            body = f"{body}\n\n{chip}"
-
-        return adjusted_title, body
-
-    def _process_finished_tool_panel(self, title: str, meta: dict[str, Any]) -> str:
-        """Process a finished tool panel."""
-        duration_str = self._calculate_finished_duration(meta)
-        return f"{title} · {duration_str}" if duration_str else title
-
-    def _create_tool_panel_for_session(
-        self, sid: str, meta: dict[str, Any]
-    ) -> AIPPanel | None:
-        """Create a single tool panel for the session."""
-        title = meta.get("title") or "Tool"
-        status = meta.get("status") or "running"
-        chunks = meta.get("chunks") or []
-        is_delegation = bool(meta.get("is_delegation"))
-
-        if self._should_skip_finished_panel(sid, status):
-            return None
-
-        body = "".join(chunks)
-        adjusted_title = title
-
-        if status == "running":
-            adjusted_title, body = self._process_running_tool_panel(title, meta, body)
-        elif status == "finished":
-            adjusted_title = self._process_finished_tool_panel(title, meta)
-
-        return create_tool_panel(
-            title=adjusted_title,
-            content=body or "Processing...",
-            status=status,
-            theme=self.cfg.theme,
-            is_delegation=is_delegation,
+            step = self.steps.by_id.get(sid)
+            if not step:
+                continue
+            try:
+                status_text = self._format_step_status(step)
+            except Exception:
+                status_text = ""
+            if status_text:
+                overrides[sid] = status_text
+        return overrides
+
+    def _resolve_steps_panel(self) -> AIPPanel:
+        """Return the shared steps panel renderable generated by layout helpers."""
+        window_arg = self._summary_window_size()
+        window_arg = window_arg if window_arg > 0 else None
+        panels = render_summary_panels(
+            self.state,
+            self.steps,
+            summary_window=window_arg,
+            include_query_panel=False,
+            include_final_panel=False,
+            step_status_overrides=self._build_step_status_overrides(),
         )
-
-    def _render_tool_panels(self) -> list[AIPPanel]:
-        """Render tool execution output panels."""
-        panels: list[AIPPanel] = []
-        for sid in self.tool_order:
-            meta = self.tool_panels.get(sid) or {}
-            panel = self._create_tool_panel_for_session(sid, meta)
-            if panel:
-                panels.append(panel)
-
-        return panels
-
-    def _format_dict_or_list_output(self, output_value: dict | list) -> str:
-        """Format dict/list output as pretty JSON."""
-        try:
-            return (
-                self.OUTPUT_PREFIX
-                + "```json\n"
-                + json.dumps(output_value, indent=2)
-                + "\n```\n"
+        steps_panel = next((panel for panel in panels if getattr(panel, "title", "").lower() == "steps"), None)
+        panel_cls = AIPPanel if isinstance(AIPPanel, type) else None
+        if steps_panel is not None and (panel_cls is None or isinstance(steps_panel, panel_cls)):
+            return steps_panel
+        return AIPPanel(_NO_STEPS_TEXT.copy(), title="Steps", border_style="blue")
+
+    def _prepare_steps_renderable(self, *, include_progress: bool) -> tuple[AIPPanel, Any]:
+        """Return the template panel and content renderable for steps."""
+        panel = self._resolve_steps_panel()
+        self._last_steps_panel_template = panel
+        base_renderable: Any = getattr(panel, "renderable", panel)
+
+        if include_progress and not self.state.finalizing_ui:
+            footer = build_progress_footer(
+                state=self.state,
+                steps=self.steps,
+                started_at=self._started_at,
+                server_elapsed_time=self.stream_processor.server_elapsed_time,
             )
-        except Exception:
-            return self.OUTPUT_PREFIX + str(output_value) + "\n"
-
-    def _clean_sub_agent_prefix(self, output: str, tool_name: str | None) -> str:
-        """Clean sub-agent name prefix from output."""
-        if not (tool_name and is_delegation_tool(tool_name)):
-            return output
-
-        sub = tool_name
-        if tool_name.startswith("delegate_to_"):
-            sub = tool_name.replace("delegate_to_", "")
-        elif tool_name.startswith("delegate_"):
-            sub = tool_name.replace("delegate_", "")
-        prefix = f"[{sub}]"
-        if output.startswith(prefix):
-            return output[len(prefix) :].lstrip()
-
-        return output
-
-    def _format_json_string_output(self, output: str) -> str:
-        """Format string that looks like JSON."""
+            if footer is not None:
+                if isinstance(base_renderable, Group):
+                    base_renderable = Group(*base_renderable.renderables, footer)
+                else:
+                    base_renderable = Group(base_renderable, footer)
+        return panel, base_renderable
+
+    def _build_steps_body(self, *, include_progress: bool) -> Any:
+        """Return the rendered steps body with optional progress footer."""
+        _, renderable = self._prepare_steps_renderable(include_progress=include_progress)
+        if isinstance(renderable, Group):
+            return renderable
+        return Group(renderable)
+
+    def _render_steps_text(self) -> Any:
+        """Return the rendered steps body used by transcript capture."""
+        return self._build_steps_body(include_progress=True)
+
+    def _summary_window_size(self) -> int:
+        """Return the active window size for step display."""
+        if self.state.finalizing_ui:
+            return 0
+        return int(self.cfg.summary_display_window or 0)
+
+    def _update_final_duration(self, duration: float | None, *, overwrite: bool = False) -> None:
+        """Store formatted duration for eventual final panels."""
+        if duration is None:
+            return
+
         try:
-            parsed = json.loads(output)
-            return (
-                self.OUTPUT_PREFIX
-                + "```json\n"
-                + json.dumps(parsed, indent=2)
-                + "\n```\n"
-            )
+            duration_val = max(0.0, float(duration))
         except Exception:
-            return self.OUTPUT_PREFIX + output + "\n"
+            return

-    def _format_string_output(self, output: str, tool_name: str | None) -> str:
-        """Format string output with optional prefix cleaning."""
-        s = output.strip()
-        s = self._clean_sub_agent_prefix(s, tool_name)
+        existing = self.state.final_duration_seconds

-        # If looks like JSON, pretty print it
-        if (s.startswith("{") and s.endswith("}")) or (
-            s.startswith("[") and s.endswith("]")
-        ):
-            return self._format_json_string_output(s)
+        if not overwrite and existing is not None:
+            return

-        return self.OUTPUT_PREFIX + s + "\n"
+        if overwrite and existing is not None:
+            duration_val = max(existing, duration_val)

-    def _format_other_output(self, output_value: Any) -> str:
-        """Format other types of output."""
-        try:
-            return self.OUTPUT_PREFIX + json.dumps(output_value, indent=2) + "\n"
-        except Exception:
-            return self.OUTPUT_PREFIX + str(output_value) + "\n"
-
-    def _format_output_block(self, output_value: Any, tool_name: str | None) -> str:
-        """Format an output value for panel display."""
-        if isinstance(output_value, dict | list):
-            return self._format_dict_or_list_output(output_value)
-        elif isinstance(output_value, str):
-            return self._format_string_output(output_value, tool_name)
-        else:
-            return self._format_other_output(output_value)
+        formatted = format_elapsed_time(duration_val)
+        self.state.mark_final_duration(duration_val, formatted=formatted)
+        self._apply_root_duration(duration_val)
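The overwrite rules in `_update_final_duration` above amount to: the first recorded duration wins unless an overwrite is requested, and an overwrite never shortens the reported duration. A small sketch under that reading; the helper name is illustrative, not part of the SDK:

```python
def merge_final_duration(existing: float | None, new: float, *, overwrite: bool = False) -> float:
    """Illustrative restatement of the _update_final_duration overwrite rules."""
    new = max(0.0, float(new))
    if existing is not None and not overwrite:
        return existing              # first recorded duration wins by default
    if existing is not None and overwrite:
        return max(existing, new)    # overwriting never shortens the reported duration
    return new


assert merge_final_duration(None, 1.2) == 1.2
assert merge_final_duration(1.2, 0.4) == 1.2
assert merge_final_duration(1.2, 3.0, overwrite=True) == 3.0
```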