glaip-sdk 0.0.20__py3-none-any.whl → 0.6.5b6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (157)
  1. glaip_sdk/__init__.py +5 -2
  2. glaip_sdk/_version.py +10 -3
  3. glaip_sdk/agents/__init__.py +27 -0
  4. glaip_sdk/agents/base.py +1126 -0
  5. glaip_sdk/branding.py +15 -6
  6. glaip_sdk/cli/account_store.py +540 -0
  7. glaip_sdk/cli/agent_config.py +2 -6
  8. glaip_sdk/cli/auth.py +265 -45
  9. glaip_sdk/cli/commands/__init__.py +2 -2
  10. glaip_sdk/cli/commands/accounts.py +746 -0
  11. glaip_sdk/cli/commands/agents.py +270 -173
  12. glaip_sdk/cli/commands/common_config.py +101 -0
  13. glaip_sdk/cli/commands/configure.py +735 -143
  14. glaip_sdk/cli/commands/mcps.py +265 -134
  15. glaip_sdk/cli/commands/models.py +13 -9
  16. glaip_sdk/cli/commands/tools.py +67 -88
  17. glaip_sdk/cli/commands/transcripts.py +755 -0
  18. glaip_sdk/cli/commands/update.py +3 -8
  19. glaip_sdk/cli/config.py +49 -7
  20. glaip_sdk/cli/constants.py +38 -0
  21. glaip_sdk/cli/context.py +8 -0
  22. glaip_sdk/cli/core/__init__.py +79 -0
  23. glaip_sdk/cli/core/context.py +124 -0
  24. glaip_sdk/cli/core/output.py +846 -0
  25. glaip_sdk/cli/core/prompting.py +649 -0
  26. glaip_sdk/cli/core/rendering.py +187 -0
  27. glaip_sdk/cli/display.py +45 -32
  28. glaip_sdk/cli/hints.py +57 -0
  29. glaip_sdk/cli/io.py +14 -17
  30. glaip_sdk/cli/main.py +232 -143
  31. glaip_sdk/cli/masking.py +21 -33
  32. glaip_sdk/cli/mcp_validators.py +5 -15
  33. glaip_sdk/cli/pager.py +12 -19
  34. glaip_sdk/cli/parsers/__init__.py +1 -3
  35. glaip_sdk/cli/parsers/json_input.py +11 -22
  36. glaip_sdk/cli/resolution.py +3 -9
  37. glaip_sdk/cli/rich_helpers.py +1 -3
  38. glaip_sdk/cli/slash/__init__.py +0 -9
  39. glaip_sdk/cli/slash/accounts_controller.py +500 -0
  40. glaip_sdk/cli/slash/accounts_shared.py +75 -0
  41. glaip_sdk/cli/slash/agent_session.py +61 -28
  42. glaip_sdk/cli/slash/prompt.py +13 -10
  43. glaip_sdk/cli/slash/remote_runs_controller.py +566 -0
  44. glaip_sdk/cli/slash/session.py +772 -222
  45. glaip_sdk/cli/slash/tui/__init__.py +9 -0
  46. glaip_sdk/cli/slash/tui/accounts.tcss +86 -0
  47. glaip_sdk/cli/slash/tui/accounts_app.py +872 -0
  48. glaip_sdk/cli/slash/tui/background_tasks.py +72 -0
  49. glaip_sdk/cli/slash/tui/loading.py +58 -0
  50. glaip_sdk/cli/slash/tui/remote_runs_app.py +628 -0
  51. glaip_sdk/cli/transcript/__init__.py +12 -52
  52. glaip_sdk/cli/transcript/cache.py +258 -60
  53. glaip_sdk/cli/transcript/capture.py +72 -21
  54. glaip_sdk/cli/transcript/history.py +815 -0
  55. glaip_sdk/cli/transcript/launcher.py +1 -3
  56. glaip_sdk/cli/transcript/viewer.py +77 -329
  57. glaip_sdk/cli/update_notifier.py +177 -24
  58. glaip_sdk/cli/utils.py +242 -1309
  59. glaip_sdk/cli/validators.py +16 -18
  60. glaip_sdk/client/__init__.py +2 -1
  61. glaip_sdk/client/_agent_payloads.py +53 -37
  62. glaip_sdk/client/agent_runs.py +147 -0
  63. glaip_sdk/client/agents.py +320 -92
  64. glaip_sdk/client/base.py +78 -35
  65. glaip_sdk/client/main.py +19 -10
  66. glaip_sdk/client/mcps.py +123 -15
  67. glaip_sdk/client/run_rendering.py +218 -78
  68. glaip_sdk/client/shared.py +21 -0
  69. glaip_sdk/client/tools.py +161 -34
  70. glaip_sdk/client/validators.py +20 -48
  71. glaip_sdk/config/constants.py +11 -0
  72. glaip_sdk/exceptions.py +1 -3
  73. glaip_sdk/icons.py +9 -3
  74. glaip_sdk/mcps/__init__.py +21 -0
  75. glaip_sdk/mcps/base.py +345 -0
  76. glaip_sdk/models/__init__.py +90 -0
  77. glaip_sdk/models/agent.py +47 -0
  78. glaip_sdk/models/agent_runs.py +116 -0
  79. glaip_sdk/models/common.py +42 -0
  80. glaip_sdk/models/mcp.py +33 -0
  81. glaip_sdk/models/tool.py +33 -0
  82. glaip_sdk/payload_schemas/__init__.py +1 -13
  83. glaip_sdk/payload_schemas/agent.py +1 -3
  84. glaip_sdk/registry/__init__.py +55 -0
  85. glaip_sdk/registry/agent.py +164 -0
  86. glaip_sdk/registry/base.py +139 -0
  87. glaip_sdk/registry/mcp.py +253 -0
  88. glaip_sdk/registry/tool.py +231 -0
  89. glaip_sdk/rich_components.py +58 -2
  90. glaip_sdk/runner/__init__.py +59 -0
  91. glaip_sdk/runner/base.py +84 -0
  92. glaip_sdk/runner/deps.py +115 -0
  93. glaip_sdk/runner/langgraph.py +597 -0
  94. glaip_sdk/runner/mcp_adapter/__init__.py +13 -0
  95. glaip_sdk/runner/mcp_adapter/base_mcp_adapter.py +43 -0
  96. glaip_sdk/runner/mcp_adapter/langchain_mcp_adapter.py +158 -0
  97. glaip_sdk/runner/mcp_adapter/mcp_config_builder.py +95 -0
  98. glaip_sdk/runner/tool_adapter/__init__.py +18 -0
  99. glaip_sdk/runner/tool_adapter/base_tool_adapter.py +44 -0
  100. glaip_sdk/runner/tool_adapter/langchain_tool_adapter.py +177 -0
  101. glaip_sdk/tools/__init__.py +22 -0
  102. glaip_sdk/tools/base.py +435 -0
  103. glaip_sdk/utils/__init__.py +58 -12
  104. glaip_sdk/utils/a2a/__init__.py +34 -0
  105. glaip_sdk/utils/a2a/event_processor.py +188 -0
  106. glaip_sdk/utils/agent_config.py +4 -14
  107. glaip_sdk/utils/bundler.py +267 -0
  108. glaip_sdk/utils/client.py +111 -0
  109. glaip_sdk/utils/client_utils.py +46 -28
  110. glaip_sdk/utils/datetime_helpers.py +58 -0
  111. glaip_sdk/utils/discovery.py +78 -0
  112. glaip_sdk/utils/display.py +25 -21
  113. glaip_sdk/utils/export.py +143 -0
  114. glaip_sdk/utils/general.py +1 -36
  115. glaip_sdk/utils/import_export.py +15 -16
  116. glaip_sdk/utils/import_resolver.py +492 -0
  117. glaip_sdk/utils/instructions.py +101 -0
  118. glaip_sdk/utils/rendering/__init__.py +115 -1
  119. glaip_sdk/utils/rendering/formatting.py +38 -23
  120. glaip_sdk/utils/rendering/layout/__init__.py +64 -0
  121. glaip_sdk/utils/rendering/{renderer → layout}/panels.py +10 -3
  122. glaip_sdk/utils/rendering/{renderer → layout}/progress.py +73 -12
  123. glaip_sdk/utils/rendering/layout/summary.py +74 -0
  124. glaip_sdk/utils/rendering/layout/transcript.py +606 -0
  125. glaip_sdk/utils/rendering/models.py +18 -8
  126. glaip_sdk/utils/rendering/renderer/__init__.py +9 -51
  127. glaip_sdk/utils/rendering/renderer/base.py +476 -882
  128. glaip_sdk/utils/rendering/renderer/config.py +4 -10
  129. glaip_sdk/utils/rendering/renderer/debug.py +30 -34
  130. glaip_sdk/utils/rendering/renderer/factory.py +138 -0
  131. glaip_sdk/utils/rendering/renderer/stream.py +13 -54
  132. glaip_sdk/utils/rendering/renderer/summary_window.py +79 -0
  133. glaip_sdk/utils/rendering/renderer/thinking.py +273 -0
  134. glaip_sdk/utils/rendering/renderer/toggle.py +182 -0
  135. glaip_sdk/utils/rendering/renderer/tool_panels.py +442 -0
  136. glaip_sdk/utils/rendering/renderer/transcript_mode.py +162 -0
  137. glaip_sdk/utils/rendering/state.py +204 -0
  138. glaip_sdk/utils/rendering/step_tree_state.py +100 -0
  139. glaip_sdk/utils/rendering/steps/__init__.py +34 -0
  140. glaip_sdk/utils/rendering/steps/event_processor.py +778 -0
  141. glaip_sdk/utils/rendering/steps/format.py +176 -0
  142. glaip_sdk/utils/rendering/{steps.py → steps/manager.py} +122 -26
  143. glaip_sdk/utils/rendering/timing.py +36 -0
  144. glaip_sdk/utils/rendering/viewer/__init__.py +21 -0
  145. glaip_sdk/utils/rendering/viewer/presenter.py +184 -0
  146. glaip_sdk/utils/resource_refs.py +29 -26
  147. glaip_sdk/utils/runtime_config.py +422 -0
  148. glaip_sdk/utils/serialization.py +32 -46
  149. glaip_sdk/utils/sync.py +142 -0
  150. glaip_sdk/utils/tool_detection.py +33 -0
  151. glaip_sdk/utils/validation.py +20 -28
  152. {glaip_sdk-0.0.20.dist-info → glaip_sdk-0.6.5b6.dist-info}/METADATA +49 -4
  153. glaip_sdk-0.6.5b6.dist-info/RECORD +159 -0
  154. {glaip_sdk-0.0.20.dist-info → glaip_sdk-0.6.5b6.dist-info}/WHEEL +1 -1
  155. glaip_sdk/models.py +0 -259
  156. glaip_sdk-0.0.20.dist-info/RECORD +0 -80
  157. {glaip_sdk-0.0.20.dist-info → glaip_sdk-0.6.5b6.dist-info}/entry_points.txt +0 -0
@@ -8,12 +8,10 @@ from __future__ import annotations
 
 import json
 import logging
-from dataclasses import dataclass, field
 from datetime import datetime, timezone
 from time import monotonic
 from typing import Any
 
-from rich.align import Align
 from rich.console import Console as RichConsole
 from rich.console import Group
 from rich.live import Live
@@ -25,78 +23,57 @@ from glaip_sdk.icons import ICON_AGENT, ICON_AGENT_STEP, ICON_DELEGATE, ICON_TOO
 from glaip_sdk.rich_components import AIPPanel
 from glaip_sdk.utils.rendering.formatting import (
     format_main_title,
-    get_spinner_char,
     is_step_finished,
+    normalise_display_label,
 )
 from glaip_sdk.utils.rendering.models import RunStats, Step
-from glaip_sdk.utils.rendering.renderer.config import RendererConfig
-from glaip_sdk.utils.rendering.renderer.debug import render_debug_event
-from glaip_sdk.utils.rendering.renderer.panels import (
-    create_final_panel,
-    create_main_panel,
-    create_tool_panel,
-)
-from glaip_sdk.utils.rendering.renderer.progress import (
+from glaip_sdk.utils.rendering.layout.panels import create_main_panel
+from glaip_sdk.utils.rendering.layout.progress import (
+    build_progress_footer,
     format_elapsed_time,
-    format_tool_title,
     format_working_indicator,
-    get_spinner,
+    get_spinner_char,
     is_delegation_tool,
 )
+from glaip_sdk.utils.rendering.layout.summary import render_summary_panels
+from glaip_sdk.utils.rendering.layout.transcript import (
+    DEFAULT_TRANSCRIPT_THEME,
+    TranscriptSnapshot,
+    build_final_panel,
+    build_transcript_snapshot,
+    build_transcript_view,
+    extract_query_from_meta,
+    format_final_panel_title,
+)
+from glaip_sdk.utils.rendering.renderer.config import RendererConfig
+from glaip_sdk.utils.rendering.renderer.debug import render_debug_event
 from glaip_sdk.utils.rendering.renderer.stream import StreamProcessor
-from glaip_sdk.utils.rendering.steps import StepManager
+from glaip_sdk.utils.rendering.renderer.thinking import ThinkingScopeController
+from glaip_sdk.utils.rendering.renderer.tool_panels import ToolPanelController
+from glaip_sdk.utils.rendering.renderer.transcript_mode import TranscriptModeMixin
+from glaip_sdk.utils.rendering.state import (
+    RendererState,
+    TranscriptBuffer,
+    coerce_received_at,
+    truncate_display,
+)
+from glaip_sdk.utils.rendering.steps import (
+    StepManager,
+    format_step_label,
+)
+from glaip_sdk.utils.rendering.timing import coerce_server_time
+
+_NO_STEPS_TEXT = Text("No steps yet", style="dim")
 
 # Configure logger
 logger = logging.getLogger("glaip_sdk.run_renderer")
 
 # Constants
-LESS_THAN_1MS = "[<1ms]"
-
-
-def _coerce_received_at(value: Any) -> datetime | None:
-    """Coerce a received_at value to an aware datetime if possible."""
-    if value is None:
-        return None
-
-    if isinstance(value, datetime):
-        return value if value.tzinfo else value.replace(tzinfo=timezone.utc)
-
-    if isinstance(value, str):
-        try:
-            normalised = value.replace("Z", "+00:00")
-            dt = datetime.fromisoformat(normalised)
-        except ValueError:
-            return None
-        return dt if dt.tzinfo else dt.replace(tzinfo=timezone.utc)
+RUNNING_STATUS_HINTS = {"running", "started", "pending", "working"}
+ARGS_VALUE_MAX_LEN = 160
 
-    return None
 
-
-@dataclass
-class RendererState:
-    """Internal state for the renderer."""
-
-    buffer: list[str] | None = None
-    final_text: str = ""
-    streaming_started_at: float | None = None
-    printed_final_output: bool = False
-    finalizing_ui: bool = False
-    final_duration_seconds: float | None = None
-    final_duration_text: str | None = None
-    events: list[dict[str, Any]] = field(default_factory=list)
-    meta: dict[str, Any] = field(default_factory=dict)
-    streaming_started_event_ts: datetime | None = None
-
-    def __post_init__(self) -> None:
-        """Initialize renderer state after dataclass creation.
-
-        Ensures buffer is initialized as an empty list if not provided.
-        """
-        if self.buffer is None:
-            self.buffer = []
-
-
-class RichStreamRenderer:
+class RichStreamRenderer(TranscriptModeMixin):
     """Live, modern terminal renderer for agent execution with rich visual output."""
 
     def __init__(
@@ -105,6 +82,8 @@ class RichStreamRenderer:
         *,
         cfg: RendererConfig | None = None,
         verbose: bool = False,
+        transcript_buffer: TranscriptBuffer | None = None,
+        callbacks: dict[str, Any] | None = None,
     ) -> None:
         """Initialize the renderer.
 
@@ -112,7 +91,10 @@
             console: Rich console instance
             cfg: Renderer configuration
             verbose: Whether to enable verbose mode
+            transcript_buffer: Optional transcript buffer for capturing output
+            callbacks: Optional dictionary of callback functions
         """
+        super().__init__()
         self.console = console or RichConsole()
         self.cfg = cfg or RendererConfig()
         self.verbose = verbose
@@ -120,19 +102,36 @@
         # Initialize components
         self.stream_processor = StreamProcessor()
         self.state = RendererState()
+        if transcript_buffer is not None:
+            self.state.buffer = transcript_buffer
+
+        self._callbacks = callbacks or {}
 
         # Initialize step manager and other state
-        self.steps = StepManager()
+        self.steps = StepManager(max_steps=self.cfg.summary_max_steps)
         # Live display instance (single source of truth)
         self.live: Live | None = None
+        self._step_spinners: dict[str, Spinner] = {}
+        self._last_steps_panel_template: Any | None = None
 
-        # Context and tool tracking
-        self.context_order: list[str] = []
-        self.context_parent: dict[str, str] = {}
-        self.tool_order: list[str] = []
-        self.context_panels: dict[str, list[str]] = {}
-        self.context_meta: dict[str, dict[str, Any]] = {}
-        self.tool_panels: dict[str, dict[str, Any]] = {}
+        # Tool tracking and thinking scopes
+        self._step_server_start_times: dict[str, float] = {}
+        self.tool_controller = ToolPanelController(
+            steps=self.steps,
+            stream_processor=self.stream_processor,
+            console=self.console,
+            cfg=self.cfg,
+            step_server_start_times=self._step_server_start_times,
+            output_prefix="**Output:**\n",
+        )
+        self.thinking_controller = ThinkingScopeController(
+            self.steps,
+            step_server_start_times=self._step_server_start_times,
+        )
+        self._root_agent_friendly: str | None = None
+        self._root_agent_step_id: str | None = None
+        self._root_query: str | None = None
+        self._root_query_attached: bool = False
 
         # Timing
         self._started_at: float | None = None
@@ -140,11 +139,12 @@
         # Header/text
         self.header_text: str = ""
         # Track per-step server start times for accurate elapsed labels
-        self._step_server_start_times: dict[str, float] = {}
-
         # Output formatting constants
         self.OUTPUT_PREFIX: str = "**Output:**\n"
 
+        self._final_transcript_snapshot: TranscriptSnapshot | None = None
+        self._final_transcript_renderables: tuple[list[Any], list[Any]] | None = None
+
     def on_start(self, meta: dict[str, Any]) -> None:
         """Handle renderer start event."""
         if self.cfg.live:
@@ -158,6 +158,20 @@
         except Exception:
             self.state.meta = dict(meta)
 
+        meta_payload = meta or {}
+        self.steps.set_root_agent(meta_payload.get("agent_id"))
+        self._root_agent_friendly = self._humanize_agent_slug(meta_payload.get("agent_name"))
+        self._root_query = truncate_display(
+            meta_payload.get("input_message")
+            or meta_payload.get("query")
+            or meta_payload.get("message")
+            or (meta_payload.get("meta") or {}).get("input_message")
+            or ""
+        )
+        if not self._root_query:
+            self._root_query = None
+        self._root_query_attached = False
+
         # Print compact header and user request (parity with old renderer)
         self._render_header(meta)
         self._render_user_query(meta)
@@ -207,24 +221,66 @@
         except Exception:
             logger.exception("Failed to print header fallback")
 
+    def _build_user_query_panel(self, query: str) -> AIPPanel:
+        """Create the panel used to display the user request."""
+        return AIPPanel(
+            Markdown(f"**Query:** {query}"),
+            title="User Request",
+            border_style="#d97706",
+            padding=(0, 1),
+        )
+
     def _render_user_query(self, meta: dict[str, Any]) -> None:
         """Render the user query panel."""
-        query = meta.get("input_message") or meta.get("query") or meta.get("message")
+        query = extract_query_from_meta(meta)
         if not query:
             return
+        self.console.print(self._build_user_query_panel(query))
+
+    def _render_summary_static_sections(self) -> None:
+        """Re-render header and user query when returning to summary mode."""
+        meta = getattr(self.state, "meta", None)
+        if meta:
+            self._render_header(meta)
+        elif self.header_text and not self._render_header_rule():
+            self._render_header_fallback()
 
-        self.console.print(
-            AIPPanel(
-                Markdown(f"**Query:** {query}"),
-                title="User Request",
-                border_style="#d97706",
-                padding=(0, 1),
-            )
-        )
+        query = extract_query_from_meta(meta) or self._root_query
+        if query:
+            self.console.print(self._build_user_query_panel(query))
+
+    def _render_summary_after_transcript_toggle(self) -> None:
+        """Render the summary panel after leaving transcript mode."""
+        if self.state.finalizing_ui:
+            self._render_final_summary_panels()
+        elif self.live:
+            self._refresh_live_panels()
+        else:
+            self._render_static_summary_panels()
+
+    def _render_final_summary_panels(self) -> None:
+        """Render a static summary and disable live mode for final output."""
+        self.cfg.live = False
+        self.live = None
+        self._render_static_summary_panels()
+
+    def _render_static_summary_panels(self) -> None:
+        """Render the steps and main panels in a static (non-live) layout."""
+        summary_window = self._summary_window_size()
+        window_arg = summary_window if summary_window > 0 else None
+        status_overrides = self._build_step_status_overrides()
+        for renderable in render_summary_panels(
+            self.state,
+            self.steps,
+            summary_window=window_arg,
+            include_query_panel=False,
+            step_status_overrides=status_overrides,
+        ):
+            self.console.print(renderable)
 
     def _ensure_streaming_started_baseline(self, timestamp: float) -> None:
         """Synchronize streaming start state across renderer components."""
-        self.state.streaming_started_at = timestamp
+        self.state.start_stream_timer(timestamp)
         self.stream_processor.streaming_started_at = timestamp
         self._started_at = timestamp
 
@@ -237,14 +293,16 @@
         self._sync_stream_start(ev, received_at)
 
         metadata = self.stream_processor.extract_event_metadata(ev)
-        self.stream_processor.update_timing(metadata["context_id"])
 
         self._maybe_render_debug(ev, received_at)
-        self._dispatch_event(ev, metadata)
+        try:
+            self._dispatch_event(ev, metadata)
+        finally:
+            self.stream_processor.update_timing(metadata.get("context_id"))
 
     def _resolve_received_timestamp(self, ev: dict[str, Any]) -> datetime:
         """Return the timestamp an event was received, normalising inputs."""
-        received_at = _coerce_received_at(ev.get("received_at"))
+        received_at = coerce_received_at(ev.get("received_at"))
         if received_at is None:
             received_at = datetime.now(timezone.utc)
 
@@ -253,9 +311,7 @@
 
         return received_at
 
-    def _sync_stream_start(
-        self, ev: dict[str, Any], received_at: datetime | None
-    ) -> None:
+    def _sync_stream_start(self, ev: dict[str, Any], received_at: datetime | None) -> None:
         """Ensure renderer and stream processor share a streaming baseline."""
         baseline = self.state.streaming_started_at
         if baseline is None:
@@ -275,12 +331,14 @@
         if not self.verbose:
             return
 
+        self._ensure_transcript_header()
         render_debug_event(
             ev,
             self.console,
             received_ts=received_at,
             baseline_ts=self.state.streaming_started_event_ts,
         )
+        self._print_transcript_hint()
 
     def _dispatch_event(self, ev: dict[str, Any], metadata: dict[str, Any]) -> None:
         """Route events to the appropriate renderer handlers."""
@@ -294,7 +352,7 @@
         elif kind == "final_response":
             self._handle_final_response_event(content, metadata)
         elif kind in {"agent_step", "agent_thinking_step"}:
-            self._handle_agent_step_event(ev)
+            self._handle_agent_step_event(ev, metadata)
         else:
             self._ensure_live()
 
@@ -307,61 +365,145 @@
     def _handle_content_event(self, content: str) -> None:
         """Handle content streaming events."""
         if content:
-            self.state.buffer.append(content)
+            self.state.append_transcript_text(content)
         self._ensure_live()
 
-    def _handle_final_response_event(
-        self, content: str, metadata: dict[str, Any]
-    ) -> None:
+    def _handle_final_response_event(self, content: str, metadata: dict[str, Any]) -> None:
         """Handle final response events."""
         if content:
-            self.state.buffer.append(content)
-            self.state.final_text = content
+            self.state.append_transcript_text(content)
+            self.state.set_final_output(content)
 
         meta_payload = metadata.get("metadata") or {}
-        self._update_final_duration(meta_payload.get("time"))
+        final_time = coerce_server_time(meta_payload.get("time"))
+        self._update_final_duration(final_time)
+        self.thinking_controller.close_active_scopes(final_time)
+        self._finish_running_steps()
+        self.tool_controller.finish_all_panels()
+        self._normalise_finished_icons()
 
-        self._ensure_live()
-        self._print_final_panel_if_needed()
+        self._ensure_live()
+        self._print_final_panel_if_needed()
+
+    def _normalise_finished_icons(self) -> None:
+        """Ensure finished steps release any running spinners."""
+        for step in self.steps.by_id.values():
+            if getattr(step, "status", None) != "running":
+                self._step_spinners.pop(step.step_id, None)
 
-    def _handle_agent_step_event(self, ev: dict[str, Any]) -> None:
+    def _handle_agent_step_event(self, ev: dict[str, Any], metadata: dict[str, Any]) -> None:
         """Handle agent step events."""
-        # Extract tool information
-        (
+        # Extract tool information using stream processor
+        tool_calls_result = self.stream_processor.parse_tool_calls(ev)
+        tool_name, tool_args, tool_out, tool_calls_info = tool_calls_result
+
+        payload = metadata.get("metadata") or {}
+
+        tracked_step: Step | None = None
+        try:
+            tracked_step = self.steps.apply_event(ev)
+        except ValueError:
+            logger.debug("Malformed step event skipped", exc_info=True)
+        else:
+            self._record_step_server_start(tracked_step, payload)
+            self.thinking_controller.update_timeline(
+                tracked_step,
+                payload,
+                enabled=self.cfg.render_thinking,
+            )
+            self._maybe_override_root_agent_label(tracked_step, payload)
+            self._maybe_attach_root_query(tracked_step)
+
+        # Track tools and sub-agents for transcript/debug context
+        self.stream_processor.track_tools_and_agents(tool_name, tool_calls_info, is_delegation_tool)
+
+        # Handle tool execution
+        self.tool_controller.handle_agent_step(
+            ev,
             tool_name,
             tool_args,
             tool_out,
             tool_calls_info,
-        ) = self.stream_processor.parse_tool_calls(ev)
-
-        # Track tools and sub-agents
-        self.stream_processor.track_tools_and_agents(
-            tool_name, tool_calls_info, is_delegation_tool
+            tracked_step=tracked_step,
         )
 
-        # Handle tool execution
-        self._handle_agent_step(ev, tool_name, tool_args, tool_out, tool_calls_info)
-
         # Update live display
         self._ensure_live()
 
+    def _maybe_attach_root_query(self, step: Step | None) -> None:
+        """Attach the user query to the root agent step for display."""
+        if not step or self._root_query_attached or not self._root_query or step.kind != "agent" or step.parent_id:
+            return
+
+        args = dict(getattr(step, "args", {}) or {})
+        args.setdefault("query", self._root_query)
+        step.args = args
+        self._root_query_attached = True
+
+    def _record_step_server_start(self, step: Step | None, payload: dict[str, Any]) -> None:
+        """Store server-provided start times for elapsed calculations."""
+        if not step:
+            return
+        server_time = payload.get("time")
+        if not isinstance(server_time, (int, float)):
+            return
+        self._step_server_start_times.setdefault(step.step_id, float(server_time))
+
+    def _maybe_override_root_agent_label(self, step: Step | None, payload: dict[str, Any]) -> None:
+        """Ensure the root agent row uses the human-friendly name and shows the ID."""
+        if not step or step.kind != "agent" or step.parent_id:
+            return
+        friendly = self._root_agent_friendly or self._humanize_agent_slug((payload or {}).get("agent_name"))
+        if not friendly:
+            return
+        agent_identifier = step.name or step.step_id
+        if not agent_identifier:
+            return
+        step.display_label = normalise_display_label(f"{ICON_AGENT} {friendly} ({agent_identifier})")
+        if not self._root_agent_step_id:
+            self._root_agent_step_id = step.step_id
+
+    # Thinking scope management is handled by ThinkingScopeController.
+
+    def _apply_root_duration(self, duration_seconds: float | None) -> None:
+        """Propagate the final run duration to the root agent step."""
+        if duration_seconds is None or not self._root_agent_step_id:
+            return
+        root_step = self.steps.by_id.get(self._root_agent_step_id)
+        if not root_step:
+            return
+        try:
+            duration_ms = max(0, int(round(float(duration_seconds) * 1000)))
+        except Exception:
+            return
+        root_step.duration_ms = duration_ms
+        root_step.duration_source = root_step.duration_source or "run"
+        root_step.status = "finished"
+
+    @staticmethod
+    def _humanize_agent_slug(value: Any) -> str | None:
+        """Convert a slugified agent name into Title Case."""
+        if not isinstance(value, str):
+            return None
+        cleaned = value.replace("_", " ").replace("-", " ").strip()
+        if not cleaned:
+            return None
+        parts = [part for part in cleaned.split() if part]
+        return " ".join(part[:1].upper() + part[1:] for part in parts)
+
     def _finish_running_steps(self) -> None:
         """Mark any running steps as finished to avoid lingering spinners."""
         for st in self.steps.by_id.values():
             if not is_step_finished(st):
-                st.finish(None)
+                self._mark_incomplete_step(st)
 
-    def _finish_tool_panels(self) -> None:
-        """Mark unfinished tool panels as finished."""
-        try:
-            items = list(self.tool_panels.items())
-        except Exception:  # pragma: no cover - defensive guard
-            logger.exception("Failed to iterate tool panels during cleanup")
-            return
-
-        for _sid, meta in items:
-            if meta.get("status") != "finished":
-                meta["status"] = "finished"
+    def _mark_incomplete_step(self, step: Step) -> None:
+        """Mark a lingering step as incomplete/warning with unknown duration."""
+        step.status = "finished"
+        step.duration_unknown = True
+        if step.duration_ms is None:
+            step.duration_ms = 0
+        step.duration_source = step.duration_source or "unknown"
 
     def _stop_live_display(self) -> None:
@@ -372,53 +514,121 @@
         if self.state.printed_final_output:
             return
 
-        body = (self.state.final_text or "".join(self.state.buffer) or "").strip()
+        body = (self.state.final_text or self.state.buffer.render() or "").strip()
         if not body:
             return
 
+        if getattr(self, "_transcript_mode_enabled", False):
+            return
+
         if self.verbose:
-            final_panel = create_final_panel(
-                body,
+            panel = build_final_panel(
+                self.state,
                 title=self._final_panel_title(),
-                theme=self.cfg.theme,
             )
-            self.console.print(final_panel)
+            if panel is None:
+                return
+            self.console.print(panel)
         self.state.printed_final_output = True
 
+    def finalize(self) -> tuple[list[Any], list[Any]]:
+        """Compose the final transcript renderables."""
+        return self._compose_final_transcript()
+
+    def _compose_final_transcript(self) -> tuple[list[Any], list[Any]]:
+        """Build the transcript snapshot used for final summaries."""
+        summary_window = self._summary_window_size()
+        summary_window = summary_window if summary_window > 0 else None
+        snapshot = build_transcript_snapshot(
+            self.state,
+            self.steps,
+            query_text=extract_query_from_meta(self.state.meta),
+            meta=self.state.meta,
+            summary_window=summary_window,
+            step_status_overrides=self._build_step_status_overrides(),
+        )
+        header, body = build_transcript_view(snapshot)
+        self._final_transcript_snapshot = snapshot
+        self._final_transcript_renderables = (header, body)
+        return header, body
+
+    def _render_final_summary(self, header: list[Any], body: list[Any]) -> None:
+        """Print the composed transcript summary for non-live renders."""
+        renderables = list(header) + list(body)
+        for renderable in renderables:
+            try:
+                self.console.print(renderable)
+                self.console.print()
+            except Exception:
+                pass
+
     def on_complete(self, stats: RunStats) -> None:
         """Handle completion event."""
         self.state.finalizing_ui = True
 
-        if isinstance(stats, RunStats):
+        self._handle_stats_duration(stats)
+        self.thinking_controller.close_active_scopes(self.state.final_duration_seconds)
+        self._cleanup_ui_elements()
+        self._finalize_display()
+        self._print_completion_message()
+
+    def _handle_stats_duration(self, stats: RunStats) -> None:
+        """Handle stats processing and duration calculation."""
+        if not isinstance(stats, RunStats):
+            return
+
+        duration = None
+        try:
+            if stats.finished_at is not None and stats.started_at is not None:
+                duration = max(0.0, float(stats.finished_at) - float(stats.started_at))
+        except Exception:
             duration = None
-            try:
-                if stats.finished_at is not None and stats.started_at is not None:
-                    duration = max(
-                        0.0, float(stats.finished_at) - float(stats.started_at)
-                    )
-            except Exception:
-                duration = None
 
-            if duration is not None:
-                self._update_final_duration(duration, overwrite=True)
+        if duration is not None:
+            self._update_final_duration(duration, overwrite=True)
 
+    def _cleanup_ui_elements(self) -> None:
+        """Clean up running UI elements."""
         # Mark any running steps as finished to avoid lingering spinners
         self._finish_running_steps()
 
         # Mark unfinished tool panels as finished
-        self._finish_tool_panels()
+        self.tool_controller.finish_all_panels()
 
+    def _finalize_display(self) -> None:
+        """Finalize live display and render final output."""
         # Final refresh
         self._ensure_live()
 
+        header, body = self.finalize()
+
         # Stop live display
         self._stop_live_display()
 
         # Render final output based on configuration
-        self._print_final_panel_if_needed()
+        if self.cfg.live:
+            self._print_final_panel_if_needed()
+        else:
+            self._render_final_summary(header, body)
+
+    def _print_completion_message(self) -> None:
+        """Print completion message based on current mode."""
+        if self._transcript_mode_enabled:
+            try:
+                self.console.print(
+                    "[dim]Run finished. Press Ctrl+T to return to the summary view or stay here to inspect events. "
+                    "Use the post-run viewer for export.[/dim]"
+                )
+            except Exception:
+                pass
+        else:
+            # No transcript toggle in summary mode; nothing to print here.
+            return
 
     def _ensure_live(self) -> None:
         """Ensure live display is updated."""
+        if getattr(self, "_transcript_mode_enabled", False):
+            return
         if not self._ensure_live_stack():
             return
 
@@ -426,6 +636,12 @@
 
         if self.live:
             self._refresh_live_panels()
+        if (
+            not self._transcript_mode_enabled
+            and not self.state.finalizing_ui
+            and not self._summary_hint_printed_once
+        ):
+            self._print_summary_hint(force=True)
 
     def _ensure_live_stack(self) -> bool:
         """Guarantee the console exposes the internal live stack Rich expects."""
@@ -465,15 +681,19 @@
         if not self.live:
             return
 
-        main_panel = self._render_main_panel()
-        steps_renderable = self._render_steps_text()
+        steps_body = self._render_steps_text()
+        template_panel = getattr(self, "_last_steps_panel_template", None)
+        if template_panel is None:
+            template_panel = self._resolve_steps_panel()
         steps_panel = AIPPanel(
-            steps_renderable,
-            title="Steps",
-            border_style="blue",
+            steps_body,
+            title=getattr(template_panel, "title", "Steps"),
+            border_style=getattr(template_panel, "border_style", "blue"),
+            padding=getattr(template_panel, "padding", (0, 1)),
         )
-        tool_panels = self._render_tool_panels()
-        panels = self._build_live_panels(main_panel, steps_panel, tool_panels)
+
+        main_panel = self._render_main_panel()
+        panels = self._build_live_panels(main_panel, steps_panel)
 
         self.live.update(Group(*panels))
 
@@ -481,40 +701,28 @@
         self,
         main_panel: Any,
         steps_panel: Any,
-        tool_panels: list[Any],
     ) -> list[Any]:
         """Assemble the panel order for the live display."""
         if self.verbose:
-            return [main_panel, steps_panel, *tool_panels]
+            return [main_panel, steps_panel]
 
-        panels: list[Any] = [steps_panel]
-        if tool_panels:
-            panels.extend(tool_panels)
-        panels.append(main_panel)
-        return panels
+        return [steps_panel, main_panel]
 
     def _render_main_panel(self) -> Any:
         """Render the main content panel."""
-        body = "".join(self.state.buffer).strip()
+        body = self.state.buffer.render().strip()
+        theme = DEFAULT_TRANSCRIPT_THEME
         if not self.verbose:
-            final_content = (self.state.final_text or "").strip()
-            if final_content:
-                title = self._final_panel_title()
-                return create_final_panel(
-                    final_content,
-                    title=title,
-                    theme=self.cfg.theme,
-                )
+            panel = build_final_panel(self.state, theme=theme)
+            if panel is not None:
+                return panel
         # Dynamic title with spinner + elapsed/hints
         title = self._format_enhanced_main_title()
-        return create_main_panel(body, title, self.cfg.theme)
+        return create_main_panel(body, title, theme)
 
     def _final_panel_title(self) -> str:
         """Compose title for the final result panel including duration."""
-        title = "Final Result"
-        if self.state.final_duration_text:
-            title = f"{title} · {self.state.final_duration_text}"
-        return title
+        return format_final_panel_title(self.state)
 
     def apply_verbosity(self, verbose: bool) -> None:
         """Update verbose behaviour at runtime."""
@@ -522,8 +730,6 @@
             return
 
         self.verbose = verbose
-        self.cfg.style = "debug" if verbose else "pretty"
-
         desired_live = not verbose
         if desired_live != self.cfg.live:
             self.cfg.live = desired_live
@@ -535,459 +741,16 @@
         if self.cfg.live:
             self._ensure_live()
 
-    # ------------------------------------------------------------------
-    # Transcript helpers
-    # ------------------------------------------------------------------
-    def _capture_event(
-        self, ev: dict[str, Any], received_at: datetime | None = None
-    ) -> None:
-        """Capture a deep copy of SSE events for transcript replay."""
-        try:
-            captured = json.loads(json.dumps(ev))
-        except Exception:
-            captured = ev
-
-        if received_at is not None:
-            try:
-                captured["received_at"] = received_at.isoformat()
-            except Exception:
-                try:
-                    captured["received_at"] = str(received_at)
-                except Exception:
-                    captured["received_at"] = repr(received_at)
-
-        self.state.events.append(captured)
+    # Transcript helper implementations live in TranscriptModeMixin.
 
     def get_aggregated_output(self) -> str:
         """Return the concatenated assistant output collected so far."""
-        return ("".join(self.state.buffer or [])).strip()
+        return self.state.buffer.render().strip()
 
     def get_transcript_events(self) -> list[dict[str, Any]]:
         """Return captured SSE events."""
         return list(self.state.events)
 
-    def _maybe_insert_thinking_gap(
-        self, task_id: str | None, context_id: str | None
-    ) -> None:
-        """Insert thinking gap if needed."""
-        # Implementation would track thinking states
-        pass
-
-    def _ensure_tool_panel(
-        self, name: str, args: Any, task_id: str, context_id: str
-    ) -> str:
-        """Ensure a tool panel exists and return its ID."""
-        formatted_title = format_tool_title(name)
-        is_delegation = is_delegation_tool(name)
-        tool_sid = f"tool_{name}_{task_id}_{context_id}"
-
-        if tool_sid not in self.tool_panels:
-            self.tool_panels[tool_sid] = {
-                "title": formatted_title,
-                "status": "running",
-                "started_at": monotonic(),
-                "server_started_at": self.stream_processor.server_elapsed_time,
-                "chunks": [],
-                "args": args or {},
-                "output": None,
-                "is_delegation": is_delegation,
-            }
-            # Add Args section once
-            if args:
-                try:
-                    args_content = (
-                        "**Args:**\n```json\n"
-                        + json.dumps(args, indent=2)
-                        + "\n```\n\n"
-                    )
-                except Exception:
-                    args_content = f"**Args:**\n{args}\n\n"
-                self.tool_panels[tool_sid]["chunks"].append(args_content)
-            self.tool_order.append(tool_sid)
-
-        return tool_sid
-
-    def _start_tool_step(
-        self,
-        task_id: str,
-        context_id: str,
-        tool_name: str,
-        tool_args: Any,
-        _tool_sid: str,
-    ) -> Step | None:
-        """Start or get a step for a tool."""
-        if is_delegation_tool(tool_name):
-            st = self.steps.start_or_get(
-                task_id=task_id,
-                context_id=context_id,
-                kind="delegate",
-                name=tool_name,
-                args=tool_args,
-            )
-        else:
-            st = self.steps.start_or_get(
-                task_id=task_id,
-                context_id=context_id,
-                kind="tool",
-                name=tool_name,
-                args=tool_args,
-            )
-
-        # Record server start time for this step if available
-        if st and self.stream_processor.server_elapsed_time is not None:
-            self._step_server_start_times[st.step_id] = (
-                self.stream_processor.server_elapsed_time
-            )
-
-        return st
-
-    def _process_additional_tool_calls(
-        self,
-        tool_calls_info: list[tuple[str, Any, Any]],
-        tool_name: str,
-        task_id: str,
-        context_id: str,
-    ) -> None:
-        """Process additional tool calls to avoid duplicates."""
-        for call_name, call_args, _ in tool_calls_info or []:
-            if call_name and call_name != tool_name:
-                self._process_single_tool_call(
-                    call_name, call_args, task_id, context_id
-                )
-
-    def _process_single_tool_call(
-        self, call_name: str, call_args: Any, task_id: str, context_id: str
-    ) -> None:
-        """Process a single additional tool call."""
-        self._ensure_tool_panel(call_name, call_args, task_id, context_id)
-
-        st2 = self._create_step_for_tool_call(call_name, call_args, task_id, context_id)
-
-        if self.stream_processor.server_elapsed_time is not None and st2:
-            self._step_server_start_times[st2.step_id] = (
-                self.stream_processor.server_elapsed_time
-            )
-
-    def _create_step_for_tool_call(
-        self, call_name: str, call_args: Any, task_id: str, context_id: str
-    ) -> Any:
-        """Create appropriate step for tool call."""
-        if is_delegation_tool(call_name):
-            return self.steps.start_or_get(
-                task_id=task_id,
-                context_id=context_id,
-                kind="delegate",
-                name=call_name,
-                args=call_args,
-            )
-        else:
-            return self.steps.start_or_get(
-                task_id=task_id,
-                context_id=context_id,
-                kind="tool",
-                name=call_name,
-                args=call_args,
-            )
-
-    def _detect_tool_completion(
-        self, metadata: dict, content: str
-    ) -> tuple[bool, str | None, Any]:
-        """Detect if a tool has completed and return completion info."""
-        tool_info = metadata.get("tool_info", {}) if isinstance(metadata, dict) else {}
-
-        if tool_info.get("status") == "finished" and tool_info.get("name"):
-            return True, tool_info.get("name"), tool_info.get("output")
-        elif content and isinstance(content, str) and content.startswith("Completed "):
-            # content like "Completed google_serper"
-            tname = content.replace("Completed ", "").strip()
-            if tname:
-                output = (
-                    tool_info.get("output") if tool_info.get("name") == tname else None
-                )
-                return True, tname, output
-        elif metadata.get("status") == "finished" and tool_info.get("name"):
-            return True, tool_info.get("name"), tool_info.get("output")
-
-        return False, None, None
-
-    def _get_tool_session_id(
-        self, finished_tool_name: str, task_id: str, context_id: str
-    ) -> str:
-        """Generate tool session ID."""
-        return f"tool_{finished_tool_name}_{task_id}_{context_id}"
-
-    def _calculate_tool_duration(self, meta: dict[str, Any]) -> float | None:
-        """Calculate tool duration from metadata."""
-        server_now = self.stream_processor.server_elapsed_time
-        server_start = meta.get("server_started_at")
-        dur = None
-
-        try:
-            if isinstance(server_now, (int, float)) and server_start is not None:
-                dur = max(0.0, float(server_now) - float(server_start))
-            else:
-                started_at = meta.get("started_at")
-                if started_at is not None:
-                    started_at_float = float(started_at)
-                    dur = max(0.0, float(monotonic()) - started_at_float)
-        except (TypeError, ValueError):
-            logger.exception("Failed to calculate tool duration")
-            return None
-
-        return dur
-
-    def _update_tool_metadata(self, meta: dict[str, Any], dur: float | None) -> None:
-        """Update tool metadata with duration information."""
-        if dur is not None:
-            meta["duration_seconds"] = dur
-        meta["server_finished_at"] = (
-            self.stream_processor.server_elapsed_time
-            if isinstance(self.stream_processor.server_elapsed_time, int | float)
-            else None
-        )
-        meta["finished_at"] = monotonic()
-
-    def _add_tool_output_to_panel(
-        self, meta: dict[str, Any], finished_tool_output: Any, finished_tool_name: str
-    ) -> None:
-        """Add tool output to panel metadata."""
-        if finished_tool_output is not None:
-            meta["chunks"].append(
-                self._format_output_block(finished_tool_output, finished_tool_name)
-            )
-            meta["output"] = finished_tool_output
-
-    def _mark_panel_as_finished(self, meta: dict[str, Any], tool_sid: str) -> None:
-        """Mark panel as finished and ensure visibility."""
-        if meta.get("status") != "finished":
-            meta["status"] = "finished"
-
-        dur = self._calculate_tool_duration(meta)
-        self._update_tool_metadata(meta, dur)
-
-        # Ensure this finished panel is visible in this frame
-        self.stream_processor.current_event_finished_panels.add(tool_sid)
-
-    def _finish_tool_panel(
-        self,
-        finished_tool_name: str,
-        finished_tool_output: Any,
-        task_id: str,
-        context_id: str,
-    ) -> None:
-        """Finish a tool panel and update its status."""
-        tool_sid = self._get_tool_session_id(finished_tool_name, task_id, context_id)
-        if tool_sid not in self.tool_panels:
-            return
-
-        meta = self.tool_panels[tool_sid]
-        self._mark_panel_as_finished(meta, tool_sid)
-        self._add_tool_output_to_panel(meta, finished_tool_output, finished_tool_name)
-
-    def _get_step_duration(
-        self, finished_tool_name: str, task_id: str, context_id: str
-    ) -> float | None:
-        """Get step duration from tool panels."""
-        tool_sid = f"tool_{finished_tool_name}_{task_id}_{context_id}"
-        return self.tool_panels.get(tool_sid, {}).get("duration_seconds")
-
-    def _finish_delegation_step(
-        self,
-        finished_tool_name: str,
-        finished_tool_output: Any,
-        task_id: str,
-        context_id: str,
-        step_duration: float | None,
-    ) -> None:
-        """Finish a delegation step."""
-        self.steps.finish(
-            task_id=task_id,
-            context_id=context_id,
-            kind="delegate",
-            name=finished_tool_name,
-            output=finished_tool_output,
-            duration_raw=step_duration,
-        )
-
-    def _finish_tool_step_type(
-        self,
-        finished_tool_name: str,
-        finished_tool_output: Any,
-        task_id: str,
-        context_id: str,
-        step_duration: float | None,
-    ) -> None:
-        """Finish a regular tool step."""
-        self.steps.finish(
-            task_id=task_id,
-            context_id=context_id,
-            kind="tool",
-            name=finished_tool_name,
-            output=finished_tool_output,
-            duration_raw=step_duration,
-        )
-
-    def _finish_tool_step(
-        self,
-        finished_tool_name: str,
-        finished_tool_output: Any,
-        task_id: str,
-        context_id: str,
-    ) -> None:
-        """Finish the corresponding step for a completed tool."""
-        step_duration = self._get_step_duration(finished_tool_name, task_id, context_id)
-
-        if is_delegation_tool(finished_tool_name):
-            self._finish_delegation_step(
-                finished_tool_name,
-                finished_tool_output,
-                task_id,
-                context_id,
-                step_duration,
-            )
-        else:
-            self._finish_tool_step_type(
-                finished_tool_name,
-                finished_tool_output,
-                task_id,
-                context_id,
-                step_duration,
-            )
-
-    def _should_create_snapshot(self, tool_sid: str) -> bool:
-        """Check if a snapshot should be created."""
-        return self.cfg.append_finished_snapshots and not self.tool_panels.get(
-            tool_sid, {}
-        ).get("snapshot_printed")
-
-    def _get_snapshot_title(self, meta: dict[str, Any], finished_tool_name: str) -> str:
-        """Get the title for the snapshot."""
-        adjusted_title = meta.get("title") or finished_tool_name
-
-        # Add elapsed time to title
-        dur = meta.get("duration_seconds")
-        if isinstance(dur, int | float):
-            elapsed_str = self._format_snapshot_duration(dur)
-            adjusted_title = f"{adjusted_title} · {elapsed_str}"
-
-        return adjusted_title
-
-    def _format_snapshot_duration(self, dur: int | float) -> str:
-        """Format duration for snapshot title."""
-        try:
-            # Handle invalid types
-            if not isinstance(dur, (int, float)):
-                return "<1ms"
-
-            if dur >= 1:
-                return f"{dur:.2f}s"
-            elif int(dur * 1000) > 0:
-                return f"{int(dur * 1000)}ms"
-            else:
-                return "<1ms"
-        except (TypeError, ValueError, OverflowError):
-            return "<1ms"
-
-    def _clamp_snapshot_body(self, body_text: str) -> str:
-        """Clamp snapshot body to configured limits."""
-        max_lines = int(self.cfg.snapshot_max_lines or 0)
-        lines = body_text.splitlines()
-        if max_lines > 0 and len(lines) > max_lines:
-            lines = lines[:max_lines] + ["… (truncated)"]
-            body_text = "\n".join(lines)
-
-        max_chars = int(self.cfg.snapshot_max_chars or 0)
-        if max_chars > 0 and len(body_text) > max_chars:
-            suffix = "\n… (truncated)"
-            body_text = body_text[: max_chars - len(suffix)] + suffix
-
-        return body_text
-
-    def _create_snapshot_panel(
-        self, adjusted_title: str, body_text: str, finished_tool_name: str
-    ) -> Any:
-        """Create the snapshot panel."""
-        return create_tool_panel(
-            title=adjusted_title,
-            content=body_text or "(no output)",
-            status="finished",
-            theme=self.cfg.theme,
-            is_delegation=is_delegation_tool(finished_tool_name),
-        )
-
-    def _print_and_mark_snapshot(self, tool_sid: str, snapshot_panel: Any) -> None:
-        """Print snapshot and mark as printed."""
-        self.console.print(snapshot_panel)
-        self.tool_panels[tool_sid]["snapshot_printed"] = True
-
-    def _create_tool_snapshot(
-        self, finished_tool_name: str, task_id: str, context_id: str
-    ) -> None:
-        """Create and print a snapshot for a finished tool."""
-        tool_sid = f"tool_{finished_tool_name}_{task_id}_{context_id}"
-
-        if not self._should_create_snapshot(tool_sid):
-            return
-
-        meta = self.tool_panels[tool_sid]
-        adjusted_title = self._get_snapshot_title(meta, finished_tool_name)
-
-        # Compose body from chunks and clamp
-        body_text = "".join(meta.get("chunks") or [])
-        body_text = self._clamp_snapshot_body(body_text)
-
-        snapshot_panel = self._create_snapshot_panel(
-            adjusted_title, body_text, finished_tool_name
-        )
-
-        self._print_and_mark_snapshot(tool_sid, snapshot_panel)
-
-    def _handle_agent_step(
-        self,
-        event: dict[str, Any],
-        tool_name: str | None,
-        tool_args: Any,
-        _tool_out: Any,
-        tool_calls_info: list[tuple[str, Any, Any]],
-    ) -> None:
-        """Handle agent step event."""
-        metadata = event.get("metadata", {})
-        task_id = event.get("task_id")
-        context_id = event.get("context_id")
-        content = event.get("content", "")
-
-        # Create steps and panels for the primary tool
-        if tool_name:
-            tool_sid = self._ensure_tool_panel(
-                tool_name, tool_args, task_id, context_id
-            )
-            self._start_tool_step(task_id, context_id, tool_name, tool_args, tool_sid)
-
-        # Handle additional tool calls
-        self._process_additional_tool_calls(
-            tool_calls_info, tool_name, task_id, context_id
-        )
-
-        # Check for tool completion
-        (
-            is_tool_finished,
-            finished_tool_name,
-            finished_tool_output,
-        ) = self._detect_tool_completion(metadata, content)
-
-        if is_tool_finished and finished_tool_name:
-            self._finish_tool_panel(
-                finished_tool_name, finished_tool_output, task_id, context_id
-            )
-            self._finish_tool_step(
-                finished_tool_name, finished_tool_output, task_id, context_id
-            )
-            self._create_tool_snapshot(finished_tool_name, task_id, context_id)
-
-    def _spinner(self) -> str:
-        """Return spinner character."""
-        return get_spinner()
-
     def _format_working_indicator(self, started_at: float | None) -> str:
         """Format working indicator."""
         return format_working_indicator(
@@ -1030,9 +793,7 @@
 
     def _get_analysis_progress_info(self) -> dict[str, Any]:
         total_steps = len(self.steps.order)
-        completed_steps = sum(
-            1 for sid in self.steps.order if is_step_finished(self.steps.by_id[sid])
-        )
+        completed_steps = sum(1 for sid in self.steps.order if is_step_finished(self.steps.by_id[sid]))
         current_step = None
         for sid in self.steps.order:
             if not is_step_finished(self.steps.by_id[sid]):
@@ -1040,13 +801,11 @@
                 break
         # Prefer server elapsed time when available
         elapsed = 0.0
-        if isinstance(self.stream_processor.server_elapsed_time, int | float):
+        if isinstance(self.stream_processor.server_elapsed_time, (int, float)):
            elapsed = float(self.stream_processor.server_elapsed_time)
        elif self._started_at is not None:
            elapsed = monotonic() - self._started_at
-        progress_percent = (
-            int((completed_steps / total_steps) * 100) if total_steps else 0
-        )
+        progress_percent = int((completed_steps / total_steps) * 100) if total_steps else 0
        return {
            "total_steps": total_steps,
            "completed_steps": completed_steps,
@@ -1100,29 +859,42 @@
     def _format_step_status(self, step: Step) -> str:
         """Format step status with elapsed time or duration."""
         if is_step_finished(step):
-            if step.duration_ms is None:
-                return LESS_THAN_1MS
-            elif step.duration_ms >= 1000:
-                return f"[{step.duration_ms / 1000:.2f}s]"
-            elif step.duration_ms > 0:
-                return f"[{step.duration_ms}ms]"
-            return LESS_THAN_1MS
+            return self._format_finished_badge(step)
         else:
             # Calculate elapsed time for running steps
             elapsed = self._calculate_step_elapsed_time(step)
-            if elapsed >= 1:
+            if elapsed >= 0.1:
                 return f"[{elapsed:.2f}s]"
-            ms = int(elapsed * 1000)
-            return f"[{ms}ms]" if ms > 0 else LESS_THAN_1MS
+            ms = int(round(elapsed * 1000))
+            if ms <= 0:
+                return ""
+            return f"[{ms}ms]"
+
+    def _format_finished_badge(self, step: Step) -> str:
+        """Compose duration badge for finished steps including source tagging."""
+        if getattr(step, "duration_unknown", False) is True:
+            payload = "??s"
+        else:
+            duration_ms = step.duration_ms
+            if duration_ms is None:
+                payload = "<1ms"
+            elif duration_ms < 0:
+                payload = "<1ms"
+            elif duration_ms >= 100:
+                payload = f"{duration_ms / 1000:.2f}s"
+            elif duration_ms > 0:
+                payload = f"{duration_ms}ms"
+            else:
+                payload = "<1ms"
+
+        return f"[{payload}]"
 
     def _calculate_step_elapsed_time(self, step: Step) -> float:
         """Calculate elapsed time for a running step."""
         server_elapsed = self.stream_processor.server_elapsed_time
         server_start = self._step_server_start_times.get(step.step_id)
 
-        if isinstance(server_elapsed, int | float) and isinstance(
-            server_start, int | float
-        ):
+        if isinstance(server_elapsed, (int, float)) and isinstance(server_start, (int, float)):
             return max(0.0, float(server_elapsed) - float(server_start))
 
         try:
@@ -1136,6 +908,10 @@ class RichStreamRenderer:
             return step.name
         return "thinking..." if step.kind == "agent" else f"{step.kind} step"

+    def _resolve_step_label(self, step: Step) -> str:
+        """Return the display label for a step with sensible fallbacks."""
+        return format_step_label(step)
+
     def _check_parallel_tools(self) -> dict[tuple[str | None, str | None], list]:
         """Check for parallel running tools."""
         running_by_ctx: dict[tuple[str | None, str | None], list] = {}
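
The added `_resolve_step_label` delegates to a shared `format_step_label` helper whose body is not shown in this hunk. A hedged sketch of the kind of fallback visible in the surrounding context lines (step name first, then a kind-based placeholder) might look like this; the real helper may differ:

```python
# Hypothetical fallback in the spirit of format_step_label, reproducing only
# the behavior visible in the context lines above (not the SDK helper).
from dataclasses import dataclass


@dataclass
class StepLike:
    name: str | None
    kind: str


def fallback_label(step: StepLike) -> str:
    if step.name:
        return step.name
    return "thinking..." if step.kind == "agent" else f"{step.kind} step"


assert fallback_label(StepLike(name="fetch_docs", kind="tool")) == "fetch_docs"
assert fallback_label(StepLike(name=None, kind="agent")) == "thinking..."
assert fallback_label(StepLike(name=None, kind="tool")) == "tool step"
```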
@@ -1155,72 +931,77 @@ class RichStreamRenderer:
         key = (step.task_id, step.context_id)
         return len(running_by_ctx.get(key, [])) > 1

-    def _compose_step_renderable(
-        self,
-        step: Step,
-        running_by_ctx: dict[tuple[str | None, str | None], list],
-    ) -> Any:
-        """Compose a single renderable for the steps panel."""
-        finished = is_step_finished(step)
-        status_br = self._format_step_status(step)
-        display_name = self._get_step_display_name(step)
-
-        if (
-            not finished
-            and step.kind == "tool"
-            and self._is_parallel_tool(step, running_by_ctx)
-        ):
-            status_br = status_br.replace("]", " 🔄]")
-
-        icon = self._get_step_icon(step.kind)
-        text_line = Text(style="dim")
-        text_line.append(icon)
-        text_line.append(" ")
-        text_line.append(display_name)
-        if status_br:
-            text_line.append(" ")
-            text_line.append(status_br)
-        if finished:
-            text_line.append(" ✓")
-
-        if finished:
-            return text_line
-
-        spinner = Spinner("dots", text=text_line, style="dim")
-        return Align.left(spinner)
-
-    def _render_steps_text(self) -> Any:
-        """Render the steps panel content."""
-        if not (self.steps.order or self.steps.children):
-            return Text("No steps yet", style="dim")
-
-        running_by_ctx = self._check_parallel_tools()
-        renderables: list[Any] = []
+    def _build_step_status_overrides(self) -> dict[str, str]:
+        """Return status text overrides for steps (running duration badges)."""
+        overrides: dict[str, str] = {}
         for sid in self.steps.order:
-            line = self._compose_step_renderable(self.steps.by_id[sid], running_by_ctx)
-            renderables.append(line)
-
-        if not renderables:
-            return Text("No steps yet", style="dim")
-
-        return Group(*renderables)
-
-    def _should_skip_finished_panel(self, sid: str, status: str) -> bool:
-        """Check if a finished panel should be skipped."""
-        if status != "finished":
-            return False
+            step = self.steps.by_id.get(sid)
+            if not step:
+                continue
+            try:
+                status_text = self._format_step_status(step)
+            except Exception:
+                status_text = ""
+            if status_text:
+                overrides[sid] = status_text
+        return overrides
+
+    def _resolve_steps_panel(self) -> AIPPanel:
+        """Return the shared steps panel renderable generated by layout helpers."""
+        window_arg = self._summary_window_size()
+        window_arg = window_arg if window_arg > 0 else None
+        panels = render_summary_panels(
+            self.state,
+            self.steps,
+            summary_window=window_arg,
+            include_query_panel=False,
+            include_final_panel=False,
+            step_status_overrides=self._build_step_status_overrides(),
+        )
+        steps_panel = next((panel for panel in panels if getattr(panel, "title", "").lower() == "steps"), None)
+        panel_cls = AIPPanel if isinstance(AIPPanel, type) else None
+        if steps_panel is not None and (panel_cls is None or isinstance(steps_panel, panel_cls)):
+            return steps_panel
+        return AIPPanel(_NO_STEPS_TEXT.copy(), title="Steps", border_style="blue")
+
+    def _prepare_steps_renderable(self, *, include_progress: bool) -> tuple[AIPPanel, Any]:
+        """Return the template panel and content renderable for steps."""
+        panel = self._resolve_steps_panel()
+        self._last_steps_panel_template = panel
+        base_renderable: Any = getattr(panel, "renderable", panel)
+
+        if include_progress and not self.state.finalizing_ui:
+            footer = build_progress_footer(
+                state=self.state,
+                steps=self.steps,
+                started_at=self._started_at,
+                server_elapsed_time=self.stream_processor.server_elapsed_time,
+            )
+            if footer is not None:
+                if isinstance(base_renderable, Group):
+                    base_renderable = Group(*base_renderable.renderables, footer)
+                else:
+                    base_renderable = Group(base_renderable, footer)
+        return panel, base_renderable
+
+    def _build_steps_body(self, *, include_progress: bool) -> Any:
+        """Return the rendered steps body with optional progress footer."""
+        _, renderable = self._prepare_steps_renderable(include_progress=include_progress)
+        if isinstance(renderable, Group):
+            return renderable
+        return Group(renderable)

-        if getattr(self.cfg, "append_finished_snapshots", False):
-            return True
+    def _render_steps_text(self) -> Any:
+        """Return the rendered steps body used by transcript capture."""
+        return self._build_steps_body(include_progress=True)

-        return (
-            not self.state.finalizing_ui
-            and sid not in self.stream_processor.current_event_finished_panels
-        )
+    def _summary_window_size(self) -> int:
+        """Return the active window size for step display."""
+        if self.state.finalizing_ui:
+            return 0
+        return int(self.cfg.summary_display_window or 0)

-    def _update_final_duration(
-        self, duration: float | None, *, overwrite: bool = False
-    ) -> None:
+    def _update_final_duration(self, duration: float | None, *, overwrite: bool = False) -> None:
         """Store formatted duration for eventual final panels."""
         if duration is None:
             return
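
The reworked panel path in this hunk asks the shared layout helpers for the "Steps" panel and, while a run is live, appends a progress footer by regrouping the panel body. As a rough illustration only, using plain `rich` primitives rather than the SDK's `AIPPanel`, `render_summary_panels`, or `build_progress_footer`:

```python
# Illustrative composition of a steps panel with an optional progress footer.
# The builder names and step lines here are made up for the example; only the
# Group/Panel regrouping pattern mirrors the added code above.
from rich.console import Console, Group
from rich.panel import Panel
from rich.text import Text


def steps_panel_with_footer(step_lines: list[str], footer: str | None) -> Panel:
    # Fall back to a placeholder body when no steps have arrived yet.
    lines = [Text(line, style="dim") for line in step_lines] or [Text("No steps yet", style="dim")]
    body = Group(*lines, Text(footer, style="bold")) if footer else Group(*lines)
    return Panel(body, title="Steps", border_style="blue")


Console().print(
    steps_panel_with_footer(
        ["🔧 search_tool [120ms] ✓", "🤖 thinking... [2.31s]"],
        footer="1/2 steps · 50% · elapsed 2.31s",
    )
)
```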
@@ -1238,193 +1019,6 @@ class RichStreamRenderer:
         if overwrite and existing is not None:
             duration_val = max(existing, duration_val)

-        self.state.final_duration_seconds = duration_val
-        self.state.final_duration_text = self._format_elapsed_time(duration_val)
-
-    def _calculate_elapsed_time(self, meta: dict[str, Any]) -> str:
-        """Calculate elapsed time string for running tools."""
-        server_elapsed = self.stream_processor.server_elapsed_time
-        server_start = meta.get("server_started_at")
-
-        if isinstance(server_elapsed, int | float) and isinstance(
-            server_start, int | float
-        ):
-            elapsed = max(0.0, float(server_elapsed) - float(server_start))
-        else:
-            elapsed = max(0.0, monotonic() - (meta.get("started_at") or 0.0))
-
-        return self._format_elapsed_time(elapsed)
-
-    def _format_elapsed_time(self, elapsed: float) -> str:
-        """Format elapsed time as a readable string."""
-        if elapsed >= 1:
-            return f"{elapsed:.2f}s"
-        elif int(elapsed * 1000) > 0:
-            return f"{int(elapsed * 1000)}ms"
-        else:
-            return "<1ms"
-
-    def _calculate_finished_duration(self, meta: dict[str, Any]) -> str | None:
-        """Calculate duration string for finished tools."""
-        dur = meta.get("duration_seconds")
-        if isinstance(dur, int | float):
-            return self._format_elapsed_time(dur)
-
-        try:
-            server_now = self.stream_processor.server_elapsed_time
-            server_start = meta.get("server_started_at")
-            if isinstance(server_now, int | float) and isinstance(
-                server_start, int | float
-            ):
-                dur = max(0.0, float(server_now) - float(server_start))
-            elif meta.get("started_at") is not None:
-                dur = max(0.0, float(monotonic() - meta.get("started_at")))
-        except Exception:
-            dur = None
-
-        return self._format_elapsed_time(dur) if isinstance(dur, int | float) else None
-
-    def _process_running_tool_panel(
-        self,
-        title: str,
-        meta: dict[str, Any],
-        body: str,
-        *,
-        include_spinner: bool = False,
-    ) -> tuple[str, str] | tuple[str, str, str | None]:
-        """Process a running tool panel."""
-        elapsed_str = self._calculate_elapsed_time(meta)
-        adjusted_title = f"{title} · {elapsed_str}"
-        chip = f"⏱ {elapsed_str}"
-        spinner_message: str | None = None
-
-        if not body.strip():
-            body = ""
-            spinner_message = f"{title} running... {elapsed_str}"
-        else:
-            body = f"{body}\n\n{chip}"
-
-        if include_spinner:
-            return adjusted_title, body, spinner_message
-        return adjusted_title, body
-
-    def _process_finished_tool_panel(self, title: str, meta: dict[str, Any]) -> str:
-        """Process a finished tool panel."""
-        duration_str = self._calculate_finished_duration(meta)
-        return f"{title} · {duration_str}" if duration_str else title
-
-    def _create_tool_panel_for_session(
-        self, sid: str, meta: dict[str, Any]
-    ) -> AIPPanel | None:
-        """Create a single tool panel for the session."""
-        title = meta.get("title") or "Tool"
-        status = meta.get("status") or "running"
-        chunks = meta.get("chunks") or []
-        is_delegation = bool(meta.get("is_delegation"))
-
-        if self._should_skip_finished_panel(sid, status):
-            return None
-
-        body = "".join(chunks)
-        adjusted_title = title
-
-        spinner_message: str | None = None
-
-        if status == "running":
-            adjusted_title, body, spinner_message = self._process_running_tool_panel(
-                title, meta, body, include_spinner=True
-            )
-        elif status == "finished":
-            adjusted_title = self._process_finished_tool_panel(title, meta)
-
-        return create_tool_panel(
-            title=adjusted_title,
-            content=body,
-            status=status,
-            theme=self.cfg.theme,
-            is_delegation=is_delegation,
-            spinner_message=spinner_message,
-        )
-
-    def _render_tool_panels(self) -> list[AIPPanel]:
-        """Render tool execution output panels."""
-        if not getattr(self.cfg, "show_delegate_tool_panels", False):
-            return []
-        panels: list[AIPPanel] = []
-        for sid in self.tool_order:
-            meta = self.tool_panels.get(sid) or {}
-            panel = self._create_tool_panel_for_session(sid, meta)
-            if panel:
-                panels.append(panel)
-
-        return panels
-
-    def _format_dict_or_list_output(self, output_value: dict | list) -> str:
-        """Format dict/list output as pretty JSON."""
-        try:
-            return (
-                self.OUTPUT_PREFIX
-                + "```json\n"
-                + json.dumps(output_value, indent=2)
-                + "\n```\n"
-            )
-        except Exception:
-            return self.OUTPUT_PREFIX + str(output_value) + "\n"
-
-    def _clean_sub_agent_prefix(self, output: str, tool_name: str | None) -> str:
-        """Clean sub-agent name prefix from output."""
-        if not (tool_name and is_delegation_tool(tool_name)):
-            return output
-
-        sub = tool_name
-        if tool_name.startswith("delegate_to_"):
-            sub = tool_name.replace("delegate_to_", "")
-        elif tool_name.startswith("delegate_"):
-            sub = tool_name.replace("delegate_", "")
-        prefix = f"[{sub}]"
-        if output.startswith(prefix):
-            return output[len(prefix) :].lstrip()
-
-        return output
-
-    def _format_json_string_output(self, output: str) -> str:
-        """Format string that looks like JSON."""
-        try:
-            parsed = json.loads(output)
-            return (
-                self.OUTPUT_PREFIX
-                + "```json\n"
-                + json.dumps(parsed, indent=2)
-                + "\n```\n"
-            )
-        except Exception:
-            return self.OUTPUT_PREFIX + output + "\n"
-
-    def _format_string_output(self, output: str, tool_name: str | None) -> str:
-        """Format string output with optional prefix cleaning."""
-        s = output.strip()
-        s = self._clean_sub_agent_prefix(s, tool_name)
-
-        # If looks like JSON, pretty print it
-        if (s.startswith("{") and s.endswith("}")) or (
-            s.startswith("[") and s.endswith("]")
-        ):
-            return self._format_json_string_output(s)
-
-        return self.OUTPUT_PREFIX + s + "\n"
-
-    def _format_other_output(self, output_value: Any) -> str:
-        """Format other types of output."""
-        try:
-            return self.OUTPUT_PREFIX + json.dumps(output_value, indent=2) + "\n"
-        except Exception:
-            return self.OUTPUT_PREFIX + str(output_value) + "\n"
-
-    def _format_output_block(self, output_value: Any, tool_name: str | None) -> str:
-        """Format an output value for panel display."""
-        if isinstance(output_value, dict | list):
-            return self._format_dict_or_list_output(output_value)
-        elif isinstance(output_value, str):
-            return self._format_string_output(output_value, tool_name)
-        else:
-            return self._format_other_output(output_value)
+        formatted = format_elapsed_time(duration_val)
+        self.state.mark_final_duration(duration_val, formatted=formatted)
+        self._apply_root_duration(duration_val)
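
The rewritten `_update_final_duration` now hands formatting to a shared `format_elapsed_time` helper and records the result via `state.mark_final_duration`. The helper's exact rounding is not visible in this diff; a sketch that reproduces the thresholds of the removed `_format_elapsed_time` would be:

```python
# Sketch reproducing the thresholds of the removed _format_elapsed_time;
# the shared format_elapsed_time helper imported by the new code may differ.
def format_elapsed(elapsed: float) -> str:
    if elapsed >= 1:
        return f"{elapsed:.2f}s"
    ms = int(elapsed * 1000)
    return f"{ms}ms" if ms > 0 else "<1ms"


assert format_elapsed(2.5) == "2.50s"
assert format_elapsed(0.042) == "42ms"
assert format_elapsed(0.0004) == "<1ms"
```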