glaip-sdk 0.0.20__py3-none-any.whl → 0.7.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (216)
  1. glaip_sdk/__init__.py +44 -4
  2. glaip_sdk/_version.py +10 -3
  3. glaip_sdk/agents/__init__.py +27 -0
  4. glaip_sdk/agents/base.py +1250 -0
  5. glaip_sdk/branding.py +15 -6
  6. glaip_sdk/cli/account_store.py +540 -0
  7. glaip_sdk/cli/agent_config.py +2 -6
  8. glaip_sdk/cli/auth.py +271 -45
  9. glaip_sdk/cli/commands/__init__.py +2 -2
  10. glaip_sdk/cli/commands/accounts.py +746 -0
  11. glaip_sdk/cli/commands/agents/__init__.py +119 -0
  12. glaip_sdk/cli/commands/agents/_common.py +561 -0
  13. glaip_sdk/cli/commands/agents/create.py +151 -0
  14. glaip_sdk/cli/commands/agents/delete.py +64 -0
  15. glaip_sdk/cli/commands/agents/get.py +89 -0
  16. glaip_sdk/cli/commands/agents/list.py +129 -0
  17. glaip_sdk/cli/commands/agents/run.py +264 -0
  18. glaip_sdk/cli/commands/agents/sync_langflow.py +72 -0
  19. glaip_sdk/cli/commands/agents/update.py +112 -0
  20. glaip_sdk/cli/commands/common_config.py +104 -0
  21. glaip_sdk/cli/commands/configure.py +734 -143
  22. glaip_sdk/cli/commands/mcps/__init__.py +94 -0
  23. glaip_sdk/cli/commands/mcps/_common.py +459 -0
  24. glaip_sdk/cli/commands/mcps/connect.py +82 -0
  25. glaip_sdk/cli/commands/mcps/create.py +152 -0
  26. glaip_sdk/cli/commands/mcps/delete.py +73 -0
  27. glaip_sdk/cli/commands/mcps/get.py +212 -0
  28. glaip_sdk/cli/commands/mcps/list.py +69 -0
  29. glaip_sdk/cli/commands/mcps/tools.py +235 -0
  30. glaip_sdk/cli/commands/mcps/update.py +190 -0
  31. glaip_sdk/cli/commands/models.py +14 -12
  32. glaip_sdk/cli/commands/shared/__init__.py +21 -0
  33. glaip_sdk/cli/commands/shared/formatters.py +91 -0
  34. glaip_sdk/cli/commands/tools/__init__.py +69 -0
  35. glaip_sdk/cli/commands/tools/_common.py +80 -0
  36. glaip_sdk/cli/commands/tools/create.py +228 -0
  37. glaip_sdk/cli/commands/tools/delete.py +61 -0
  38. glaip_sdk/cli/commands/tools/get.py +103 -0
  39. glaip_sdk/cli/commands/tools/list.py +69 -0
  40. glaip_sdk/cli/commands/tools/script.py +49 -0
  41. glaip_sdk/cli/commands/tools/update.py +102 -0
  42. glaip_sdk/cli/commands/transcripts/__init__.py +90 -0
  43. glaip_sdk/cli/commands/transcripts/_common.py +9 -0
  44. glaip_sdk/cli/commands/transcripts/clear.py +5 -0
  45. glaip_sdk/cli/commands/transcripts/detail.py +5 -0
  46. glaip_sdk/cli/commands/transcripts_original.py +756 -0
  47. glaip_sdk/cli/commands/update.py +164 -23
  48. glaip_sdk/cli/config.py +49 -7
  49. glaip_sdk/cli/constants.py +38 -0
  50. glaip_sdk/cli/context.py +8 -0
  51. glaip_sdk/cli/core/__init__.py +79 -0
  52. glaip_sdk/cli/core/context.py +124 -0
  53. glaip_sdk/cli/core/output.py +851 -0
  54. glaip_sdk/cli/core/prompting.py +649 -0
  55. glaip_sdk/cli/core/rendering.py +187 -0
  56. glaip_sdk/cli/display.py +45 -32
  57. glaip_sdk/cli/entrypoint.py +20 -0
  58. glaip_sdk/cli/hints.py +57 -0
  59. glaip_sdk/cli/io.py +14 -17
  60. glaip_sdk/cli/main.py +344 -167
  61. glaip_sdk/cli/masking.py +21 -33
  62. glaip_sdk/cli/mcp_validators.py +5 -15
  63. glaip_sdk/cli/pager.py +15 -22
  64. glaip_sdk/cli/parsers/__init__.py +1 -3
  65. glaip_sdk/cli/parsers/json_input.py +11 -22
  66. glaip_sdk/cli/resolution.py +5 -10
  67. glaip_sdk/cli/rich_helpers.py +1 -3
  68. glaip_sdk/cli/slash/__init__.py +0 -9
  69. glaip_sdk/cli/slash/accounts_controller.py +580 -0
  70. glaip_sdk/cli/slash/accounts_shared.py +75 -0
  71. glaip_sdk/cli/slash/agent_session.py +65 -29
  72. glaip_sdk/cli/slash/prompt.py +24 -10
  73. glaip_sdk/cli/slash/remote_runs_controller.py +566 -0
  74. glaip_sdk/cli/slash/session.py +827 -232
  75. glaip_sdk/cli/slash/tui/__init__.py +34 -0
  76. glaip_sdk/cli/slash/tui/accounts.tcss +88 -0
  77. glaip_sdk/cli/slash/tui/accounts_app.py +933 -0
  78. glaip_sdk/cli/slash/tui/background_tasks.py +72 -0
  79. glaip_sdk/cli/slash/tui/clipboard.py +147 -0
  80. glaip_sdk/cli/slash/tui/context.py +59 -0
  81. glaip_sdk/cli/slash/tui/keybind_registry.py +235 -0
  82. glaip_sdk/cli/slash/tui/loading.py +58 -0
  83. glaip_sdk/cli/slash/tui/remote_runs_app.py +628 -0
  84. glaip_sdk/cli/slash/tui/terminal.py +402 -0
  85. glaip_sdk/cli/slash/tui/theme/__init__.py +15 -0
  86. glaip_sdk/cli/slash/tui/theme/catalog.py +79 -0
  87. glaip_sdk/cli/slash/tui/theme/manager.py +86 -0
  88. glaip_sdk/cli/slash/tui/theme/tokens.py +55 -0
  89. glaip_sdk/cli/slash/tui/toast.py +123 -0
  90. glaip_sdk/cli/transcript/__init__.py +12 -52
  91. glaip_sdk/cli/transcript/cache.py +258 -60
  92. glaip_sdk/cli/transcript/capture.py +72 -21
  93. glaip_sdk/cli/transcript/history.py +815 -0
  94. glaip_sdk/cli/transcript/launcher.py +1 -3
  95. glaip_sdk/cli/transcript/viewer.py +79 -329
  96. glaip_sdk/cli/update_notifier.py +385 -24
  97. glaip_sdk/cli/validators.py +16 -18
  98. glaip_sdk/client/__init__.py +3 -1
  99. glaip_sdk/client/_schedule_payloads.py +89 -0
  100. glaip_sdk/client/agent_runs.py +147 -0
  101. glaip_sdk/client/agents.py +370 -100
  102. glaip_sdk/client/base.py +78 -35
  103. glaip_sdk/client/hitl.py +136 -0
  104. glaip_sdk/client/main.py +25 -10
  105. glaip_sdk/client/mcps.py +166 -27
  106. glaip_sdk/client/payloads/agent/__init__.py +23 -0
  107. glaip_sdk/client/{_agent_payloads.py → payloads/agent/requests.py} +65 -74
  108. glaip_sdk/client/payloads/agent/responses.py +43 -0
  109. glaip_sdk/client/run_rendering.py +583 -79
  110. glaip_sdk/client/schedules.py +439 -0
  111. glaip_sdk/client/shared.py +21 -0
  112. glaip_sdk/client/tools.py +214 -56
  113. glaip_sdk/client/validators.py +20 -48
  114. glaip_sdk/config/constants.py +11 -0
  115. glaip_sdk/exceptions.py +1 -3
  116. glaip_sdk/hitl/__init__.py +48 -0
  117. glaip_sdk/hitl/base.py +64 -0
  118. glaip_sdk/hitl/callback.py +43 -0
  119. glaip_sdk/hitl/local.py +121 -0
  120. glaip_sdk/hitl/remote.py +523 -0
  121. glaip_sdk/icons.py +9 -3
  122. glaip_sdk/mcps/__init__.py +21 -0
  123. glaip_sdk/mcps/base.py +345 -0
  124. glaip_sdk/models/__init__.py +107 -0
  125. glaip_sdk/models/agent.py +47 -0
  126. glaip_sdk/models/agent_runs.py +117 -0
  127. glaip_sdk/models/common.py +42 -0
  128. glaip_sdk/models/mcp.py +33 -0
  129. glaip_sdk/models/schedule.py +224 -0
  130. glaip_sdk/models/tool.py +33 -0
  131. glaip_sdk/payload_schemas/__init__.py +1 -13
  132. glaip_sdk/payload_schemas/agent.py +1 -3
  133. glaip_sdk/registry/__init__.py +55 -0
  134. glaip_sdk/registry/agent.py +164 -0
  135. glaip_sdk/registry/base.py +139 -0
  136. glaip_sdk/registry/mcp.py +253 -0
  137. glaip_sdk/registry/tool.py +445 -0
  138. glaip_sdk/rich_components.py +58 -2
  139. glaip_sdk/runner/__init__.py +76 -0
  140. glaip_sdk/runner/base.py +84 -0
  141. glaip_sdk/runner/deps.py +112 -0
  142. glaip_sdk/runner/langgraph.py +872 -0
  143. glaip_sdk/runner/logging_config.py +77 -0
  144. glaip_sdk/runner/mcp_adapter/__init__.py +13 -0
  145. glaip_sdk/runner/mcp_adapter/base_mcp_adapter.py +43 -0
  146. glaip_sdk/runner/mcp_adapter/langchain_mcp_adapter.py +257 -0
  147. glaip_sdk/runner/mcp_adapter/mcp_config_builder.py +95 -0
  148. glaip_sdk/runner/tool_adapter/__init__.py +18 -0
  149. glaip_sdk/runner/tool_adapter/base_tool_adapter.py +44 -0
  150. glaip_sdk/runner/tool_adapter/langchain_tool_adapter.py +242 -0
  151. glaip_sdk/schedules/__init__.py +22 -0
  152. glaip_sdk/schedules/base.py +291 -0
  153. glaip_sdk/tools/__init__.py +22 -0
  154. glaip_sdk/tools/base.py +468 -0
  155. glaip_sdk/utils/__init__.py +59 -12
  156. glaip_sdk/utils/a2a/__init__.py +34 -0
  157. glaip_sdk/utils/a2a/event_processor.py +188 -0
  158. glaip_sdk/utils/agent_config.py +4 -14
  159. glaip_sdk/utils/bundler.py +403 -0
  160. glaip_sdk/utils/client.py +111 -0
  161. glaip_sdk/utils/client_utils.py +46 -28
  162. glaip_sdk/utils/datetime_helpers.py +58 -0
  163. glaip_sdk/utils/discovery.py +78 -0
  164. glaip_sdk/utils/display.py +25 -21
  165. glaip_sdk/utils/export.py +143 -0
  166. glaip_sdk/utils/general.py +1 -36
  167. glaip_sdk/utils/import_export.py +15 -16
  168. glaip_sdk/utils/import_resolver.py +524 -0
  169. glaip_sdk/utils/instructions.py +101 -0
  170. glaip_sdk/utils/rendering/__init__.py +115 -1
  171. glaip_sdk/utils/rendering/formatting.py +38 -23
  172. glaip_sdk/utils/rendering/layout/__init__.py +64 -0
  173. glaip_sdk/utils/rendering/{renderer → layout}/panels.py +10 -3
  174. glaip_sdk/utils/rendering/{renderer → layout}/progress.py +73 -12
  175. glaip_sdk/utils/rendering/layout/summary.py +74 -0
  176. glaip_sdk/utils/rendering/layout/transcript.py +606 -0
  177. glaip_sdk/utils/rendering/models.py +18 -8
  178. glaip_sdk/utils/rendering/renderer/__init__.py +9 -51
  179. glaip_sdk/utils/rendering/renderer/base.py +534 -882
  180. glaip_sdk/utils/rendering/renderer/config.py +4 -10
  181. glaip_sdk/utils/rendering/renderer/debug.py +30 -34
  182. glaip_sdk/utils/rendering/renderer/factory.py +138 -0
  183. glaip_sdk/utils/rendering/renderer/stream.py +13 -54
  184. glaip_sdk/utils/rendering/renderer/summary_window.py +79 -0
  185. glaip_sdk/utils/rendering/renderer/thinking.py +273 -0
  186. glaip_sdk/utils/rendering/renderer/toggle.py +182 -0
  187. glaip_sdk/utils/rendering/renderer/tool_panels.py +442 -0
  188. glaip_sdk/utils/rendering/renderer/transcript_mode.py +162 -0
  189. glaip_sdk/utils/rendering/state.py +204 -0
  190. glaip_sdk/utils/rendering/step_tree_state.py +100 -0
  191. glaip_sdk/utils/rendering/steps/__init__.py +34 -0
  192. glaip_sdk/utils/rendering/steps/event_processor.py +778 -0
  193. glaip_sdk/utils/rendering/steps/format.py +176 -0
  194. glaip_sdk/utils/rendering/{steps.py → steps/manager.py} +122 -26
  195. glaip_sdk/utils/rendering/timing.py +36 -0
  196. glaip_sdk/utils/rendering/viewer/__init__.py +21 -0
  197. glaip_sdk/utils/rendering/viewer/presenter.py +184 -0
  198. glaip_sdk/utils/resource_refs.py +29 -26
  199. glaip_sdk/utils/runtime_config.py +425 -0
  200. glaip_sdk/utils/serialization.py +32 -46
  201. glaip_sdk/utils/sync.py +162 -0
  202. glaip_sdk/utils/tool_detection.py +301 -0
  203. glaip_sdk/utils/tool_storage_provider.py +140 -0
  204. glaip_sdk/utils/validation.py +20 -28
  205. {glaip_sdk-0.0.20.dist-info → glaip_sdk-0.7.7.dist-info}/METADATA +78 -23
  206. glaip_sdk-0.7.7.dist-info/RECORD +213 -0
  207. {glaip_sdk-0.0.20.dist-info → glaip_sdk-0.7.7.dist-info}/WHEEL +2 -1
  208. glaip_sdk-0.7.7.dist-info/entry_points.txt +2 -0
  209. glaip_sdk-0.7.7.dist-info/top_level.txt +1 -0
  210. glaip_sdk/cli/commands/agents.py +0 -1412
  211. glaip_sdk/cli/commands/mcps.py +0 -1225
  212. glaip_sdk/cli/commands/tools.py +0 -597
  213. glaip_sdk/cli/utils.py +0 -1330
  214. glaip_sdk/models.py +0 -259
  215. glaip_sdk-0.0.20.dist-info/RECORD +0 -80
  216. glaip_sdk-0.0.20.dist-info/entry_points.txt +0 -3
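One small, self-contained change visible in the hunks that follow (apparently from the rewritten stream renderer module, whose logger is named glaip_sdk.run_renderer): the module-private `_coerce_received_at` helper is deleted and a `coerce_received_at` import from `glaip_sdk.utils.rendering.state` takes its place. A minimal usage sketch, assuming the relocated helper keeps the behaviour of the removed implementation shown in the diff:

    from datetime import datetime, timezone

    # Import path taken from the added import block in the diff below; the
    # behaviour is assumed to match the removed _coerce_received_at helper.
    from glaip_sdk.utils.rendering.state import coerce_received_at

    # ISO-8601 strings (including a trailing "Z") become timezone-aware UTC
    # datetimes, naive datetimes are tagged with UTC, and unparseable or
    # missing values yield None.
    assert coerce_received_at("2025-01-01T12:00:00Z") == datetime(2025, 1, 1, 12, tzinfo=timezone.utc)
    assert coerce_received_at(datetime(2025, 1, 1, 12)) == datetime(2025, 1, 1, 12, tzinfo=timezone.utc)
    assert coerce_received_at("not-a-timestamp") is None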
@@ -8,12 +8,11 @@ from __future__ import annotations
8
8
 
9
9
  import json
10
10
  import logging
11
- from dataclasses import dataclass, field
11
+ import sys
12
12
  from datetime import datetime, timezone
13
13
  from time import monotonic
14
14
  from typing import Any
15
15
 
16
- from rich.align import Align
17
16
  from rich.console import Console as RichConsole
18
17
  from rich.console import Group
19
18
  from rich.live import Live
@@ -25,78 +24,57 @@ from glaip_sdk.icons import ICON_AGENT, ICON_AGENT_STEP, ICON_DELEGATE, ICON_TOO
25
24
  from glaip_sdk.rich_components import AIPPanel
26
25
  from glaip_sdk.utils.rendering.formatting import (
27
26
  format_main_title,
28
- get_spinner_char,
29
27
  is_step_finished,
28
+ normalise_display_label,
30
29
  )
31
30
  from glaip_sdk.utils.rendering.models import RunStats, Step
32
- from glaip_sdk.utils.rendering.renderer.config import RendererConfig
33
- from glaip_sdk.utils.rendering.renderer.debug import render_debug_event
34
- from glaip_sdk.utils.rendering.renderer.panels import (
35
- create_final_panel,
36
- create_main_panel,
37
- create_tool_panel,
38
- )
39
- from glaip_sdk.utils.rendering.renderer.progress import (
31
+ from glaip_sdk.utils.rendering.layout.panels import create_main_panel
32
+ from glaip_sdk.utils.rendering.layout.progress import (
33
+ build_progress_footer,
40
34
  format_elapsed_time,
41
- format_tool_title,
42
35
  format_working_indicator,
43
- get_spinner,
36
+ get_spinner_char,
44
37
  is_delegation_tool,
45
38
  )
39
+ from glaip_sdk.utils.rendering.layout.summary import render_summary_panels
40
+ from glaip_sdk.utils.rendering.layout.transcript import (
41
+ DEFAULT_TRANSCRIPT_THEME,
42
+ TranscriptSnapshot,
43
+ build_final_panel,
44
+ build_transcript_snapshot,
45
+ build_transcript_view,
46
+ extract_query_from_meta,
47
+ format_final_panel_title,
48
+ )
49
+ from glaip_sdk.utils.rendering.renderer.config import RendererConfig
50
+ from glaip_sdk.utils.rendering.renderer.debug import render_debug_event
46
51
  from glaip_sdk.utils.rendering.renderer.stream import StreamProcessor
47
- from glaip_sdk.utils.rendering.steps import StepManager
52
+ from glaip_sdk.utils.rendering.renderer.thinking import ThinkingScopeController
53
+ from glaip_sdk.utils.rendering.renderer.tool_panels import ToolPanelController
54
+ from glaip_sdk.utils.rendering.renderer.transcript_mode import TranscriptModeMixin
55
+ from glaip_sdk.utils.rendering.state import (
56
+ RendererState,
57
+ TranscriptBuffer,
58
+ coerce_received_at,
59
+ truncate_display,
60
+ )
61
+ from glaip_sdk.utils.rendering.steps import (
62
+ StepManager,
63
+ format_step_label,
64
+ )
65
+ from glaip_sdk.utils.rendering.timing import coerce_server_time
66
+
67
+ _NO_STEPS_TEXT = Text("No steps yet", style="dim")
48
68
 
49
69
  # Configure logger
50
70
  logger = logging.getLogger("glaip_sdk.run_renderer")
51
71
 
52
72
  # Constants
53
- LESS_THAN_1MS = "[<1ms]"
54
-
55
-
56
- def _coerce_received_at(value: Any) -> datetime | None:
57
- """Coerce a received_at value to an aware datetime if possible."""
58
- if value is None:
59
- return None
60
-
61
- if isinstance(value, datetime):
62
- return value if value.tzinfo else value.replace(tzinfo=timezone.utc)
63
-
64
- if isinstance(value, str):
65
- try:
66
- normalised = value.replace("Z", "+00:00")
67
- dt = datetime.fromisoformat(normalised)
68
- except ValueError:
69
- return None
70
- return dt if dt.tzinfo else dt.replace(tzinfo=timezone.utc)
71
-
72
- return None
73
+ RUNNING_STATUS_HINTS = {"running", "started", "pending", "working"}
74
+ ARGS_VALUE_MAX_LEN = 160
73
75
 
74
76
 
75
- @dataclass
76
- class RendererState:
77
- """Internal state for the renderer."""
78
-
79
- buffer: list[str] | None = None
80
- final_text: str = ""
81
- streaming_started_at: float | None = None
82
- printed_final_output: bool = False
83
- finalizing_ui: bool = False
84
- final_duration_seconds: float | None = None
85
- final_duration_text: str | None = None
86
- events: list[dict[str, Any]] = field(default_factory=list)
87
- meta: dict[str, Any] = field(default_factory=dict)
88
- streaming_started_event_ts: datetime | None = None
89
-
90
- def __post_init__(self) -> None:
91
- """Initialize renderer state after dataclass creation.
92
-
93
- Ensures buffer is initialized as an empty list if not provided.
94
- """
95
- if self.buffer is None:
96
- self.buffer = []
97
-
98
-
99
- class RichStreamRenderer:
77
+ class RichStreamRenderer(TranscriptModeMixin):
100
78
  """Live, modern terminal renderer for agent execution with rich visual output."""
101
79
 
102
80
  def __init__(
@@ -105,6 +83,8 @@ class RichStreamRenderer:
105
83
  *,
106
84
  cfg: RendererConfig | None = None,
107
85
  verbose: bool = False,
86
+ transcript_buffer: TranscriptBuffer | None = None,
87
+ callbacks: dict[str, Any] | None = None,
108
88
  ) -> None:
109
89
  """Initialize the renderer.
110
90
 
@@ -112,7 +92,10 @@ class RichStreamRenderer:
112
92
  console: Rich console instance
113
93
  cfg: Renderer configuration
114
94
  verbose: Whether to enable verbose mode
95
+ transcript_buffer: Optional transcript buffer for capturing output
96
+ callbacks: Optional dictionary of callback functions
115
97
  """
98
+ super().__init__()
116
99
  self.console = console or RichConsole()
117
100
  self.cfg = cfg or RendererConfig()
118
101
  self.verbose = verbose
@@ -120,19 +103,36 @@ class RichStreamRenderer:
120
103
  # Initialize components
121
104
  self.stream_processor = StreamProcessor()
122
105
  self.state = RendererState()
106
+ if transcript_buffer is not None:
107
+ self.state.buffer = transcript_buffer
108
+
109
+ self._callbacks = callbacks or {}
123
110
 
124
111
  # Initialize step manager and other state
125
- self.steps = StepManager()
112
+ self.steps = StepManager(max_steps=self.cfg.summary_max_steps)
126
113
  # Live display instance (single source of truth)
127
114
  self.live: Live | None = None
115
+ self._step_spinners: dict[str, Spinner] = {}
116
+ self._last_steps_panel_template: Any | None = None
128
117
 
129
- # Context and tool tracking
130
- self.context_order: list[str] = []
131
- self.context_parent: dict[str, str] = {}
132
- self.tool_order: list[str] = []
133
- self.context_panels: dict[str, list[str]] = {}
134
- self.context_meta: dict[str, dict[str, Any]] = {}
135
- self.tool_panels: dict[str, dict[str, Any]] = {}
118
+ # Tool tracking and thinking scopes
119
+ self._step_server_start_times: dict[str, float] = {}
120
+ self.tool_controller = ToolPanelController(
121
+ steps=self.steps,
122
+ stream_processor=self.stream_processor,
123
+ console=self.console,
124
+ cfg=self.cfg,
125
+ step_server_start_times=self._step_server_start_times,
126
+ output_prefix="**Output:**\n",
127
+ )
128
+ self.thinking_controller = ThinkingScopeController(
129
+ self.steps,
130
+ step_server_start_times=self._step_server_start_times,
131
+ )
132
+ self._root_agent_friendly: str | None = None
133
+ self._root_agent_step_id: str | None = None
134
+ self._root_query: str | None = None
135
+ self._root_query_attached: bool = False
136
136
 
137
137
  # Timing
138
138
  self._started_at: float | None = None
@@ -140,11 +140,12 @@ class RichStreamRenderer:
140
140
  # Header/text
141
141
  self.header_text: str = ""
142
142
  # Track per-step server start times for accurate elapsed labels
143
- self._step_server_start_times: dict[str, float] = {}
144
-
145
143
  # Output formatting constants
146
144
  self.OUTPUT_PREFIX: str = "**Output:**\n"
147
145
 
146
+ self._final_transcript_snapshot: TranscriptSnapshot | None = None
147
+ self._final_transcript_renderables: tuple[list[Any], list[Any]] | None = None
148
+
148
149
  def on_start(self, meta: dict[str, Any]) -> None:
149
150
  """Handle renderer start event."""
150
151
  if self.cfg.live:
@@ -158,6 +159,20 @@ class RichStreamRenderer:
158
159
  except Exception:
159
160
  self.state.meta = dict(meta)
160
161
 
162
+ meta_payload = meta or {}
163
+ self.steps.set_root_agent(meta_payload.get("agent_id"))
164
+ self._root_agent_friendly = self._humanize_agent_slug(meta_payload.get("agent_name"))
165
+ self._root_query = truncate_display(
166
+ meta_payload.get("input_message")
167
+ or meta_payload.get("query")
168
+ or meta_payload.get("message")
169
+ or (meta_payload.get("meta") or {}).get("input_message")
170
+ or ""
171
+ )
172
+ if not self._root_query:
173
+ self._root_query = None
174
+ self._root_query_attached = False
175
+
161
176
  # Print compact header and user request (parity with old renderer)
162
177
  self._render_header(meta)
163
178
  self._render_user_query(meta)
@@ -207,24 +222,66 @@ class RichStreamRenderer:
207
222
  except Exception:
208
223
  logger.exception("Failed to print header fallback")
209
224
 
225
+ def _build_user_query_panel(self, query: str) -> AIPPanel:
226
+ """Create the panel used to display the user request."""
227
+ return AIPPanel(
228
+ Markdown(f"**Query:** {query}"),
229
+ title="User Request",
230
+ border_style="#d97706",
231
+ padding=(0, 1),
232
+ )
233
+
210
234
  def _render_user_query(self, meta: dict[str, Any]) -> None:
211
235
  """Render the user query panel."""
212
- query = meta.get("input_message") or meta.get("query") or meta.get("message")
236
+ query = extract_query_from_meta(meta)
213
237
  if not query:
214
238
  return
239
+ self.console.print(self._build_user_query_panel(query))
240
+
241
+ def _render_summary_static_sections(self) -> None:
242
+ """Re-render header and user query when returning to summary mode."""
243
+ meta = getattr(self.state, "meta", None)
244
+ if meta:
245
+ self._render_header(meta)
246
+ elif self.header_text and not self._render_header_rule():
247
+ self._render_header_fallback()
215
248
 
216
- self.console.print(
217
- AIPPanel(
218
- Markdown(f"**Query:** {query}"),
219
- title="User Request",
220
- border_style="#d97706",
221
- padding=(0, 1),
222
- )
223
- )
249
+ query = extract_query_from_meta(meta) or self._root_query
250
+ if query:
251
+ self.console.print(self._build_user_query_panel(query))
252
+
253
+ def _render_summary_after_transcript_toggle(self) -> None:
254
+ """Render the summary panel after leaving transcript mode."""
255
+ if self.state.finalizing_ui:
256
+ self._render_final_summary_panels()
257
+ elif self.live:
258
+ self._refresh_live_panels()
259
+ else:
260
+ self._render_static_summary_panels()
261
+
262
+ def _render_final_summary_panels(self) -> None:
263
+ """Render a static summary and disable live mode for final output."""
264
+ self.cfg.live = False
265
+ self.live = None
266
+ self._render_static_summary_panels()
267
+
268
+ def _render_static_summary_panels(self) -> None:
269
+ """Render the steps and main panels in a static (non-live) layout."""
270
+ summary_window = self._summary_window_size()
271
+ window_arg = summary_window if summary_window > 0 else None
272
+ status_overrides = self._build_step_status_overrides()
273
+ for renderable in render_summary_panels(
274
+ self.state,
275
+ self.steps,
276
+ summary_window=window_arg,
277
+ include_query_panel=False,
278
+ step_status_overrides=status_overrides,
279
+ ):
280
+ self.console.print(renderable)
224
281
 
225
282
  def _ensure_streaming_started_baseline(self, timestamp: float) -> None:
226
283
  """Synchronize streaming start state across renderer components."""
227
- self.state.streaming_started_at = timestamp
284
+ self.state.start_stream_timer(timestamp)
228
285
  self.stream_processor.streaming_started_at = timestamp
229
286
  self._started_at = timestamp
230
287
 
@@ -237,14 +294,16 @@ class RichStreamRenderer:
237
294
  self._sync_stream_start(ev, received_at)
238
295
 
239
296
  metadata = self.stream_processor.extract_event_metadata(ev)
240
- self.stream_processor.update_timing(metadata["context_id"])
241
297
 
242
298
  self._maybe_render_debug(ev, received_at)
243
- self._dispatch_event(ev, metadata)
299
+ try:
300
+ self._dispatch_event(ev, metadata)
301
+ finally:
302
+ self.stream_processor.update_timing(metadata.get("context_id"))
244
303
 
245
304
  def _resolve_received_timestamp(self, ev: dict[str, Any]) -> datetime:
246
305
  """Return the timestamp an event was received, normalising inputs."""
247
- received_at = _coerce_received_at(ev.get("received_at"))
306
+ received_at = coerce_received_at(ev.get("received_at"))
248
307
  if received_at is None:
249
308
  received_at = datetime.now(timezone.utc)
250
309
 
@@ -253,9 +312,7 @@ class RichStreamRenderer:
253
312
 
254
313
  return received_at
255
314
 
256
- def _sync_stream_start(
257
- self, ev: dict[str, Any], received_at: datetime | None
258
- ) -> None:
315
+ def _sync_stream_start(self, ev: dict[str, Any], received_at: datetime | None) -> None:
259
316
  """Ensure renderer and stream processor share a streaming baseline."""
260
317
  baseline = self.state.streaming_started_at
261
318
  if baseline is None:
@@ -275,12 +332,14 @@ class RichStreamRenderer:
275
332
  if not self.verbose:
276
333
  return
277
334
 
335
+ self._ensure_transcript_header()
278
336
  render_debug_event(
279
337
  ev,
280
338
  self.console,
281
339
  received_ts=received_at,
282
340
  baseline_ts=self.state.streaming_started_event_ts,
283
341
  )
342
+ self._print_transcript_hint()
284
343
 
285
344
  def _dispatch_event(self, ev: dict[str, Any], metadata: dict[str, Any]) -> None:
286
345
  """Route events to the appropriate renderer handlers."""
@@ -291,10 +350,13 @@ class RichStreamRenderer:
291
350
  self._handle_status_event(ev)
292
351
  elif kind == "content":
293
352
  self._handle_content_event(content)
353
+ elif kind == "token":
354
+ # Token events should stream content incrementally with immediate console output
355
+ self._handle_token_event(content)
294
356
  elif kind == "final_response":
295
357
  self._handle_final_response_event(content, metadata)
296
358
  elif kind in {"agent_step", "agent_thinking_step"}:
297
- self._handle_agent_step_event(ev)
359
+ self._handle_agent_step_event(ev, metadata)
298
360
  else:
299
361
  self._ensure_live()
300
362
 
@@ -307,61 +369,170 @@ class RichStreamRenderer:
307
369
  def _handle_content_event(self, content: str) -> None:
308
370
  """Handle content streaming events."""
309
371
  if content:
310
- self.state.buffer.append(content)
372
+ self.state.append_transcript_text(content)
311
373
  self._ensure_live()
312
374
 
313
- def _handle_final_response_event(
314
- self, content: str, metadata: dict[str, Any]
315
- ) -> None:
375
+ def _handle_token_event(self, content: str) -> None:
376
+ """Handle token streaming events - print immediately for real-time streaming."""
377
+ if content:
378
+ self.state.append_transcript_text(content)
379
+ # Print token content directly to stdout for immediate visibility when not verbose
380
+ # This bypasses Rich's Live display which has refresh rate limitations
381
+ if not self.verbose:
382
+ try:
383
+ # Mark that we're streaming tokens directly to prevent Live display from starting
384
+ self._streaming_tokens_directly = True
385
+ # Stop Live display if active to prevent it from intercepting stdout
386
+ # and causing each token to appear on a new line
387
+ if self.live is not None:
388
+ self._stop_live_display()
389
+ # Write directly to stdout - tokens will stream on the same line
390
+ # since we're bypassing Rich's console which adds newlines
391
+ sys.stdout.write(content)
392
+ sys.stdout.flush()
393
+ except Exception:
394
+ # Fallback to live display if direct write fails
395
+ self._ensure_live()
396
+ else:
397
+ # In verbose mode, use normal live display (debug panels handle the output)
398
+ self._ensure_live()
399
+
400
+ def _handle_final_response_event(self, content: str, metadata: dict[str, Any]) -> None:
316
401
  """Handle final response events."""
317
402
  if content:
318
- self.state.buffer.append(content)
319
- self.state.final_text = content
403
+ self.state.append_transcript_text(content)
404
+ self.state.set_final_output(content)
320
405
 
321
406
  meta_payload = metadata.get("metadata") or {}
322
- self._update_final_duration(meta_payload.get("time"))
407
+ final_time = coerce_server_time(meta_payload.get("time"))
408
+ self._update_final_duration(final_time)
409
+ self.thinking_controller.close_active_scopes(final_time)
410
+ self._finish_running_steps()
411
+ self.tool_controller.finish_all_panels()
412
+ self._normalise_finished_icons()
323
413
 
324
- self._ensure_live()
325
- self._print_final_panel_if_needed()
414
+ self._ensure_live()
415
+ self._print_final_panel_if_needed()
326
416
 
327
- def _handle_agent_step_event(self, ev: dict[str, Any]) -> None:
417
+ def _normalise_finished_icons(self) -> None:
418
+ """Ensure finished steps release any running spinners."""
419
+ for step in self.steps.by_id.values():
420
+ if getattr(step, "status", None) != "running":
421
+ self._step_spinners.pop(step.step_id, None)
422
+
423
+ def _handle_agent_step_event(self, ev: dict[str, Any], metadata: dict[str, Any]) -> None:
328
424
  """Handle agent step events."""
329
- # Extract tool information
330
- (
425
+ # Extract tool information using stream processor
426
+ tool_calls_result = self.stream_processor.parse_tool_calls(ev)
427
+ tool_name, tool_args, tool_out, tool_calls_info = tool_calls_result
428
+
429
+ payload = metadata.get("metadata") or {}
430
+
431
+ tracked_step: Step | None = None
432
+ try:
433
+ tracked_step = self.steps.apply_event(ev)
434
+ except ValueError:
435
+ logger.debug("Malformed step event skipped", exc_info=True)
436
+ else:
437
+ self._record_step_server_start(tracked_step, payload)
438
+ self.thinking_controller.update_timeline(
439
+ tracked_step,
440
+ payload,
441
+ enabled=self.cfg.render_thinking,
442
+ )
443
+ self._maybe_override_root_agent_label(tracked_step, payload)
444
+ self._maybe_attach_root_query(tracked_step)
445
+
446
+ # Track tools and sub-agents for transcript/debug context
447
+ self.stream_processor.track_tools_and_agents(tool_name, tool_calls_info, is_delegation_tool)
448
+
449
+ # Handle tool execution
450
+ self.tool_controller.handle_agent_step(
451
+ ev,
331
452
  tool_name,
332
453
  tool_args,
333
454
  tool_out,
334
455
  tool_calls_info,
335
- ) = self.stream_processor.parse_tool_calls(ev)
336
-
337
- # Track tools and sub-agents
338
- self.stream_processor.track_tools_and_agents(
339
- tool_name, tool_calls_info, is_delegation_tool
456
+ tracked_step=tracked_step,
340
457
  )
341
458
 
342
- # Handle tool execution
343
- self._handle_agent_step(ev, tool_name, tool_args, tool_out, tool_calls_info)
344
-
345
459
  # Update live display
346
460
  self._ensure_live()
347
461
 
462
+ def _maybe_attach_root_query(self, step: Step | None) -> None:
463
+ """Attach the user query to the root agent step for display."""
464
+ if not step or self._root_query_attached or not self._root_query or step.kind != "agent" or step.parent_id:
465
+ return
466
+
467
+ args = dict(getattr(step, "args", {}) or {})
468
+ args.setdefault("query", self._root_query)
469
+ step.args = args
470
+ self._root_query_attached = True
471
+
472
+ def _record_step_server_start(self, step: Step | None, payload: dict[str, Any]) -> None:
473
+ """Store server-provided start times for elapsed calculations."""
474
+ if not step:
475
+ return
476
+ server_time = payload.get("time")
477
+ if not isinstance(server_time, (int, float)):
478
+ return
479
+ self._step_server_start_times.setdefault(step.step_id, float(server_time))
480
+
481
+ def _maybe_override_root_agent_label(self, step: Step | None, payload: dict[str, Any]) -> None:
482
+ """Ensure the root agent row uses the human-friendly name and shows the ID."""
483
+ if not step or step.kind != "agent" or step.parent_id:
484
+ return
485
+ friendly = self._root_agent_friendly or self._humanize_agent_slug((payload or {}).get("agent_name"))
486
+ if not friendly:
487
+ return
488
+ agent_identifier = step.name or step.step_id
489
+ if not agent_identifier:
490
+ return
491
+ step.display_label = normalise_display_label(f"{ICON_AGENT} {friendly} ({agent_identifier})")
492
+ if not self._root_agent_step_id:
493
+ self._root_agent_step_id = step.step_id
494
+
495
+ # Thinking scope management is handled by ThinkingScopeController.
496
+
497
+ def _apply_root_duration(self, duration_seconds: float | None) -> None:
498
+ """Propagate the final run duration to the root agent step."""
499
+ if duration_seconds is None or not self._root_agent_step_id:
500
+ return
501
+ root_step = self.steps.by_id.get(self._root_agent_step_id)
502
+ if not root_step:
503
+ return
504
+ try:
505
+ duration_ms = max(0, int(round(float(duration_seconds) * 1000)))
506
+ except Exception:
507
+ return
508
+ root_step.duration_ms = duration_ms
509
+ root_step.duration_source = root_step.duration_source or "run"
510
+ root_step.status = "finished"
511
+
512
+ @staticmethod
513
+ def _humanize_agent_slug(value: Any) -> str | None:
514
+ """Convert a slugified agent name into Title Case."""
515
+ if not isinstance(value, str):
516
+ return None
517
+ cleaned = value.replace("_", " ").replace("-", " ").strip()
518
+ if not cleaned:
519
+ return None
520
+ parts = [part for part in cleaned.split() if part]
521
+ return " ".join(part[:1].upper() + part[1:] for part in parts)
522
+
348
523
  def _finish_running_steps(self) -> None:
349
524
  """Mark any running steps as finished to avoid lingering spinners."""
350
525
  for st in self.steps.by_id.values():
351
526
  if not is_step_finished(st):
352
- st.finish(None)
527
+ self._mark_incomplete_step(st)
353
528
 
354
- def _finish_tool_panels(self) -> None:
355
- """Mark unfinished tool panels as finished."""
356
- try:
357
- items = list(self.tool_panels.items())
358
- except Exception: # pragma: no cover - defensive guard
359
- logger.exception("Failed to iterate tool panels during cleanup")
360
- return
361
-
362
- for _sid, meta in items:
363
- if meta.get("status") != "finished":
364
- meta["status"] = "finished"
529
+ def _mark_incomplete_step(self, step: Step) -> None:
530
+ """Mark a lingering step as incomplete/warning with unknown duration."""
531
+ step.status = "finished"
532
+ step.duration_unknown = True
533
+ if step.duration_ms is None:
534
+ step.duration_ms = 0
535
+ step.duration_source = step.duration_source or "unknown"
365
536
 
366
537
  def _stop_live_display(self) -> None:
367
538
  """Stop live display and clean up."""
@@ -372,53 +543,150 @@ class RichStreamRenderer:
372
543
  if self.state.printed_final_output:
373
544
  return
374
545
 
375
- body = (self.state.final_text or "".join(self.state.buffer) or "").strip()
546
+ body = (self.state.final_text or self.state.buffer.render() or "").strip()
376
547
  if not body:
377
548
  return
378
549
 
550
+ if getattr(self, "_transcript_mode_enabled", False):
551
+ return
552
+
553
+ # When verbose=False and tokens were streamed directly, skip final panel
554
+ # The user's script will print the final result, avoiding duplication
555
+ if not self.verbose and getattr(self, "_streaming_tokens_directly", False):
556
+ # Add a newline after streaming tokens for clean separation
557
+ try:
558
+ sys.stdout.write("\n")
559
+ sys.stdout.flush()
560
+ except Exception:
561
+ pass
562
+ self.state.printed_final_output = True
563
+ return
564
+
379
565
  if self.verbose:
380
- final_panel = create_final_panel(
381
- body,
566
+ panel = build_final_panel(
567
+ self.state,
382
568
  title=self._final_panel_title(),
383
- theme=self.cfg.theme,
384
569
  )
385
- self.console.print(final_panel)
570
+ if panel is None:
571
+ return
572
+ self.console.print(panel)
386
573
  self.state.printed_final_output = True
387
574
 
575
+ def finalize(self) -> tuple[list[Any], list[Any]]:
576
+ """Compose the final transcript renderables."""
577
+ return self._compose_final_transcript()
578
+
579
+ def _compose_final_transcript(self) -> tuple[list[Any], list[Any]]:
580
+ """Build the transcript snapshot used for final summaries."""
581
+ summary_window = self._summary_window_size()
582
+ summary_window = summary_window if summary_window > 0 else None
583
+ snapshot = build_transcript_snapshot(
584
+ self.state,
585
+ self.steps,
586
+ query_text=extract_query_from_meta(self.state.meta),
587
+ meta=self.state.meta,
588
+ summary_window=summary_window,
589
+ step_status_overrides=self._build_step_status_overrides(),
590
+ )
591
+ header, body = build_transcript_view(snapshot)
592
+ self._final_transcript_snapshot = snapshot
593
+ self._final_transcript_renderables = (header, body)
594
+ return header, body
595
+
596
+ def _render_final_summary(self, header: list[Any], body: list[Any]) -> None:
597
+ """Print the composed transcript summary for non-live renders."""
598
+ renderables = list(header) + list(body)
599
+ for renderable in renderables:
600
+ try:
601
+ self.console.print(renderable)
602
+ self.console.print()
603
+ except Exception:
604
+ pass
605
+
388
606
  def on_complete(self, stats: RunStats) -> None:
389
607
  """Handle completion event."""
390
608
  self.state.finalizing_ui = True
391
609
 
392
- if isinstance(stats, RunStats):
610
+ self._handle_stats_duration(stats)
611
+ self.thinking_controller.close_active_scopes(self.state.final_duration_seconds)
612
+ self._cleanup_ui_elements()
613
+ self._finalize_display()
614
+ self._print_completion_message()
615
+
616
+ def _handle_stats_duration(self, stats: RunStats) -> None:
617
+ """Handle stats processing and duration calculation."""
618
+ if not isinstance(stats, RunStats):
619
+ return
620
+
621
+ duration = None
622
+ try:
623
+ if stats.finished_at is not None and stats.started_at is not None:
624
+ duration = max(0.0, float(stats.finished_at) - float(stats.started_at))
625
+ except Exception:
393
626
  duration = None
394
- try:
395
- if stats.finished_at is not None and stats.started_at is not None:
396
- duration = max(
397
- 0.0, float(stats.finished_at) - float(stats.started_at)
398
- )
399
- except Exception:
400
- duration = None
401
627
 
402
- if duration is not None:
403
- self._update_final_duration(duration, overwrite=True)
628
+ if duration is not None:
629
+ self._update_final_duration(duration, overwrite=True)
404
630
 
631
+ def _cleanup_ui_elements(self) -> None:
632
+ """Clean up running UI elements."""
405
633
  # Mark any running steps as finished to avoid lingering spinners
406
634
  self._finish_running_steps()
407
635
 
408
636
  # Mark unfinished tool panels as finished
409
- self._finish_tool_panels()
637
+ self.tool_controller.finish_all_panels()
638
+
639
+ def _finalize_display(self) -> None:
640
+ """Finalize live display and render final output."""
641
+ # When verbose=False and tokens were streamed directly, skip live display updates
642
+ # to avoid showing duplicate final result
643
+ if not self.verbose and getattr(self, "_streaming_tokens_directly", False):
644
+ # Just add a newline after streaming tokens for clean separation
645
+ try:
646
+ sys.stdout.write("\n")
647
+ sys.stdout.flush()
648
+ except Exception:
649
+ pass
650
+ self._stop_live_display()
651
+ self.state.printed_final_output = True
652
+ return
410
653
 
411
654
  # Final refresh
412
655
  self._ensure_live()
413
656
 
657
+ header, body = self.finalize()
658
+
414
659
  # Stop live display
415
660
  self._stop_live_display()
416
661
 
417
662
  # Render final output based on configuration
418
- self._print_final_panel_if_needed()
663
+ if self.cfg.live:
664
+ self._print_final_panel_if_needed()
665
+ else:
666
+ self._render_final_summary(header, body)
667
+
668
+ def _print_completion_message(self) -> None:
669
+ """Print completion message based on current mode."""
670
+ if self._transcript_mode_enabled:
671
+ try:
672
+ self.console.print(
673
+ "[dim]Run finished. Press Ctrl+T to return to the summary view or stay here to inspect events. "
674
+ "Use the post-run viewer for export.[/dim]"
675
+ )
676
+ except Exception:
677
+ pass
678
+ else:
679
+ # No transcript toggle in summary mode; nothing to print here.
680
+ return
419
681
 
420
682
  def _ensure_live(self) -> None:
421
683
  """Ensure live display is updated."""
684
+ if getattr(self, "_transcript_mode_enabled", False):
685
+ return
686
+ # When verbose=False, don't start Live display if we're streaming tokens directly
687
+ # This prevents Live from intercepting stdout and causing tokens to appear on separate lines
688
+ if not self.verbose and getattr(self, "_streaming_tokens_directly", False):
689
+ return
422
690
  if not self._ensure_live_stack():
423
691
  return
424
692
 
@@ -426,6 +694,12 @@ class RichStreamRenderer:
426
694
 
427
695
  if self.live:
428
696
  self._refresh_live_panels()
697
+ if (
698
+ not self._transcript_mode_enabled
699
+ and not self.state.finalizing_ui
700
+ and not self._summary_hint_printed_once
701
+ ):
702
+ self._print_summary_hint(force=True)
429
703
 
430
704
  def _ensure_live_stack(self) -> bool:
431
705
  """Guarantee the console exposes the internal live stack Rich expects."""
@@ -465,15 +739,19 @@ class RichStreamRenderer:
465
739
  if not self.live:
466
740
  return
467
741
 
468
- main_panel = self._render_main_panel()
469
- steps_renderable = self._render_steps_text()
742
+ steps_body = self._render_steps_text()
743
+ template_panel = getattr(self, "_last_steps_panel_template", None)
744
+ if template_panel is None:
745
+ template_panel = self._resolve_steps_panel()
470
746
  steps_panel = AIPPanel(
471
- steps_renderable,
472
- title="Steps",
473
- border_style="blue",
747
+ steps_body,
748
+ title=getattr(template_panel, "title", "Steps"),
749
+ border_style=getattr(template_panel, "border_style", "blue"),
750
+ padding=getattr(template_panel, "padding", (0, 1)),
474
751
  )
475
- tool_panels = self._render_tool_panels()
476
- panels = self._build_live_panels(main_panel, steps_panel, tool_panels)
752
+
753
+ main_panel = self._render_main_panel()
754
+ panels = self._build_live_panels(main_panel, steps_panel)
477
755
 
478
756
  self.live.update(Group(*panels))
479
757
 
@@ -481,40 +759,28 @@ class RichStreamRenderer:
481
759
  self,
482
760
  main_panel: Any,
483
761
  steps_panel: Any,
484
- tool_panels: list[Any],
485
762
  ) -> list[Any]:
486
763
  """Assemble the panel order for the live display."""
487
764
  if self.verbose:
488
- return [main_panel, steps_panel, *tool_panels]
765
+ return [main_panel, steps_panel]
489
766
 
490
- panels: list[Any] = [steps_panel]
491
- if tool_panels:
492
- panels.extend(tool_panels)
493
- panels.append(main_panel)
494
- return panels
767
+ return [steps_panel, main_panel]
495
768
 
496
769
  def _render_main_panel(self) -> Any:
497
770
  """Render the main content panel."""
498
- body = "".join(self.state.buffer).strip()
771
+ body = self.state.buffer.render().strip()
772
+ theme = DEFAULT_TRANSCRIPT_THEME
499
773
  if not self.verbose:
500
- final_content = (self.state.final_text or "").strip()
501
- if final_content:
502
- title = self._final_panel_title()
503
- return create_final_panel(
504
- final_content,
505
- title=title,
506
- theme=self.cfg.theme,
507
- )
774
+ panel = build_final_panel(self.state, theme=theme)
775
+ if panel is not None:
776
+ return panel
508
777
  # Dynamic title with spinner + elapsed/hints
509
778
  title = self._format_enhanced_main_title()
510
- return create_main_panel(body, title, self.cfg.theme)
779
+ return create_main_panel(body, title, theme)
511
780
 
512
781
  def _final_panel_title(self) -> str:
513
782
  """Compose title for the final result panel including duration."""
514
- title = "Final Result"
515
- if self.state.final_duration_text:
516
- title = f"{title} · {self.state.final_duration_text}"
517
- return title
783
+ return format_final_panel_title(self.state)
518
784
 
519
785
  def apply_verbosity(self, verbose: bool) -> None:
520
786
  """Update verbose behaviour at runtime."""
@@ -522,8 +788,6 @@ class RichStreamRenderer:
522
788
  return
523
789
 
524
790
  self.verbose = verbose
525
- self.cfg.style = "debug" if verbose else "pretty"
526
-
527
791
  desired_live = not verbose
528
792
  if desired_live != self.cfg.live:
529
793
  self.cfg.live = desired_live
@@ -535,459 +799,16 @@ class RichStreamRenderer:
535
799
  if self.cfg.live:
536
800
  self._ensure_live()
537
801
 
538
- # ------------------------------------------------------------------
539
- # Transcript helpers
540
- # ------------------------------------------------------------------
541
- def _capture_event(
542
- self, ev: dict[str, Any], received_at: datetime | None = None
543
- ) -> None:
544
- """Capture a deep copy of SSE events for transcript replay."""
545
- try:
546
- captured = json.loads(json.dumps(ev))
547
- except Exception:
548
- captured = ev
549
-
550
- if received_at is not None:
551
- try:
552
- captured["received_at"] = received_at.isoformat()
553
- except Exception:
554
- try:
555
- captured["received_at"] = str(received_at)
556
- except Exception:
557
- captured["received_at"] = repr(received_at)
558
-
559
- self.state.events.append(captured)
802
+ # Transcript helper implementations live in TranscriptModeMixin.
560
803
 
561
804
  def get_aggregated_output(self) -> str:
562
805
  """Return the concatenated assistant output collected so far."""
563
- return ("".join(self.state.buffer or [])).strip()
806
+ return self.state.buffer.render().strip()
564
807
 
565
808
  def get_transcript_events(self) -> list[dict[str, Any]]:
566
809
  """Return captured SSE events."""
567
810
  return list(self.state.events)
568
811
 
569
- def _maybe_insert_thinking_gap(
570
- self, task_id: str | None, context_id: str | None
571
- ) -> None:
572
- """Insert thinking gap if needed."""
573
- # Implementation would track thinking states
574
- pass
575
-
576
- def _ensure_tool_panel(
577
- self, name: str, args: Any, task_id: str, context_id: str
578
- ) -> str:
579
- """Ensure a tool panel exists and return its ID."""
580
- formatted_title = format_tool_title(name)
581
- is_delegation = is_delegation_tool(name)
582
- tool_sid = f"tool_{name}_{task_id}_{context_id}"
583
-
584
- if tool_sid not in self.tool_panels:
585
- self.tool_panels[tool_sid] = {
586
- "title": formatted_title,
587
- "status": "running",
588
- "started_at": monotonic(),
589
- "server_started_at": self.stream_processor.server_elapsed_time,
590
- "chunks": [],
591
- "args": args or {},
592
- "output": None,
593
- "is_delegation": is_delegation,
594
- }
595
- # Add Args section once
596
- if args:
597
- try:
598
- args_content = (
599
- "**Args:**\n```json\n"
600
- + json.dumps(args, indent=2)
601
- + "\n```\n\n"
602
- )
603
- except Exception:
604
- args_content = f"**Args:**\n{args}\n\n"
605
- self.tool_panels[tool_sid]["chunks"].append(args_content)
606
- self.tool_order.append(tool_sid)
607
-
608
- return tool_sid
609
-
610
- def _start_tool_step(
611
- self,
612
- task_id: str,
613
- context_id: str,
614
- tool_name: str,
615
- tool_args: Any,
616
- _tool_sid: str,
617
- ) -> Step | None:
618
- """Start or get a step for a tool."""
619
- if is_delegation_tool(tool_name):
620
- st = self.steps.start_or_get(
621
- task_id=task_id,
622
- context_id=context_id,
623
- kind="delegate",
624
- name=tool_name,
625
- args=tool_args,
626
- )
627
- else:
628
- st = self.steps.start_or_get(
629
- task_id=task_id,
630
- context_id=context_id,
631
- kind="tool",
632
- name=tool_name,
633
- args=tool_args,
634
- )
635
-
636
- # Record server start time for this step if available
637
- if st and self.stream_processor.server_elapsed_time is not None:
638
- self._step_server_start_times[st.step_id] = (
639
- self.stream_processor.server_elapsed_time
640
- )
641
-
642
- return st
643
-
644
- def _process_additional_tool_calls(
645
- self,
646
- tool_calls_info: list[tuple[str, Any, Any]],
647
- tool_name: str,
648
- task_id: str,
649
- context_id: str,
650
- ) -> None:
651
- """Process additional tool calls to avoid duplicates."""
652
- for call_name, call_args, _ in tool_calls_info or []:
653
- if call_name and call_name != tool_name:
654
- self._process_single_tool_call(
655
- call_name, call_args, task_id, context_id
656
- )
657
-
658
- def _process_single_tool_call(
659
- self, call_name: str, call_args: Any, task_id: str, context_id: str
660
- ) -> None:
661
- """Process a single additional tool call."""
662
- self._ensure_tool_panel(call_name, call_args, task_id, context_id)
663
-
664
- st2 = self._create_step_for_tool_call(call_name, call_args, task_id, context_id)
665
-
666
- if self.stream_processor.server_elapsed_time is not None and st2:
667
- self._step_server_start_times[st2.step_id] = (
668
- self.stream_processor.server_elapsed_time
669
- )
670
-
671
- def _create_step_for_tool_call(
672
- self, call_name: str, call_args: Any, task_id: str, context_id: str
673
- ) -> Any:
674
- """Create appropriate step for tool call."""
675
- if is_delegation_tool(call_name):
676
- return self.steps.start_or_get(
677
- task_id=task_id,
678
- context_id=context_id,
679
- kind="delegate",
680
- name=call_name,
681
- args=call_args,
682
- )
683
- else:
684
- return self.steps.start_or_get(
685
- task_id=task_id,
686
- context_id=context_id,
687
- kind="tool",
688
- name=call_name,
689
- args=call_args,
690
- )
691
-
692
- def _detect_tool_completion(
693
- self, metadata: dict, content: str
694
- ) -> tuple[bool, str | None, Any]:
695
- """Detect if a tool has completed and return completion info."""
696
- tool_info = metadata.get("tool_info", {}) if isinstance(metadata, dict) else {}
697
-
698
- if tool_info.get("status") == "finished" and tool_info.get("name"):
699
- return True, tool_info.get("name"), tool_info.get("output")
700
- elif content and isinstance(content, str) and content.startswith("Completed "):
701
- # content like "Completed google_serper"
702
- tname = content.replace("Completed ", "").strip()
703
- if tname:
704
- output = (
705
- tool_info.get("output") if tool_info.get("name") == tname else None
706
- )
707
- return True, tname, output
708
- elif metadata.get("status") == "finished" and tool_info.get("name"):
709
- return True, tool_info.get("name"), tool_info.get("output")
710
-
711
- return False, None, None
712
-
713
- def _get_tool_session_id(
714
- self, finished_tool_name: str, task_id: str, context_id: str
715
- ) -> str:
716
- """Generate tool session ID."""
717
- return f"tool_{finished_tool_name}_{task_id}_{context_id}"
718
-
719
- def _calculate_tool_duration(self, meta: dict[str, Any]) -> float | None:
720
- """Calculate tool duration from metadata."""
721
- server_now = self.stream_processor.server_elapsed_time
722
- server_start = meta.get("server_started_at")
723
- dur = None
724
-
725
- try:
726
- if isinstance(server_now, (int, float)) and server_start is not None:
727
- dur = max(0.0, float(server_now) - float(server_start))
728
- else:
729
- started_at = meta.get("started_at")
730
- if started_at is not None:
731
- started_at_float = float(started_at)
732
- dur = max(0.0, float(monotonic()) - started_at_float)
733
- except (TypeError, ValueError):
734
- logger.exception("Failed to calculate tool duration")
735
- return None
736
-
737
- return dur
738
-
739
- def _update_tool_metadata(self, meta: dict[str, Any], dur: float | None) -> None:
740
- """Update tool metadata with duration information."""
741
- if dur is not None:
742
- meta["duration_seconds"] = dur
743
- meta["server_finished_at"] = (
744
- self.stream_processor.server_elapsed_time
745
- if isinstance(self.stream_processor.server_elapsed_time, int | float)
746
- else None
747
- )
748
- meta["finished_at"] = monotonic()
749
-
750
- def _add_tool_output_to_panel(
751
- self, meta: dict[str, Any], finished_tool_output: Any, finished_tool_name: str
752
- ) -> None:
753
- """Add tool output to panel metadata."""
754
- if finished_tool_output is not None:
755
- meta["chunks"].append(
756
- self._format_output_block(finished_tool_output, finished_tool_name)
757
- )
758
- meta["output"] = finished_tool_output
759
-
760
- def _mark_panel_as_finished(self, meta: dict[str, Any], tool_sid: str) -> None:
761
- """Mark panel as finished and ensure visibility."""
762
- if meta.get("status") != "finished":
763
- meta["status"] = "finished"
764
-
765
- dur = self._calculate_tool_duration(meta)
766
- self._update_tool_metadata(meta, dur)
767
-
768
- # Ensure this finished panel is visible in this frame
769
- self.stream_processor.current_event_finished_panels.add(tool_sid)
770
-
771
- def _finish_tool_panel(
772
- self,
773
- finished_tool_name: str,
774
- finished_tool_output: Any,
775
- task_id: str,
776
- context_id: str,
777
- ) -> None:
778
- """Finish a tool panel and update its status."""
779
- tool_sid = self._get_tool_session_id(finished_tool_name, task_id, context_id)
780
- if tool_sid not in self.tool_panels:
781
- return
782
-
783
- meta = self.tool_panels[tool_sid]
784
- self._mark_panel_as_finished(meta, tool_sid)
785
- self._add_tool_output_to_panel(meta, finished_tool_output, finished_tool_name)
786
-
787
- def _get_step_duration(
788
- self, finished_tool_name: str, task_id: str, context_id: str
789
- ) -> float | None:
790
- """Get step duration from tool panels."""
791
- tool_sid = f"tool_{finished_tool_name}_{task_id}_{context_id}"
792
- return self.tool_panels.get(tool_sid, {}).get("duration_seconds")
793
-
794
- def _finish_delegation_step(
795
- self,
796
- finished_tool_name: str,
797
- finished_tool_output: Any,
798
- task_id: str,
799
- context_id: str,
800
- step_duration: float | None,
801
- ) -> None:
802
- """Finish a delegation step."""
803
- self.steps.finish(
804
- task_id=task_id,
805
- context_id=context_id,
806
- kind="delegate",
807
- name=finished_tool_name,
808
- output=finished_tool_output,
809
- duration_raw=step_duration,
810
- )
811
-
812
- def _finish_tool_step_type(
813
- self,
814
- finished_tool_name: str,
815
- finished_tool_output: Any,
816
- task_id: str,
817
- context_id: str,
818
- step_duration: float | None,
819
- ) -> None:
820
- """Finish a regular tool step."""
821
- self.steps.finish(
822
- task_id=task_id,
823
- context_id=context_id,
824
- kind="tool",
825
- name=finished_tool_name,
826
- output=finished_tool_output,
827
- duration_raw=step_duration,
828
- )
829
-
830
-    def _finish_tool_step(
-        self,
-        finished_tool_name: str,
-        finished_tool_output: Any,
-        task_id: str,
-        context_id: str,
-    ) -> None:
-        """Finish the corresponding step for a completed tool."""
-        step_duration = self._get_step_duration(finished_tool_name, task_id, context_id)
-
-        if is_delegation_tool(finished_tool_name):
-            self._finish_delegation_step(
-                finished_tool_name,
-                finished_tool_output,
-                task_id,
-                context_id,
-                step_duration,
-            )
-        else:
-            self._finish_tool_step_type(
-                finished_tool_name,
-                finished_tool_output,
-                task_id,
-                context_id,
-                step_duration,
-            )
-
-    def _should_create_snapshot(self, tool_sid: str) -> bool:
-        """Check if a snapshot should be created."""
-        return self.cfg.append_finished_snapshots and not self.tool_panels.get(
-            tool_sid, {}
-        ).get("snapshot_printed")
-
-    def _get_snapshot_title(self, meta: dict[str, Any], finished_tool_name: str) -> str:
-        """Get the title for the snapshot."""
-        adjusted_title = meta.get("title") or finished_tool_name
-
-        # Add elapsed time to title
-        dur = meta.get("duration_seconds")
-        if isinstance(dur, int | float):
-            elapsed_str = self._format_snapshot_duration(dur)
-            adjusted_title = f"{adjusted_title} · {elapsed_str}"
-
-        return adjusted_title
-
-    def _format_snapshot_duration(self, dur: int | float) -> str:
-        """Format duration for snapshot title."""
-        try:
-            # Handle invalid types
-            if not isinstance(dur, (int, float)):
-                return "<1ms"
-
-            if dur >= 1:
-                return f"{dur:.2f}s"
-            elif int(dur * 1000) > 0:
-                return f"{int(dur * 1000)}ms"
-            else:
-                return "<1ms"
-        except (TypeError, ValueError, OverflowError):
-            return "<1ms"
-
-    def _clamp_snapshot_body(self, body_text: str) -> str:
-        """Clamp snapshot body to configured limits."""
-        max_lines = int(self.cfg.snapshot_max_lines or 0)
-        lines = body_text.splitlines()
-        if max_lines > 0 and len(lines) > max_lines:
-            lines = lines[:max_lines] + ["… (truncated)"]
-            body_text = "\n".join(lines)
-
-        max_chars = int(self.cfg.snapshot_max_chars or 0)
-        if max_chars > 0 and len(body_text) > max_chars:
-            suffix = "\n… (truncated)"
-            body_text = body_text[: max_chars - len(suffix)] + suffix
-
-        return body_text
-
-    def _create_snapshot_panel(
-        self, adjusted_title: str, body_text: str, finished_tool_name: str
-    ) -> Any:
-        """Create the snapshot panel."""
-        return create_tool_panel(
-            title=adjusted_title,
-            content=body_text or "(no output)",
-            status="finished",
-            theme=self.cfg.theme,
-            is_delegation=is_delegation_tool(finished_tool_name),
-        )
-
-    def _print_and_mark_snapshot(self, tool_sid: str, snapshot_panel: Any) -> None:
-        """Print snapshot and mark as printed."""
-        self.console.print(snapshot_panel)
-        self.tool_panels[tool_sid]["snapshot_printed"] = True
-
-    def _create_tool_snapshot(
-        self, finished_tool_name: str, task_id: str, context_id: str
-    ) -> None:
-        """Create and print a snapshot for a finished tool."""
-        tool_sid = f"tool_{finished_tool_name}_{task_id}_{context_id}"
-
-        if not self._should_create_snapshot(tool_sid):
-            return
-
-        meta = self.tool_panels[tool_sid]
-        adjusted_title = self._get_snapshot_title(meta, finished_tool_name)
-
-        # Compose body from chunks and clamp
-        body_text = "".join(meta.get("chunks") or [])
-        body_text = self._clamp_snapshot_body(body_text)
-
-        snapshot_panel = self._create_snapshot_panel(
-            adjusted_title, body_text, finished_tool_name
-        )
-
-        self._print_and_mark_snapshot(tool_sid, snapshot_panel)
-
-    def _handle_agent_step(
-        self,
-        event: dict[str, Any],
-        tool_name: str | None,
-        tool_args: Any,
-        _tool_out: Any,
-        tool_calls_info: list[tuple[str, Any, Any]],
-    ) -> None:
-        """Handle agent step event."""
-        metadata = event.get("metadata", {})
-        task_id = event.get("task_id")
-        context_id = event.get("context_id")
-        content = event.get("content", "")
-
-        # Create steps and panels for the primary tool
-        if tool_name:
-            tool_sid = self._ensure_tool_panel(
-                tool_name, tool_args, task_id, context_id
-            )
-            self._start_tool_step(task_id, context_id, tool_name, tool_args, tool_sid)
-
-        # Handle additional tool calls
-        self._process_additional_tool_calls(
-            tool_calls_info, tool_name, task_id, context_id
-        )
-
-        # Check for tool completion
-        (
-            is_tool_finished,
-            finished_tool_name,
-            finished_tool_output,
-        ) = self._detect_tool_completion(metadata, content)
-
-        if is_tool_finished and finished_tool_name:
-            self._finish_tool_panel(
-                finished_tool_name, finished_tool_output, task_id, context_id
-            )
-            self._finish_tool_step(
-                finished_tool_name, finished_tool_output, task_id, context_id
-            )
-            self._create_tool_snapshot(finished_tool_name, task_id, context_id)
-
-    def _spinner(self) -> str:
-        """Return spinner character."""
-        return get_spinner()
-
     def _format_working_indicator(self, started_at: float | None) -> str:
         """Format working indicator."""
         return format_working_indicator(
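For context on what the removed snapshot helpers above did, here is a minimal, self-contained sketch of the body-clamping behaviour; the function name and the default limits are illustrative assumptions, not SDK API.

```python
# Illustrative sketch only - mirrors the clamping logic of the removed
# _clamp_snapshot_body helper; the default limits are assumptions.
def clamp_snapshot_body(body_text: str, max_lines: int = 20, max_chars: int = 2000) -> str:
    lines = body_text.splitlines()
    if max_lines > 0 and len(lines) > max_lines:
        # Keep the first max_lines lines and append a truncation marker.
        lines = lines[:max_lines] + ["… (truncated)"]
        body_text = "\n".join(lines)
    if max_chars > 0 and len(body_text) > max_chars:
        suffix = "\n… (truncated)"
        body_text = body_text[: max_chars - len(suffix)] + suffix
    return body_text

print(clamp_snapshot_body("chunk line\n" * 50))  # keeps 20 lines plus the marker
```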
@@ -1030,9 +851,7 @@ class RichStreamRenderer:
 
     def _get_analysis_progress_info(self) -> dict[str, Any]:
         total_steps = len(self.steps.order)
-        completed_steps = sum(
-            1 for sid in self.steps.order if is_step_finished(self.steps.by_id[sid])
-        )
+        completed_steps = sum(1 for sid in self.steps.order if is_step_finished(self.steps.by_id[sid]))
         current_step = None
         for sid in self.steps.order:
             if not is_step_finished(self.steps.by_id[sid]):
@@ -1040,13 +859,11 @@ class RichStreamRenderer:
                 break
         # Prefer server elapsed time when available
         elapsed = 0.0
-        if isinstance(self.stream_processor.server_elapsed_time, int | float):
+        if isinstance(self.stream_processor.server_elapsed_time, (int, float)):
             elapsed = float(self.stream_processor.server_elapsed_time)
         elif self._started_at is not None:
             elapsed = monotonic() - self._started_at
-        progress_percent = (
-            int((completed_steps / total_steps) * 100) if total_steps else 0
-        )
+        progress_percent = int((completed_steps / total_steps) * 100) if total_steps else 0
         return {
             "total_steps": total_steps,
             "completed_steps": completed_steps,
@@ -1100,29 +917,42 @@ class RichStreamRenderer:
     def _format_step_status(self, step: Step) -> str:
         """Format step status with elapsed time or duration."""
         if is_step_finished(step):
-            if step.duration_ms is None:
-                return LESS_THAN_1MS
-            elif step.duration_ms >= 1000:
-                return f"[{step.duration_ms / 1000:.2f}s]"
-            elif step.duration_ms > 0:
-                return f"[{step.duration_ms}ms]"
-            return LESS_THAN_1MS
+            return self._format_finished_badge(step)
         else:
             # Calculate elapsed time for running steps
             elapsed = self._calculate_step_elapsed_time(step)
-            if elapsed >= 1:
+            if elapsed >= 0.1:
                 return f"[{elapsed:.2f}s]"
-            ms = int(elapsed * 1000)
-            return f"[{ms}ms]" if ms > 0 else LESS_THAN_1MS
+            ms = int(round(elapsed * 1000))
+            if ms <= 0:
+                return ""
+            return f"[{ms}ms]"
+
+    def _format_finished_badge(self, step: Step) -> str:
+        """Compose duration badge for finished steps including source tagging."""
+        if getattr(step, "duration_unknown", False) is True:
+            payload = "??s"
+        else:
+            duration_ms = step.duration_ms
+            if duration_ms is None:
+                payload = "<1ms"
+            elif duration_ms < 0:
+                payload = "<1ms"
+            elif duration_ms >= 100:
+                payload = f"{duration_ms / 1000:.2f}s"
+            elif duration_ms > 0:
+                payload = f"{duration_ms}ms"
+            else:
+                payload = "<1ms"
+
+        return f"[{payload}]"
 
     def _calculate_step_elapsed_time(self, step: Step) -> float:
         """Calculate elapsed time for a running step."""
         server_elapsed = self.stream_processor.server_elapsed_time
         server_start = self._step_server_start_times.get(step.step_id)
 
-        if isinstance(server_elapsed, int | float) and isinstance(
-            server_start, int | float
-        ):
+        if isinstance(server_elapsed, (int, float)) and isinstance(server_start, (int, float)):
             return max(0.0, float(server_elapsed) - float(server_start))
 
         try:
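To make the new finished-step badge thresholds concrete, here is a standalone sketch that mirrors the `_format_finished_badge` branches above (the `Step` object is reduced to a bare duration, which is an assumption made for illustration; the None/zero/negative branches are collapsed into one check with the same result):

```python
# Illustrative sketch of the finished-step badge thresholds introduced above.
def format_finished_badge(duration_ms: int | None, duration_unknown: bool = False) -> str:
    if duration_unknown:
        payload = "??s"                          # duration could not be determined
    elif duration_ms is None or duration_ms <= 0:
        payload = "<1ms"                         # missing, zero, or negative duration
    elif duration_ms >= 100:
        payload = f"{duration_ms / 1000:.2f}s"   # 100 ms and up render as seconds
    else:
        payload = f"{duration_ms}ms"             # 1-99 ms render as milliseconds
    return f"[{payload}]"

assert format_finished_badge(None) == "[<1ms]"
assert format_finished_badge(42) == "[42ms]"
assert format_finished_badge(100) == "[0.10s]"
assert format_finished_badge(2500) == "[2.50s]"
assert format_finished_badge(None, duration_unknown=True) == "[??s]"
```

Note the seconds cutoff moves from 1000 ms in the old code to 100 ms here, so sub-second durations now render as, for example, `[0.10s]` rather than `[100ms]`.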
@@ -1136,6 +966,10 @@ class RichStreamRenderer:
             return step.name
         return "thinking..." if step.kind == "agent" else f"{step.kind} step"
 
+    def _resolve_step_label(self, step: Step) -> str:
+        """Return the display label for a step with sensible fallbacks."""
+        return format_step_label(step)
+
     def _check_parallel_tools(self) -> dict[tuple[str | None, str | None], list]:
         """Check for parallel running tools."""
         running_by_ctx: dict[tuple[str | None, str | None], list] = {}
@@ -1155,72 +989,77 @@ class RichStreamRenderer:
             key = (step.task_id, step.context_id)
         return len(running_by_ctx.get(key, [])) > 1
 
-    def _compose_step_renderable(
-        self,
-        step: Step,
-        running_by_ctx: dict[tuple[str | None, str | None], list],
-    ) -> Any:
-        """Compose a single renderable for the steps panel."""
-        finished = is_step_finished(step)
-        status_br = self._format_step_status(step)
-        display_name = self._get_step_display_name(step)
-
-        if (
-            not finished
-            and step.kind == "tool"
-            and self._is_parallel_tool(step, running_by_ctx)
-        ):
-            status_br = status_br.replace("]", " 🔄]")
-
-        icon = self._get_step_icon(step.kind)
-        text_line = Text(style="dim")
-        text_line.append(icon)
-        text_line.append(" ")
-        text_line.append(display_name)
-        if status_br:
-            text_line.append(" ")
-            text_line.append(status_br)
-        if finished:
-            text_line.append(" ✓")
-
-        if finished:
-            return text_line
-
-        spinner = Spinner("dots", text=text_line, style="dim")
-        return Align.left(spinner)
-
-    def _render_steps_text(self) -> Any:
-        """Render the steps panel content."""
-        if not (self.steps.order or self.steps.children):
-            return Text("No steps yet", style="dim")
-
-        running_by_ctx = self._check_parallel_tools()
-        renderables: list[Any] = []
+    def _build_step_status_overrides(self) -> dict[str, str]:
+        """Return status text overrides for steps (running duration badges)."""
+        overrides: dict[str, str] = {}
         for sid in self.steps.order:
-            line = self._compose_step_renderable(self.steps.by_id[sid], running_by_ctx)
-            renderables.append(line)
-
-        if not renderables:
-            return Text("No steps yet", style="dim")
-
-        return Group(*renderables)
-
-    def _should_skip_finished_panel(self, sid: str, status: str) -> bool:
-        """Check if a finished panel should be skipped."""
-        if status != "finished":
-            return False
+            step = self.steps.by_id.get(sid)
+            if not step:
+                continue
+            try:
+                status_text = self._format_step_status(step)
+            except Exception:
+                status_text = ""
+            if status_text:
+                overrides[sid] = status_text
+        return overrides
+
+    def _resolve_steps_panel(self) -> AIPPanel:
+        """Return the shared steps panel renderable generated by layout helpers."""
+        window_arg = self._summary_window_size()
+        window_arg = window_arg if window_arg > 0 else None
+        panels = render_summary_panels(
+            self.state,
+            self.steps,
+            summary_window=window_arg,
+            include_query_panel=False,
+            include_final_panel=False,
+            step_status_overrides=self._build_step_status_overrides(),
+        )
+        steps_panel = next((panel for panel in panels if getattr(panel, "title", "").lower() == "steps"), None)
+        panel_cls = AIPPanel if isinstance(AIPPanel, type) else None
+        if steps_panel is not None and (panel_cls is None or isinstance(steps_panel, panel_cls)):
+            return steps_panel
+        return AIPPanel(_NO_STEPS_TEXT.copy(), title="Steps", border_style="blue")
+
+    def _prepare_steps_renderable(self, *, include_progress: bool) -> tuple[AIPPanel, Any]:
+        """Return the template panel and content renderable for steps."""
+        panel = self._resolve_steps_panel()
+        self._last_steps_panel_template = panel
+        base_renderable: Any = getattr(panel, "renderable", panel)
+
+        if include_progress and not self.state.finalizing_ui:
+            footer = build_progress_footer(
+                state=self.state,
+                steps=self.steps,
+                started_at=self._started_at,
+                server_elapsed_time=self.stream_processor.server_elapsed_time,
+            )
+            if footer is not None:
+                if isinstance(base_renderable, Group):
+                    base_renderable = Group(*base_renderable.renderables, footer)
+                else:
+                    base_renderable = Group(base_renderable, footer)
+        return panel, base_renderable
+
+    def _build_steps_body(self, *, include_progress: bool) -> Any:
+        """Return the rendered steps body with optional progress footer."""
+        _, renderable = self._prepare_steps_renderable(include_progress=include_progress)
+        if isinstance(renderable, Group):
+            return renderable
+        return Group(renderable)
 
-        if getattr(self.cfg, "append_finished_snapshots", False):
-            return True
+    def _render_steps_text(self) -> Any:
+        """Return the rendered steps body used by transcript capture."""
+        return self._build_steps_body(include_progress=True)
 
-        return (
-            not self.state.finalizing_ui
-            and sid not in self.stream_processor.current_event_finished_panels
-        )
+    def _summary_window_size(self) -> int:
+        """Return the active window size for step display."""
+        if self.state.finalizing_ui:
+            return 0
+        return int(self.cfg.summary_display_window or 0)
 
-    def _update_final_duration(
-        self, duration: float | None, *, overwrite: bool = False
-    ) -> None:
+    def _update_final_duration(self, duration: float | None, *, overwrite: bool = False) -> None:
         """Store formatted duration for eventual final panels."""
         if duration is None:
             return
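The new `_prepare_steps_renderable` appends an optional progress footer to the shared steps panel by re-wrapping the Rich `Group`. A hedged sketch of that composition pattern follows; the step lines and footer text are invented for illustration and are not produced by the SDK:

```python
# Minimal sketch of appending a footer to an existing Rich renderable,
# in the spirit of the new _prepare_steps_renderable; all text is made up.
from rich.console import Console, Group
from rich.panel import Panel
from rich.text import Text

steps_body = Group(
    Text("🔧 get_weather [0.42s] ✓"),
    Text("🤖 thinking... [12ms]"),
)
footer = Text("2/4 steps · 50% · 3.1s elapsed", style="dim")

# Re-wrap the existing Group so the footer renders below the step lines.
body = Group(*steps_body.renderables, footer)
Console().print(Panel(body, title="Steps", border_style="blue"))
```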
@@ -1238,193 +1077,6 @@ class RichStreamRenderer:
         if overwrite and existing is not None:
             duration_val = max(existing, duration_val)
 
-        self.state.final_duration_seconds = duration_val
-        self.state.final_duration_text = self._format_elapsed_time(duration_val)
-
-    def _calculate_elapsed_time(self, meta: dict[str, Any]) -> str:
-        """Calculate elapsed time string for running tools."""
-        server_elapsed = self.stream_processor.server_elapsed_time
-        server_start = meta.get("server_started_at")
-
-        if isinstance(server_elapsed, int | float) and isinstance(
-            server_start, int | float
-        ):
-            elapsed = max(0.0, float(server_elapsed) - float(server_start))
-        else:
-            elapsed = max(0.0, monotonic() - (meta.get("started_at") or 0.0))
-
-        return self._format_elapsed_time(elapsed)
-
-    def _format_elapsed_time(self, elapsed: float) -> str:
-        """Format elapsed time as a readable string."""
-        if elapsed >= 1:
-            return f"{elapsed:.2f}s"
-        elif int(elapsed * 1000) > 0:
-            return f"{int(elapsed * 1000)}ms"
-        else:
-            return "<1ms"
-
-    def _calculate_finished_duration(self, meta: dict[str, Any]) -> str | None:
-        """Calculate duration string for finished tools."""
-        dur = meta.get("duration_seconds")
-        if isinstance(dur, int | float):
-            return self._format_elapsed_time(dur)
-
-        try:
-            server_now = self.stream_processor.server_elapsed_time
-            server_start = meta.get("server_started_at")
-            if isinstance(server_now, int | float) and isinstance(
-                server_start, int | float
-            ):
-                dur = max(0.0, float(server_now) - float(server_start))
-            elif meta.get("started_at") is not None:
-                dur = max(0.0, float(monotonic() - meta.get("started_at")))
-        except Exception:
-            dur = None
-
-        return self._format_elapsed_time(dur) if isinstance(dur, int | float) else None
-
-    def _process_running_tool_panel(
-        self,
-        title: str,
-        meta: dict[str, Any],
-        body: str,
-        *,
-        include_spinner: bool = False,
-    ) -> tuple[str, str] | tuple[str, str, str | None]:
-        """Process a running tool panel."""
-        elapsed_str = self._calculate_elapsed_time(meta)
-        adjusted_title = f"{title} · {elapsed_str}"
-        chip = f"⏱ {elapsed_str}"
-        spinner_message: str | None = None
-
-        if not body.strip():
-            body = ""
-            spinner_message = f"{title} running... {elapsed_str}"
-        else:
-            body = f"{body}\n\n{chip}"
-
-        if include_spinner:
-            return adjusted_title, body, spinner_message
-        return adjusted_title, body
-
-    def _process_finished_tool_panel(self, title: str, meta: dict[str, Any]) -> str:
-        """Process a finished tool panel."""
-        duration_str = self._calculate_finished_duration(meta)
-        return f"{title} · {duration_str}" if duration_str else title
-
-    def _create_tool_panel_for_session(
-        self, sid: str, meta: dict[str, Any]
-    ) -> AIPPanel | None:
-        """Create a single tool panel for the session."""
-        title = meta.get("title") or "Tool"
-        status = meta.get("status") or "running"
-        chunks = meta.get("chunks") or []
-        is_delegation = bool(meta.get("is_delegation"))
-
-        if self._should_skip_finished_panel(sid, status):
-            return None
-
-        body = "".join(chunks)
-        adjusted_title = title
-
-        spinner_message: str | None = None
-
-        if status == "running":
-            adjusted_title, body, spinner_message = self._process_running_tool_panel(
-                title, meta, body, include_spinner=True
-            )
-        elif status == "finished":
-            adjusted_title = self._process_finished_tool_panel(title, meta)
-
-        return create_tool_panel(
-            title=adjusted_title,
-            content=body,
-            status=status,
-            theme=self.cfg.theme,
-            is_delegation=is_delegation,
-            spinner_message=spinner_message,
-        )
-
-    def _render_tool_panels(self) -> list[AIPPanel]:
-        """Render tool execution output panels."""
-        if not getattr(self.cfg, "show_delegate_tool_panels", False):
-            return []
-        panels: list[AIPPanel] = []
-        for sid in self.tool_order:
-            meta = self.tool_panels.get(sid) or {}
-            panel = self._create_tool_panel_for_session(sid, meta)
-            if panel:
-                panels.append(panel)
-
-        return panels
-
-    def _format_dict_or_list_output(self, output_value: dict | list) -> str:
-        """Format dict/list output as pretty JSON."""
-        try:
-            return (
-                self.OUTPUT_PREFIX
-                + "```json\n"
-                + json.dumps(output_value, indent=2)
-                + "\n```\n"
-            )
-        except Exception:
-            return self.OUTPUT_PREFIX + str(output_value) + "\n"
-
-    def _clean_sub_agent_prefix(self, output: str, tool_name: str | None) -> str:
-        """Clean sub-agent name prefix from output."""
-        if not (tool_name and is_delegation_tool(tool_name)):
-            return output
-
-        sub = tool_name
-        if tool_name.startswith("delegate_to_"):
-            sub = tool_name.replace("delegate_to_", "")
-        elif tool_name.startswith("delegate_"):
-            sub = tool_name.replace("delegate_", "")
-        prefix = f"[{sub}]"
-        if output.startswith(prefix):
-            return output[len(prefix) :].lstrip()
-
-        return output
-
-    def _format_json_string_output(self, output: str) -> str:
-        """Format string that looks like JSON."""
-        try:
-            parsed = json.loads(output)
-            return (
-                self.OUTPUT_PREFIX
-                + "```json\n"
-                + json.dumps(parsed, indent=2)
-                + "\n```\n"
-            )
-        except Exception:
-            return self.OUTPUT_PREFIX + output + "\n"
-
-    def _format_string_output(self, output: str, tool_name: str | None) -> str:
-        """Format string output with optional prefix cleaning."""
-        s = output.strip()
-        s = self._clean_sub_agent_prefix(s, tool_name)
-
-        # If looks like JSON, pretty print it
-        if (s.startswith("{") and s.endswith("}")) or (
-            s.startswith("[") and s.endswith("]")
-        ):
-            return self._format_json_string_output(s)
-
-        return self.OUTPUT_PREFIX + s + "\n"
-
-    def _format_other_output(self, output_value: Any) -> str:
-        """Format other types of output."""
-        try:
-            return self.OUTPUT_PREFIX + json.dumps(output_value, indent=2) + "\n"
-        except Exception:
-            return self.OUTPUT_PREFIX + str(output_value) + "\n"
-
-    def _format_output_block(self, output_value: Any, tool_name: str | None) -> str:
-        """Format an output value for panel display."""
-        if isinstance(output_value, dict | list):
-            return self._format_dict_or_list_output(output_value)
-        elif isinstance(output_value, str):
-            return self._format_string_output(output_value, tool_name)
-        else:
-            return self._format_other_output(output_value)
+        formatted = format_elapsed_time(duration_val)
+        self.state.mark_final_duration(duration_val, formatted=formatted)
+        self._apply_root_duration(duration_val)
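The final-duration update now delegates formatting to the shared `format_elapsed_time` helper and records the result via `state.mark_final_duration`. Below is a sketch of the formatting rules the removed `_format_elapsed_time` method used; the shared helper is assumed, not guaranteed, to follow the same rules:

```python
# Sketch equivalent to the removed _format_elapsed_time; the shared
# format_elapsed_time helper is assumed to behave similarly.
def format_elapsed_time(elapsed: float) -> str:
    if elapsed >= 1:
        return f"{elapsed:.2f}s"     # one second and up: two decimal places
    ms = int(elapsed * 1000)
    return f"{ms}ms" if ms > 0 else "<1ms"

assert format_elapsed_time(2.5) == "2.50s"
assert format_elapsed_time(0.042) == "42ms"
assert format_elapsed_time(0.0004) == "<1ms"
```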