glaip-sdk 0.6.15b2__py3-none-any.whl → 0.6.15b3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- glaip_sdk/agents/__init__.py +27 -0
- glaip_sdk/agents/base.py +1196 -0
- glaip_sdk/cli/__init__.py +9 -0
- glaip_sdk/cli/account_store.py +540 -0
- glaip_sdk/cli/agent_config.py +78 -0
- glaip_sdk/cli/auth.py +699 -0
- glaip_sdk/cli/commands/__init__.py +5 -0
- glaip_sdk/cli/commands/accounts.py +746 -0
- glaip_sdk/cli/commands/agents.py +1509 -0
- glaip_sdk/cli/commands/common_config.py +104 -0
- glaip_sdk/cli/commands/configure.py +896 -0
- glaip_sdk/cli/commands/mcps.py +1356 -0
- glaip_sdk/cli/commands/models.py +69 -0
- glaip_sdk/cli/commands/tools.py +576 -0
- glaip_sdk/cli/commands/transcripts.py +755 -0
- glaip_sdk/cli/commands/update.py +61 -0
- glaip_sdk/cli/config.py +95 -0
- glaip_sdk/cli/constants.py +38 -0
- glaip_sdk/cli/context.py +150 -0
- glaip_sdk/cli/core/__init__.py +79 -0
- glaip_sdk/cli/core/context.py +124 -0
- glaip_sdk/cli/core/output.py +851 -0
- glaip_sdk/cli/core/prompting.py +649 -0
- glaip_sdk/cli/core/rendering.py +187 -0
- glaip_sdk/cli/display.py +355 -0
- glaip_sdk/cli/hints.py +57 -0
- glaip_sdk/cli/io.py +112 -0
- glaip_sdk/cli/main.py +615 -0
- glaip_sdk/cli/masking.py +136 -0
- glaip_sdk/cli/mcp_validators.py +287 -0
- glaip_sdk/cli/pager.py +266 -0
- glaip_sdk/cli/parsers/__init__.py +7 -0
- glaip_sdk/cli/parsers/json_input.py +177 -0
- glaip_sdk/cli/resolution.py +67 -0
- glaip_sdk/cli/rich_helpers.py +27 -0
- glaip_sdk/cli/slash/__init__.py +15 -0
- glaip_sdk/cli/slash/accounts_controller.py +578 -0
- glaip_sdk/cli/slash/accounts_shared.py +75 -0
- glaip_sdk/cli/slash/agent_session.py +285 -0
- glaip_sdk/cli/slash/prompt.py +256 -0
- glaip_sdk/cli/slash/remote_runs_controller.py +566 -0
- glaip_sdk/cli/slash/session.py +1708 -0
- glaip_sdk/cli/slash/tui/__init__.py +9 -0
- glaip_sdk/cli/slash/tui/accounts_app.py +876 -0
- glaip_sdk/cli/slash/tui/background_tasks.py +72 -0
- glaip_sdk/cli/slash/tui/loading.py +58 -0
- glaip_sdk/cli/slash/tui/remote_runs_app.py +628 -0
- glaip_sdk/cli/transcript/__init__.py +31 -0
- glaip_sdk/cli/transcript/cache.py +536 -0
- glaip_sdk/cli/transcript/capture.py +329 -0
- glaip_sdk/cli/transcript/export.py +38 -0
- glaip_sdk/cli/transcript/history.py +815 -0
- glaip_sdk/cli/transcript/launcher.py +77 -0
- glaip_sdk/cli/transcript/viewer.py +374 -0
- glaip_sdk/cli/update_notifier.py +290 -0
- glaip_sdk/cli/utils.py +263 -0
- glaip_sdk/cli/validators.py +238 -0
- glaip_sdk/client/__init__.py +11 -0
- glaip_sdk/client/_agent_payloads.py +520 -0
- glaip_sdk/client/agent_runs.py +147 -0
- glaip_sdk/client/agents.py +1335 -0
- glaip_sdk/client/base.py +502 -0
- glaip_sdk/client/main.py +249 -0
- glaip_sdk/client/mcps.py +370 -0
- glaip_sdk/client/run_rendering.py +700 -0
- glaip_sdk/client/shared.py +21 -0
- glaip_sdk/client/tools.py +661 -0
- glaip_sdk/client/validators.py +198 -0
- glaip_sdk/config/constants.py +52 -0
- glaip_sdk/mcps/__init__.py +21 -0
- glaip_sdk/mcps/base.py +345 -0
- glaip_sdk/models/__init__.py +90 -0
- glaip_sdk/models/agent.py +47 -0
- glaip_sdk/models/agent_runs.py +116 -0
- glaip_sdk/models/common.py +42 -0
- glaip_sdk/models/mcp.py +33 -0
- glaip_sdk/models/tool.py +33 -0
- glaip_sdk/payload_schemas/__init__.py +7 -0
- glaip_sdk/payload_schemas/agent.py +85 -0
- glaip_sdk/registry/__init__.py +55 -0
- glaip_sdk/registry/agent.py +164 -0
- glaip_sdk/registry/base.py +139 -0
- glaip_sdk/registry/mcp.py +253 -0
- glaip_sdk/registry/tool.py +232 -0
- glaip_sdk/runner/__init__.py +59 -0
- glaip_sdk/runner/base.py +84 -0
- glaip_sdk/runner/deps.py +112 -0
- glaip_sdk/runner/langgraph.py +782 -0
- glaip_sdk/runner/mcp_adapter/__init__.py +13 -0
- glaip_sdk/runner/mcp_adapter/base_mcp_adapter.py +43 -0
- glaip_sdk/runner/mcp_adapter/langchain_mcp_adapter.py +257 -0
- glaip_sdk/runner/mcp_adapter/mcp_config_builder.py +95 -0
- glaip_sdk/runner/tool_adapter/__init__.py +18 -0
- glaip_sdk/runner/tool_adapter/base_tool_adapter.py +44 -0
- glaip_sdk/runner/tool_adapter/langchain_tool_adapter.py +219 -0
- glaip_sdk/tools/__init__.py +22 -0
- glaip_sdk/tools/base.py +435 -0
- glaip_sdk/utils/__init__.py +86 -0
- glaip_sdk/utils/a2a/__init__.py +34 -0
- glaip_sdk/utils/a2a/event_processor.py +188 -0
- glaip_sdk/utils/agent_config.py +194 -0
- glaip_sdk/utils/bundler.py +267 -0
- glaip_sdk/utils/client.py +111 -0
- glaip_sdk/utils/client_utils.py +486 -0
- glaip_sdk/utils/datetime_helpers.py +58 -0
- glaip_sdk/utils/discovery.py +78 -0
- glaip_sdk/utils/display.py +135 -0
- glaip_sdk/utils/export.py +143 -0
- glaip_sdk/utils/general.py +61 -0
- glaip_sdk/utils/import_export.py +168 -0
- glaip_sdk/utils/import_resolver.py +492 -0
- glaip_sdk/utils/instructions.py +101 -0
- glaip_sdk/utils/rendering/__init__.py +115 -0
- glaip_sdk/utils/rendering/formatting.py +264 -0
- glaip_sdk/utils/rendering/layout/__init__.py +64 -0
- glaip_sdk/utils/rendering/layout/panels.py +156 -0
- glaip_sdk/utils/rendering/layout/progress.py +202 -0
- glaip_sdk/utils/rendering/layout/summary.py +74 -0
- glaip_sdk/utils/rendering/layout/transcript.py +606 -0
- glaip_sdk/utils/rendering/models.py +85 -0
- glaip_sdk/utils/rendering/renderer/__init__.py +55 -0
- glaip_sdk/utils/rendering/renderer/base.py +1024 -0
- glaip_sdk/utils/rendering/renderer/config.py +27 -0
- glaip_sdk/utils/rendering/renderer/console.py +55 -0
- glaip_sdk/utils/rendering/renderer/debug.py +178 -0
- glaip_sdk/utils/rendering/renderer/factory.py +138 -0
- glaip_sdk/utils/rendering/renderer/stream.py +202 -0
- glaip_sdk/utils/rendering/renderer/summary_window.py +79 -0
- glaip_sdk/utils/rendering/renderer/thinking.py +273 -0
- glaip_sdk/utils/rendering/renderer/toggle.py +182 -0
- glaip_sdk/utils/rendering/renderer/tool_panels.py +442 -0
- glaip_sdk/utils/rendering/renderer/transcript_mode.py +162 -0
- glaip_sdk/utils/rendering/state.py +204 -0
- glaip_sdk/utils/rendering/step_tree_state.py +100 -0
- glaip_sdk/utils/rendering/steps/__init__.py +34 -0
- glaip_sdk/utils/rendering/steps/event_processor.py +778 -0
- glaip_sdk/utils/rendering/steps/format.py +176 -0
- glaip_sdk/utils/rendering/steps/manager.py +387 -0
- glaip_sdk/utils/rendering/timing.py +36 -0
- glaip_sdk/utils/rendering/viewer/__init__.py +21 -0
- glaip_sdk/utils/rendering/viewer/presenter.py +184 -0
- glaip_sdk/utils/resource_refs.py +195 -0
- glaip_sdk/utils/run_renderer.py +41 -0
- glaip_sdk/utils/runtime_config.py +425 -0
- glaip_sdk/utils/serialization.py +424 -0
- glaip_sdk/utils/sync.py +142 -0
- glaip_sdk/utils/tool_detection.py +33 -0
- glaip_sdk/utils/validation.py +264 -0
- {glaip_sdk-0.6.15b2.dist-info → glaip_sdk-0.6.15b3.dist-info}/METADATA +1 -1
- glaip_sdk-0.6.15b3.dist-info/RECORD +160 -0
- glaip_sdk-0.6.15b2.dist-info/RECORD +0 -12
- {glaip_sdk-0.6.15b2.dist-info → glaip_sdk-0.6.15b3.dist-info}/WHEEL +0 -0
- {glaip_sdk-0.6.15b2.dist-info → glaip_sdk-0.6.15b3.dist-info}/entry_points.txt +0 -0
- {glaip_sdk-0.6.15b2.dist-info → glaip_sdk-0.6.15b3.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,1024 @@
|
|
|
1
|
+
"""Base renderer class that orchestrates all rendering components.
|
|
2
|
+
|
|
3
|
+
Authors:
|
|
4
|
+
Raymond Christopher (raymond.christopher@gdplabs.id)
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from __future__ import annotations
|
|
8
|
+
|
|
9
|
+
import json
|
|
10
|
+
import logging
|
|
11
|
+
from datetime import datetime, timezone
|
|
12
|
+
from time import monotonic
|
|
13
|
+
from typing import Any
|
|
14
|
+
|
|
15
|
+
from rich.console import Console as RichConsole
|
|
16
|
+
from rich.console import Group
|
|
17
|
+
from rich.live import Live
|
|
18
|
+
from rich.markdown import Markdown
|
|
19
|
+
from rich.spinner import Spinner
|
|
20
|
+
from rich.text import Text
|
|
21
|
+
|
|
22
|
+
from glaip_sdk.icons import ICON_AGENT, ICON_AGENT_STEP, ICON_DELEGATE, ICON_TOOL_STEP
|
|
23
|
+
from glaip_sdk.rich_components import AIPPanel
|
|
24
|
+
from glaip_sdk.utils.rendering.formatting import (
|
|
25
|
+
format_main_title,
|
|
26
|
+
is_step_finished,
|
|
27
|
+
normalise_display_label,
|
|
28
|
+
)
|
|
29
|
+
from glaip_sdk.utils.rendering.models import RunStats, Step
|
|
30
|
+
from glaip_sdk.utils.rendering.layout.panels import create_main_panel
|
|
31
|
+
from glaip_sdk.utils.rendering.layout.progress import (
|
|
32
|
+
build_progress_footer,
|
|
33
|
+
format_elapsed_time,
|
|
34
|
+
format_working_indicator,
|
|
35
|
+
get_spinner_char,
|
|
36
|
+
is_delegation_tool,
|
|
37
|
+
)
|
|
38
|
+
from glaip_sdk.utils.rendering.layout.summary import render_summary_panels
|
|
39
|
+
from glaip_sdk.utils.rendering.layout.transcript import (
|
|
40
|
+
DEFAULT_TRANSCRIPT_THEME,
|
|
41
|
+
TranscriptSnapshot,
|
|
42
|
+
build_final_panel,
|
|
43
|
+
build_transcript_snapshot,
|
|
44
|
+
build_transcript_view,
|
|
45
|
+
extract_query_from_meta,
|
|
46
|
+
format_final_panel_title,
|
|
47
|
+
)
|
|
48
|
+
from glaip_sdk.utils.rendering.renderer.config import RendererConfig
|
|
49
|
+
from glaip_sdk.utils.rendering.renderer.debug import render_debug_event
|
|
50
|
+
from glaip_sdk.utils.rendering.renderer.stream import StreamProcessor
|
|
51
|
+
from glaip_sdk.utils.rendering.renderer.thinking import ThinkingScopeController
|
|
52
|
+
from glaip_sdk.utils.rendering.renderer.tool_panels import ToolPanelController
|
|
53
|
+
from glaip_sdk.utils.rendering.renderer.transcript_mode import TranscriptModeMixin
|
|
54
|
+
from glaip_sdk.utils.rendering.state import (
|
|
55
|
+
RendererState,
|
|
56
|
+
TranscriptBuffer,
|
|
57
|
+
coerce_received_at,
|
|
58
|
+
truncate_display,
|
|
59
|
+
)
|
|
60
|
+
from glaip_sdk.utils.rendering.steps import (
|
|
61
|
+
StepManager,
|
|
62
|
+
format_step_label,
|
|
63
|
+
)
|
|
64
|
+
from glaip_sdk.utils.rendering.timing import coerce_server_time
|
|
65
|
+
|
|
66
|
+
_NO_STEPS_TEXT = Text("No steps yet", style="dim")
|
|
67
|
+
|
|
68
|
+
# Configure logger
|
|
69
|
+
logger = logging.getLogger("glaip_sdk.run_renderer")
|
|
70
|
+
|
|
71
|
+
# Constants
|
|
72
|
+
RUNNING_STATUS_HINTS = {"running", "started", "pending", "working"}
|
|
73
|
+
ARGS_VALUE_MAX_LEN = 160
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
class RichStreamRenderer(TranscriptModeMixin):
|
|
77
|
+
"""Live, modern terminal renderer for agent execution with rich visual output."""
|
|
78
|
+
|
|
79
|
+
def __init__(
|
|
80
|
+
self,
|
|
81
|
+
console: RichConsole | None = None,
|
|
82
|
+
*,
|
|
83
|
+
cfg: RendererConfig | None = None,
|
|
84
|
+
verbose: bool = False,
|
|
85
|
+
transcript_buffer: TranscriptBuffer | None = None,
|
|
86
|
+
callbacks: dict[str, Any] | None = None,
|
|
87
|
+
) -> None:
|
|
88
|
+
"""Initialize the renderer.
|
|
89
|
+
|
|
90
|
+
Args:
|
|
91
|
+
console: Rich console instance
|
|
92
|
+
cfg: Renderer configuration
|
|
93
|
+
verbose: Whether to enable verbose mode
|
|
94
|
+
transcript_buffer: Optional transcript buffer for capturing output
|
|
95
|
+
callbacks: Optional dictionary of callback functions
|
|
96
|
+
"""
|
|
97
|
+
super().__init__()
|
|
98
|
+
self.console = console or RichConsole()
|
|
99
|
+
self.cfg = cfg or RendererConfig()
|
|
100
|
+
self.verbose = verbose
|
|
101
|
+
|
|
102
|
+
# Initialize components
|
|
103
|
+
self.stream_processor = StreamProcessor()
|
|
104
|
+
self.state = RendererState()
|
|
105
|
+
if transcript_buffer is not None:
|
|
106
|
+
self.state.buffer = transcript_buffer
|
|
107
|
+
|
|
108
|
+
self._callbacks = callbacks or {}
|
|
109
|
+
|
|
110
|
+
# Initialize step manager and other state
|
|
111
|
+
self.steps = StepManager(max_steps=self.cfg.summary_max_steps)
|
|
112
|
+
# Live display instance (single source of truth)
|
|
113
|
+
self.live: Live | None = None
|
|
114
|
+
self._step_spinners: dict[str, Spinner] = {}
|
|
115
|
+
self._last_steps_panel_template: Any | None = None
|
|
116
|
+
|
|
117
|
+
# Tool tracking and thinking scopes
|
|
118
|
+
self._step_server_start_times: dict[str, float] = {}
|
|
119
|
+
self.tool_controller = ToolPanelController(
|
|
120
|
+
steps=self.steps,
|
|
121
|
+
stream_processor=self.stream_processor,
|
|
122
|
+
console=self.console,
|
|
123
|
+
cfg=self.cfg,
|
|
124
|
+
step_server_start_times=self._step_server_start_times,
|
|
125
|
+
output_prefix="**Output:**\n",
|
|
126
|
+
)
|
|
127
|
+
self.thinking_controller = ThinkingScopeController(
|
|
128
|
+
self.steps,
|
|
129
|
+
step_server_start_times=self._step_server_start_times,
|
|
130
|
+
)
|
|
131
|
+
self._root_agent_friendly: str | None = None
|
|
132
|
+
self._root_agent_step_id: str | None = None
|
|
133
|
+
self._root_query: str | None = None
|
|
134
|
+
self._root_query_attached: bool = False
|
|
135
|
+
|
|
136
|
+
# Timing
|
|
137
|
+
self._started_at: float | None = None
|
|
138
|
+
|
|
139
|
+
# Header/text
|
|
140
|
+
self.header_text: str = ""
|
|
141
|
+
# Track per-step server start times for accurate elapsed labels
|
|
142
|
+
# Output formatting constants
|
|
143
|
+
self.OUTPUT_PREFIX: str = "**Output:**\n"
|
|
144
|
+
|
|
145
|
+
self._final_transcript_snapshot: TranscriptSnapshot | None = None
|
|
146
|
+
self._final_transcript_renderables: tuple[list[Any], list[Any]] | None = None
|
|
147
|
+
|
|
148
|
+
def on_start(self, meta: dict[str, Any]) -> None:
|
|
149
|
+
"""Handle renderer start event."""
|
|
150
|
+
if self.cfg.live:
|
|
151
|
+
# Defer creating Live to _ensure_live so tests and prod both work
|
|
152
|
+
pass
|
|
153
|
+
|
|
154
|
+
# Set up initial state
|
|
155
|
+
self._started_at = monotonic()
|
|
156
|
+
try:
|
|
157
|
+
self.state.meta = json.loads(json.dumps(meta))
|
|
158
|
+
except Exception:
|
|
159
|
+
self.state.meta = dict(meta)
|
|
160
|
+
|
|
161
|
+
meta_payload = meta or {}
|
|
162
|
+
self.steps.set_root_agent(meta_payload.get("agent_id"))
|
|
163
|
+
self._root_agent_friendly = self._humanize_agent_slug(meta_payload.get("agent_name"))
|
|
164
|
+
self._root_query = truncate_display(
|
|
165
|
+
meta_payload.get("input_message")
|
|
166
|
+
or meta_payload.get("query")
|
|
167
|
+
or meta_payload.get("message")
|
|
168
|
+
or (meta_payload.get("meta") or {}).get("input_message")
|
|
169
|
+
or ""
|
|
170
|
+
)
|
|
171
|
+
if not self._root_query:
|
|
172
|
+
self._root_query = None
|
|
173
|
+
self._root_query_attached = False
|
|
174
|
+
|
|
175
|
+
# Print compact header and user request (parity with old renderer)
|
|
176
|
+
self._render_header(meta)
|
|
177
|
+
self._render_user_query(meta)
|
|
178
|
+
|
|
179
|
+
def _render_header(self, meta: dict[str, Any]) -> None:
|
|
180
|
+
"""Render the agent header with metadata."""
|
|
181
|
+
parts = self._build_header_parts(meta)
|
|
182
|
+
self.header_text = " ".join(parts)
|
|
183
|
+
|
|
184
|
+
if not self.header_text:
|
|
185
|
+
return
|
|
186
|
+
|
|
187
|
+
# Use a rule-like header for readability with fallback
|
|
188
|
+
if not self._render_header_rule():
|
|
189
|
+
self._render_header_fallback()
|
|
190
|
+
|
|
191
|
+
def _build_header_parts(self, meta: dict[str, Any]) -> list[str]:
|
|
192
|
+
"""Build header text parts from metadata."""
|
|
193
|
+
parts: list[str] = [ICON_AGENT]
|
|
194
|
+
agent_name = meta.get("agent_name", "agent")
|
|
195
|
+
if agent_name:
|
|
196
|
+
parts.append(agent_name)
|
|
197
|
+
|
|
198
|
+
model = meta.get("model", "")
|
|
199
|
+
if model:
|
|
200
|
+
parts.extend(["•", model])
|
|
201
|
+
|
|
202
|
+
run_id = meta.get("run_id", "")
|
|
203
|
+
if run_id:
|
|
204
|
+
parts.extend(["•", run_id])
|
|
205
|
+
|
|
206
|
+
return parts
|
|
207
|
+
|
|
208
|
+
def _render_header_rule(self) -> bool:
|
|
209
|
+
"""Render header as a rule. Returns True if successful."""
|
|
210
|
+
try:
|
|
211
|
+
self.console.rule(self.header_text)
|
|
212
|
+
return True
|
|
213
|
+
except Exception: # pragma: no cover - defensive fallback
|
|
214
|
+
logger.exception("Failed to render header rule")
|
|
215
|
+
return False
|
|
216
|
+
|
|
217
|
+
def _render_header_fallback(self) -> None:
|
|
218
|
+
"""Fallback header rendering."""
|
|
219
|
+
try:
|
|
220
|
+
self.console.print(self.header_text)
|
|
221
|
+
except Exception:
|
|
222
|
+
logger.exception("Failed to print header fallback")
|
|
223
|
+
|
|
224
|
+
def _build_user_query_panel(self, query: str) -> AIPPanel:
|
|
225
|
+
"""Create the panel used to display the user request."""
|
|
226
|
+
return AIPPanel(
|
|
227
|
+
Markdown(f"**Query:** {query}"),
|
|
228
|
+
title="User Request",
|
|
229
|
+
border_style="#d97706",
|
|
230
|
+
padding=(0, 1),
|
|
231
|
+
)
|
|
232
|
+
|
|
233
|
+
def _render_user_query(self, meta: dict[str, Any]) -> None:
|
|
234
|
+
"""Render the user query panel."""
|
|
235
|
+
query = extract_query_from_meta(meta)
|
|
236
|
+
if not query:
|
|
237
|
+
return
|
|
238
|
+
self.console.print(self._build_user_query_panel(query))
|
|
239
|
+
|
|
240
|
+
def _render_summary_static_sections(self) -> None:
|
|
241
|
+
"""Re-render header and user query when returning to summary mode."""
|
|
242
|
+
meta = getattr(self.state, "meta", None)
|
|
243
|
+
if meta:
|
|
244
|
+
self._render_header(meta)
|
|
245
|
+
elif self.header_text and not self._render_header_rule():
|
|
246
|
+
self._render_header_fallback()
|
|
247
|
+
|
|
248
|
+
query = extract_query_from_meta(meta) or self._root_query
|
|
249
|
+
if query:
|
|
250
|
+
self.console.print(self._build_user_query_panel(query))
|
|
251
|
+
|
|
252
|
+
def _render_summary_after_transcript_toggle(self) -> None:
|
|
253
|
+
"""Render the summary panel after leaving transcript mode."""
|
|
254
|
+
if self.state.finalizing_ui:
|
|
255
|
+
self._render_final_summary_panels()
|
|
256
|
+
elif self.live:
|
|
257
|
+
self._refresh_live_panels()
|
|
258
|
+
else:
|
|
259
|
+
self._render_static_summary_panels()
|
|
260
|
+
|
|
261
|
+
def _render_final_summary_panels(self) -> None:
|
|
262
|
+
"""Render a static summary and disable live mode for final output."""
|
|
263
|
+
self.cfg.live = False
|
|
264
|
+
self.live = None
|
|
265
|
+
self._render_static_summary_panels()
|
|
266
|
+
|
|
267
|
+
def _render_static_summary_panels(self) -> None:
|
|
268
|
+
"""Render the steps and main panels in a static (non-live) layout."""
|
|
269
|
+
summary_window = self._summary_window_size()
|
|
270
|
+
window_arg = summary_window if summary_window > 0 else None
|
|
271
|
+
status_overrides = self._build_step_status_overrides()
|
|
272
|
+
for renderable in render_summary_panels(
|
|
273
|
+
self.state,
|
|
274
|
+
self.steps,
|
|
275
|
+
summary_window=window_arg,
|
|
276
|
+
include_query_panel=False,
|
|
277
|
+
step_status_overrides=status_overrides,
|
|
278
|
+
):
|
|
279
|
+
self.console.print(renderable)
|
|
280
|
+
|
|
281
|
+
def _ensure_streaming_started_baseline(self, timestamp: float) -> None:
|
|
282
|
+
"""Synchronize streaming start state across renderer components."""
|
|
283
|
+
self.state.start_stream_timer(timestamp)
|
|
284
|
+
self.stream_processor.streaming_started_at = timestamp
|
|
285
|
+
self._started_at = timestamp
|
|
286
|
+
|
|
287
|
+
def on_event(self, ev: dict[str, Any]) -> None:
|
|
288
|
+
"""Handle streaming events from the backend."""
|
|
289
|
+
received_at = self._resolve_received_timestamp(ev)
|
|
290
|
+
self._capture_event(ev, received_at)
|
|
291
|
+
self.stream_processor.reset_event_tracking()
|
|
292
|
+
|
|
293
|
+
self._sync_stream_start(ev, received_at)
|
|
294
|
+
|
|
295
|
+
metadata = self.stream_processor.extract_event_metadata(ev)
|
|
296
|
+
|
|
297
|
+
self._maybe_render_debug(ev, received_at)
|
|
298
|
+
try:
|
|
299
|
+
self._dispatch_event(ev, metadata)
|
|
300
|
+
finally:
|
|
301
|
+
self.stream_processor.update_timing(metadata.get("context_id"))
|
|
302
|
+
|
|
303
|
+
def _resolve_received_timestamp(self, ev: dict[str, Any]) -> datetime:
|
|
304
|
+
"""Return the timestamp an event was received, normalising inputs."""
|
|
305
|
+
received_at = coerce_received_at(ev.get("received_at"))
|
|
306
|
+
if received_at is None:
|
|
307
|
+
received_at = datetime.now(timezone.utc)
|
|
308
|
+
|
|
309
|
+
if self.state.streaming_started_event_ts is None:
|
|
310
|
+
self.state.streaming_started_event_ts = received_at
|
|
311
|
+
|
|
312
|
+
return received_at
|
|
313
|
+
|
|
314
|
+
def _sync_stream_start(self, ev: dict[str, Any], received_at: datetime | None) -> None:
|
|
315
|
+
"""Ensure renderer and stream processor share a streaming baseline."""
|
|
316
|
+
baseline = self.state.streaming_started_at
|
|
317
|
+
if baseline is None:
|
|
318
|
+
baseline = monotonic()
|
|
319
|
+
self._ensure_streaming_started_baseline(baseline)
|
|
320
|
+
elif getattr(self.stream_processor, "streaming_started_at", None) is None:
|
|
321
|
+
self._ensure_streaming_started_baseline(baseline)
|
|
322
|
+
|
|
323
|
+
if ev.get("status") == "streaming_started":
|
|
324
|
+
self.state.streaming_started_event_ts = received_at
|
|
325
|
+
self._ensure_streaming_started_baseline(monotonic())
|
|
326
|
+
|
|
327
|
+
def _maybe_render_debug(
|
|
328
|
+
self, ev: dict[str, Any], received_at: datetime
|
|
329
|
+
) -> None: # pragma: no cover - guard rails for verbose mode
|
|
330
|
+
"""Render debug view when verbose mode is enabled."""
|
|
331
|
+
if not self.verbose:
|
|
332
|
+
return
|
|
333
|
+
|
|
334
|
+
self._ensure_transcript_header()
|
|
335
|
+
render_debug_event(
|
|
336
|
+
ev,
|
|
337
|
+
self.console,
|
|
338
|
+
received_ts=received_at,
|
|
339
|
+
baseline_ts=self.state.streaming_started_event_ts,
|
|
340
|
+
)
|
|
341
|
+
self._print_transcript_hint()
|
|
342
|
+
|
|
343
|
+
def _dispatch_event(self, ev: dict[str, Any], metadata: dict[str, Any]) -> None:
|
|
344
|
+
"""Route events to the appropriate renderer handlers."""
|
|
345
|
+
kind = metadata["kind"]
|
|
346
|
+
content = metadata["content"]
|
|
347
|
+
|
|
348
|
+
if kind == "status":
|
|
349
|
+
self._handle_status_event(ev)
|
|
350
|
+
elif kind == "content":
|
|
351
|
+
self._handle_content_event(content)
|
|
352
|
+
elif kind == "final_response":
|
|
353
|
+
self._handle_final_response_event(content, metadata)
|
|
354
|
+
elif kind in {"agent_step", "agent_thinking_step"}:
|
|
355
|
+
self._handle_agent_step_event(ev, metadata)
|
|
356
|
+
else:
|
|
357
|
+
self._ensure_live()
|
|
358
|
+
|
|
359
|
+
def _handle_status_event(self, ev: dict[str, Any]) -> None:
|
|
360
|
+
"""Handle status events."""
|
|
361
|
+
status = ev.get("status")
|
|
362
|
+
if status == "streaming_started":
|
|
363
|
+
return
|
|
364
|
+
|
|
365
|
+
def _handle_content_event(self, content: str) -> None:
|
|
366
|
+
"""Handle content streaming events."""
|
|
367
|
+
if content:
|
|
368
|
+
self.state.append_transcript_text(content)
|
|
369
|
+
self._ensure_live()
|
|
370
|
+
|
|
371
|
+
def _handle_final_response_event(self, content: str, metadata: dict[str, Any]) -> None:
|
|
372
|
+
"""Handle final response events."""
|
|
373
|
+
if content:
|
|
374
|
+
self.state.append_transcript_text(content)
|
|
375
|
+
self.state.set_final_output(content)
|
|
376
|
+
|
|
377
|
+
meta_payload = metadata.get("metadata") or {}
|
|
378
|
+
final_time = coerce_server_time(meta_payload.get("time"))
|
|
379
|
+
self._update_final_duration(final_time)
|
|
380
|
+
self.thinking_controller.close_active_scopes(final_time)
|
|
381
|
+
self._finish_running_steps()
|
|
382
|
+
self.tool_controller.finish_all_panels()
|
|
383
|
+
self._normalise_finished_icons()
|
|
384
|
+
|
|
385
|
+
self._ensure_live()
|
|
386
|
+
self._print_final_panel_if_needed()
|
|
387
|
+
|
|
388
|
+
def _normalise_finished_icons(self) -> None:
|
|
389
|
+
"""Ensure finished steps release any running spinners."""
|
|
390
|
+
for step in self.steps.by_id.values():
|
|
391
|
+
if getattr(step, "status", None) != "running":
|
|
392
|
+
self._step_spinners.pop(step.step_id, None)
|
|
393
|
+
|
|
394
|
+
def _handle_agent_step_event(self, ev: dict[str, Any], metadata: dict[str, Any]) -> None:
|
|
395
|
+
"""Handle agent step events."""
|
|
396
|
+
# Extract tool information using stream processor
|
|
397
|
+
tool_calls_result = self.stream_processor.parse_tool_calls(ev)
|
|
398
|
+
tool_name, tool_args, tool_out, tool_calls_info = tool_calls_result
|
|
399
|
+
|
|
400
|
+
payload = metadata.get("metadata") or {}
|
|
401
|
+
|
|
402
|
+
tracked_step: Step | None = None
|
|
403
|
+
try:
|
|
404
|
+
tracked_step = self.steps.apply_event(ev)
|
|
405
|
+
except ValueError:
|
|
406
|
+
logger.debug("Malformed step event skipped", exc_info=True)
|
|
407
|
+
else:
|
|
408
|
+
self._record_step_server_start(tracked_step, payload)
|
|
409
|
+
self.thinking_controller.update_timeline(
|
|
410
|
+
tracked_step,
|
|
411
|
+
payload,
|
|
412
|
+
enabled=self.cfg.render_thinking,
|
|
413
|
+
)
|
|
414
|
+
self._maybe_override_root_agent_label(tracked_step, payload)
|
|
415
|
+
self._maybe_attach_root_query(tracked_step)
|
|
416
|
+
|
|
417
|
+
# Track tools and sub-agents for transcript/debug context
|
|
418
|
+
self.stream_processor.track_tools_and_agents(tool_name, tool_calls_info, is_delegation_tool)
|
|
419
|
+
|
|
420
|
+
# Handle tool execution
|
|
421
|
+
self.tool_controller.handle_agent_step(
|
|
422
|
+
ev,
|
|
423
|
+
tool_name,
|
|
424
|
+
tool_args,
|
|
425
|
+
tool_out,
|
|
426
|
+
tool_calls_info,
|
|
427
|
+
tracked_step=tracked_step,
|
|
428
|
+
)
|
|
429
|
+
|
|
430
|
+
# Update live display
|
|
431
|
+
self._ensure_live()
|
|
432
|
+
|
|
433
|
+
def _maybe_attach_root_query(self, step: Step | None) -> None:
|
|
434
|
+
"""Attach the user query to the root agent step for display."""
|
|
435
|
+
if not step or self._root_query_attached or not self._root_query or step.kind != "agent" or step.parent_id:
|
|
436
|
+
return
|
|
437
|
+
|
|
438
|
+
args = dict(getattr(step, "args", {}) or {})
|
|
439
|
+
args.setdefault("query", self._root_query)
|
|
440
|
+
step.args = args
|
|
441
|
+
self._root_query_attached = True
|
|
442
|
+
|
|
443
|
+
def _record_step_server_start(self, step: Step | None, payload: dict[str, Any]) -> None:
|
|
444
|
+
"""Store server-provided start times for elapsed calculations."""
|
|
445
|
+
if not step:
|
|
446
|
+
return
|
|
447
|
+
server_time = payload.get("time")
|
|
448
|
+
if not isinstance(server_time, (int, float)):
|
|
449
|
+
return
|
|
450
|
+
self._step_server_start_times.setdefault(step.step_id, float(server_time))
|
|
451
|
+
|
|
452
|
+
def _maybe_override_root_agent_label(self, step: Step | None, payload: dict[str, Any]) -> None:
|
|
453
|
+
"""Ensure the root agent row uses the human-friendly name and shows the ID."""
|
|
454
|
+
if not step or step.kind != "agent" or step.parent_id:
|
|
455
|
+
return
|
|
456
|
+
friendly = self._root_agent_friendly or self._humanize_agent_slug((payload or {}).get("agent_name"))
|
|
457
|
+
if not friendly:
|
|
458
|
+
return
|
|
459
|
+
agent_identifier = step.name or step.step_id
|
|
460
|
+
if not agent_identifier:
|
|
461
|
+
return
|
|
462
|
+
step.display_label = normalise_display_label(f"{ICON_AGENT} {friendly} ({agent_identifier})")
|
|
463
|
+
if not self._root_agent_step_id:
|
|
464
|
+
self._root_agent_step_id = step.step_id
|
|
465
|
+
|
|
466
|
+
# Thinking scope management is handled by ThinkingScopeController.
|
|
467
|
+
|
|
468
|
+
def _apply_root_duration(self, duration_seconds: float | None) -> None:
|
|
469
|
+
"""Propagate the final run duration to the root agent step."""
|
|
470
|
+
if duration_seconds is None or not self._root_agent_step_id:
|
|
471
|
+
return
|
|
472
|
+
root_step = self.steps.by_id.get(self._root_agent_step_id)
|
|
473
|
+
if not root_step:
|
|
474
|
+
return
|
|
475
|
+
try:
|
|
476
|
+
duration_ms = max(0, int(round(float(duration_seconds) * 1000)))
|
|
477
|
+
except Exception:
|
|
478
|
+
return
|
|
479
|
+
root_step.duration_ms = duration_ms
|
|
480
|
+
root_step.duration_source = root_step.duration_source or "run"
|
|
481
|
+
root_step.status = "finished"
|
|
482
|
+
|
|
483
|
+
@staticmethod
|
|
484
|
+
def _humanize_agent_slug(value: Any) -> str | None:
|
|
485
|
+
"""Convert a slugified agent name into Title Case."""
|
|
486
|
+
if not isinstance(value, str):
|
|
487
|
+
return None
|
|
488
|
+
cleaned = value.replace("_", " ").replace("-", " ").strip()
|
|
489
|
+
if not cleaned:
|
|
490
|
+
return None
|
|
491
|
+
parts = [part for part in cleaned.split() if part]
|
|
492
|
+
return " ".join(part[:1].upper() + part[1:] for part in parts)
|
|
493
|
+
|
|
494
|
+
def _finish_running_steps(self) -> None:
|
|
495
|
+
"""Mark any running steps as finished to avoid lingering spinners."""
|
|
496
|
+
for st in self.steps.by_id.values():
|
|
497
|
+
if not is_step_finished(st):
|
|
498
|
+
self._mark_incomplete_step(st)
|
|
499
|
+
|
|
500
|
+
def _mark_incomplete_step(self, step: Step) -> None:
|
|
501
|
+
"""Mark a lingering step as incomplete/warning with unknown duration."""
|
|
502
|
+
step.status = "finished"
|
|
503
|
+
step.duration_unknown = True
|
|
504
|
+
if step.duration_ms is None:
|
|
505
|
+
step.duration_ms = 0
|
|
506
|
+
step.duration_source = step.duration_source or "unknown"
|
|
507
|
+
|
|
508
|
+
def _stop_live_display(self) -> None:
|
|
509
|
+
"""Stop live display and clean up."""
|
|
510
|
+
self._shutdown_live()
|
|
511
|
+
|
|
512
|
+
def _print_final_panel_if_needed(self) -> None:
|
|
513
|
+
"""Print final result when configuration requires it."""
|
|
514
|
+
if self.state.printed_final_output:
|
|
515
|
+
return
|
|
516
|
+
|
|
517
|
+
body = (self.state.final_text or self.state.buffer.render() or "").strip()
|
|
518
|
+
if not body:
|
|
519
|
+
return
|
|
520
|
+
|
|
521
|
+
if getattr(self, "_transcript_mode_enabled", False):
|
|
522
|
+
return
|
|
523
|
+
|
|
524
|
+
if self.verbose:
|
|
525
|
+
panel = build_final_panel(
|
|
526
|
+
self.state,
|
|
527
|
+
title=self._final_panel_title(),
|
|
528
|
+
)
|
|
529
|
+
if panel is None:
|
|
530
|
+
return
|
|
531
|
+
self.console.print(panel)
|
|
532
|
+
self.state.printed_final_output = True
|
|
533
|
+
|
|
534
|
+
def finalize(self) -> tuple[list[Any], list[Any]]:
|
|
535
|
+
"""Compose the final transcript renderables."""
|
|
536
|
+
return self._compose_final_transcript()
|
|
537
|
+
|
|
538
|
+
def _compose_final_transcript(self) -> tuple[list[Any], list[Any]]:
|
|
539
|
+
"""Build the transcript snapshot used for final summaries."""
|
|
540
|
+
summary_window = self._summary_window_size()
|
|
541
|
+
summary_window = summary_window if summary_window > 0 else None
|
|
542
|
+
snapshot = build_transcript_snapshot(
|
|
543
|
+
self.state,
|
|
544
|
+
self.steps,
|
|
545
|
+
query_text=extract_query_from_meta(self.state.meta),
|
|
546
|
+
meta=self.state.meta,
|
|
547
|
+
summary_window=summary_window,
|
|
548
|
+
step_status_overrides=self._build_step_status_overrides(),
|
|
549
|
+
)
|
|
550
|
+
header, body = build_transcript_view(snapshot)
|
|
551
|
+
self._final_transcript_snapshot = snapshot
|
|
552
|
+
self._final_transcript_renderables = (header, body)
|
|
553
|
+
return header, body
|
|
554
|
+
|
|
555
|
+
def _render_final_summary(self, header: list[Any], body: list[Any]) -> None:
|
|
556
|
+
"""Print the composed transcript summary for non-live renders."""
|
|
557
|
+
renderables = list(header) + list(body)
|
|
558
|
+
for renderable in renderables:
|
|
559
|
+
try:
|
|
560
|
+
self.console.print(renderable)
|
|
561
|
+
self.console.print()
|
|
562
|
+
except Exception:
|
|
563
|
+
pass
|
|
564
|
+
|
|
565
|
+
def on_complete(self, stats: RunStats) -> None:
|
|
566
|
+
"""Handle completion event."""
|
|
567
|
+
self.state.finalizing_ui = True
|
|
568
|
+
|
|
569
|
+
self._handle_stats_duration(stats)
|
|
570
|
+
self.thinking_controller.close_active_scopes(self.state.final_duration_seconds)
|
|
571
|
+
self._cleanup_ui_elements()
|
|
572
|
+
self._finalize_display()
|
|
573
|
+
self._print_completion_message()
|
|
574
|
+
|
|
575
|
+
def _handle_stats_duration(self, stats: RunStats) -> None:
    """Derive the run duration from *stats* and record it.

    Non-``RunStats`` payloads and malformed timestamps are ignored;
    a valid duration overwrites any previously stored value.
    """
    if not isinstance(stats, RunStats):
        return

    duration: float | None
    try:
        started, finished = stats.started_at, stats.finished_at
        if started is None or finished is None:
            duration = None
        else:
            # Clamp to zero in case of clock skew.
            duration = max(0.0, float(finished) - float(started))
    except Exception:
        duration = None

    if duration is None:
        return
    self._update_final_duration(duration, overwrite=True)
|
|
589
|
+
|
|
590
|
+
def _cleanup_ui_elements(self) -> None:
|
|
591
|
+
"""Clean up running UI elements."""
|
|
592
|
+
# Mark any running steps as finished to avoid lingering spinners
|
|
593
|
+
self._finish_running_steps()
|
|
594
|
+
|
|
595
|
+
# Mark unfinished tool panels as finished
|
|
596
|
+
self.tool_controller.finish_all_panels()
|
|
597
|
+
|
|
598
|
+
def _finalize_display(self) -> None:
    """Finalize live display and render final output."""
    # Final refresh so the live view reflects the terminal state before
    # it is torn down.
    self._ensure_live()

    # Compose the final transcript while state is still complete.
    header, body = self.finalize()

    # Stop live display before printing anything directly to the console.
    self._stop_live_display()

    # Render final output based on configuration: live mode prints only
    # the final result panel; non-live mode prints the full summary.
    if self.cfg.live:
        self._print_final_panel_if_needed()
    else:
        self._render_final_summary(header, body)
|
|
613
|
+
|
|
614
|
+
def _print_completion_message(self) -> None:
|
|
615
|
+
"""Print completion message based on current mode."""
|
|
616
|
+
if self._transcript_mode_enabled:
|
|
617
|
+
try:
|
|
618
|
+
self.console.print(
|
|
619
|
+
"[dim]Run finished. Press Ctrl+T to return to the summary view or stay here to inspect events. "
|
|
620
|
+
"Use the post-run viewer for export.[/dim]"
|
|
621
|
+
)
|
|
622
|
+
except Exception:
|
|
623
|
+
pass
|
|
624
|
+
else:
|
|
625
|
+
# No transcript toggle in summary mode; nothing to print here.
|
|
626
|
+
return
|
|
627
|
+
|
|
628
|
+
def _ensure_live(self) -> None:
    """Ensure live display is updated."""
    # Transcript mode renders raw events; no live panels are shown.
    if getattr(self, "_transcript_mode_enabled", False):
        return
    # Rich needs an internal live stack on the console; if it cannot be
    # provided we skip live rendering for this cycle.
    if not self._ensure_live_stack():
        return

    self._start_live_if_needed()

    if self.live:
        self._refresh_live_panels()
        # Print the one-time summary hint only while still streaming
        # (not finalizing) and only once per run.
        if (
            not self._transcript_mode_enabled
            and not self.state.finalizing_ui
            and not self._summary_hint_printed_once
        ):
            self._print_summary_hint(force=True)
|
|
645
|
+
|
|
646
|
+
def _ensure_live_stack(self) -> bool:
|
|
647
|
+
"""Guarantee the console exposes the internal live stack Rich expects."""
|
|
648
|
+
live_stack = getattr(self.console, "_live_stack", None)
|
|
649
|
+
if isinstance(live_stack, list):
|
|
650
|
+
return True
|
|
651
|
+
|
|
652
|
+
try:
|
|
653
|
+
self.console._live_stack = [] # type: ignore[attr-defined]
|
|
654
|
+
return True
|
|
655
|
+
except Exception:
|
|
656
|
+
# If the console forbids attribute assignment we simply skip the live
|
|
657
|
+
# update for this cycle and fall back to buffered printing.
|
|
658
|
+
logger.debug(
|
|
659
|
+
"Console missing _live_stack; skipping live UI initialisation",
|
|
660
|
+
exc_info=True,
|
|
661
|
+
)
|
|
662
|
+
return False
|
|
663
|
+
|
|
664
|
+
def _start_live_if_needed(self) -> None:
    """Create and start a Live instance when configuration allows."""
    # Already running, or live rendering disabled by config.
    if self.live is not None or not self.cfg.live:
        return

    try:
        self.live = Live(
            console=self.console,
            # NOTE(review): assumes cfg.refresh_debounce > 0; a zero value
            # would raise ZeroDivisionError here and be swallowed below.
            refresh_per_second=1 / self.cfg.refresh_debounce,
            transient=not self.cfg.persist_live,
        )
        self.live.start()
    except Exception:
        # Live rendering is best-effort; fall back to buffered output.
        self.live = None
|
|
678
|
+
|
|
679
|
+
def _refresh_live_panels(self) -> None:
    """Render panels and push them to the active Live display."""
    if not self.live:
        return

    steps_body = self._render_steps_text()
    # Reuse the last panel template (title/border/padding) when available
    # so the steps panel keeps a stable appearance between refreshes.
    template_panel = getattr(self, "_last_steps_panel_template", None)
    if template_panel is None:
        template_panel = self._resolve_steps_panel()
    steps_panel = AIPPanel(
        steps_body,
        title=getattr(template_panel, "title", "Steps"),
        border_style=getattr(template_panel, "border_style", "blue"),
        padding=getattr(template_panel, "padding", (0, 1)),
    )

    main_panel = self._render_main_panel()
    panels = self._build_live_panels(main_panel, steps_panel)

    self.live.update(Group(*panels))
|
|
699
|
+
|
|
700
|
+
def _build_live_panels(
|
|
701
|
+
self,
|
|
702
|
+
main_panel: Any,
|
|
703
|
+
steps_panel: Any,
|
|
704
|
+
) -> list[Any]:
|
|
705
|
+
"""Assemble the panel order for the live display."""
|
|
706
|
+
if self.verbose:
|
|
707
|
+
return [main_panel, steps_panel]
|
|
708
|
+
|
|
709
|
+
return [steps_panel, main_panel]
|
|
710
|
+
|
|
711
|
+
def _render_main_panel(self) -> Any:
    """Render the main content panel."""
    body = self.state.buffer.render().strip()
    theme = DEFAULT_TRANSCRIPT_THEME
    # In summary (non-verbose) mode, prefer the final result panel once
    # one can be built from the current state.
    if not self.verbose:
        panel = build_final_panel(self.state, theme=theme)
        if panel is not None:
            return panel
    # Dynamic title with spinner + elapsed/hints
    title = self._format_enhanced_main_title()
    return create_main_panel(body, title, theme)
|
|
722
|
+
|
|
723
|
+
def _final_panel_title(self) -> str:
    """Return the final result panel title (includes the run duration)."""
    title = format_final_panel_title(self.state)
    return title
|
|
726
|
+
|
|
727
|
+
def apply_verbosity(self, verbose: bool) -> None:
    """Update verbose behaviour at runtime.

    Verbose mode disables the live display (raw output is printed
    instead); summary mode re-enables it.
    """
    if self.verbose == verbose:
        return

    self.verbose = verbose
    desired_live = not verbose
    if desired_live != self.cfg.live:
        self.cfg.live = desired_live
        if not desired_live:
            self._shutdown_live()
        else:
            self._ensure_live()

    # NOTE(review): when live was just re-enabled above, _ensure_live runs
    # a second time here; it is presumably idempotent — confirm.
    if self.cfg.live:
        self._ensure_live()
|
|
743
|
+
|
|
744
|
+
# Transcript helper implementations live in TranscriptModeMixin.
|
|
745
|
+
|
|
746
|
+
def get_aggregated_output(self) -> str:
    """Return all assistant output collected so far, trimmed of whitespace."""
    rendered = self.state.buffer.render()
    return rendered.strip()
|
|
749
|
+
|
|
750
|
+
def get_transcript_events(self) -> list[dict[str, Any]]:
    """Return a shallow copy of the captured SSE events."""
    return [*self.state.events]
|
|
753
|
+
|
|
754
|
+
def _format_working_indicator(self, started_at: float | None) -> str:
    """Delegate working-indicator formatting to the shared helper."""
    server_elapsed = self.stream_processor.server_elapsed_time
    streaming_started = self.state.streaming_started_at
    return format_working_indicator(started_at, server_elapsed, streaming_started)
|
|
761
|
+
|
|
762
|
+
def close(self) -> None:
    """Gracefully stop any live rendering and release resources."""
    # Delegates to the idempotent shutdown helper.
    self._shutdown_live()
|
|
765
|
+
|
|
766
|
+
def __del__(self) -> None:
    """Destructor that ensures live rendering is properly shut down.

    This is a safety net to prevent resource leaks if the renderer
    is not explicitly stopped.
    """
    # Destructors must never raise
    try:
        # reset_attr=False: avoid touching attributes during interpreter
        # teardown, when they may already be gone.
        self._shutdown_live(reset_attr=False)
    except Exception:  # pragma: no cover - destructor safety net
        pass
|
|
777
|
+
|
|
778
|
+
def _shutdown_live(self, reset_attr: bool = True) -> None:
    """Stop the live renderer without letting exceptions escape.

    Args:
        reset_attr: When True, ``self.live`` is reset to ``None`` after
            stopping (and created as ``None`` if the attribute is
            missing entirely).
    """
    live = getattr(self, "live", None)
    if not live:
        # Normalise a missing attribute to an explicit None.
        if reset_attr and not hasattr(self, "live"):
            self.live = None
        return

    try:
        live.stop()
    except Exception:
        logger.exception("Failed to stop live display")
    finally:
        if reset_attr:
            self.live = None
|
|
793
|
+
|
|
794
|
+
def _get_analysis_progress_info(self) -> dict[str, Any]:
    """Summarise step progress and elapsed time for title rendering."""
    order = self.steps.order
    by_id = self.steps.by_id
    total_steps = len(order)
    completed_steps = sum(1 for sid in order if is_step_finished(by_id[sid]))
    # The first unfinished step (in order) is the "current" one.
    current_step = next((sid for sid in order if not is_step_finished(by_id[sid])), None)

    # Prefer the authoritative server-side elapsed time when available.
    server_elapsed = self.stream_processor.server_elapsed_time
    if isinstance(server_elapsed, (int, float)):
        elapsed = float(server_elapsed)
    elif self._started_at is not None:
        elapsed = monotonic() - self._started_at
    else:
        elapsed = 0.0

    progress_percent = int((completed_steps / total_steps) * 100) if total_steps else 0
    return {
        "total_steps": total_steps,
        "completed_steps": completed_steps,
        "current_step": current_step,
        "progress_percent": progress_percent,
        "elapsed_time": elapsed,
        "has_running_steps": self._has_running_steps(),
    }
|
|
817
|
+
|
|
818
|
+
def _format_enhanced_main_title(self) -> str:
    """Build the live main-panel title with elapsed time and slowness hints."""
    title = format_main_title(
        header_text=self.header_text,
        has_running_steps=self._has_running_steps(),
        get_spinner_char=get_spinner_char,
    )
    info = self._get_analysis_progress_info()
    elapsed = info.get("elapsed_time", 0.0)
    if not elapsed or elapsed <= 0:
        return title

    title += f" · {format_elapsed_time(elapsed)}"
    # For multi-step runs still in flight, append a subtle slowness hint.
    if info.get("total_steps", 0) > 1 and info.get("has_running_steps"):
        if elapsed > 60:
            title += " 🐌"
        elif elapsed > 30:
            title += " ⚠️"
    return title
|
|
835
|
+
|
|
836
|
+
# Modern interface only — no legacy helper shims below
|
|
837
|
+
|
|
838
|
+
def _refresh(self, _force: bool | None = None) -> None:
|
|
839
|
+
# In the modular renderer, refreshing simply updates the live group
|
|
840
|
+
self._ensure_live()
|
|
841
|
+
|
|
842
|
+
def _has_running_steps(self) -> bool:
    """Return True when at least one step has not finished yet."""
    return any(not is_step_finished(st) for st in self.steps.by_id.values())
|
|
848
|
+
|
|
849
|
+
def _get_step_icon(self, step_kind: str) -> str:
    """Map a step kind to its display icon ('' for unknown kinds)."""
    icons = {
        "tool": ICON_TOOL_STEP,
        "delegate": ICON_DELEGATE,
        "agent": ICON_AGENT_STEP,
    }
    return icons.get(step_kind, "")
|
|
858
|
+
|
|
859
|
+
def _format_step_status(self, step: Step) -> str:
    """Render a duration badge: final for finished steps, live otherwise."""
    if is_step_finished(step):
        return self._format_finished_badge(step)

    # Running steps show a live elapsed-time badge; seconds at or above
    # 100ms, otherwise milliseconds (blank when effectively zero).
    elapsed = self._calculate_step_elapsed_time(step)
    if elapsed >= 0.1:
        return f"[{elapsed:.2f}s]"
    millis = int(round(elapsed * 1000))
    return f"[{millis}ms]" if millis > 0 else ""
|
|
872
|
+
|
|
873
|
+
def _format_finished_badge(self, step: Step) -> str:
|
|
874
|
+
"""Compose duration badge for finished steps including source tagging."""
|
|
875
|
+
if getattr(step, "duration_unknown", False) is True:
|
|
876
|
+
payload = "??s"
|
|
877
|
+
else:
|
|
878
|
+
duration_ms = step.duration_ms
|
|
879
|
+
if duration_ms is None:
|
|
880
|
+
payload = "<1ms"
|
|
881
|
+
elif duration_ms < 0:
|
|
882
|
+
payload = "<1ms"
|
|
883
|
+
elif duration_ms >= 100:
|
|
884
|
+
payload = f"{duration_ms / 1000:.2f}s"
|
|
885
|
+
elif duration_ms > 0:
|
|
886
|
+
payload = f"{duration_ms}ms"
|
|
887
|
+
else:
|
|
888
|
+
payload = "<1ms"
|
|
889
|
+
|
|
890
|
+
return f"[{payload}]"
|
|
891
|
+
|
|
892
|
+
def _calculate_step_elapsed_time(self, step: Step) -> float:
|
|
893
|
+
"""Calculate elapsed time for a running step."""
|
|
894
|
+
server_elapsed = self.stream_processor.server_elapsed_time
|
|
895
|
+
server_start = self._step_server_start_times.get(step.step_id)
|
|
896
|
+
|
|
897
|
+
if isinstance(server_elapsed, (int, float)) and isinstance(server_start, (int, float)):
|
|
898
|
+
return max(0.0, float(server_elapsed) - float(server_start))
|
|
899
|
+
|
|
900
|
+
try:
|
|
901
|
+
return max(0.0, float(monotonic() - step.started_at))
|
|
902
|
+
except Exception:
|
|
903
|
+
return 0.0
|
|
904
|
+
|
|
905
|
+
def _get_step_display_name(self, step: Step) -> str:
|
|
906
|
+
"""Get display name for a step."""
|
|
907
|
+
if step.name and step.name != "step":
|
|
908
|
+
return step.name
|
|
909
|
+
return "thinking..." if step.kind == "agent" else f"{step.kind} step"
|
|
910
|
+
|
|
911
|
+
def _resolve_step_label(self, step: Step) -> str:
    """Return the display label for a step via the shared formatter."""
    label = format_step_label(step)
    return label
|
|
914
|
+
|
|
915
|
+
def _check_parallel_tools(self) -> dict[tuple[str | None, str | None], list]:
    """Group still-running tool steps by their (task, context) pair."""
    running_by_ctx: dict[tuple[str | None, str | None], list] = {}
    for step_id in self.steps.order:
        step = self.steps.by_id[step_id]
        if step.kind != "tool" or is_step_finished(step):
            continue
        running_by_ctx.setdefault((step.task_id, step.context_id), []).append(step)
    return running_by_ctx
|
|
924
|
+
|
|
925
|
+
def _is_parallel_tool(
|
|
926
|
+
self,
|
|
927
|
+
step: Step,
|
|
928
|
+
running_by_ctx: dict[tuple[str | None, str | None], list],
|
|
929
|
+
) -> bool:
|
|
930
|
+
"""Return True if multiple tools are running in the same context."""
|
|
931
|
+
key = (step.task_id, step.context_id)
|
|
932
|
+
return len(running_by_ctx.get(key, [])) > 1
|
|
933
|
+
|
|
934
|
+
def _build_step_status_overrides(self) -> dict[str, str]:
|
|
935
|
+
"""Return status text overrides for steps (running duration badges)."""
|
|
936
|
+
overrides: dict[str, str] = {}
|
|
937
|
+
for sid in self.steps.order:
|
|
938
|
+
step = self.steps.by_id.get(sid)
|
|
939
|
+
if not step:
|
|
940
|
+
continue
|
|
941
|
+
try:
|
|
942
|
+
status_text = self._format_step_status(step)
|
|
943
|
+
except Exception:
|
|
944
|
+
status_text = ""
|
|
945
|
+
if status_text:
|
|
946
|
+
overrides[sid] = status_text
|
|
947
|
+
return overrides
|
|
948
|
+
|
|
949
|
+
def _resolve_steps_panel(self) -> AIPPanel:
    """Return the shared steps panel renderable generated by layout helpers."""
    # A window of 0 or less means "no windowing" for the layout helpers.
    window_arg = self._summary_window_size()
    window_arg = window_arg if window_arg > 0 else None
    panels = render_summary_panels(
        self.state,
        self.steps,
        summary_window=window_arg,
        include_query_panel=False,
        include_final_panel=False,
        step_status_overrides=self._build_step_status_overrides(),
    )
    # Pick the panel titled "Steps" out of the rendered set.
    steps_panel = next((panel for panel in panels if getattr(panel, "title", "").lower() == "steps"), None)
    # Guard against AIPPanel having been patched to a non-type (e.g. in tests).
    panel_cls = AIPPanel if isinstance(AIPPanel, type) else None
    if steps_panel is not None and (panel_cls is None or isinstance(steps_panel, panel_cls)):
        return steps_panel
    # Fallback: an empty "Steps" panel.
    return AIPPanel(_NO_STEPS_TEXT.copy(), title="Steps", border_style="blue")
|
|
966
|
+
|
|
967
|
+
def _prepare_steps_renderable(self, *, include_progress: bool) -> tuple[AIPPanel, Any]:
    """Return the template panel and content renderable for steps.

    Args:
        include_progress: When True (and the UI is not finalizing), a
            progress footer is appended to the panel content.
    """
    panel = self._resolve_steps_panel()
    # Cache the template so live refreshes can reuse its styling.
    self._last_steps_panel_template = panel
    base_renderable: Any = getattr(panel, "renderable", panel)

    if include_progress and not self.state.finalizing_ui:
        footer = build_progress_footer(
            state=self.state,
            steps=self.steps,
            started_at=self._started_at,
            server_elapsed_time=self.stream_processor.server_elapsed_time,
        )
        if footer is not None:
            # Flatten existing groups so the footer lands at the same
            # level as the panel content instead of being nested.
            if isinstance(base_renderable, Group):
                base_renderable = Group(*base_renderable.renderables, footer)
            else:
                base_renderable = Group(base_renderable, footer)
    return panel, base_renderable
|
|
986
|
+
|
|
987
|
+
def _build_steps_body(self, *, include_progress: bool) -> Any:
    """Return the rendered steps body, always wrapped as a Group."""
    _, renderable = self._prepare_steps_renderable(include_progress=include_progress)
    return renderable if isinstance(renderable, Group) else Group(renderable)
|
|
993
|
+
|
|
994
|
+
def _render_steps_text(self) -> Any:
|
|
995
|
+
"""Return the rendered steps body used by transcript capture."""
|
|
996
|
+
return self._build_steps_body(include_progress=True)
|
|
997
|
+
|
|
998
|
+
def _summary_window_size(self) -> int:
|
|
999
|
+
"""Return the active window size for step display."""
|
|
1000
|
+
if self.state.finalizing_ui:
|
|
1001
|
+
return 0
|
|
1002
|
+
return int(self.cfg.summary_display_window or 0)
|
|
1003
|
+
|
|
1004
|
+
def _update_final_duration(self, duration: float | None, *, overwrite: bool = False) -> None:
|
|
1005
|
+
"""Store formatted duration for eventual final panels."""
|
|
1006
|
+
if duration is None:
|
|
1007
|
+
return
|
|
1008
|
+
|
|
1009
|
+
try:
|
|
1010
|
+
duration_val = max(0.0, float(duration))
|
|
1011
|
+
except Exception:
|
|
1012
|
+
return
|
|
1013
|
+
|
|
1014
|
+
existing = self.state.final_duration_seconds
|
|
1015
|
+
|
|
1016
|
+
if not overwrite and existing is not None:
|
|
1017
|
+
return
|
|
1018
|
+
|
|
1019
|
+
if overwrite and existing is not None:
|
|
1020
|
+
duration_val = max(existing, duration_val)
|
|
1021
|
+
|
|
1022
|
+
formatted = format_elapsed_time(duration_val)
|
|
1023
|
+
self.state.mark_final_duration(duration_val, formatted=formatted)
|
|
1024
|
+
self._apply_root_duration(duration_val)
|