glaip-sdk 0.0.19__py3-none-any.whl → 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- glaip_sdk/_version.py +2 -2
- glaip_sdk/branding.py +27 -2
- glaip_sdk/cli/auth.py +93 -28
- glaip_sdk/cli/commands/__init__.py +2 -2
- glaip_sdk/cli/commands/agents.py +127 -21
- glaip_sdk/cli/commands/configure.py +141 -90
- glaip_sdk/cli/commands/mcps.py +82 -31
- glaip_sdk/cli/commands/models.py +4 -3
- glaip_sdk/cli/commands/tools.py +27 -14
- glaip_sdk/cli/commands/update.py +66 -0
- glaip_sdk/cli/config.py +13 -2
- glaip_sdk/cli/display.py +35 -26
- glaip_sdk/cli/io.py +14 -5
- glaip_sdk/cli/main.py +185 -73
- glaip_sdk/cli/pager.py +2 -1
- glaip_sdk/cli/resolution.py +4 -1
- glaip_sdk/cli/slash/__init__.py +3 -4
- glaip_sdk/cli/slash/agent_session.py +88 -36
- glaip_sdk/cli/slash/prompt.py +20 -48
- glaip_sdk/cli/slash/session.py +437 -189
- glaip_sdk/cli/transcript/__init__.py +71 -0
- glaip_sdk/cli/transcript/cache.py +338 -0
- glaip_sdk/cli/transcript/capture.py +278 -0
- glaip_sdk/cli/transcript/export.py +38 -0
- glaip_sdk/cli/transcript/launcher.py +79 -0
- glaip_sdk/cli/transcript/viewer.py +794 -0
- glaip_sdk/cli/update_notifier.py +29 -5
- glaip_sdk/cli/utils.py +255 -74
- glaip_sdk/client/agents.py +3 -1
- glaip_sdk/client/run_rendering.py +126 -21
- glaip_sdk/icons.py +25 -0
- glaip_sdk/models.py +6 -0
- glaip_sdk/rich_components.py +29 -1
- glaip_sdk/utils/__init__.py +1 -1
- glaip_sdk/utils/client_utils.py +6 -4
- glaip_sdk/utils/display.py +61 -32
- glaip_sdk/utils/rendering/formatting.py +55 -11
- glaip_sdk/utils/rendering/models.py +15 -2
- glaip_sdk/utils/rendering/renderer/__init__.py +0 -2
- glaip_sdk/utils/rendering/renderer/base.py +1287 -227
- glaip_sdk/utils/rendering/renderer/config.py +3 -5
- glaip_sdk/utils/rendering/renderer/debug.py +73 -16
- glaip_sdk/utils/rendering/renderer/panels.py +27 -15
- glaip_sdk/utils/rendering/renderer/progress.py +61 -38
- glaip_sdk/utils/rendering/renderer/stream.py +3 -3
- glaip_sdk/utils/rendering/renderer/toggle.py +184 -0
- glaip_sdk/utils/rendering/step_tree_state.py +102 -0
- glaip_sdk/utils/rendering/steps.py +944 -16
- glaip_sdk/utils/serialization.py +5 -2
- glaip_sdk/utils/validation.py +1 -2
- {glaip_sdk-0.0.19.dist-info → glaip_sdk-0.1.0.dist-info}/METADATA +12 -1
- glaip_sdk-0.1.0.dist-info/RECORD +82 -0
- glaip_sdk/utils/rich_utils.py +0 -29
- glaip_sdk-0.0.19.dist-info/RECORD +0 -73
- {glaip_sdk-0.0.19.dist-info → glaip_sdk-0.1.0.dist-info}/WHEEL +0 -0
- {glaip_sdk-0.0.19.dist-info → glaip_sdk-0.1.0.dist-info}/entry_points.txt +0 -0
|
@@ -8,7 +8,9 @@ from __future__ import annotations
|
|
|
8
8
|
|
|
9
9
|
import json
|
|
10
10
|
import logging
|
|
11
|
-
from
|
|
11
|
+
from collections.abc import Iterable
|
|
12
|
+
from dataclasses import dataclass, field
|
|
13
|
+
from datetime import datetime, timezone
|
|
12
14
|
from time import monotonic
|
|
13
15
|
from typing import Any
|
|
14
16
|
|
|
@@ -16,13 +18,21 @@ from rich.console import Console as RichConsole
|
|
|
16
18
|
from rich.console import Group
|
|
17
19
|
from rich.live import Live
|
|
18
20
|
from rich.markdown import Markdown
|
|
21
|
+
from rich.measure import Measurement
|
|
22
|
+
from rich.spinner import Spinner
|
|
19
23
|
from rich.text import Text
|
|
20
24
|
|
|
25
|
+
from glaip_sdk.icons import ICON_AGENT, ICON_AGENT_STEP, ICON_DELEGATE, ICON_TOOL_STEP
|
|
21
26
|
from glaip_sdk.rich_components import AIPPanel
|
|
22
27
|
from glaip_sdk.utils.rendering.formatting import (
|
|
28
|
+
build_connector_prefix,
|
|
23
29
|
format_main_title,
|
|
24
30
|
get_spinner_char,
|
|
31
|
+
glyph_for_status,
|
|
25
32
|
is_step_finished,
|
|
33
|
+
normalise_display_label,
|
|
34
|
+
pretty_args,
|
|
35
|
+
redact_sensitive,
|
|
26
36
|
)
|
|
27
37
|
from glaip_sdk.utils.rendering.models import RunStats, Step
|
|
28
38
|
from glaip_sdk.utils.rendering.renderer.config import RendererConfig
|
|
@@ -47,6 +57,51 @@ logger = logging.getLogger("glaip_sdk.run_renderer")
|
|
|
47
57
|
|
|
48
58
|
# Constants
|
|
49
59
|
LESS_THAN_1MS = "[<1ms]"
|
|
60
|
+
FINISHED_STATUS_HINTS = {
|
|
61
|
+
"finished",
|
|
62
|
+
"success",
|
|
63
|
+
"succeeded",
|
|
64
|
+
"completed",
|
|
65
|
+
"failed",
|
|
66
|
+
"stopped",
|
|
67
|
+
"error",
|
|
68
|
+
}
|
|
69
|
+
RUNNING_STATUS_HINTS = {"running", "started", "pending", "working"}
|
|
70
|
+
ARGS_VALUE_MAX_LEN = 160
|
|
71
|
+
STATUS_ICON_STYLES = {
|
|
72
|
+
"success": "green",
|
|
73
|
+
"failed": "red",
|
|
74
|
+
"warning": "yellow",
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def _coerce_received_at(value: Any) -> datetime | None:
|
|
79
|
+
"""Coerce a received_at value to an aware datetime if possible."""
|
|
80
|
+
if value is None:
|
|
81
|
+
return None
|
|
82
|
+
|
|
83
|
+
if isinstance(value, datetime):
|
|
84
|
+
return value if value.tzinfo else value.replace(tzinfo=timezone.utc)
|
|
85
|
+
|
|
86
|
+
if isinstance(value, str):
|
|
87
|
+
try:
|
|
88
|
+
normalised = value.replace("Z", "+00:00")
|
|
89
|
+
dt = datetime.fromisoformat(normalised)
|
|
90
|
+
except ValueError:
|
|
91
|
+
return None
|
|
92
|
+
return dt if dt.tzinfo else dt.replace(tzinfo=timezone.utc)
|
|
93
|
+
|
|
94
|
+
return None
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
def _truncate_display(text: str | None, limit: int = 160) -> str:
|
|
98
|
+
"""Return text capped at the given character limit with ellipsis."""
|
|
99
|
+
if not text:
|
|
100
|
+
return ""
|
|
101
|
+
stripped = str(text).strip()
|
|
102
|
+
if len(stripped) <= limit:
|
|
103
|
+
return stripped
|
|
104
|
+
return stripped[: limit - 1] + "…"
|
|
50
105
|
|
|
51
106
|
|
|
52
107
|
@dataclass
|
|
@@ -56,10 +111,13 @@ class RendererState:
|
|
|
56
111
|
buffer: list[str] | None = None
|
|
57
112
|
final_text: str = ""
|
|
58
113
|
streaming_started_at: float | None = None
|
|
59
|
-
|
|
114
|
+
printed_final_output: bool = False
|
|
60
115
|
finalizing_ui: bool = False
|
|
61
116
|
final_duration_seconds: float | None = None
|
|
62
117
|
final_duration_text: str | None = None
|
|
118
|
+
events: list[dict[str, Any]] = field(default_factory=list)
|
|
119
|
+
meta: dict[str, Any] = field(default_factory=dict)
|
|
120
|
+
streaming_started_event_ts: datetime | None = None
|
|
63
121
|
|
|
64
122
|
def __post_init__(self) -> None:
|
|
65
123
|
"""Initialize renderer state after dataclass creation.
|
|
@@ -70,6 +128,43 @@ class RendererState:
|
|
|
70
128
|
self.buffer = []
|
|
71
129
|
|
|
72
130
|
|
|
131
|
+
@dataclass
|
|
132
|
+
class ThinkingScopeState:
|
|
133
|
+
"""Runtime bookkeeping for deterministic thinking spans."""
|
|
134
|
+
|
|
135
|
+
anchor_id: str
|
|
136
|
+
task_id: str | None
|
|
137
|
+
context_id: str | None
|
|
138
|
+
anchor_started_at: float | None = None
|
|
139
|
+
anchor_finished_at: float | None = None
|
|
140
|
+
idle_started_at: float | None = None
|
|
141
|
+
idle_started_monotonic: float | None = None
|
|
142
|
+
active_thinking_id: str | None = None
|
|
143
|
+
running_children: set[str] = field(default_factory=set)
|
|
144
|
+
closed: bool = False
|
|
145
|
+
|
|
146
|
+
|
|
147
|
+
class TrailingSpinnerLine:
|
|
148
|
+
"""Render a text line with a trailing animated Rich spinner."""
|
|
149
|
+
|
|
150
|
+
def __init__(self, base_text: Text, spinner: Spinner) -> None:
|
|
151
|
+
"""Initialize spinner line with base text and spinner component."""
|
|
152
|
+
self._base_text = base_text
|
|
153
|
+
self._spinner = spinner
|
|
154
|
+
|
|
155
|
+
def __rich_console__(self, console: RichConsole, options: Any) -> Any:
|
|
156
|
+
"""Render the text with trailing animated spinner."""
|
|
157
|
+
spinner_render = self._spinner.render(console.get_time())
|
|
158
|
+
combined = Text.assemble(self._base_text.copy(), " ", spinner_render)
|
|
159
|
+
yield combined
|
|
160
|
+
|
|
161
|
+
def __rich_measure__(self, console: RichConsole, options: Any) -> Measurement:
|
|
162
|
+
"""Measure the combined text and spinner dimensions."""
|
|
163
|
+
snapshot = self._spinner.render(0)
|
|
164
|
+
combined = Text.assemble(self._base_text.copy(), " ", snapshot)
|
|
165
|
+
return Measurement.get(console, options, combined)
|
|
166
|
+
|
|
167
|
+
|
|
73
168
|
class RichStreamRenderer:
|
|
74
169
|
"""Live, modern terminal renderer for agent execution with rich visual output."""
|
|
75
170
|
|
|
@@ -96,17 +191,18 @@ class RichStreamRenderer:
|
|
|
96
191
|
self.state = RendererState()
|
|
97
192
|
|
|
98
193
|
# Initialize step manager and other state
|
|
99
|
-
self.steps = StepManager()
|
|
194
|
+
self.steps = StepManager(max_steps=self.cfg.summary_max_steps)
|
|
100
195
|
# Live display instance (single source of truth)
|
|
101
196
|
self.live: Live | None = None
|
|
197
|
+
self._step_spinners: dict[str, Spinner] = {}
|
|
102
198
|
|
|
103
|
-
#
|
|
104
|
-
self.context_order: list[str] = []
|
|
105
|
-
self.context_parent: dict[str, str] = {}
|
|
106
|
-
self.tool_order: list[str] = []
|
|
107
|
-
self.context_panels: dict[str, list[str]] = {}
|
|
108
|
-
self.context_meta: dict[str, dict[str, Any]] = {}
|
|
199
|
+
# Tool tracking and thinking scopes
|
|
109
200
|
self.tool_panels: dict[str, dict[str, Any]] = {}
|
|
201
|
+
self._thinking_scopes: dict[str, ThinkingScopeState] = {}
|
|
202
|
+
self._root_agent_friendly: str | None = None
|
|
203
|
+
self._root_agent_step_id: str | None = None
|
|
204
|
+
self._root_query: str | None = None
|
|
205
|
+
self._root_query_attached: bool = False
|
|
110
206
|
|
|
111
207
|
# Timing
|
|
112
208
|
self._started_at: float | None = None
|
|
@@ -119,6 +215,21 @@ class RichStreamRenderer:
|
|
|
119
215
|
# Output formatting constants
|
|
120
216
|
self.OUTPUT_PREFIX: str = "**Output:**\n"
|
|
121
217
|
|
|
218
|
+
# Transcript toggling
|
|
219
|
+
self._transcript_mode_enabled: bool = False
|
|
220
|
+
self._transcript_render_cursor: int = 0
|
|
221
|
+
self.transcript_controller: Any | None = None
|
|
222
|
+
self._transcript_hint_message = (
|
|
223
|
+
"[dim]Transcript view · Press Ctrl+T to return to the summary.[/dim]"
|
|
224
|
+
)
|
|
225
|
+
self._summary_hint_message = (
|
|
226
|
+
"[dim]Press Ctrl+T to inspect raw transcript events.[/dim]"
|
|
227
|
+
)
|
|
228
|
+
self._summary_hint_printed_once: bool = False
|
|
229
|
+
self._transcript_hint_printed_once: bool = False
|
|
230
|
+
self._transcript_header_printed: bool = False
|
|
231
|
+
self._transcript_enabled_message_printed: bool = False
|
|
232
|
+
|
|
122
233
|
def on_start(self, meta: dict[str, Any]) -> None:
|
|
123
234
|
"""Handle renderer start event."""
|
|
124
235
|
if self.cfg.live:
|
|
@@ -127,7 +238,26 @@ class RichStreamRenderer:
|
|
|
127
238
|
|
|
128
239
|
# Set up initial state
|
|
129
240
|
self._started_at = monotonic()
|
|
130
|
-
|
|
241
|
+
try:
|
|
242
|
+
self.state.meta = json.loads(json.dumps(meta))
|
|
243
|
+
except Exception:
|
|
244
|
+
self.state.meta = dict(meta)
|
|
245
|
+
|
|
246
|
+
meta_payload = meta or {}
|
|
247
|
+
self.steps.set_root_agent(meta_payload.get("agent_id"))
|
|
248
|
+
self._root_agent_friendly = self._humanize_agent_slug(
|
|
249
|
+
meta_payload.get("agent_name")
|
|
250
|
+
)
|
|
251
|
+
self._root_query = _truncate_display(
|
|
252
|
+
meta_payload.get("input_message")
|
|
253
|
+
or meta_payload.get("query")
|
|
254
|
+
or meta_payload.get("message")
|
|
255
|
+
or (meta_payload.get("meta") or {}).get("input_message")
|
|
256
|
+
or ""
|
|
257
|
+
)
|
|
258
|
+
if not self._root_query:
|
|
259
|
+
self._root_query = None
|
|
260
|
+
self._root_query_attached = False
|
|
131
261
|
|
|
132
262
|
# Print compact header and user request (parity with old renderer)
|
|
133
263
|
self._render_header(meta)
|
|
@@ -147,7 +277,7 @@ class RichStreamRenderer:
|
|
|
147
277
|
|
|
148
278
|
def _build_header_parts(self, meta: dict[str, Any]) -> list[str]:
|
|
149
279
|
"""Build header text parts from metadata."""
|
|
150
|
-
parts: list[str] = [
|
|
280
|
+
parts: list[str] = [ICON_AGENT]
|
|
151
281
|
agent_name = meta.get("agent_name", "agent")
|
|
152
282
|
if agent_name:
|
|
153
283
|
parts.append(agent_name)
|
|
@@ -178,44 +308,117 @@ class RichStreamRenderer:
|
|
|
178
308
|
except Exception:
|
|
179
309
|
logger.exception("Failed to print header fallback")
|
|
180
310
|
|
|
311
|
+
def _extract_query_from_meta(self, meta: dict[str, Any] | None) -> str | None:
|
|
312
|
+
"""Extract the primary query string from a metadata payload."""
|
|
313
|
+
if not meta:
|
|
314
|
+
return None
|
|
315
|
+
query = (
|
|
316
|
+
meta.get("input_message")
|
|
317
|
+
or meta.get("query")
|
|
318
|
+
or meta.get("message")
|
|
319
|
+
or (meta.get("meta") or {}).get("input_message")
|
|
320
|
+
)
|
|
321
|
+
if isinstance(query, str) and query.strip():
|
|
322
|
+
return query
|
|
323
|
+
return None
|
|
324
|
+
|
|
325
|
+
def _build_user_query_panel(self, query: str) -> AIPPanel:
|
|
326
|
+
"""Create the panel used to display the user request."""
|
|
327
|
+
return AIPPanel(
|
|
328
|
+
Markdown(f"**Query:** {query}"),
|
|
329
|
+
title="User Request",
|
|
330
|
+
border_style="#d97706",
|
|
331
|
+
padding=(0, 1),
|
|
332
|
+
)
|
|
333
|
+
|
|
181
334
|
def _render_user_query(self, meta: dict[str, Any]) -> None:
|
|
182
335
|
"""Render the user query panel."""
|
|
183
|
-
query =
|
|
336
|
+
query = self._extract_query_from_meta(meta)
|
|
184
337
|
if not query:
|
|
185
338
|
return
|
|
339
|
+
self.console.print(self._build_user_query_panel(query))
|
|
340
|
+
|
|
341
|
+
def _render_summary_static_sections(self) -> None:
|
|
342
|
+
"""Re-render header and user query when returning to summary mode."""
|
|
343
|
+
meta = getattr(self.state, "meta", None)
|
|
344
|
+
if meta:
|
|
345
|
+
self._render_header(meta)
|
|
346
|
+
elif self.header_text and not self._render_header_rule():
|
|
347
|
+
self._render_header_fallback()
|
|
186
348
|
|
|
187
|
-
self.
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
349
|
+
query = self._extract_query_from_meta(meta) or self._root_query
|
|
350
|
+
if query:
|
|
351
|
+
self.console.print(self._build_user_query_panel(query))
|
|
352
|
+
|
|
353
|
+
def _ensure_streaming_started_baseline(self, timestamp: float) -> None:
|
|
354
|
+
"""Synchronize streaming start state across renderer components."""
|
|
355
|
+
self.state.streaming_started_at = timestamp
|
|
356
|
+
self.stream_processor.streaming_started_at = timestamp
|
|
357
|
+
self._started_at = timestamp
|
|
195
358
|
|
|
196
359
|
def on_event(self, ev: dict[str, Any]) -> None:
|
|
197
360
|
"""Handle streaming events from the backend."""
|
|
198
|
-
|
|
361
|
+
received_at = self._resolve_received_timestamp(ev)
|
|
362
|
+
self._capture_event(ev, received_at)
|
|
199
363
|
self.stream_processor.reset_event_tracking()
|
|
200
364
|
|
|
201
|
-
|
|
202
|
-
if self.state.streaming_started_at is None:
|
|
203
|
-
self.state.streaming_started_at = monotonic()
|
|
365
|
+
self._sync_stream_start(ev, received_at)
|
|
204
366
|
|
|
205
|
-
# Extract event metadata
|
|
206
367
|
metadata = self.stream_processor.extract_event_metadata(ev)
|
|
207
|
-
kind = metadata["kind"]
|
|
208
|
-
context_id = metadata["context_id"]
|
|
209
|
-
content = metadata["content"]
|
|
210
368
|
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
369
|
+
self._maybe_render_debug(ev, received_at)
|
|
370
|
+
try:
|
|
371
|
+
self._dispatch_event(ev, metadata)
|
|
372
|
+
finally:
|
|
373
|
+
self.stream_processor.update_timing(metadata.get("context_id"))
|
|
374
|
+
|
|
375
|
+
def _resolve_received_timestamp(self, ev: dict[str, Any]) -> datetime:
|
|
376
|
+
"""Return the timestamp an event was received, normalising inputs."""
|
|
377
|
+
received_at = _coerce_received_at(ev.get("received_at"))
|
|
378
|
+
if received_at is None:
|
|
379
|
+
received_at = datetime.now(timezone.utc)
|
|
214
380
|
|
|
215
|
-
|
|
216
|
-
|
|
381
|
+
if self.state.streaming_started_event_ts is None:
|
|
382
|
+
self.state.streaming_started_event_ts = received_at
|
|
383
|
+
|
|
384
|
+
return received_at
|
|
385
|
+
|
|
386
|
+
def _sync_stream_start(
|
|
387
|
+
self, ev: dict[str, Any], received_at: datetime | None
|
|
388
|
+
) -> None:
|
|
389
|
+
"""Ensure renderer and stream processor share a streaming baseline."""
|
|
390
|
+
baseline = self.state.streaming_started_at
|
|
391
|
+
if baseline is None:
|
|
392
|
+
baseline = monotonic()
|
|
393
|
+
self._ensure_streaming_started_baseline(baseline)
|
|
394
|
+
elif getattr(self.stream_processor, "streaming_started_at", None) is None:
|
|
395
|
+
self._ensure_streaming_started_baseline(baseline)
|
|
396
|
+
|
|
397
|
+
if ev.get("status") == "streaming_started":
|
|
398
|
+
self.state.streaming_started_event_ts = received_at
|
|
399
|
+
self._ensure_streaming_started_baseline(monotonic())
|
|
400
|
+
|
|
401
|
+
def _maybe_render_debug(
|
|
402
|
+
self, ev: dict[str, Any], received_at: datetime
|
|
403
|
+
) -> None: # pragma: no cover - guard rails for verbose mode
|
|
404
|
+
"""Render debug view when verbose mode is enabled."""
|
|
405
|
+
if not self.verbose:
|
|
406
|
+
return
|
|
407
|
+
|
|
408
|
+
self._ensure_transcript_header()
|
|
409
|
+
render_debug_event(
|
|
410
|
+
ev,
|
|
411
|
+
self.console,
|
|
412
|
+
received_ts=received_at,
|
|
413
|
+
baseline_ts=self.state.streaming_started_event_ts,
|
|
414
|
+
)
|
|
415
|
+
self._print_transcript_hint()
|
|
416
|
+
|
|
417
|
+
def _dispatch_event(self, ev: dict[str, Any], metadata: dict[str, Any]) -> None:
|
|
418
|
+
"""Route events to the appropriate renderer handlers."""
|
|
419
|
+
kind = metadata["kind"]
|
|
420
|
+
content = metadata["content"]
|
|
217
421
|
|
|
218
|
-
# Handle different event types
|
|
219
422
|
if kind == "status":
|
|
220
423
|
self._handle_status_event(ev)
|
|
221
424
|
elif kind == "content":
|
|
@@ -223,16 +426,15 @@ class RichStreamRenderer:
|
|
|
223
426
|
elif kind == "final_response":
|
|
224
427
|
self._handle_final_response_event(content, metadata)
|
|
225
428
|
elif kind in {"agent_step", "agent_thinking_step"}:
|
|
226
|
-
self._handle_agent_step_event(ev)
|
|
429
|
+
self._handle_agent_step_event(ev, metadata)
|
|
227
430
|
else:
|
|
228
|
-
# Update live display for unhandled events
|
|
229
431
|
self._ensure_live()
|
|
230
432
|
|
|
231
433
|
def _handle_status_event(self, ev: dict[str, Any]) -> None:
|
|
232
434
|
"""Handle status events."""
|
|
233
435
|
status = ev.get("status")
|
|
234
436
|
if status == "streaming_started":
|
|
235
|
-
|
|
437
|
+
return
|
|
236
438
|
|
|
237
439
|
def _handle_content_event(self, content: str) -> None:
|
|
238
440
|
"""Handle content streaming events."""
|
|
@@ -249,21 +451,30 @@ class RichStreamRenderer:
|
|
|
249
451
|
self.state.final_text = content
|
|
250
452
|
|
|
251
453
|
meta_payload = metadata.get("metadata") or {}
|
|
252
|
-
self.
|
|
454
|
+
final_time = self._coerce_server_time(meta_payload.get("time"))
|
|
455
|
+
self._update_final_duration(final_time)
|
|
456
|
+
self._close_active_thinking_scopes(final_time)
|
|
457
|
+
self._finish_running_steps()
|
|
458
|
+
self._finish_tool_panels()
|
|
459
|
+
self._normalise_finished_icons()
|
|
253
460
|
|
|
254
|
-
|
|
461
|
+
self._ensure_live()
|
|
462
|
+
self._print_final_panel_if_needed()
|
|
255
463
|
|
|
256
|
-
|
|
257
|
-
|
|
258
|
-
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
464
|
+
def _normalise_finished_icons(self) -> None:
|
|
465
|
+
"""Ensure finished steps do not keep spinner icons."""
|
|
466
|
+
for step in self.steps.by_id.values():
|
|
467
|
+
if (
|
|
468
|
+
getattr(step, "status", None) == "finished"
|
|
469
|
+
and getattr(step, "status_icon", None) == "spinner"
|
|
470
|
+
):
|
|
471
|
+
step.status_icon = "success"
|
|
472
|
+
if getattr(step, "status", None) != "running":
|
|
473
|
+
self._step_spinners.pop(step.step_id, None)
|
|
265
474
|
|
|
266
|
-
def _handle_agent_step_event(
|
|
475
|
+
def _handle_agent_step_event(
|
|
476
|
+
self, ev: dict[str, Any], metadata: dict[str, Any]
|
|
477
|
+
) -> None:
|
|
267
478
|
"""Handle agent step events."""
|
|
268
479
|
# Extract tool information
|
|
269
480
|
(
|
|
@@ -273,22 +484,402 @@ class RichStreamRenderer:
|
|
|
273
484
|
tool_calls_info,
|
|
274
485
|
) = self.stream_processor.parse_tool_calls(ev)
|
|
275
486
|
|
|
276
|
-
|
|
487
|
+
payload = metadata.get("metadata") or {}
|
|
488
|
+
|
|
489
|
+
tracked_step: Step | None = None
|
|
490
|
+
try:
|
|
491
|
+
tracked_step = self.steps.apply_event(ev)
|
|
492
|
+
except ValueError:
|
|
493
|
+
logger.debug("Malformed step event skipped", exc_info=True)
|
|
494
|
+
else:
|
|
495
|
+
self._record_step_server_start(tracked_step, payload)
|
|
496
|
+
self._update_thinking_timeline(tracked_step, payload)
|
|
497
|
+
self._maybe_override_root_agent_label(tracked_step, payload)
|
|
498
|
+
self._maybe_attach_root_query(tracked_step)
|
|
499
|
+
|
|
500
|
+
# Track tools and sub-agents for transcript/debug context
|
|
277
501
|
self.stream_processor.track_tools_and_agents(
|
|
278
502
|
tool_name, tool_calls_info, is_delegation_tool
|
|
279
503
|
)
|
|
280
504
|
|
|
281
505
|
# Handle tool execution
|
|
282
|
-
self._handle_agent_step(
|
|
506
|
+
self._handle_agent_step(
|
|
507
|
+
ev,
|
|
508
|
+
tool_name,
|
|
509
|
+
tool_args,
|
|
510
|
+
tool_out,
|
|
511
|
+
tool_calls_info,
|
|
512
|
+
tracked_step=tracked_step,
|
|
513
|
+
)
|
|
283
514
|
|
|
284
515
|
# Update live display
|
|
285
516
|
self._ensure_live()
|
|
286
517
|
|
|
518
|
+
def _maybe_attach_root_query(self, step: Step | None) -> None:
|
|
519
|
+
"""Attach the user query to the root agent step for display."""
|
|
520
|
+
if (
|
|
521
|
+
not step
|
|
522
|
+
or self._root_query_attached
|
|
523
|
+
or not self._root_query
|
|
524
|
+
or step.kind != "agent"
|
|
525
|
+
or step.parent_id
|
|
526
|
+
):
|
|
527
|
+
return
|
|
528
|
+
|
|
529
|
+
args = dict(getattr(step, "args", {}) or {})
|
|
530
|
+
args.setdefault("query", self._root_query)
|
|
531
|
+
step.args = args
|
|
532
|
+
self._root_query_attached = True
|
|
533
|
+
|
|
534
|
+
def _record_step_server_start(
|
|
535
|
+
self, step: Step | None, payload: dict[str, Any]
|
|
536
|
+
) -> None:
|
|
537
|
+
"""Store server-provided start times for elapsed calculations."""
|
|
538
|
+
if not step:
|
|
539
|
+
return
|
|
540
|
+
server_time = payload.get("time")
|
|
541
|
+
if not isinstance(server_time, (int, float)):
|
|
542
|
+
return
|
|
543
|
+
self._step_server_start_times.setdefault(step.step_id, float(server_time))
|
|
544
|
+
|
|
545
|
+
def _maybe_override_root_agent_label(
|
|
546
|
+
self, step: Step | None, payload: dict[str, Any]
|
|
547
|
+
) -> None:
|
|
548
|
+
"""Ensure the root agent row uses the human-friendly name and shows the ID."""
|
|
549
|
+
if not step or step.kind != "agent" or step.parent_id:
|
|
550
|
+
return
|
|
551
|
+
friendly = self._root_agent_friendly or self._humanize_agent_slug(
|
|
552
|
+
(payload or {}).get("agent_name")
|
|
553
|
+
)
|
|
554
|
+
if not friendly:
|
|
555
|
+
return
|
|
556
|
+
agent_identifier = step.name or step.step_id
|
|
557
|
+
if not agent_identifier:
|
|
558
|
+
return
|
|
559
|
+
step.display_label = normalise_display_label(
|
|
560
|
+
f"{ICON_AGENT} {friendly} ({agent_identifier})"
|
|
561
|
+
)
|
|
562
|
+
if not self._root_agent_step_id:
|
|
563
|
+
self._root_agent_step_id = step.step_id
|
|
564
|
+
|
|
565
|
+
def _update_thinking_timeline(
|
|
566
|
+
self, step: Step | None, payload: dict[str, Any]
|
|
567
|
+
) -> None:
|
|
568
|
+
"""Maintain deterministic thinking spans for each agent/delegate scope."""
|
|
569
|
+
if not self.cfg.render_thinking or not step:
|
|
570
|
+
return
|
|
571
|
+
|
|
572
|
+
now_monotonic = monotonic()
|
|
573
|
+
server_time = self._coerce_server_time(payload.get("time"))
|
|
574
|
+
status_hint = (payload.get("status") or "").lower()
|
|
575
|
+
|
|
576
|
+
if self._is_scope_anchor(step):
|
|
577
|
+
self._update_anchor_thinking(
|
|
578
|
+
step=step,
|
|
579
|
+
server_time=server_time,
|
|
580
|
+
status_hint=status_hint,
|
|
581
|
+
now_monotonic=now_monotonic,
|
|
582
|
+
)
|
|
583
|
+
return
|
|
584
|
+
|
|
585
|
+
self._update_child_thinking(
|
|
586
|
+
step=step,
|
|
587
|
+
server_time=server_time,
|
|
588
|
+
status_hint=status_hint,
|
|
589
|
+
now_monotonic=now_monotonic,
|
|
590
|
+
)
|
|
591
|
+
|
|
592
|
+
def _update_anchor_thinking(
|
|
593
|
+
self,
|
|
594
|
+
*,
|
|
595
|
+
step: Step,
|
|
596
|
+
server_time: float | None,
|
|
597
|
+
status_hint: str,
|
|
598
|
+
now_monotonic: float,
|
|
599
|
+
) -> None:
|
|
600
|
+
"""Handle deterministic thinking bookkeeping for agent/delegate anchors."""
|
|
601
|
+
scope = self._get_or_create_scope(step)
|
|
602
|
+
if scope.anchor_started_at is None and server_time is not None:
|
|
603
|
+
scope.anchor_started_at = server_time
|
|
604
|
+
|
|
605
|
+
if not scope.closed and scope.active_thinking_id is None:
|
|
606
|
+
self._start_scope_thinking(
|
|
607
|
+
scope,
|
|
608
|
+
start_server_time=scope.anchor_started_at or server_time,
|
|
609
|
+
start_monotonic=now_monotonic,
|
|
610
|
+
)
|
|
611
|
+
|
|
612
|
+
is_anchor_finished = status_hint in FINISHED_STATUS_HINTS or (
|
|
613
|
+
not status_hint and is_step_finished(step)
|
|
614
|
+
)
|
|
615
|
+
if is_anchor_finished:
|
|
616
|
+
scope.anchor_finished_at = server_time or scope.anchor_finished_at
|
|
617
|
+
self._finish_scope_thinking(scope, server_time, now_monotonic)
|
|
618
|
+
scope.closed = True
|
|
619
|
+
|
|
620
|
+
parent_anchor_id = self._resolve_anchor_id(step)
|
|
621
|
+
if parent_anchor_id:
|
|
622
|
+
self._cascade_anchor_update(
|
|
623
|
+
parent_anchor_id=parent_anchor_id,
|
|
624
|
+
child_step=step,
|
|
625
|
+
server_time=server_time,
|
|
626
|
+
now_monotonic=now_monotonic,
|
|
627
|
+
is_finished=is_anchor_finished,
|
|
628
|
+
)
|
|
629
|
+
|
|
630
|
+
def _cascade_anchor_update(
|
|
631
|
+
self,
|
|
632
|
+
*,
|
|
633
|
+
parent_anchor_id: str,
|
|
634
|
+
child_step: Step,
|
|
635
|
+
server_time: float | None,
|
|
636
|
+
now_monotonic: float,
|
|
637
|
+
is_finished: bool,
|
|
638
|
+
) -> None:
|
|
639
|
+
"""Propagate anchor state changes to the parent scope."""
|
|
640
|
+
parent_scope = self._thinking_scopes.get(parent_anchor_id)
|
|
641
|
+
if not parent_scope or parent_scope.closed:
|
|
642
|
+
return
|
|
643
|
+
if is_finished:
|
|
644
|
+
self._mark_child_finished(
|
|
645
|
+
parent_scope, child_step.step_id, server_time, now_monotonic
|
|
646
|
+
)
|
|
647
|
+
else:
|
|
648
|
+
self._mark_child_running(
|
|
649
|
+
parent_scope, child_step, server_time, now_monotonic
|
|
650
|
+
)
|
|
651
|
+
|
|
652
|
+
def _update_child_thinking(
|
|
653
|
+
self,
|
|
654
|
+
*,
|
|
655
|
+
step: Step,
|
|
656
|
+
server_time: float | None,
|
|
657
|
+
status_hint: str,
|
|
658
|
+
now_monotonic: float,
|
|
659
|
+
) -> None:
|
|
660
|
+
"""Update deterministic thinking state for non-anchor steps."""
|
|
661
|
+
anchor_id = self._resolve_anchor_id(step)
|
|
662
|
+
if not anchor_id:
|
|
663
|
+
return
|
|
664
|
+
|
|
665
|
+
scope = self._thinking_scopes.get(anchor_id)
|
|
666
|
+
if not scope or scope.closed or step.kind == "thinking":
|
|
667
|
+
return
|
|
668
|
+
|
|
669
|
+
is_finish_event = status_hint in FINISHED_STATUS_HINTS or (
|
|
670
|
+
not status_hint and is_step_finished(step)
|
|
671
|
+
)
|
|
672
|
+
if is_finish_event:
|
|
673
|
+
self._mark_child_finished(scope, step.step_id, server_time, now_monotonic)
|
|
674
|
+
else:
|
|
675
|
+
self._mark_child_running(scope, step, server_time, now_monotonic)
|
|
676
|
+
|
|
677
|
+
def _resolve_anchor_id(self, step: Step) -> str | None:
|
|
678
|
+
"""Return the nearest agent/delegate ancestor for a step."""
|
|
679
|
+
parent_id = step.parent_id
|
|
680
|
+
while parent_id:
|
|
681
|
+
parent = self.steps.by_id.get(parent_id)
|
|
682
|
+
if not parent:
|
|
683
|
+
return None
|
|
684
|
+
if self._is_scope_anchor(parent):
|
|
685
|
+
return parent.step_id
|
|
686
|
+
parent_id = parent.parent_id
|
|
687
|
+
return None
|
|
688
|
+
|
|
689
|
+
def _get_or_create_scope(self, step: Step) -> ThinkingScopeState:
|
|
690
|
+
"""Fetch (or create) thinking state for the given anchor step."""
|
|
691
|
+
scope = self._thinking_scopes.get(step.step_id)
|
|
692
|
+
if scope:
|
|
693
|
+
if scope.task_id is None:
|
|
694
|
+
scope.task_id = step.task_id
|
|
695
|
+
if scope.context_id is None:
|
|
696
|
+
scope.context_id = step.context_id
|
|
697
|
+
return scope
|
|
698
|
+
scope = ThinkingScopeState(
|
|
699
|
+
anchor_id=step.step_id,
|
|
700
|
+
task_id=step.task_id,
|
|
701
|
+
context_id=step.context_id,
|
|
702
|
+
)
|
|
703
|
+
self._thinking_scopes[step.step_id] = scope
|
|
704
|
+
return scope
|
|
705
|
+
|
|
706
|
+
def _is_scope_anchor(self, step: Step) -> bool:
|
|
707
|
+
"""Return True when a step should host its own thinking timeline."""
|
|
708
|
+
if step.kind in {"agent", "delegate"}:
|
|
709
|
+
return True
|
|
710
|
+
name = (step.name or "").lower()
|
|
711
|
+
return name.startswith(("delegate_to_", "delegate_", "delegate "))
|
|
712
|
+
|
|
713
|
+
def _start_scope_thinking(
|
|
714
|
+
self,
|
|
715
|
+
scope: ThinkingScopeState,
|
|
716
|
+
*,
|
|
717
|
+
start_server_time: float | None,
|
|
718
|
+
start_monotonic: float,
|
|
719
|
+
) -> None:
|
|
720
|
+
"""Open a deterministic thinking node beneath the scope anchor."""
|
|
721
|
+
if scope.closed or scope.active_thinking_id or not scope.anchor_id:
|
|
722
|
+
return
|
|
723
|
+
step = self.steps.start_or_get(
|
|
724
|
+
task_id=scope.task_id,
|
|
725
|
+
context_id=scope.context_id,
|
|
726
|
+
kind="thinking",
|
|
727
|
+
name=f"agent_thinking_step::{scope.anchor_id}",
|
|
728
|
+
parent_id=scope.anchor_id,
|
|
729
|
+
args={"reason": "deterministic_timeline"},
|
|
730
|
+
)
|
|
731
|
+
step.display_label = "💭 Thinking…"
|
|
732
|
+
step.status_icon = "spinner"
|
|
733
|
+
scope.active_thinking_id = step.step_id
|
|
734
|
+
scope.idle_started_at = start_server_time
|
|
735
|
+
scope.idle_started_monotonic = start_monotonic
|
|
736
|
+
|
|
737
|
+
def _finish_scope_thinking(
|
|
738
|
+
self,
|
|
739
|
+
scope: ThinkingScopeState,
|
|
740
|
+
end_server_time: float | None,
|
|
741
|
+
end_monotonic: float,
|
|
742
|
+
) -> None:
|
|
743
|
+
"""Close the currently running thinking node if one exists."""
|
|
744
|
+
if not scope.active_thinking_id:
|
|
745
|
+
return
|
|
746
|
+
thinking_step = self.steps.by_id.get(scope.active_thinking_id)
|
|
747
|
+
if not thinking_step:
|
|
748
|
+
scope.active_thinking_id = None
|
|
749
|
+
scope.idle_started_at = None
|
|
750
|
+
scope.idle_started_monotonic = None
|
|
751
|
+
return
|
|
752
|
+
|
|
753
|
+
duration = self._calculate_timeline_duration(
|
|
754
|
+
scope.idle_started_at,
|
|
755
|
+
end_server_time,
|
|
756
|
+
scope.idle_started_monotonic,
|
|
757
|
+
end_monotonic,
|
|
758
|
+
)
|
|
759
|
+
thinking_step.display_label = thinking_step.display_label or "💭 Thinking…"
|
|
760
|
+
if duration is not None:
|
|
761
|
+
thinking_step.finish(duration, source="timeline")
|
|
762
|
+
else:
|
|
763
|
+
thinking_step.finish(None, source="timeline")
|
|
764
|
+
thinking_step.status_icon = "success"
|
|
765
|
+
scope.active_thinking_id = None
|
|
766
|
+
scope.idle_started_at = None
|
|
767
|
+
scope.idle_started_monotonic = None
|
|
768
|
+
|
|
769
|
+
def _mark_child_running(
|
|
770
|
+
self,
|
|
771
|
+
scope: ThinkingScopeState,
|
|
772
|
+
step: Step,
|
|
773
|
+
server_time: float | None,
|
|
774
|
+
now_monotonic: float,
|
|
775
|
+
) -> None:
|
|
776
|
+
"""Mark a direct child as running and close any open thinking node."""
|
|
777
|
+
if step.step_id in scope.running_children:
|
|
778
|
+
return
|
|
779
|
+
scope.running_children.add(step.step_id)
|
|
780
|
+
if not scope.active_thinking_id:
|
|
781
|
+
return
|
|
782
|
+
|
|
783
|
+
start_server = self._step_server_start_times.get(step.step_id)
|
|
784
|
+
if start_server is None:
|
|
785
|
+
start_server = server_time
|
|
786
|
+
self._finish_scope_thinking(scope, start_server, now_monotonic)
|
|
787
|
+
|
|
788
|
+
def _mark_child_finished(
|
|
789
|
+
self,
|
|
790
|
+
scope: ThinkingScopeState,
|
|
791
|
+
step_id: str,
|
|
792
|
+
server_time: float | None,
|
|
793
|
+
now_monotonic: float,
|
|
794
|
+
) -> None:
|
|
795
|
+
"""Handle completion for a scope child and resume thinking if idle."""
|
|
796
|
+
if step_id in scope.running_children:
|
|
797
|
+
scope.running_children.discard(step_id)
|
|
798
|
+
if scope.running_children or scope.closed:
|
|
799
|
+
return
|
|
800
|
+
self._start_scope_thinking(
|
|
801
|
+
scope,
|
|
802
|
+
start_server_time=server_time,
|
|
803
|
+
start_monotonic=now_monotonic,
|
|
804
|
+
)
|
|
805
|
+
|
|
806
|
+
def _close_active_thinking_scopes(self, server_time: float | None) -> None:
|
|
807
|
+
"""Finish any in-flight thinking nodes during finalization."""
|
|
808
|
+
now = monotonic()
|
|
809
|
+
for scope in self._thinking_scopes.values():
|
|
810
|
+
if not scope.active_thinking_id:
|
|
811
|
+
continue
|
|
812
|
+
self._finish_scope_thinking(scope, server_time, now)
|
|
813
|
+
scope.closed = True
|
|
814
|
+
# Parent scopes resume thinking via _cascade_anchor_update
|
|
815
|
+
|
|
816
|
+
def _apply_root_duration(self, duration_seconds: float | None) -> None:
|
|
817
|
+
"""Propagate the final run duration to the root agent step."""
|
|
818
|
+
if duration_seconds is None or not self._root_agent_step_id:
|
|
819
|
+
return
|
|
820
|
+
root_step = self.steps.by_id.get(self._root_agent_step_id)
|
|
821
|
+
if not root_step:
|
|
822
|
+
return
|
|
823
|
+
try:
|
|
824
|
+
duration_ms = max(0, int(round(float(duration_seconds) * 1000)))
|
|
825
|
+
except Exception:
|
|
826
|
+
return
|
|
827
|
+
root_step.duration_ms = duration_ms
|
|
828
|
+
root_step.duration_source = root_step.duration_source or "run"
|
|
829
|
+
root_step.status = "finished"
|
|
830
|
+
|
|
831
|
+
@staticmethod
|
|
832
|
+
def _coerce_server_time(value: Any) -> float | None:
|
|
833
|
+
"""Convert a raw SSE time payload into a float if possible."""
|
|
834
|
+
if isinstance(value, (int, float)):
|
|
835
|
+
return float(value)
|
|
836
|
+
try:
|
|
837
|
+
return float(value)
|
|
838
|
+
except (TypeError, ValueError):
|
|
839
|
+
return None
|
|
840
|
+
|
|
841
|
+
@staticmethod
|
|
842
|
+
def _calculate_timeline_duration(
|
|
843
|
+
start_server: float | None,
|
|
844
|
+
end_server: float | None,
|
|
845
|
+
start_monotonic: float | None,
|
|
846
|
+
end_monotonic: float,
|
|
847
|
+
) -> float | None:
|
|
848
|
+
"""Pick the most reliable pair of timestamps to derive duration seconds."""
|
|
849
|
+
if start_server is not None and end_server is not None:
|
|
850
|
+
return max(0.0, float(end_server) - float(start_server))
|
|
851
|
+
if start_monotonic is not None:
|
|
852
|
+
try:
|
|
853
|
+
return max(0.0, float(end_monotonic) - float(start_monotonic))
|
|
854
|
+
except Exception:
|
|
855
|
+
return None
|
|
856
|
+
return None
|
|
857
|
+
|
|
858
|
+
@staticmethod
|
|
859
|
+
def _humanize_agent_slug(value: Any) -> str | None:
|
|
860
|
+
"""Convert a slugified agent name into Title Case."""
|
|
861
|
+
if not isinstance(value, str):
|
|
862
|
+
return None
|
|
863
|
+
cleaned = value.replace("_", " ").replace("-", " ").strip()
|
|
864
|
+
if not cleaned:
|
|
865
|
+
return None
|
|
866
|
+
parts = [part for part in cleaned.split() if part]
|
|
867
|
+
return " ".join(part[:1].upper() + part[1:] for part in parts)
|
|
868
|
+
|
|
287
869
|
def _finish_running_steps(self) -> None:
|
|
288
870
|
"""Mark any running steps as finished to avoid lingering spinners."""
|
|
289
871
|
for st in self.steps.by_id.values():
|
|
290
872
|
if not is_step_finished(st):
|
|
291
|
-
|
|
873
|
+
self._mark_incomplete_step(st)
|
|
874
|
+
|
|
875
|
+
def _mark_incomplete_step(self, step: Step) -> None:
|
|
876
|
+
"""Mark a lingering step as incomplete/warning with unknown duration."""
|
|
877
|
+
step.status = "finished"
|
|
878
|
+
step.duration_unknown = True
|
|
879
|
+
if step.duration_ms is None:
|
|
880
|
+
step.duration_ms = 0
|
|
881
|
+
step.duration_source = step.duration_source or "unknown"
|
|
882
|
+
step.status_icon = "warning"
|
|
292
883
|
|
|
293
884
|
def _finish_tool_panels(self) -> None:
|
|
294
885
|
"""Mark unfinished tool panels as finished."""
|
|
@@ -307,52 +898,87 @@ class RichStreamRenderer:
|
|
|
307
898
|
self._shutdown_live()
|
|
308
899
|
|
|
309
900
|
def _print_final_panel_if_needed(self) -> None:
|
|
310
|
-
"""Print final result
|
|
311
|
-
if self.
|
|
312
|
-
|
|
313
|
-
|
|
314
|
-
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
|
|
320
|
-
|
|
901
|
+
"""Print final result when configuration requires it."""
|
|
902
|
+
if self.state.printed_final_output:
|
|
903
|
+
return
|
|
904
|
+
|
|
905
|
+
body = (self.state.final_text or "".join(self.state.buffer) or "").strip()
|
|
906
|
+
if not body:
|
|
907
|
+
return
|
|
908
|
+
|
|
909
|
+
if getattr(self, "_transcript_mode_enabled", False):
|
|
910
|
+
return
|
|
911
|
+
|
|
912
|
+
if self.verbose:
|
|
913
|
+
final_panel = create_final_panel(
|
|
914
|
+
body,
|
|
915
|
+
title=self._final_panel_title(),
|
|
916
|
+
theme=self.cfg.theme,
|
|
917
|
+
)
|
|
918
|
+
self.console.print(final_panel)
|
|
919
|
+
self.state.printed_final_output = True
|
|
321
920
|
|
|
322
921
|
def on_complete(self, stats: RunStats) -> None:
|
|
323
922
|
"""Handle completion event."""
|
|
324
923
|
self.state.finalizing_ui = True
|
|
325
924
|
|
|
326
|
-
|
|
925
|
+
self._handle_stats_duration(stats)
|
|
926
|
+
self._close_active_thinking_scopes(self.state.final_duration_seconds)
|
|
927
|
+
self._cleanup_ui_elements()
|
|
928
|
+
self._finalize_display()
|
|
929
|
+
self._print_completion_message()
|
|
930
|
+
|
|
931
|
+
def _handle_stats_duration(self, stats: RunStats) -> None:
|
|
932
|
+
"""Handle stats processing and duration calculation."""
|
|
933
|
+
if not isinstance(stats, RunStats):
|
|
934
|
+
return
|
|
935
|
+
|
|
936
|
+
duration = None
|
|
937
|
+
try:
|
|
938
|
+
if stats.finished_at is not None and stats.started_at is not None:
|
|
939
|
+
duration = max(0.0, float(stats.finished_at) - float(stats.started_at))
|
|
940
|
+
except Exception:
|
|
327
941
|
duration = None
|
|
328
|
-
try:
|
|
329
|
-
if stats.finished_at is not None and stats.started_at is not None:
|
|
330
|
-
duration = max(
|
|
331
|
-
0.0, float(stats.finished_at) - float(stats.started_at)
|
|
332
|
-
)
|
|
333
|
-
except Exception:
|
|
334
|
-
duration = None
|
|
335
942
|
|
|
336
|
-
|
|
337
|
-
|
|
943
|
+
if duration is not None:
|
|
944
|
+
self._update_final_duration(duration, overwrite=True)
|
|
338
945
|
|
|
946
|
+
def _cleanup_ui_elements(self) -> None:
|
|
947
|
+
"""Clean up running UI elements."""
|
|
339
948
|
# Mark any running steps as finished to avoid lingering spinners
|
|
340
949
|
self._finish_running_steps()
|
|
341
950
|
|
|
342
951
|
# Mark unfinished tool panels as finished
|
|
343
952
|
self._finish_tool_panels()
|
|
344
953
|
|
|
954
|
+
def _finalize_display(self) -> None:
|
|
955
|
+
"""Finalize live display and render final output."""
|
|
345
956
|
# Final refresh
|
|
346
957
|
self._ensure_live()
|
|
347
958
|
|
|
348
959
|
# Stop live display
|
|
349
960
|
self._stop_live_display()
|
|
350
961
|
|
|
351
|
-
#
|
|
962
|
+
# Render final output based on configuration
|
|
352
963
|
self._print_final_panel_if_needed()
|
|
353
964
|
|
|
965
|
+
def _print_completion_message(self) -> None:
|
|
966
|
+
"""Print completion message based on current mode."""
|
|
967
|
+
if self._transcript_mode_enabled:
|
|
968
|
+
try:
|
|
969
|
+
self.console.print(
|
|
970
|
+
"[dim]Run finished. Press Ctrl+T to return to the summary view or stay here to inspect events. Use the post-run viewer for export.[/dim]"
|
|
971
|
+
)
|
|
972
|
+
except Exception:
|
|
973
|
+
pass
|
|
974
|
+
else:
|
|
975
|
+
# No transcript toggle in summary mode; nothing to print here.
|
|
976
|
+
return
|
|
977
|
+
|
|
354
978
|
def _ensure_live(self) -> None:
|
|
355
979
|
"""Ensure live display is updated."""
|
|
980
|
+
if getattr(self, "_transcript_mode_enabled", False):
|
|
981
|
+
return
|
|
356
982
|
if not self._ensure_live_stack():
|
|
357
983
|
return
|
|
358
984
|
|
|
@@ -360,6 +986,12 @@ class RichStreamRenderer:
|
|
|
360
986
|
|
|
361
987
|
if self.live:
|
|
362
988
|
self._refresh_live_panels()
|
|
989
|
+
if (
|
|
990
|
+
not self._transcript_mode_enabled
|
|
991
|
+
and not self.state.finalizing_ui
|
|
992
|
+
and not self._summary_hint_printed_once
|
|
993
|
+
):
|
|
994
|
+
self._print_summary_hint(force=True)
|
|
363
995
|
|
|
364
996
|
def _ensure_live_stack(self) -> bool:
|
|
365
997
|
"""Guarantee the console exposes the internal live stack Rich expects."""
|
|
@@ -406,8 +1038,7 @@ class RichStreamRenderer:
|
|
|
406
1038
|
title="Steps",
|
|
407
1039
|
border_style="blue",
|
|
408
1040
|
)
|
|
409
|
-
|
|
410
|
-
panels = self._build_live_panels(main_panel, steps_panel, tool_panels)
|
|
1041
|
+
panels = self._build_live_panels(main_panel, steps_panel)
|
|
411
1042
|
|
|
412
1043
|
self.live.update(Group(*panels))
|
|
413
1044
|
|
|
@@ -415,17 +1046,12 @@ class RichStreamRenderer:
|
|
|
415
1046
|
self,
|
|
416
1047
|
main_panel: Any,
|
|
417
1048
|
steps_panel: Any,
|
|
418
|
-
tool_panels: list[Any],
|
|
419
1049
|
) -> list[Any]:
|
|
420
1050
|
"""Assemble the panel order for the live display."""
|
|
421
1051
|
if self.verbose:
|
|
422
|
-
return [main_panel, steps_panel
|
|
1052
|
+
return [main_panel, steps_panel]
|
|
423
1053
|
|
|
424
|
-
|
|
425
|
-
if tool_panels:
|
|
426
|
-
panels.extend(tool_panels)
|
|
427
|
-
panels.append(main_panel)
|
|
428
|
-
return panels
|
|
1054
|
+
return [steps_panel, main_panel]
|
|
429
1055
|
|
|
430
1056
|
def _render_main_panel(self) -> Any:
|
|
431
1057
|
"""Render the main content panel."""
|
|
@@ -469,12 +1095,166 @@ class RichStreamRenderer:
|
|
|
469
1095
|
if self.cfg.live:
|
|
470
1096
|
self._ensure_live()
|
|
471
1097
|
|
|
472
|
-
|
|
473
|
-
|
|
1098
|
+
# ------------------------------------------------------------------
|
|
1099
|
+
# Transcript helpers
|
|
1100
|
+
# ------------------------------------------------------------------
|
|
1101
|
+
@property
|
|
1102
|
+
def transcript_mode_enabled(self) -> bool:
|
|
1103
|
+
"""Return True when transcript mode is currently active."""
|
|
1104
|
+
return self._transcript_mode_enabled
|
|
1105
|
+
|
|
1106
|
+
def toggle_transcript_mode(self) -> None:
|
|
1107
|
+
"""Flip transcript mode on/off."""
|
|
1108
|
+
self.set_transcript_mode(not self._transcript_mode_enabled)
|
|
1109
|
+
|
|
1110
|
+
def set_transcript_mode(self, enabled: bool) -> None:
|
|
1111
|
+
"""Set transcript mode explicitly."""
|
|
1112
|
+
if enabled == self._transcript_mode_enabled:
|
|
1113
|
+
return
|
|
1114
|
+
|
|
1115
|
+
self._transcript_mode_enabled = enabled
|
|
1116
|
+
self.apply_verbosity(enabled)
|
|
1117
|
+
|
|
1118
|
+
if enabled:
|
|
1119
|
+
self._summary_hint_printed_once = False
|
|
1120
|
+
self._transcript_hint_printed_once = False
|
|
1121
|
+
self._transcript_header_printed = False
|
|
1122
|
+
self._transcript_enabled_message_printed = False
|
|
1123
|
+
self._stop_live_display()
|
|
1124
|
+
self._clear_console_safe()
|
|
1125
|
+
self._print_transcript_enabled_message()
|
|
1126
|
+
self._render_transcript_backfill()
|
|
1127
|
+
else:
|
|
1128
|
+
self._transcript_hint_printed_once = False
|
|
1129
|
+
self._transcript_header_printed = False
|
|
1130
|
+
self._transcript_enabled_message_printed = False
|
|
1131
|
+
self._clear_console_safe()
|
|
1132
|
+
self._render_summary_static_sections()
|
|
1133
|
+
summary_notice = (
|
|
1134
|
+
"[dim]Returning to the summary view. Streaming will continue here.[/dim]"
|
|
1135
|
+
if not self.state.finalizing_ui
|
|
1136
|
+
else "[dim]Returning to the summary view.[/dim]"
|
|
1137
|
+
)
|
|
1138
|
+
self.console.print(summary_notice)
|
|
1139
|
+
if self.live:
|
|
1140
|
+
self._refresh_live_panels()
|
|
1141
|
+
else:
|
|
1142
|
+
steps_renderable = self._render_steps_text()
|
|
1143
|
+
steps_panel = AIPPanel(
|
|
1144
|
+
steps_renderable,
|
|
1145
|
+
title="Steps",
|
|
1146
|
+
border_style="blue",
|
|
1147
|
+
)
|
|
1148
|
+
self.console.print(steps_panel)
|
|
1149
|
+
self.console.print(self._render_main_panel())
|
|
1150
|
+
if not self.state.finalizing_ui:
|
|
1151
|
+
self._print_summary_hint(force=True)
|
|
1152
|
+
|
|
1153
|
+
def _clear_console_safe(self) -> None:
|
|
1154
|
+
"""Best-effort console clear that ignores platform quirks."""
|
|
1155
|
+
try:
|
|
1156
|
+
self.console.clear()
|
|
1157
|
+
except Exception:
|
|
1158
|
+
pass
|
|
1159
|
+
|
|
1160
|
+
def _print_transcript_hint(self) -> None:
|
|
1161
|
+
"""Render the transcript toggle hint, keeping it near the bottom."""
|
|
1162
|
+
if not self._transcript_mode_enabled:
|
|
1163
|
+
return
|
|
1164
|
+
try:
|
|
1165
|
+
self.console.print(self._transcript_hint_message)
|
|
1166
|
+
except Exception:
|
|
1167
|
+
pass
|
|
1168
|
+
else:
|
|
1169
|
+
self._transcript_hint_printed_once = True
|
|
1170
|
+
|
|
1171
|
+
def _print_transcript_enabled_message(self) -> None:
|
|
1172
|
+
if self._transcript_enabled_message_printed:
|
|
1173
|
+
return
|
|
1174
|
+
try:
|
|
1175
|
+
self.console.print(
|
|
1176
|
+
"[dim]Transcript mode enabled — streaming raw transcript events.[/dim]"
|
|
1177
|
+
)
|
|
1178
|
+
except Exception:
|
|
1179
|
+
pass
|
|
1180
|
+
else:
|
|
1181
|
+
self._transcript_enabled_message_printed = True
|
|
1182
|
+
|
|
1183
|
+
def _ensure_transcript_header(self) -> None:
|
|
1184
|
+
if self._transcript_header_printed:
|
|
1185
|
+
return
|
|
1186
|
+
try:
|
|
1187
|
+
self.console.rule("Transcript Events")
|
|
1188
|
+
except Exception:
|
|
1189
|
+
self._transcript_header_printed = True
|
|
1190
|
+
return
|
|
1191
|
+
self._transcript_header_printed = True
|
|
1192
|
+
|
|
1193
|
+
def _print_summary_hint(self, force: bool = False) -> None:
|
|
1194
|
+
"""Show the summary-mode toggle hint."""
|
|
1195
|
+
controller = getattr(self, "transcript_controller", None)
|
|
1196
|
+
if controller and not getattr(controller, "enabled", False):
|
|
1197
|
+
if not force:
|
|
1198
|
+
self._summary_hint_printed_once = True
|
|
1199
|
+
return
|
|
1200
|
+
if not force and self._summary_hint_printed_once:
|
|
1201
|
+
return
|
|
1202
|
+
try:
|
|
1203
|
+
self.console.print(self._summary_hint_message)
|
|
1204
|
+
except Exception:
|
|
1205
|
+
return
|
|
1206
|
+
self._summary_hint_printed_once = True
|
|
1207
|
+
|
|
1208
|
+
def _render_transcript_backfill(self) -> None:
|
|
1209
|
+
"""Render any captured events that haven't been shown in transcript mode."""
|
|
1210
|
+
pending = self.state.events[self._transcript_render_cursor :]
|
|
1211
|
+
self._ensure_transcript_header()
|
|
1212
|
+
if not pending:
|
|
1213
|
+
self._print_transcript_hint()
|
|
1214
|
+
return
|
|
1215
|
+
|
|
1216
|
+
baseline = self.state.streaming_started_event_ts
|
|
1217
|
+
for ev in pending:
|
|
1218
|
+
received_ts = _coerce_received_at(ev.get("received_at"))
|
|
1219
|
+
render_debug_event(
|
|
1220
|
+
ev,
|
|
1221
|
+
self.console,
|
|
1222
|
+
received_ts=received_ts,
|
|
1223
|
+
baseline_ts=baseline,
|
|
1224
|
+
)
|
|
1225
|
+
|
|
1226
|
+
self._transcript_render_cursor = len(self.state.events)
|
|
1227
|
+
self._print_transcript_hint()
|
|
1228
|
+
|
|
1229
|
+
def _capture_event(
|
|
1230
|
+
self, ev: dict[str, Any], received_at: datetime | None = None
|
|
474
1231
|
) -> None:
|
|
475
|
-
"""
|
|
476
|
-
|
|
477
|
-
|
|
1232
|
+
"""Capture a deep copy of SSE events for transcript replay."""
|
|
1233
|
+
try:
|
|
1234
|
+
captured = json.loads(json.dumps(ev))
|
|
1235
|
+
except Exception:
|
|
1236
|
+
captured = ev
|
|
1237
|
+
|
|
1238
|
+
if received_at is not None:
|
|
1239
|
+
try:
|
|
1240
|
+
captured["received_at"] = received_at.isoformat()
|
|
1241
|
+
except Exception:
|
|
1242
|
+
try:
|
|
1243
|
+
captured["received_at"] = str(received_at)
|
|
1244
|
+
except Exception:
|
|
1245
|
+
captured["received_at"] = repr(received_at)
|
|
1246
|
+
|
|
1247
|
+
self.state.events.append(captured)
|
|
1248
|
+
if self._transcript_mode_enabled:
|
|
1249
|
+
self._transcript_render_cursor = len(self.state.events)
|
|
1250
|
+
|
|
1251
|
+
def get_aggregated_output(self) -> str:
|
|
1252
|
+
"""Return the concatenated assistant output collected so far."""
|
|
1253
|
+
return ("".join(self.state.buffer or [])).strip()
|
|
1254
|
+
|
|
1255
|
+
def get_transcript_events(self) -> list[dict[str, Any]]:
|
|
1256
|
+
"""Return captured SSE events."""
|
|
1257
|
+
return list(self.state.events)
|
|
478
1258
|
|
|
479
1259
|
def _ensure_tool_panel(
|
|
480
1260
|
self, name: str, args: Any, task_id: str, context_id: str
|
|
@@ -506,7 +1286,6 @@ class RichStreamRenderer:
|
|
|
506
1286
|
except Exception:
|
|
507
1287
|
args_content = f"**Args:**\n{args}\n\n"
|
|
508
1288
|
self.tool_panels[tool_sid]["chunks"].append(args_content)
|
|
509
|
-
self.tool_order.append(tool_sid)
|
|
510
1289
|
|
|
511
1290
|
return tool_sid
|
|
512
1291
|
|
|
@@ -517,8 +1296,13 @@ class RichStreamRenderer:
|
|
|
517
1296
|
tool_name: str,
|
|
518
1297
|
tool_args: Any,
|
|
519
1298
|
_tool_sid: str,
|
|
1299
|
+
*,
|
|
1300
|
+
tracked_step: Step | None = None,
|
|
520
1301
|
) -> Step | None:
|
|
521
1302
|
"""Start or get a step for a tool."""
|
|
1303
|
+
if tracked_step is not None:
|
|
1304
|
+
return tracked_step
|
|
1305
|
+
|
|
522
1306
|
if is_delegation_tool(tool_name):
|
|
523
1307
|
st = self.steps.start_or_get(
|
|
524
1308
|
task_id=task_id,
|
|
@@ -736,8 +1520,13 @@ class RichStreamRenderer:
|
|
|
736
1520
|
finished_tool_output: Any,
|
|
737
1521
|
task_id: str,
|
|
738
1522
|
context_id: str,
|
|
1523
|
+
*,
|
|
1524
|
+
tracked_step: Step | None = None,
|
|
739
1525
|
) -> None:
|
|
740
1526
|
"""Finish the corresponding step for a completed tool."""
|
|
1527
|
+
if tracked_step is not None:
|
|
1528
|
+
return
|
|
1529
|
+
|
|
741
1530
|
step_duration = self._get_step_duration(finished_tool_name, task_id, context_id)
|
|
742
1531
|
|
|
743
1532
|
if is_delegation_tool(finished_tool_name):
|
|
@@ -852,11 +1641,13 @@ class RichStreamRenderer:
|
|
|
852
1641
|
tool_args: Any,
|
|
853
1642
|
_tool_out: Any,
|
|
854
1643
|
tool_calls_info: list[tuple[str, Any, Any]],
|
|
1644
|
+
*,
|
|
1645
|
+
tracked_step: Step | None = None,
|
|
855
1646
|
) -> None:
|
|
856
1647
|
"""Handle agent step event."""
|
|
857
1648
|
metadata = event.get("metadata", {})
|
|
858
|
-
task_id = event.get("task_id")
|
|
859
|
-
context_id = event.get("context_id")
|
|
1649
|
+
task_id = event.get("task_id") or metadata.get("task_id")
|
|
1650
|
+
context_id = event.get("context_id") or metadata.get("context_id")
|
|
860
1651
|
content = event.get("content", "")
|
|
861
1652
|
|
|
862
1653
|
# Create steps and panels for the primary tool
|
|
@@ -864,7 +1655,14 @@ class RichStreamRenderer:
|
|
|
864
1655
|
tool_sid = self._ensure_tool_panel(
|
|
865
1656
|
tool_name, tool_args, task_id, context_id
|
|
866
1657
|
)
|
|
867
|
-
self._start_tool_step(
|
|
1658
|
+
self._start_tool_step(
|
|
1659
|
+
task_id,
|
|
1660
|
+
context_id,
|
|
1661
|
+
tool_name,
|
|
1662
|
+
tool_args,
|
|
1663
|
+
tool_sid,
|
|
1664
|
+
tracked_step=tracked_step,
|
|
1665
|
+
)
|
|
868
1666
|
|
|
869
1667
|
# Handle additional tool calls
|
|
870
1668
|
self._process_additional_tool_calls(
|
|
@@ -883,7 +1681,11 @@ class RichStreamRenderer:
|
|
|
883
1681
|
finished_tool_name, finished_tool_output, task_id, context_id
|
|
884
1682
|
)
|
|
885
1683
|
self._finish_tool_step(
|
|
886
|
-
finished_tool_name,
|
|
1684
|
+
finished_tool_name,
|
|
1685
|
+
finished_tool_output,
|
|
1686
|
+
task_id,
|
|
1687
|
+
context_id,
|
|
1688
|
+
tracked_step=tracked_step,
|
|
887
1689
|
)
|
|
888
1690
|
self._create_tool_snapshot(finished_tool_name, task_id, context_id)
|
|
889
1691
|
|
|
@@ -993,30 +1795,45 @@ class RichStreamRenderer:
|
|
|
993
1795
|
def _get_step_icon(self, step_kind: str) -> str:
|
|
994
1796
|
"""Get icon for step kind."""
|
|
995
1797
|
if step_kind == "tool":
|
|
996
|
-
return
|
|
1798
|
+
return ICON_TOOL_STEP
|
|
997
1799
|
elif step_kind == "delegate":
|
|
998
|
-
return
|
|
1800
|
+
return ICON_DELEGATE
|
|
999
1801
|
elif step_kind == "agent":
|
|
1000
|
-
return
|
|
1802
|
+
return ICON_AGENT_STEP
|
|
1001
1803
|
return ""
|
|
1002
1804
|
|
|
1003
1805
|
def _format_step_status(self, step: Step) -> str:
|
|
1004
1806
|
"""Format step status with elapsed time or duration."""
|
|
1005
1807
|
if is_step_finished(step):
|
|
1006
|
-
|
|
1007
|
-
return LESS_THAN_1MS
|
|
1008
|
-
elif step.duration_ms >= 1000:
|
|
1009
|
-
return f"[{step.duration_ms / 1000:.2f}s]"
|
|
1010
|
-
elif step.duration_ms > 0:
|
|
1011
|
-
return f"[{step.duration_ms}ms]"
|
|
1012
|
-
return LESS_THAN_1MS
|
|
1808
|
+
return self._format_finished_badge(step)
|
|
1013
1809
|
else:
|
|
1014
1810
|
# Calculate elapsed time for running steps
|
|
1015
1811
|
elapsed = self._calculate_step_elapsed_time(step)
|
|
1016
|
-
if elapsed >= 1:
|
|
1812
|
+
if elapsed >= 0.1:
|
|
1017
1813
|
return f"[{elapsed:.2f}s]"
|
|
1018
|
-
ms = int(elapsed * 1000)
|
|
1019
|
-
|
|
1814
|
+
ms = int(round(elapsed * 1000))
|
|
1815
|
+
if ms <= 0:
|
|
1816
|
+
return ""
|
|
1817
|
+
return f"[{ms}ms]"
|
|
1818
|
+
|
|
1819
|
+
def _format_finished_badge(self, step: Step) -> str:
|
|
1820
|
+
"""Compose duration badge for finished steps including source tagging."""
|
|
1821
|
+
if getattr(step, "duration_unknown", False) is True:
|
|
1822
|
+
payload = "??s"
|
|
1823
|
+
else:
|
|
1824
|
+
duration_ms = step.duration_ms
|
|
1825
|
+
if duration_ms is None:
|
|
1826
|
+
payload = "<1ms"
|
|
1827
|
+
elif duration_ms < 0:
|
|
1828
|
+
payload = "<1ms"
|
|
1829
|
+
elif duration_ms >= 100:
|
|
1830
|
+
payload = f"{duration_ms / 1000:.2f}s"
|
|
1831
|
+
elif duration_ms > 0:
|
|
1832
|
+
payload = f"{duration_ms}ms"
|
|
1833
|
+
else:
|
|
1834
|
+
payload = "<1ms"
|
|
1835
|
+
|
|
1836
|
+
return f"[{payload}]"
|
|
1020
1837
|
|
|
1021
1838
|
def _calculate_step_elapsed_time(self, step: Step) -> float:
|
|
1022
1839
|
"""Calculate elapsed time for a running step."""
|
|
@@ -1039,6 +1856,21 @@ class RichStreamRenderer:
|
|
|
1039
1856
|
return step.name
|
|
1040
1857
|
return "thinking..." if step.kind == "agent" else f"{step.kind} step"
|
|
1041
1858
|
|
|
1859
|
+
def _resolve_step_label(self, step: Step) -> str:
|
|
1860
|
+
"""Return the display label for a step with sensible fallbacks."""
|
|
1861
|
+
raw_label = getattr(step, "display_label", None)
|
|
1862
|
+
label = raw_label.strip() if isinstance(raw_label, str) else ""
|
|
1863
|
+
if label:
|
|
1864
|
+
return normalise_display_label(label)
|
|
1865
|
+
|
|
1866
|
+
if not (step.name or "").strip():
|
|
1867
|
+
return "Unknown step detail"
|
|
1868
|
+
|
|
1869
|
+
icon = self._get_step_icon(step.kind)
|
|
1870
|
+
base_name = self._get_step_display_name(step)
|
|
1871
|
+
fallback = " ".join(part for part in (icon, base_name) if part).strip()
|
|
1872
|
+
return normalise_display_label(fallback)
|
|
1873
|
+
|
|
1042
1874
|
def _check_parallel_tools(self) -> dict[tuple[str | None, str | None], list]:
|
|
1043
1875
|
"""Check for parallel running tools."""
|
|
1044
1876
|
running_by_ctx: dict[tuple[str | None, str | None], list] = {}
|
|
@@ -1049,43 +1881,363 @@ class RichStreamRenderer:
|
|
|
1049
1881
|
running_by_ctx.setdefault(key, []).append(st)
|
|
1050
1882
|
return running_by_ctx
|
|
1051
1883
|
|
|
1052
|
-
def
|
|
1053
|
-
|
|
1054
|
-
|
|
1055
|
-
|
|
1884
|
+
def _is_parallel_tool(
|
|
1885
|
+
self,
|
|
1886
|
+
step: Step,
|
|
1887
|
+
running_by_ctx: dict[tuple[str | None, str | None], list],
|
|
1888
|
+
) -> bool:
|
|
1889
|
+
"""Return True if multiple tools are running in the same context."""
|
|
1890
|
+
key = (step.task_id, step.context_id)
|
|
1891
|
+
return len(running_by_ctx.get(key, [])) > 1
|
|
1892
|
+
|
|
1893
|
+
def _compose_step_renderable(
|
|
1894
|
+
self,
|
|
1895
|
+
step: Step,
|
|
1896
|
+
branch_state: tuple[bool, ...],
|
|
1897
|
+
) -> Any:
|
|
1898
|
+
"""Compose a single renderable for the hierarchical steps panel."""
|
|
1899
|
+
prefix = build_connector_prefix(branch_state)
|
|
1900
|
+
text_line = self._build_step_text_line(step, prefix)
|
|
1901
|
+
renderables = self._wrap_step_text(step, text_line)
|
|
1056
1902
|
|
|
1057
|
-
|
|
1058
|
-
|
|
1903
|
+
args_renderable = self._build_args_renderable(step, prefix)
|
|
1904
|
+
if args_renderable is not None:
|
|
1905
|
+
renderables.append(args_renderable)
|
|
1059
1906
|
|
|
1060
|
-
|
|
1061
|
-
st = self.steps.by_id[sid]
|
|
1062
|
-
status_br = self._format_step_status(st)
|
|
1063
|
-
display_name = self._get_step_display_name(st)
|
|
1064
|
-
tail = " ✓" if is_step_finished(st) else ""
|
|
1907
|
+
return self._collapse_renderables(renderables)
|
|
1065
1908
|
|
|
1066
|
-
|
|
1067
|
-
|
|
1068
|
-
|
|
1069
|
-
|
|
1070
|
-
|
|
1909
|
+
def _build_step_text_line(
|
|
1910
|
+
self,
|
|
1911
|
+
step: Step,
|
|
1912
|
+
prefix: str,
|
|
1913
|
+
) -> Text:
|
|
1914
|
+
"""Create the textual portion of a step renderable."""
|
|
1915
|
+
text_line = Text()
|
|
1916
|
+
text_line.append(prefix, style="dim")
|
|
1917
|
+
text_line.append(self._resolve_step_label(step))
|
|
1918
|
+
|
|
1919
|
+
status_badge = self._format_step_status(step)
|
|
1920
|
+
self._append_status_badge(text_line, step, status_badge)
|
|
1921
|
+
self._append_state_glyph(text_line, step)
|
|
1922
|
+
return text_line
|
|
1923
|
+
|
|
1924
|
+
def _append_status_badge(
|
|
1925
|
+
self, text_line: Text, step: Step, status_badge: str
|
|
1926
|
+
) -> None:
|
|
1927
|
+
"""Append the formatted status badge when available."""
|
|
1928
|
+
glyph_key = getattr(step, "status_icon", None)
|
|
1929
|
+
glyph = glyph_for_status(glyph_key)
|
|
1930
|
+
|
|
1931
|
+
if status_badge:
|
|
1932
|
+
text_line.append(" ")
|
|
1933
|
+
text_line.append(status_badge, style="cyan")
|
|
1934
|
+
|
|
1935
|
+
if glyph:
|
|
1936
|
+
text_line.append(" ")
|
|
1937
|
+
style = self._status_icon_style(glyph_key)
|
|
1938
|
+
if style:
|
|
1939
|
+
text_line.append(glyph, style=style)
|
|
1940
|
+
else:
|
|
1941
|
+
text_line.append(glyph)
|
|
1942
|
+
|
|
1943
|
+
def _append_state_glyph(self, text_line: Text, step: Step) -> None:
|
|
1944
|
+
"""Append glyph/failure markers in a single place."""
|
|
1945
|
+
failure_reason = (step.failure_reason or "").strip()
|
|
1946
|
+
if failure_reason:
|
|
1947
|
+
text_line.append(f" {failure_reason}")
|
|
1948
|
+
|
|
1949
|
+
@staticmethod
|
|
1950
|
+
def _status_icon_style(icon_key: str | None) -> str | None:
|
|
1951
|
+
"""Return style for a given status icon."""
|
|
1952
|
+
if not icon_key:
|
|
1953
|
+
return None
|
|
1954
|
+
return STATUS_ICON_STYLES.get(icon_key)
|
|
1955
|
+
|
|
1956
|
+
def _wrap_step_text(self, step: Step, text_line: Text) -> list[Any]:
|
|
1957
|
+
"""Return the base text, optionally decorated with a trailing spinner."""
|
|
1958
|
+
if getattr(step, "status", None) == "running":
|
|
1959
|
+
spinner = self._step_spinners.get(step.step_id)
|
|
1960
|
+
if spinner is None:
|
|
1961
|
+
spinner = Spinner("dots", style="dim")
|
|
1962
|
+
self._step_spinners[step.step_id] = spinner
|
|
1963
|
+
return [TrailingSpinnerLine(text_line, spinner)]
|
|
1964
|
+
|
|
1965
|
+
self._step_spinners.pop(step.step_id, None)
|
|
1966
|
+
return [text_line]
|
|
1967
|
+
|
|
1968
|
+
def _collapse_renderables(self, renderables: list[Any]) -> Any:
|
|
1969
|
+
"""Collapse a list of renderables into a single object."""
|
|
1970
|
+
if not renderables:
|
|
1971
|
+
return None
|
|
1071
1972
|
|
|
1072
|
-
|
|
1073
|
-
|
|
1973
|
+
if len(renderables) == 1:
|
|
1974
|
+
return renderables[0]
|
|
1074
1975
|
|
|
1075
|
-
return
|
|
1976
|
+
return Group(*renderables)
|
|
1076
1977
|
|
|
1077
|
-
def
|
|
1078
|
-
"""
|
|
1079
|
-
if
|
|
1080
|
-
return
|
|
1978
|
+
def _build_args_renderable(self, step: Step, prefix: str) -> Text | Group | None:
|
|
1979
|
+
"""Build a dimmed argument line for tool or agent steps."""
|
|
1980
|
+
if step.kind not in {"tool", "delegate", "agent"}:
|
|
1981
|
+
return None
|
|
1982
|
+
if step.kind == "agent" and step.parent_id:
|
|
1983
|
+
return None
|
|
1984
|
+
formatted_args = self._format_step_args(step)
|
|
1985
|
+
if not formatted_args:
|
|
1986
|
+
return None
|
|
1987
|
+
if isinstance(formatted_args, list):
|
|
1988
|
+
return self._build_arg_list(prefix, formatted_args)
|
|
1989
|
+
|
|
1990
|
+
args_text = Text()
|
|
1991
|
+
args_text.append(prefix, style="dim")
|
|
1992
|
+
args_text.append(" " * 5)
|
|
1993
|
+
args_text.append(formatted_args, style="dim")
|
|
1994
|
+
return args_text
|
|
1995
|
+
|
|
1996
|
+
def _build_arg_list(
|
|
1997
|
+
self, prefix: str, formatted_args: list[str | tuple[int, str]]
|
|
1998
|
+
) -> Group | None:
|
|
1999
|
+
"""Render multi-line argument entries preserving indentation."""
|
|
2000
|
+
arg_lines: list[Text] = []
|
|
2001
|
+
for indent_level, text_value in self._iter_arg_entries(formatted_args):
|
|
2002
|
+
arg_text = Text()
|
|
2003
|
+
arg_text.append(prefix, style="dim")
|
|
2004
|
+
arg_text.append(" " * 5)
|
|
2005
|
+
arg_text.append(" " * (indent_level * 2))
|
|
2006
|
+
arg_text.append(text_value, style="dim")
|
|
2007
|
+
arg_lines.append(arg_text)
|
|
2008
|
+
if not arg_lines:
|
|
2009
|
+
return None
|
|
2010
|
+
return Group(*arg_lines)
|
|
2011
|
+
|
|
2012
|
+
@staticmethod
|
|
2013
|
+
def _iter_arg_entries(
|
|
2014
|
+
formatted_args: list[str | tuple[int, str]],
|
|
2015
|
+
) -> Iterable[tuple[int, str]]:
|
|
2016
|
+
"""Yield normalized indentation/value pairs for argument entries."""
|
|
2017
|
+
for value in formatted_args:
|
|
2018
|
+
if isinstance(value, tuple) and len(value) == 2:
|
|
2019
|
+
indent_level, text_value = value
|
|
2020
|
+
yield indent_level, str(text_value)
|
|
2021
|
+
else:
|
|
2022
|
+
yield 0, str(value)
|
|
2023
|
+
|
|
2024
|
+
def _format_step_args(
|
|
2025
|
+
self, step: Step
|
|
2026
|
+
) -> str | list[str] | list[tuple[int, str]] | None:
|
|
2027
|
+
"""Return a printable representation of tool arguments."""
|
|
2028
|
+
args = getattr(step, "args", None)
|
|
2029
|
+
if args is None:
|
|
2030
|
+
return None
|
|
2031
|
+
|
|
2032
|
+
if isinstance(args, dict):
|
|
2033
|
+
return self._format_dict_args(args, step=step)
|
|
2034
|
+
|
|
2035
|
+
if isinstance(args, (list, tuple)):
|
|
2036
|
+
return self._safe_pretty_args(list(args))
|
|
2037
|
+
|
|
2038
|
+
if isinstance(args, (str, int, float)):
|
|
2039
|
+
return self._stringify_args(args)
|
|
2040
|
+
|
|
2041
|
+
return None
|
|
2042
|
+
|
|
2043
|
+
def _format_dict_args(
|
|
2044
|
+
self, args: dict[str, Any], *, step: Step
|
|
2045
|
+
) -> str | list[str] | list[tuple[int, str]] | None:
|
|
2046
|
+
"""Format dictionary arguments with guardrails."""
|
|
2047
|
+
if not args:
|
|
2048
|
+
return None
|
|
2049
|
+
|
|
2050
|
+
masked_args = self._redact_arg_payload(args)
|
|
2051
|
+
|
|
2052
|
+
if self._should_collapse_single_query(step):
|
|
2053
|
+
single_query = self._extract_single_query_arg(masked_args)
|
|
2054
|
+
if single_query:
|
|
2055
|
+
return single_query
|
|
1081
2056
|
|
|
1082
|
-
|
|
2057
|
+
return self._format_dict_arg_lines(masked_args)
|
|
2058
|
+
|
|
2059
|
+
@staticmethod
|
|
2060
|
+
def _extract_single_query_arg(args: dict[str, Any]) -> str | None:
|
|
2061
|
+
"""Return a trimmed query argument when it is the only entry."""
|
|
2062
|
+
if len(args) != 1:
|
|
2063
|
+
return None
|
|
2064
|
+
key, value = next(iter(args.items()))
|
|
2065
|
+
if key != "query" or not isinstance(value, str):
|
|
2066
|
+
return None
|
|
2067
|
+
stripped = value.strip()
|
|
2068
|
+
return stripped or None
|
|
2069
|
+
|
|
2070
|
+
@staticmethod
|
|
2071
|
+
def _redact_arg_payload(args: dict[str, Any]) -> dict[str, Any]:
|
|
2072
|
+
"""Apply best-effort masking before rendering arguments."""
|
|
2073
|
+
try:
|
|
2074
|
+
cleaned = redact_sensitive(args)
|
|
2075
|
+
return cleaned if isinstance(cleaned, dict) else args
|
|
2076
|
+
except Exception:
|
|
2077
|
+
return args
|
|
2078
|
+
|
|
2079
|
+
@staticmethod
def _should_collapse_single_query(step: Step) -> bool:
    """Return True when we should display raw query text.

    Only agent and delegate steps collapse a lone ``query`` argument down to
    its raw text; every other step kind keeps the key/value layout.
    """
    # Idiom: two identical-return branches folded into one membership test.
    return step.kind in ("agent", "delegate")
|
|
1084
2087
|
|
|
1085
|
-
|
|
1086
|
-
|
|
1087
|
-
|
|
1088
|
-
|
|
2088
|
+
def _format_dict_arg_lines(
    self, args: dict[str, Any]
) -> list[tuple[int, str]] | None:
    """Render dictionary arguments as nested YAML-style lines.

    Each entry becomes one or more ``(indent_level, text)`` tuples; an empty
    result collapses to None so callers can skip the section entirely.
    """
    rendered: list[tuple[int, str]] = []
    for raw_key, value in args.items():
        rendered += self._format_nested_entry(str(raw_key), value, indent=0)
    return rendered if rendered else None
|
|
2097
|
+
|
|
2098
|
+
def _format_nested_entry(
    self, key: str, value: Any, indent: int
) -> list[tuple[int, str]]:
    """Format a mapping entry recursively.

    Mappings and sequences emit a header line followed by indented children
    (or an inline ``{}`` / ``[]`` marker when empty); scalars emit a single
    ``key: value`` line, dropped when the value cannot be formatted.
    """
    if isinstance(value, dict):
        if not value:
            return [(indent, f"{key}: {{}}")]
        return [(indent, f"{key}:")] + self._format_nested_mapping(
            value, indent + 1
        )

    if isinstance(value, (list, tuple, set)):
        children = self._format_sequence_entries(list(value), indent + 1)
        if not children:
            return [(indent, f"{key}: []")]
        return [(indent, f"{key}:")] + children

    rendered = self._format_arg_value(value)
    if rendered is None:
        return []
    return [(indent, f"{key}: {rendered}")]
|
|
2125
|
+
|
|
2126
|
+
def _format_nested_mapping(
    self, mapping: dict[str, Any], indent: int
) -> list[tuple[int, str]]:
    """Format nested dictionary values one entry at a time."""
    out: list[tuple[int, str]] = []
    for k, v in mapping.items():
        out += self._format_nested_entry(str(k), v, indent)
    return out
|
|
2135
|
+
|
|
2136
|
+
def _format_sequence_entries(
    self, sequence: list[Any], indent: int
) -> list[tuple[int, str]]:
    """Format list/tuple/set values with YAML-style bullets.

    An empty sequence naturally yields an empty list, which callers use to
    decide between a header + children layout and an inline ``[]`` marker.
    """
    entries: list[tuple[int, str]] = []
    for element in sequence:
        entries += self._format_sequence_item(element, indent)
    return entries
|
|
2147
|
+
|
|
2148
|
+
def _format_sequence_item(self, item: Any, indent: int) -> list[tuple[int, str]]:
    """Format a single list entry.

    Nested containers recurse; scalars become a ``- value`` bullet, and
    unformattable values are silently dropped.
    """
    if isinstance(item, dict):
        return self._format_dict_sequence_item(item, indent)
    if isinstance(item, (list, tuple, set)):
        return self._format_nested_sequence_item(list(item), indent)
    rendered = self._format_arg_value(item)
    return [] if rendered is None else [(indent, f"- {rendered}")]
|
|
2160
|
+
|
|
2161
|
+
def _format_dict_sequence_item(
    self, mapping: dict[str, Any], indent: int
) -> list[tuple[int, str]]:
    """Format a dictionary entry within a list.

    Non-empty mappings get a bullet fused onto their first child line;
    empty ones render as a literal ``- {}``.
    """
    nested = self._format_nested_mapping(mapping, indent + 1)
    if not nested:
        return [(indent, "- {}")]
    return self._prepend_sequence_prefix(nested, indent)
|
|
2169
|
+
|
|
2170
|
+
def _format_nested_sequence_item(
    self, sequence: list[Any], indent: int
) -> list[tuple[int, str]]:
    """Format a nested sequence entry within a list.

    Non-empty sequences get a bullet fused onto their first child line;
    empty ones render as a literal ``- []``.
    """
    nested = self._format_sequence_entries(sequence, indent + 1)
    if not nested:
        return [(indent, "- []")]
    return self._prepend_sequence_prefix(nested, indent)
|
|
2178
|
+
|
|
2179
|
+
@staticmethod
|
|
2180
|
+
def _prepend_sequence_prefix(
|
|
2181
|
+
child_lines: list[tuple[int, str]], indent: int
|
|
2182
|
+
) -> list[tuple[int, str]]:
|
|
2183
|
+
"""Attach a sequence bullet to the first child line."""
|
|
2184
|
+
_, first_text = child_lines[0]
|
|
2185
|
+
prefixed: list[tuple[int, str]] = [(indent, f"- {first_text}")]
|
|
2186
|
+
prefixed.extend(child_lines[1:])
|
|
2187
|
+
return prefixed
|
|
2188
|
+
|
|
2189
|
+
def _format_arg_value(self, value: Any) -> str | None:
|
|
2190
|
+
"""Format a single argument value with per-value truncation."""
|
|
2191
|
+
if value is None:
|
|
2192
|
+
return "null"
|
|
2193
|
+
if isinstance(value, (bool, int, float)):
|
|
2194
|
+
return json.dumps(value, ensure_ascii=False)
|
|
2195
|
+
if isinstance(value, str):
|
|
2196
|
+
return self._format_string_arg_value(value)
|
|
2197
|
+
return _truncate_display(str(value), limit=ARGS_VALUE_MAX_LEN)
|
|
2198
|
+
|
|
2199
|
+
@staticmethod
def _format_string_arg_value(value: str) -> str:
    """Return a trimmed, quoted representation of a string argument.

    Newlines are flattened to spaces, double quotes escaped, and the result
    truncated before wrapping in double quotes.
    """
    flattened = value.replace("\n", " ").strip().replace('"', '\\"')
    return '"' + _truncate_display(flattened, limit=ARGS_VALUE_MAX_LEN) + '"'
|
|
2206
|
+
|
|
2207
|
+
@staticmethod
|
|
2208
|
+
def _safe_pretty_args(args: dict[str, Any]) -> str | None:
|
|
2209
|
+
"""Defensively format argument dictionaries."""
|
|
2210
|
+
try:
|
|
2211
|
+
return pretty_args(args, max_len=160)
|
|
2212
|
+
except Exception:
|
|
2213
|
+
return str(args)
|
|
2214
|
+
|
|
2215
|
+
@staticmethod
|
|
2216
|
+
def _stringify_args(args: Any) -> str | None:
|
|
2217
|
+
"""Format non-dictionary argument payloads."""
|
|
2218
|
+
text = str(args).strip()
|
|
2219
|
+
if not text:
|
|
2220
|
+
return None
|
|
2221
|
+
return _truncate_display(text)
|
|
2222
|
+
|
|
2223
|
+
def _render_steps_text(self) -> Any:
    """Render the steps panel content.

    Walks the step tree in display order, composing one renderable per known
    step; falls back to a dim placeholder when nothing is renderable.
    """
    if not (self.steps.order or self.steps.children):
        return Text("No steps yet", style="dim")

    rendered: list[Any] = []
    for step_id, branch_state in self.steps.iter_tree():
        step = self.steps.by_id.get(step_id)
        if not step:
            # Tree may reference ids that were never registered.
            continue
        piece = self._compose_step_renderable(step, branch_state)
        if piece is not None:
            rendered.append(piece)

    if not rendered:
        return Text("No steps yet", style="dim")
    return Group(*rendered)
|
|
1089
2241
|
|
|
1090
2242
|
def _update_final_duration(
|
|
1091
2243
|
self, duration: float | None, *, overwrite: bool = False
|
|
@@ -1109,20 +2261,7 @@ class RichStreamRenderer:
|
|
|
1109
2261
|
|
|
1110
2262
|
self.state.final_duration_seconds = duration_val
|
|
1111
2263
|
self.state.final_duration_text = self._format_elapsed_time(duration_val)
|
|
1112
|
-
|
|
1113
|
-
def _calculate_elapsed_time(self, meta: dict[str, Any]) -> str:
    """Calculate elapsed time string for running tools.

    Prefers server-reported timing when both endpoints are numeric,
    otherwise falls back to the local monotonic clock.
    """
    server_now = self.stream_processor.server_elapsed_time
    server_start = meta.get("server_started_at")

    if isinstance(server_now, int | float) and isinstance(
        server_start, int | float
    ):
        return self._format_elapsed_time(
            max(0.0, float(server_now) - float(server_start))
        )

    local_start = meta.get("started_at") or 0.0
    return self._format_elapsed_time(max(0.0, monotonic() - local_start))
|
|
2264
|
+
self._apply_root_duration(duration_val)
|
|
1126
2265
|
|
|
1127
2266
|
def _format_elapsed_time(self, elapsed: float) -> str:
|
|
1128
2267
|
"""Format elapsed time as a readable string."""
|
|
@@ -1133,85 +2272,6 @@ class RichStreamRenderer:
|
|
|
1133
2272
|
else:
|
|
1134
2273
|
return "<1ms"
|
|
1135
2274
|
|
|
1136
|
-
def _calculate_finished_duration(self, meta: dict[str, Any]) -> str | None:
    """Calculate duration string for finished tools.

    Resolution order: an explicit ``duration_seconds`` wins; otherwise the
    duration is derived from server timing when available, then from the
    local monotonic clock. Returns None when no duration can be computed.
    """
    dur = meta.get("duration_seconds")
    if isinstance(dur, int | float):
        # Explicit duration provided by the event payload — use it as-is.
        return self._format_elapsed_time(dur)

    # Any failure while deriving a duration (missing/odd values) simply
    # yields no duration rather than breaking the panel render.
    try:
        server_now = self.stream_processor.server_elapsed_time
        server_start = meta.get("server_started_at")
        if isinstance(server_now, int | float) and isinstance(
            server_start, int | float
        ):
            # Server-side clock pair available — preferred source.
            dur = max(0.0, float(server_now) - float(server_start))
        elif meta.get("started_at") is not None:
            # Fall back to the local monotonic clock.
            dur = max(0.0, float(monotonic() - meta.get("started_at")))
    except Exception:
        dur = None

    return self._format_elapsed_time(dur) if isinstance(dur, int | float) else None
|
|
1155
|
-
|
|
1156
|
-
def _process_running_tool_panel(
    self, title: str, meta: dict[str, Any], body: str
) -> tuple[str, str]:
    """Process a running tool panel.

    Returns the title suffixed with the live elapsed time, and the body with
    a stopwatch chip appended (or as the sole content when the body is empty).
    """
    elapsed_str = self._calculate_elapsed_time(meta)
    chip = f"⏱ {elapsed_str}"
    new_body = chip if not body else f"{body}\n\n{chip}"
    return f"{title} · {elapsed_str}", new_body
|
|
1170
|
-
|
|
1171
|
-
def _process_finished_tool_panel(self, title: str, meta: dict[str, Any]) -> str:
    """Process a finished tool panel.

    Appends the computed duration to the title when one is available.
    """
    duration_str = self._calculate_finished_duration(meta)
    if not duration_str:
        return title
    return f"{title} · {duration_str}"
|
|
1175
|
-
|
|
1176
|
-
def _create_tool_panel_for_session(
    self, sid: str, meta: dict[str, Any]
) -> AIPPanel | None:
    """Create a single tool panel for the session.

    Returns None when a finished panel should be suppressed; otherwise
    decorates the title/body according to the run status and builds the
    themed panel.
    """
    status = meta.get("status") or "running"
    if self._should_skip_finished_panel(sid, status):
        return None

    title = meta.get("title") or "Tool"
    body = "".join(meta.get("chunks") or [])
    panel_title = title

    if status == "running":
        panel_title, body = self._process_running_tool_panel(title, meta, body)
    elif status == "finished":
        panel_title = self._process_finished_tool_panel(title, meta)

    return create_tool_panel(
        title=panel_title,
        content=body,
        status=status,
        theme=self.cfg.theme,
        is_delegation=bool(meta.get("is_delegation")),
    )
|
|
1203
|
-
|
|
1204
|
-
def _render_tool_panels(self) -> list[AIPPanel]:
    """Render tool execution output panels in session order."""
    rendered: list[AIPPanel] = []
    for sid in self.tool_order:
        meta = self.tool_panels.get(sid) or {}
        panel = self._create_tool_panel_for_session(sid, meta)
        if panel:
            rendered.append(panel)
    return rendered
|
|
1214
|
-
|
|
1215
2275
|
def _format_dict_or_list_output(self, output_value: dict | list) -> str:
|
|
1216
2276
|
"""Format dict/list output as pretty JSON."""
|
|
1217
2277
|
try:
|