glaip-sdk 0.0.20__py3-none-any.whl → 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- glaip_sdk/_version.py +1 -3
- glaip_sdk/branding.py +2 -6
- glaip_sdk/cli/agent_config.py +2 -6
- glaip_sdk/cli/auth.py +11 -30
- glaip_sdk/cli/commands/agents.py +64 -107
- glaip_sdk/cli/commands/configure.py +12 -36
- glaip_sdk/cli/commands/mcps.py +25 -63
- glaip_sdk/cli/commands/models.py +2 -4
- glaip_sdk/cli/commands/tools.py +22 -35
- glaip_sdk/cli/commands/update.py +3 -8
- glaip_sdk/cli/config.py +1 -3
- glaip_sdk/cli/display.py +4 -12
- glaip_sdk/cli/io.py +8 -14
- glaip_sdk/cli/main.py +10 -30
- glaip_sdk/cli/mcp_validators.py +5 -15
- glaip_sdk/cli/pager.py +3 -9
- glaip_sdk/cli/parsers/json_input.py +11 -22
- glaip_sdk/cli/resolution.py +3 -9
- glaip_sdk/cli/rich_helpers.py +1 -3
- glaip_sdk/cli/slash/agent_session.py +5 -10
- glaip_sdk/cli/slash/prompt.py +3 -10
- glaip_sdk/cli/slash/session.py +46 -98
- glaip_sdk/cli/transcript/cache.py +6 -19
- glaip_sdk/cli/transcript/capture.py +6 -20
- glaip_sdk/cli/transcript/launcher.py +1 -3
- glaip_sdk/cli/transcript/viewer.py +187 -46
- glaip_sdk/cli/update_notifier.py +165 -21
- glaip_sdk/cli/utils.py +33 -85
- glaip_sdk/cli/validators.py +11 -12
- glaip_sdk/client/_agent_payloads.py +10 -30
- glaip_sdk/client/agents.py +33 -63
- glaip_sdk/client/base.py +6 -22
- glaip_sdk/client/mcps.py +1 -3
- glaip_sdk/client/run_rendering.py +121 -24
- glaip_sdk/client/tools.py +8 -24
- glaip_sdk/client/validators.py +20 -48
- glaip_sdk/exceptions.py +1 -3
- glaip_sdk/icons.py +9 -3
- glaip_sdk/models.py +14 -33
- glaip_sdk/payload_schemas/agent.py +1 -3
- glaip_sdk/utils/agent_config.py +4 -14
- glaip_sdk/utils/client_utils.py +7 -21
- glaip_sdk/utils/display.py +2 -6
- glaip_sdk/utils/general.py +1 -3
- glaip_sdk/utils/import_export.py +3 -9
- glaip_sdk/utils/rendering/formatting.py +52 -12
- glaip_sdk/utils/rendering/models.py +17 -8
- glaip_sdk/utils/rendering/renderer/__init__.py +1 -5
- glaip_sdk/utils/rendering/renderer/base.py +1107 -320
- glaip_sdk/utils/rendering/renderer/config.py +3 -5
- glaip_sdk/utils/rendering/renderer/debug.py +4 -14
- glaip_sdk/utils/rendering/renderer/panels.py +1 -3
- glaip_sdk/utils/rendering/renderer/progress.py +3 -11
- glaip_sdk/utils/rendering/renderer/stream.py +10 -22
- glaip_sdk/utils/rendering/renderer/toggle.py +182 -0
- glaip_sdk/utils/rendering/step_tree_state.py +100 -0
- glaip_sdk/utils/rendering/steps.py +899 -25
- glaip_sdk/utils/resource_refs.py +4 -13
- glaip_sdk/utils/serialization.py +14 -46
- glaip_sdk/utils/validation.py +4 -4
- {glaip_sdk-0.0.20.dist-info → glaip_sdk-0.1.1.dist-info}/METADATA +12 -1
- glaip_sdk-0.1.1.dist-info/RECORD +82 -0
- glaip_sdk-0.0.20.dist-info/RECORD +0 -80
- {glaip_sdk-0.0.20.dist-info → glaip_sdk-0.1.1.dist-info}/WHEEL +0 -0
- {glaip_sdk-0.0.20.dist-info → glaip_sdk-0.1.1.dist-info}/entry_points.txt +0 -0
|
@@ -8,25 +8,31 @@ from __future__ import annotations
|
|
|
8
8
|
|
|
9
9
|
import json
|
|
10
10
|
import logging
|
|
11
|
+
from collections.abc import Iterable
|
|
11
12
|
from dataclasses import dataclass, field
|
|
12
13
|
from datetime import datetime, timezone
|
|
13
14
|
from time import monotonic
|
|
14
15
|
from typing import Any
|
|
15
16
|
|
|
16
|
-
from rich.align import Align
|
|
17
17
|
from rich.console import Console as RichConsole
|
|
18
18
|
from rich.console import Group
|
|
19
19
|
from rich.live import Live
|
|
20
20
|
from rich.markdown import Markdown
|
|
21
|
+
from rich.measure import Measurement
|
|
21
22
|
from rich.spinner import Spinner
|
|
22
23
|
from rich.text import Text
|
|
23
24
|
|
|
24
25
|
from glaip_sdk.icons import ICON_AGENT, ICON_AGENT_STEP, ICON_DELEGATE, ICON_TOOL_STEP
|
|
25
26
|
from glaip_sdk.rich_components import AIPPanel
|
|
26
27
|
from glaip_sdk.utils.rendering.formatting import (
|
|
28
|
+
build_connector_prefix,
|
|
27
29
|
format_main_title,
|
|
28
30
|
get_spinner_char,
|
|
31
|
+
glyph_for_status,
|
|
29
32
|
is_step_finished,
|
|
33
|
+
normalise_display_label,
|
|
34
|
+
pretty_args,
|
|
35
|
+
redact_sensitive,
|
|
30
36
|
)
|
|
31
37
|
from glaip_sdk.utils.rendering.models import RunStats, Step
|
|
32
38
|
from glaip_sdk.utils.rendering.renderer.config import RendererConfig
|
|
@@ -51,6 +57,22 @@ logger = logging.getLogger("glaip_sdk.run_renderer")
|
|
|
51
57
|
|
|
52
58
|
# Constants
|
|
53
59
|
LESS_THAN_1MS = "[<1ms]"
|
|
60
|
+
FINISHED_STATUS_HINTS = {
|
|
61
|
+
"finished",
|
|
62
|
+
"success",
|
|
63
|
+
"succeeded",
|
|
64
|
+
"completed",
|
|
65
|
+
"failed",
|
|
66
|
+
"stopped",
|
|
67
|
+
"error",
|
|
68
|
+
}
|
|
69
|
+
RUNNING_STATUS_HINTS = {"running", "started", "pending", "working"}
|
|
70
|
+
ARGS_VALUE_MAX_LEN = 160
|
|
71
|
+
STATUS_ICON_STYLES = {
|
|
72
|
+
"success": "green",
|
|
73
|
+
"failed": "red",
|
|
74
|
+
"warning": "yellow",
|
|
75
|
+
}
|
|
54
76
|
|
|
55
77
|
|
|
56
78
|
def _coerce_received_at(value: Any) -> datetime | None:
|
|
@@ -72,6 +94,16 @@ def _coerce_received_at(value: Any) -> datetime | None:
|
|
|
72
94
|
return None
|
|
73
95
|
|
|
74
96
|
|
|
97
|
+
def _truncate_display(text: str | None, limit: int = 160) -> str:
|
|
98
|
+
"""Return text capped at the given character limit with ellipsis."""
|
|
99
|
+
if not text:
|
|
100
|
+
return ""
|
|
101
|
+
stripped = str(text).strip()
|
|
102
|
+
if len(stripped) <= limit:
|
|
103
|
+
return stripped
|
|
104
|
+
return stripped[: limit - 1] + "…"
|
|
105
|
+
|
|
106
|
+
|
|
75
107
|
@dataclass
|
|
76
108
|
class RendererState:
|
|
77
109
|
"""Internal state for the renderer."""
|
|
@@ -96,6 +128,43 @@ class RendererState:
|
|
|
96
128
|
self.buffer = []
|
|
97
129
|
|
|
98
130
|
|
|
131
|
+
@dataclass
|
|
132
|
+
class ThinkingScopeState:
|
|
133
|
+
"""Runtime bookkeeping for deterministic thinking spans."""
|
|
134
|
+
|
|
135
|
+
anchor_id: str
|
|
136
|
+
task_id: str | None
|
|
137
|
+
context_id: str | None
|
|
138
|
+
anchor_started_at: float | None = None
|
|
139
|
+
anchor_finished_at: float | None = None
|
|
140
|
+
idle_started_at: float | None = None
|
|
141
|
+
idle_started_monotonic: float | None = None
|
|
142
|
+
active_thinking_id: str | None = None
|
|
143
|
+
running_children: set[str] = field(default_factory=set)
|
|
144
|
+
closed: bool = False
|
|
145
|
+
|
|
146
|
+
|
|
147
|
+
class TrailingSpinnerLine:
|
|
148
|
+
"""Render a text line with a trailing animated Rich spinner."""
|
|
149
|
+
|
|
150
|
+
def __init__(self, base_text: Text, spinner: Spinner) -> None:
|
|
151
|
+
"""Initialize spinner line with base text and spinner component."""
|
|
152
|
+
self._base_text = base_text
|
|
153
|
+
self._spinner = spinner
|
|
154
|
+
|
|
155
|
+
def __rich_console__(self, console: RichConsole, options: Any) -> Any:
|
|
156
|
+
"""Render the text with trailing animated spinner."""
|
|
157
|
+
spinner_render = self._spinner.render(console.get_time())
|
|
158
|
+
combined = Text.assemble(self._base_text.copy(), " ", spinner_render)
|
|
159
|
+
yield combined
|
|
160
|
+
|
|
161
|
+
def __rich_measure__(self, console: RichConsole, options: Any) -> Measurement:
|
|
162
|
+
"""Measure the combined text and spinner dimensions."""
|
|
163
|
+
snapshot = self._spinner.render(0)
|
|
164
|
+
combined = Text.assemble(self._base_text.copy(), " ", snapshot)
|
|
165
|
+
return Measurement.get(console, options, combined)
|
|
166
|
+
|
|
167
|
+
|
|
99
168
|
class RichStreamRenderer:
|
|
100
169
|
"""Live, modern terminal renderer for agent execution with rich visual output."""
|
|
101
170
|
|
|
@@ -122,17 +191,18 @@ class RichStreamRenderer:
|
|
|
122
191
|
self.state = RendererState()
|
|
123
192
|
|
|
124
193
|
# Initialize step manager and other state
|
|
125
|
-
self.steps = StepManager()
|
|
194
|
+
self.steps = StepManager(max_steps=self.cfg.summary_max_steps)
|
|
126
195
|
# Live display instance (single source of truth)
|
|
127
196
|
self.live: Live | None = None
|
|
197
|
+
self._step_spinners: dict[str, Spinner] = {}
|
|
128
198
|
|
|
129
|
-
#
|
|
130
|
-
self.context_order: list[str] = []
|
|
131
|
-
self.context_parent: dict[str, str] = {}
|
|
132
|
-
self.tool_order: list[str] = []
|
|
133
|
-
self.context_panels: dict[str, list[str]] = {}
|
|
134
|
-
self.context_meta: dict[str, dict[str, Any]] = {}
|
|
199
|
+
# Tool tracking and thinking scopes
|
|
135
200
|
self.tool_panels: dict[str, dict[str, Any]] = {}
|
|
201
|
+
self._thinking_scopes: dict[str, ThinkingScopeState] = {}
|
|
202
|
+
self._root_agent_friendly: str | None = None
|
|
203
|
+
self._root_agent_step_id: str | None = None
|
|
204
|
+
self._root_query: str | None = None
|
|
205
|
+
self._root_query_attached: bool = False
|
|
136
206
|
|
|
137
207
|
# Timing
|
|
138
208
|
self._started_at: float | None = None
|
|
@@ -145,6 +215,17 @@ class RichStreamRenderer:
|
|
|
145
215
|
# Output formatting constants
|
|
146
216
|
self.OUTPUT_PREFIX: str = "**Output:**\n"
|
|
147
217
|
|
|
218
|
+
# Transcript toggling
|
|
219
|
+
self._transcript_mode_enabled: bool = False
|
|
220
|
+
self._transcript_render_cursor: int = 0
|
|
221
|
+
self.transcript_controller: Any | None = None
|
|
222
|
+
self._transcript_hint_message = "[dim]Transcript view · Press Ctrl+T to return to the summary.[/dim]"
|
|
223
|
+
self._summary_hint_message = "[dim]Press Ctrl+T to inspect raw transcript events.[/dim]"
|
|
224
|
+
self._summary_hint_printed_once: bool = False
|
|
225
|
+
self._transcript_hint_printed_once: bool = False
|
|
226
|
+
self._transcript_header_printed: bool = False
|
|
227
|
+
self._transcript_enabled_message_printed: bool = False
|
|
228
|
+
|
|
148
229
|
def on_start(self, meta: dict[str, Any]) -> None:
|
|
149
230
|
"""Handle renderer start event."""
|
|
150
231
|
if self.cfg.live:
|
|
@@ -158,6 +239,20 @@ class RichStreamRenderer:
|
|
|
158
239
|
except Exception:
|
|
159
240
|
self.state.meta = dict(meta)
|
|
160
241
|
|
|
242
|
+
meta_payload = meta or {}
|
|
243
|
+
self.steps.set_root_agent(meta_payload.get("agent_id"))
|
|
244
|
+
self._root_agent_friendly = self._humanize_agent_slug(meta_payload.get("agent_name"))
|
|
245
|
+
self._root_query = _truncate_display(
|
|
246
|
+
meta_payload.get("input_message")
|
|
247
|
+
or meta_payload.get("query")
|
|
248
|
+
or meta_payload.get("message")
|
|
249
|
+
or (meta_payload.get("meta") or {}).get("input_message")
|
|
250
|
+
or ""
|
|
251
|
+
)
|
|
252
|
+
if not self._root_query:
|
|
253
|
+
self._root_query = None
|
|
254
|
+
self._root_query_attached = False
|
|
255
|
+
|
|
161
256
|
# Print compact header and user request (parity with old renderer)
|
|
162
257
|
self._render_header(meta)
|
|
163
258
|
self._render_user_query(meta)
|
|
@@ -207,20 +302,47 @@ class RichStreamRenderer:
|
|
|
207
302
|
except Exception:
|
|
208
303
|
logger.exception("Failed to print header fallback")
|
|
209
304
|
|
|
305
|
+
def _extract_query_from_meta(self, meta: dict[str, Any] | None) -> str | None:
|
|
306
|
+
"""Extract the primary query string from a metadata payload."""
|
|
307
|
+
if not meta:
|
|
308
|
+
return None
|
|
309
|
+
query = (
|
|
310
|
+
meta.get("input_message")
|
|
311
|
+
or meta.get("query")
|
|
312
|
+
or meta.get("message")
|
|
313
|
+
or (meta.get("meta") or {}).get("input_message")
|
|
314
|
+
)
|
|
315
|
+
if isinstance(query, str) and query.strip():
|
|
316
|
+
return query
|
|
317
|
+
return None
|
|
318
|
+
|
|
319
|
+
def _build_user_query_panel(self, query: str) -> AIPPanel:
|
|
320
|
+
"""Create the panel used to display the user request."""
|
|
321
|
+
return AIPPanel(
|
|
322
|
+
Markdown(f"**Query:** {query}"),
|
|
323
|
+
title="User Request",
|
|
324
|
+
border_style="#d97706",
|
|
325
|
+
padding=(0, 1),
|
|
326
|
+
)
|
|
327
|
+
|
|
210
328
|
def _render_user_query(self, meta: dict[str, Any]) -> None:
|
|
211
329
|
"""Render the user query panel."""
|
|
212
|
-
query =
|
|
330
|
+
query = self._extract_query_from_meta(meta)
|
|
213
331
|
if not query:
|
|
214
332
|
return
|
|
333
|
+
self.console.print(self._build_user_query_panel(query))
|
|
334
|
+
|
|
335
|
+
def _render_summary_static_sections(self) -> None:
|
|
336
|
+
"""Re-render header and user query when returning to summary mode."""
|
|
337
|
+
meta = getattr(self.state, "meta", None)
|
|
338
|
+
if meta:
|
|
339
|
+
self._render_header(meta)
|
|
340
|
+
elif self.header_text and not self._render_header_rule():
|
|
341
|
+
self._render_header_fallback()
|
|
215
342
|
|
|
216
|
-
self.
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
title="User Request",
|
|
220
|
-
border_style="#d97706",
|
|
221
|
-
padding=(0, 1),
|
|
222
|
-
)
|
|
223
|
-
)
|
|
343
|
+
query = self._extract_query_from_meta(meta) or self._root_query
|
|
344
|
+
if query:
|
|
345
|
+
self.console.print(self._build_user_query_panel(query))
|
|
224
346
|
|
|
225
347
|
def _ensure_streaming_started_baseline(self, timestamp: float) -> None:
|
|
226
348
|
"""Synchronize streaming start state across renderer components."""
|
|
@@ -237,10 +359,12 @@ class RichStreamRenderer:
|
|
|
237
359
|
self._sync_stream_start(ev, received_at)
|
|
238
360
|
|
|
239
361
|
metadata = self.stream_processor.extract_event_metadata(ev)
|
|
240
|
-
self.stream_processor.update_timing(metadata["context_id"])
|
|
241
362
|
|
|
242
363
|
self._maybe_render_debug(ev, received_at)
|
|
243
|
-
|
|
364
|
+
try:
|
|
365
|
+
self._dispatch_event(ev, metadata)
|
|
366
|
+
finally:
|
|
367
|
+
self.stream_processor.update_timing(metadata.get("context_id"))
|
|
244
368
|
|
|
245
369
|
def _resolve_received_timestamp(self, ev: dict[str, Any]) -> datetime:
|
|
246
370
|
"""Return the timestamp an event was received, normalising inputs."""
|
|
@@ -253,9 +377,7 @@ class RichStreamRenderer:
|
|
|
253
377
|
|
|
254
378
|
return received_at
|
|
255
379
|
|
|
256
|
-
def _sync_stream_start(
|
|
257
|
-
self, ev: dict[str, Any], received_at: datetime | None
|
|
258
|
-
) -> None:
|
|
380
|
+
def _sync_stream_start(self, ev: dict[str, Any], received_at: datetime | None) -> None:
|
|
259
381
|
"""Ensure renderer and stream processor share a streaming baseline."""
|
|
260
382
|
baseline = self.state.streaming_started_at
|
|
261
383
|
if baseline is None:
|
|
@@ -275,12 +397,14 @@ class RichStreamRenderer:
|
|
|
275
397
|
if not self.verbose:
|
|
276
398
|
return
|
|
277
399
|
|
|
400
|
+
self._ensure_transcript_header()
|
|
278
401
|
render_debug_event(
|
|
279
402
|
ev,
|
|
280
403
|
self.console,
|
|
281
404
|
received_ts=received_at,
|
|
282
405
|
baseline_ts=self.state.streaming_started_event_ts,
|
|
283
406
|
)
|
|
407
|
+
self._print_transcript_hint()
|
|
284
408
|
|
|
285
409
|
def _dispatch_event(self, ev: dict[str, Any], metadata: dict[str, Any]) -> None:
|
|
286
410
|
"""Route events to the appropriate renderer handlers."""
|
|
@@ -294,7 +418,7 @@ class RichStreamRenderer:
|
|
|
294
418
|
elif kind == "final_response":
|
|
295
419
|
self._handle_final_response_event(content, metadata)
|
|
296
420
|
elif kind in {"agent_step", "agent_thinking_step"}:
|
|
297
|
-
self._handle_agent_step_event(ev)
|
|
421
|
+
self._handle_agent_step_event(ev, metadata)
|
|
298
422
|
else:
|
|
299
423
|
self._ensure_live()
|
|
300
424
|
|
|
@@ -310,21 +434,32 @@ class RichStreamRenderer:
|
|
|
310
434
|
self.state.buffer.append(content)
|
|
311
435
|
self._ensure_live()
|
|
312
436
|
|
|
313
|
-
def _handle_final_response_event(
|
|
314
|
-
self, content: str, metadata: dict[str, Any]
|
|
315
|
-
) -> None:
|
|
437
|
+
def _handle_final_response_event(self, content: str, metadata: dict[str, Any]) -> None:
|
|
316
438
|
"""Handle final response events."""
|
|
317
439
|
if content:
|
|
318
440
|
self.state.buffer.append(content)
|
|
319
441
|
self.state.final_text = content
|
|
320
442
|
|
|
321
443
|
meta_payload = metadata.get("metadata") or {}
|
|
322
|
-
self.
|
|
444
|
+
final_time = self._coerce_server_time(meta_payload.get("time"))
|
|
445
|
+
self._update_final_duration(final_time)
|
|
446
|
+
self._close_active_thinking_scopes(final_time)
|
|
447
|
+
self._finish_running_steps()
|
|
448
|
+
self._finish_tool_panels()
|
|
449
|
+
self._normalise_finished_icons()
|
|
323
450
|
|
|
324
|
-
|
|
325
|
-
|
|
451
|
+
self._ensure_live()
|
|
452
|
+
self._print_final_panel_if_needed()
|
|
326
453
|
|
|
327
|
-
def
|
|
454
|
+
def _normalise_finished_icons(self) -> None:
|
|
455
|
+
"""Ensure finished steps do not keep spinner icons."""
|
|
456
|
+
for step in self.steps.by_id.values():
|
|
457
|
+
if getattr(step, "status", None) == "finished" and getattr(step, "status_icon", None) == "spinner":
|
|
458
|
+
step.status_icon = "success"
|
|
459
|
+
if getattr(step, "status", None) != "running":
|
|
460
|
+
self._step_spinners.pop(step.step_id, None)
|
|
461
|
+
|
|
462
|
+
def _handle_agent_step_event(self, ev: dict[str, Any], metadata: dict[str, Any]) -> None:
|
|
328
463
|
"""Handle agent step events."""
|
|
329
464
|
# Extract tool information
|
|
330
465
|
(
|
|
@@ -334,22 +469,376 @@ class RichStreamRenderer:
|
|
|
334
469
|
tool_calls_info,
|
|
335
470
|
) = self.stream_processor.parse_tool_calls(ev)
|
|
336
471
|
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
340
|
-
|
|
472
|
+
payload = metadata.get("metadata") or {}
|
|
473
|
+
|
|
474
|
+
tracked_step: Step | None = None
|
|
475
|
+
try:
|
|
476
|
+
tracked_step = self.steps.apply_event(ev)
|
|
477
|
+
except ValueError:
|
|
478
|
+
logger.debug("Malformed step event skipped", exc_info=True)
|
|
479
|
+
else:
|
|
480
|
+
self._record_step_server_start(tracked_step, payload)
|
|
481
|
+
self._update_thinking_timeline(tracked_step, payload)
|
|
482
|
+
self._maybe_override_root_agent_label(tracked_step, payload)
|
|
483
|
+
self._maybe_attach_root_query(tracked_step)
|
|
484
|
+
|
|
485
|
+
# Track tools and sub-agents for transcript/debug context
|
|
486
|
+
self.stream_processor.track_tools_and_agents(tool_name, tool_calls_info, is_delegation_tool)
|
|
341
487
|
|
|
342
488
|
# Handle tool execution
|
|
343
|
-
self._handle_agent_step(
|
|
489
|
+
self._handle_agent_step(
|
|
490
|
+
ev,
|
|
491
|
+
tool_name,
|
|
492
|
+
tool_args,
|
|
493
|
+
tool_out,
|
|
494
|
+
tool_calls_info,
|
|
495
|
+
tracked_step=tracked_step,
|
|
496
|
+
)
|
|
344
497
|
|
|
345
498
|
# Update live display
|
|
346
499
|
self._ensure_live()
|
|
347
500
|
|
|
501
|
+
def _maybe_attach_root_query(self, step: Step | None) -> None:
|
|
502
|
+
"""Attach the user query to the root agent step for display."""
|
|
503
|
+
if not step or self._root_query_attached or not self._root_query or step.kind != "agent" or step.parent_id:
|
|
504
|
+
return
|
|
505
|
+
|
|
506
|
+
args = dict(getattr(step, "args", {}) or {})
|
|
507
|
+
args.setdefault("query", self._root_query)
|
|
508
|
+
step.args = args
|
|
509
|
+
self._root_query_attached = True
|
|
510
|
+
|
|
511
|
+
def _record_step_server_start(self, step: Step | None, payload: dict[str, Any]) -> None:
|
|
512
|
+
"""Store server-provided start times for elapsed calculations."""
|
|
513
|
+
if not step:
|
|
514
|
+
return
|
|
515
|
+
server_time = payload.get("time")
|
|
516
|
+
if not isinstance(server_time, (int, float)):
|
|
517
|
+
return
|
|
518
|
+
self._step_server_start_times.setdefault(step.step_id, float(server_time))
|
|
519
|
+
|
|
520
|
+
def _maybe_override_root_agent_label(self, step: Step | None, payload: dict[str, Any]) -> None:
|
|
521
|
+
"""Ensure the root agent row uses the human-friendly name and shows the ID."""
|
|
522
|
+
if not step or step.kind != "agent" or step.parent_id:
|
|
523
|
+
return
|
|
524
|
+
friendly = self._root_agent_friendly or self._humanize_agent_slug((payload or {}).get("agent_name"))
|
|
525
|
+
if not friendly:
|
|
526
|
+
return
|
|
527
|
+
agent_identifier = step.name or step.step_id
|
|
528
|
+
if not agent_identifier:
|
|
529
|
+
return
|
|
530
|
+
step.display_label = normalise_display_label(f"{ICON_AGENT} {friendly} ({agent_identifier})")
|
|
531
|
+
if not self._root_agent_step_id:
|
|
532
|
+
self._root_agent_step_id = step.step_id
|
|
533
|
+
|
|
534
|
+
def _update_thinking_timeline(self, step: Step | None, payload: dict[str, Any]) -> None:
|
|
535
|
+
"""Maintain deterministic thinking spans for each agent/delegate scope."""
|
|
536
|
+
if not self.cfg.render_thinking or not step:
|
|
537
|
+
return
|
|
538
|
+
|
|
539
|
+
now_monotonic = monotonic()
|
|
540
|
+
server_time = self._coerce_server_time(payload.get("time"))
|
|
541
|
+
status_hint = (payload.get("status") or "").lower()
|
|
542
|
+
|
|
543
|
+
if self._is_scope_anchor(step):
|
|
544
|
+
self._update_anchor_thinking(
|
|
545
|
+
step=step,
|
|
546
|
+
server_time=server_time,
|
|
547
|
+
status_hint=status_hint,
|
|
548
|
+
now_monotonic=now_monotonic,
|
|
549
|
+
)
|
|
550
|
+
return
|
|
551
|
+
|
|
552
|
+
self._update_child_thinking(
|
|
553
|
+
step=step,
|
|
554
|
+
server_time=server_time,
|
|
555
|
+
status_hint=status_hint,
|
|
556
|
+
now_monotonic=now_monotonic,
|
|
557
|
+
)
|
|
558
|
+
|
|
559
|
+
def _update_anchor_thinking(
|
|
560
|
+
self,
|
|
561
|
+
*,
|
|
562
|
+
step: Step,
|
|
563
|
+
server_time: float | None,
|
|
564
|
+
status_hint: str,
|
|
565
|
+
now_monotonic: float,
|
|
566
|
+
) -> None:
|
|
567
|
+
"""Handle deterministic thinking bookkeeping for agent/delegate anchors."""
|
|
568
|
+
scope = self._get_or_create_scope(step)
|
|
569
|
+
if scope.anchor_started_at is None and server_time is not None:
|
|
570
|
+
scope.anchor_started_at = server_time
|
|
571
|
+
|
|
572
|
+
if not scope.closed and scope.active_thinking_id is None:
|
|
573
|
+
self._start_scope_thinking(
|
|
574
|
+
scope,
|
|
575
|
+
start_server_time=scope.anchor_started_at or server_time,
|
|
576
|
+
start_monotonic=now_monotonic,
|
|
577
|
+
)
|
|
578
|
+
|
|
579
|
+
is_anchor_finished = status_hint in FINISHED_STATUS_HINTS or (not status_hint and is_step_finished(step))
|
|
580
|
+
if is_anchor_finished:
|
|
581
|
+
scope.anchor_finished_at = server_time or scope.anchor_finished_at
|
|
582
|
+
self._finish_scope_thinking(scope, server_time, now_monotonic)
|
|
583
|
+
scope.closed = True
|
|
584
|
+
|
|
585
|
+
parent_anchor_id = self._resolve_anchor_id(step)
|
|
586
|
+
if parent_anchor_id:
|
|
587
|
+
self._cascade_anchor_update(
|
|
588
|
+
parent_anchor_id=parent_anchor_id,
|
|
589
|
+
child_step=step,
|
|
590
|
+
server_time=server_time,
|
|
591
|
+
now_monotonic=now_monotonic,
|
|
592
|
+
is_finished=is_anchor_finished,
|
|
593
|
+
)
|
|
594
|
+
|
|
595
|
+
def _cascade_anchor_update(
|
|
596
|
+
self,
|
|
597
|
+
*,
|
|
598
|
+
parent_anchor_id: str,
|
|
599
|
+
child_step: Step,
|
|
600
|
+
server_time: float | None,
|
|
601
|
+
now_monotonic: float,
|
|
602
|
+
is_finished: bool,
|
|
603
|
+
) -> None:
|
|
604
|
+
"""Propagate anchor state changes to the parent scope."""
|
|
605
|
+
parent_scope = self._thinking_scopes.get(parent_anchor_id)
|
|
606
|
+
if not parent_scope or parent_scope.closed:
|
|
607
|
+
return
|
|
608
|
+
if is_finished:
|
|
609
|
+
self._mark_child_finished(parent_scope, child_step.step_id, server_time, now_monotonic)
|
|
610
|
+
else:
|
|
611
|
+
self._mark_child_running(parent_scope, child_step, server_time, now_monotonic)
|
|
612
|
+
|
|
613
|
+
def _update_child_thinking(
|
|
614
|
+
self,
|
|
615
|
+
*,
|
|
616
|
+
step: Step,
|
|
617
|
+
server_time: float | None,
|
|
618
|
+
status_hint: str,
|
|
619
|
+
now_monotonic: float,
|
|
620
|
+
) -> None:
|
|
621
|
+
"""Update deterministic thinking state for non-anchor steps."""
|
|
622
|
+
anchor_id = self._resolve_anchor_id(step)
|
|
623
|
+
if not anchor_id:
|
|
624
|
+
return
|
|
625
|
+
|
|
626
|
+
scope = self._thinking_scopes.get(anchor_id)
|
|
627
|
+
if not scope or scope.closed or step.kind == "thinking":
|
|
628
|
+
return
|
|
629
|
+
|
|
630
|
+
is_finish_event = status_hint in FINISHED_STATUS_HINTS or (not status_hint and is_step_finished(step))
|
|
631
|
+
if is_finish_event:
|
|
632
|
+
self._mark_child_finished(scope, step.step_id, server_time, now_monotonic)
|
|
633
|
+
else:
|
|
634
|
+
self._mark_child_running(scope, step, server_time, now_monotonic)
|
|
635
|
+
|
|
636
|
+
def _resolve_anchor_id(self, step: Step) -> str | None:
|
|
637
|
+
"""Return the nearest agent/delegate ancestor for a step."""
|
|
638
|
+
parent_id = step.parent_id
|
|
639
|
+
while parent_id:
|
|
640
|
+
parent = self.steps.by_id.get(parent_id)
|
|
641
|
+
if not parent:
|
|
642
|
+
return None
|
|
643
|
+
if self._is_scope_anchor(parent):
|
|
644
|
+
return parent.step_id
|
|
645
|
+
parent_id = parent.parent_id
|
|
646
|
+
return None
|
|
647
|
+
|
|
648
|
+
def _get_or_create_scope(self, step: Step) -> ThinkingScopeState:
|
|
649
|
+
"""Fetch (or create) thinking state for the given anchor step."""
|
|
650
|
+
scope = self._thinking_scopes.get(step.step_id)
|
|
651
|
+
if scope:
|
|
652
|
+
if scope.task_id is None:
|
|
653
|
+
scope.task_id = step.task_id
|
|
654
|
+
if scope.context_id is None:
|
|
655
|
+
scope.context_id = step.context_id
|
|
656
|
+
return scope
|
|
657
|
+
scope = ThinkingScopeState(
|
|
658
|
+
anchor_id=step.step_id,
|
|
659
|
+
task_id=step.task_id,
|
|
660
|
+
context_id=step.context_id,
|
|
661
|
+
)
|
|
662
|
+
self._thinking_scopes[step.step_id] = scope
|
|
663
|
+
return scope
|
|
664
|
+
|
|
665
|
+
def _is_scope_anchor(self, step: Step) -> bool:
|
|
666
|
+
"""Return True when a step should host its own thinking timeline."""
|
|
667
|
+
if step.kind in {"agent", "delegate"}:
|
|
668
|
+
return True
|
|
669
|
+
name = (step.name or "").lower()
|
|
670
|
+
return name.startswith(("delegate_to_", "delegate_", "delegate "))
|
|
671
|
+
|
|
672
|
+
def _start_scope_thinking(
|
|
673
|
+
self,
|
|
674
|
+
scope: ThinkingScopeState,
|
|
675
|
+
*,
|
|
676
|
+
start_server_time: float | None,
|
|
677
|
+
start_monotonic: float,
|
|
678
|
+
) -> None:
|
|
679
|
+
"""Open a deterministic thinking node beneath the scope anchor."""
|
|
680
|
+
if scope.closed or scope.active_thinking_id or not scope.anchor_id:
|
|
681
|
+
return
|
|
682
|
+
step = self.steps.start_or_get(
|
|
683
|
+
task_id=scope.task_id,
|
|
684
|
+
context_id=scope.context_id,
|
|
685
|
+
kind="thinking",
|
|
686
|
+
name=f"agent_thinking_step::{scope.anchor_id}",
|
|
687
|
+
parent_id=scope.anchor_id,
|
|
688
|
+
args={"reason": "deterministic_timeline"},
|
|
689
|
+
)
|
|
690
|
+
step.display_label = "💭 Thinking…"
|
|
691
|
+
step.status_icon = "spinner"
|
|
692
|
+
scope.active_thinking_id = step.step_id
|
|
693
|
+
scope.idle_started_at = start_server_time
|
|
694
|
+
scope.idle_started_monotonic = start_monotonic
|
|
695
|
+
|
|
696
|
+
def _finish_scope_thinking(
|
|
697
|
+
self,
|
|
698
|
+
scope: ThinkingScopeState,
|
|
699
|
+
end_server_time: float | None,
|
|
700
|
+
end_monotonic: float,
|
|
701
|
+
) -> None:
|
|
702
|
+
"""Close the currently running thinking node if one exists."""
|
|
703
|
+
if not scope.active_thinking_id:
|
|
704
|
+
return
|
|
705
|
+
thinking_step = self.steps.by_id.get(scope.active_thinking_id)
|
|
706
|
+
if not thinking_step:
|
|
707
|
+
scope.active_thinking_id = None
|
|
708
|
+
scope.idle_started_at = None
|
|
709
|
+
scope.idle_started_monotonic = None
|
|
710
|
+
return
|
|
711
|
+
|
|
712
|
+
duration = self._calculate_timeline_duration(
|
|
713
|
+
scope.idle_started_at,
|
|
714
|
+
end_server_time,
|
|
715
|
+
scope.idle_started_monotonic,
|
|
716
|
+
end_monotonic,
|
|
717
|
+
)
|
|
718
|
+
thinking_step.display_label = thinking_step.display_label or "💭 Thinking…"
|
|
719
|
+
if duration is not None:
|
|
720
|
+
thinking_step.finish(duration, source="timeline")
|
|
721
|
+
else:
|
|
722
|
+
thinking_step.finish(None, source="timeline")
|
|
723
|
+
thinking_step.status_icon = "success"
|
|
724
|
+
scope.active_thinking_id = None
|
|
725
|
+
scope.idle_started_at = None
|
|
726
|
+
scope.idle_started_monotonic = None
|
|
727
|
+
|
|
728
|
+
def _mark_child_running(
|
|
729
|
+
self,
|
|
730
|
+
scope: ThinkingScopeState,
|
|
731
|
+
step: Step,
|
|
732
|
+
server_time: float | None,
|
|
733
|
+
now_monotonic: float,
|
|
734
|
+
) -> None:
|
|
735
|
+
"""Mark a direct child as running and close any open thinking node."""
|
|
736
|
+
if step.step_id in scope.running_children:
|
|
737
|
+
return
|
|
738
|
+
scope.running_children.add(step.step_id)
|
|
739
|
+
if not scope.active_thinking_id:
|
|
740
|
+
return
|
|
741
|
+
|
|
742
|
+
start_server = self._step_server_start_times.get(step.step_id)
|
|
743
|
+
if start_server is None:
|
|
744
|
+
start_server = server_time
|
|
745
|
+
self._finish_scope_thinking(scope, start_server, now_monotonic)
|
|
746
|
+
|
|
747
|
+
def _mark_child_finished(
|
|
748
|
+
self,
|
|
749
|
+
scope: ThinkingScopeState,
|
|
750
|
+
step_id: str,
|
|
751
|
+
server_time: float | None,
|
|
752
|
+
now_monotonic: float,
|
|
753
|
+
) -> None:
|
|
754
|
+
"""Handle completion for a scope child and resume thinking if idle."""
|
|
755
|
+
if step_id in scope.running_children:
|
|
756
|
+
scope.running_children.discard(step_id)
|
|
757
|
+
if scope.running_children or scope.closed:
|
|
758
|
+
return
|
|
759
|
+
self._start_scope_thinking(
|
|
760
|
+
scope,
|
|
761
|
+
start_server_time=server_time,
|
|
762
|
+
start_monotonic=now_monotonic,
|
|
763
|
+
)
|
|
764
|
+
|
|
765
|
+
def _close_active_thinking_scopes(self, server_time: float | None) -> None:
|
|
766
|
+
"""Finish any in-flight thinking nodes during finalization."""
|
|
767
|
+
now = monotonic()
|
|
768
|
+
for scope in self._thinking_scopes.values():
|
|
769
|
+
if not scope.active_thinking_id:
|
|
770
|
+
continue
|
|
771
|
+
self._finish_scope_thinking(scope, server_time, now)
|
|
772
|
+
scope.closed = True
|
|
773
|
+
# Parent scopes resume thinking via _cascade_anchor_update
|
|
774
|
+
|
|
775
|
+
def _apply_root_duration(self, duration_seconds: float | None) -> None:
|
|
776
|
+
"""Propagate the final run duration to the root agent step."""
|
|
777
|
+
if duration_seconds is None or not self._root_agent_step_id:
|
|
778
|
+
return
|
|
779
|
+
root_step = self.steps.by_id.get(self._root_agent_step_id)
|
|
780
|
+
if not root_step:
|
|
781
|
+
return
|
|
782
|
+
try:
|
|
783
|
+
duration_ms = max(0, int(round(float(duration_seconds) * 1000)))
|
|
784
|
+
except Exception:
|
|
785
|
+
return
|
|
786
|
+
root_step.duration_ms = duration_ms
|
|
787
|
+
root_step.duration_source = root_step.duration_source or "run"
|
|
788
|
+
root_step.status = "finished"
|
|
789
|
+
|
|
790
|
+
@staticmethod
|
|
791
|
+
def _coerce_server_time(value: Any) -> float | None:
|
|
792
|
+
"""Convert a raw SSE time payload into a float if possible."""
|
|
793
|
+
if isinstance(value, (int, float)):
|
|
794
|
+
return float(value)
|
|
795
|
+
try:
|
|
796
|
+
return float(value)
|
|
797
|
+
except (TypeError, ValueError):
|
|
798
|
+
return None
|
|
799
|
+
|
|
800
|
+
@staticmethod
|
|
801
|
+
def _calculate_timeline_duration(
|
|
802
|
+
start_server: float | None,
|
|
803
|
+
end_server: float | None,
|
|
804
|
+
start_monotonic: float | None,
|
|
805
|
+
end_monotonic: float,
|
|
806
|
+
) -> float | None:
|
|
807
|
+
"""Pick the most reliable pair of timestamps to derive duration seconds."""
|
|
808
|
+
if start_server is not None and end_server is not None:
|
|
809
|
+
return max(0.0, float(end_server) - float(start_server))
|
|
810
|
+
if start_monotonic is not None:
|
|
811
|
+
try:
|
|
812
|
+
return max(0.0, float(end_monotonic) - float(start_monotonic))
|
|
813
|
+
except Exception:
|
|
814
|
+
return None
|
|
815
|
+
return None
|
|
816
|
+
|
|
817
|
+
@staticmethod
|
|
818
|
+
def _humanize_agent_slug(value: Any) -> str | None:
|
|
819
|
+
"""Convert a slugified agent name into Title Case."""
|
|
820
|
+
if not isinstance(value, str):
|
|
821
|
+
return None
|
|
822
|
+
cleaned = value.replace("_", " ").replace("-", " ").strip()
|
|
823
|
+
if not cleaned:
|
|
824
|
+
return None
|
|
825
|
+
parts = [part for part in cleaned.split() if part]
|
|
826
|
+
return " ".join(part[:1].upper() + part[1:] for part in parts)
|
|
827
|
+
|
|
348
828
|
def _finish_running_steps(self) -> None:
|
|
349
829
|
"""Mark any running steps as finished to avoid lingering spinners."""
|
|
350
830
|
for st in self.steps.by_id.values():
|
|
351
831
|
if not is_step_finished(st):
|
|
352
|
-
|
|
832
|
+
self._mark_incomplete_step(st)
|
|
833
|
+
|
|
834
|
+
def _mark_incomplete_step(self, step: Step) -> None:
|
|
835
|
+
"""Mark a lingering step as incomplete/warning with unknown duration."""
|
|
836
|
+
step.status = "finished"
|
|
837
|
+
step.duration_unknown = True
|
|
838
|
+
if step.duration_ms is None:
|
|
839
|
+
step.duration_ms = 0
|
|
840
|
+
step.duration_source = step.duration_source or "unknown"
|
|
841
|
+
step.status_icon = "warning"
|
|
353
842
|
|
|
354
843
|
def _finish_tool_panels(self) -> None:
|
|
355
844
|
"""Mark unfinished tool panels as finished."""
|
|
@@ -376,6 +865,9 @@ class RichStreamRenderer:
|
|
|
376
865
|
if not body:
|
|
377
866
|
return
|
|
378
867
|
|
|
868
|
+
if getattr(self, "_transcript_mode_enabled", False):
|
|
869
|
+
return
|
|
870
|
+
|
|
379
871
|
if self.verbose:
|
|
380
872
|
final_panel = create_final_panel(
|
|
381
873
|
body,
|
|
@@ -389,25 +881,37 @@ class RichStreamRenderer:
|
|
|
389
881
|
"""Handle completion event."""
|
|
390
882
|
self.state.finalizing_ui = True
|
|
391
883
|
|
|
392
|
-
|
|
884
|
+
self._handle_stats_duration(stats)
|
|
885
|
+
self._close_active_thinking_scopes(self.state.final_duration_seconds)
|
|
886
|
+
self._cleanup_ui_elements()
|
|
887
|
+
self._finalize_display()
|
|
888
|
+
self._print_completion_message()
|
|
889
|
+
|
|
890
|
+
def _handle_stats_duration(self, stats: RunStats) -> None:
|
|
891
|
+
"""Handle stats processing and duration calculation."""
|
|
892
|
+
if not isinstance(stats, RunStats):
|
|
893
|
+
return
|
|
894
|
+
|
|
895
|
+
duration = None
|
|
896
|
+
try:
|
|
897
|
+
if stats.finished_at is not None and stats.started_at is not None:
|
|
898
|
+
duration = max(0.0, float(stats.finished_at) - float(stats.started_at))
|
|
899
|
+
except Exception:
|
|
393
900
|
duration = None
|
|
394
|
-
try:
|
|
395
|
-
if stats.finished_at is not None and stats.started_at is not None:
|
|
396
|
-
duration = max(
|
|
397
|
-
0.0, float(stats.finished_at) - float(stats.started_at)
|
|
398
|
-
)
|
|
399
|
-
except Exception:
|
|
400
|
-
duration = None
|
|
401
901
|
|
|
402
|
-
|
|
403
|
-
|
|
902
|
+
if duration is not None:
|
|
903
|
+
self._update_final_duration(duration, overwrite=True)
|
|
404
904
|
|
|
905
|
+
    def _cleanup_ui_elements(self) -> None:
        """Clean up running UI elements at run completion.

        Ensures nothing is left visually "in progress" once the stream ends.
        """
        # Mark any running steps as finished to avoid lingering spinners
        self._finish_running_steps()

        # Mark unfinished tool panels as finished
        self._finish_tool_panels()
|
|
410
912
|
|
|
913
|
+
def _finalize_display(self) -> None:
|
|
914
|
+
"""Finalize live display and render final output."""
|
|
411
915
|
# Final refresh
|
|
412
916
|
self._ensure_live()
|
|
413
917
|
|
|
@@ -417,8 +921,24 @@ class RichStreamRenderer:
|
|
|
417
921
|
# Render final output based on configuration
|
|
418
922
|
self._print_final_panel_if_needed()
|
|
419
923
|
|
|
924
|
+
def _print_completion_message(self) -> None:
|
|
925
|
+
"""Print completion message based on current mode."""
|
|
926
|
+
if self._transcript_mode_enabled:
|
|
927
|
+
try:
|
|
928
|
+
self.console.print(
|
|
929
|
+
"[dim]Run finished. Press Ctrl+T to return to the summary view or stay here to inspect events. "
|
|
930
|
+
"Use the post-run viewer for export.[/dim]"
|
|
931
|
+
)
|
|
932
|
+
except Exception:
|
|
933
|
+
pass
|
|
934
|
+
else:
|
|
935
|
+
# No transcript toggle in summary mode; nothing to print here.
|
|
936
|
+
return
|
|
937
|
+
|
|
420
938
|
def _ensure_live(self) -> None:
|
|
421
939
|
"""Ensure live display is updated."""
|
|
940
|
+
if getattr(self, "_transcript_mode_enabled", False):
|
|
941
|
+
return
|
|
422
942
|
if not self._ensure_live_stack():
|
|
423
943
|
return
|
|
424
944
|
|
|
@@ -426,6 +946,12 @@ class RichStreamRenderer:
|
|
|
426
946
|
|
|
427
947
|
if self.live:
|
|
428
948
|
self._refresh_live_panels()
|
|
949
|
+
if (
|
|
950
|
+
not self._transcript_mode_enabled
|
|
951
|
+
and not self.state.finalizing_ui
|
|
952
|
+
and not self._summary_hint_printed_once
|
|
953
|
+
):
|
|
954
|
+
self._print_summary_hint(force=True)
|
|
429
955
|
|
|
430
956
|
def _ensure_live_stack(self) -> bool:
|
|
431
957
|
"""Guarantee the console exposes the internal live stack Rich expects."""
|
|
@@ -472,8 +998,7 @@ class RichStreamRenderer:
|
|
|
472
998
|
title="Steps",
|
|
473
999
|
border_style="blue",
|
|
474
1000
|
)
|
|
475
|
-
|
|
476
|
-
panels = self._build_live_panels(main_panel, steps_panel, tool_panels)
|
|
1001
|
+
panels = self._build_live_panels(main_panel, steps_panel)
|
|
477
1002
|
|
|
478
1003
|
self.live.update(Group(*panels))
|
|
479
1004
|
|
|
@@ -481,17 +1006,12 @@ class RichStreamRenderer:
|
|
|
481
1006
|
self,
|
|
482
1007
|
main_panel: Any,
|
|
483
1008
|
steps_panel: Any,
|
|
484
|
-
tool_panels: list[Any],
|
|
485
1009
|
) -> list[Any]:
|
|
486
1010
|
"""Assemble the panel order for the live display."""
|
|
487
1011
|
if self.verbose:
|
|
488
|
-
return [main_panel, steps_panel
|
|
1012
|
+
return [main_panel, steps_panel]
|
|
489
1013
|
|
|
490
|
-
|
|
491
|
-
if tool_panels:
|
|
492
|
-
panels.extend(tool_panels)
|
|
493
|
-
panels.append(main_panel)
|
|
494
|
-
return panels
|
|
1014
|
+
return [steps_panel, main_panel]
|
|
495
1015
|
|
|
496
1016
|
def _render_main_panel(self) -> Any:
|
|
497
1017
|
"""Render the main content panel."""
|
|
@@ -538,9 +1058,133 @@ class RichStreamRenderer:
|
|
|
538
1058
|
# ------------------------------------------------------------------
|
|
539
1059
|
# Transcript helpers
|
|
540
1060
|
# ------------------------------------------------------------------
|
|
541
|
-
|
|
542
|
-
|
|
543
|
-
|
|
1061
|
+
@property
|
|
1062
|
+
def transcript_mode_enabled(self) -> bool:
|
|
1063
|
+
"""Return True when transcript mode is currently active."""
|
|
1064
|
+
return self._transcript_mode_enabled
|
|
1065
|
+
|
|
1066
|
+
    def toggle_transcript_mode(self) -> None:
        """Flip transcript mode on/off.

        Delegates to ``set_transcript_mode``, which performs the actual screen
        transition and state bookkeeping.
        """
        self.set_transcript_mode(not self._transcript_mode_enabled)
|
|
1069
|
+
|
|
1070
|
+
    def set_transcript_mode(self, enabled: bool) -> None:
        """Set transcript mode explicitly.

        Switching modes clears the screen and re-renders the appropriate view:
        entering transcript mode stops the live display and replays captured
        events; leaving it restores the summary panels.
        """
        if enabled == self._transcript_mode_enabled:
            return  # already in the requested mode; avoid redundant redraws

        self._transcript_mode_enabled = enabled
        self.apply_verbosity(enabled)

        if enabled:
            # Reset one-shot hint/header flags so they print again this session.
            self._summary_hint_printed_once = False
            self._transcript_hint_printed_once = False
            self._transcript_header_printed = False
            self._transcript_enabled_message_printed = False
            self._stop_live_display()
            self._clear_console_safe()
            self._print_transcript_enabled_message()
            self._render_transcript_backfill()
        else:
            self._transcript_hint_printed_once = False
            self._transcript_header_printed = False
            self._transcript_enabled_message_printed = False
            self._clear_console_safe()
            self._render_summary_static_sections()
            # Wording differs depending on whether the run is still streaming.
            summary_notice = (
                "[dim]Returning to the summary view. Streaming will continue here.[/dim]"
                if not self.state.finalizing_ui
                else "[dim]Returning to the summary view.[/dim]"
            )
            self.console.print(summary_notice)
            if self.live:
                self._refresh_live_panels()
            else:
                # No live display: print static snapshots of the panels instead.
                steps_renderable = self._render_steps_text()
                steps_panel = AIPPanel(
                    steps_renderable,
                    title="Steps",
                    border_style="blue",
                )
                self.console.print(steps_panel)
                self.console.print(self._render_main_panel())
            # NOTE(review): hint placement assumed to apply to the whole
            # disable branch (not just the no-live path) — confirm indentation.
            if not self.state.finalizing_ui:
                self._print_summary_hint(force=True)
|
|
1112
|
+
|
|
1113
|
+
    def _clear_console_safe(self) -> None:
        """Best-effort console clear that ignores platform quirks."""
        try:
            self.console.clear()
        except Exception:
            # Some terminals raise on clear; the blank screen is cosmetic only.
            pass
|
|
1119
|
+
|
|
1120
|
+
def _print_transcript_hint(self) -> None:
|
|
1121
|
+
"""Render the transcript toggle hint, keeping it near the bottom."""
|
|
1122
|
+
if not self._transcript_mode_enabled:
|
|
1123
|
+
return
|
|
1124
|
+
try:
|
|
1125
|
+
self.console.print(self._transcript_hint_message)
|
|
1126
|
+
except Exception:
|
|
1127
|
+
pass
|
|
1128
|
+
else:
|
|
1129
|
+
self._transcript_hint_printed_once = True
|
|
1130
|
+
|
|
1131
|
+
def _print_transcript_enabled_message(self) -> None:
|
|
1132
|
+
if self._transcript_enabled_message_printed:
|
|
1133
|
+
return
|
|
1134
|
+
try:
|
|
1135
|
+
self.console.print("[dim]Transcript mode enabled — streaming raw transcript events.[/dim]")
|
|
1136
|
+
except Exception:
|
|
1137
|
+
pass
|
|
1138
|
+
else:
|
|
1139
|
+
self._transcript_enabled_message_printed = True
|
|
1140
|
+
|
|
1141
|
+
def _ensure_transcript_header(self) -> None:
|
|
1142
|
+
if self._transcript_header_printed:
|
|
1143
|
+
return
|
|
1144
|
+
try:
|
|
1145
|
+
self.console.rule("Transcript Events")
|
|
1146
|
+
except Exception:
|
|
1147
|
+
self._transcript_header_printed = True
|
|
1148
|
+
return
|
|
1149
|
+
self._transcript_header_printed = True
|
|
1150
|
+
|
|
1151
|
+
    def _print_summary_hint(self, force: bool = False) -> None:
        """Show the summary-mode toggle hint.

        Printed at most once unless *force* is True. When a transcript
        controller exists but is disabled, the hint is suppressed (there is
        nothing to toggle to).
        """
        controller = getattr(self, "transcript_controller", None)
        if controller and not getattr(controller, "enabled", False):
            if not force:
                # Pretend we printed so later non-forced calls stay quiet.
                self._summary_hint_printed_once = True
            # NOTE(review): assumed this return covers the whole disabled
            # branch (forced or not) — confirm against the real indentation.
            return
        if not force and self._summary_hint_printed_once:
            return
        try:
            self.console.print(self._summary_hint_message)
        except Exception:
            return
        self._summary_hint_printed_once = True
|
|
1165
|
+
|
|
1166
|
+
    def _render_transcript_backfill(self) -> None:
        """Render any captured events that haven't been shown in transcript mode."""
        # Events past the cursor have not yet been echoed to the transcript view.
        pending = self.state.events[self._transcript_render_cursor :]
        self._ensure_transcript_header()
        if not pending:
            self._print_transcript_hint()
            return

        # Relative timestamps are computed against the stream start, when known.
        baseline = self.state.streaming_started_event_ts
        for ev in pending:
            received_ts = _coerce_received_at(ev.get("received_at"))
            render_debug_event(
                ev,
                self.console,
                received_ts=received_ts,
                baseline_ts=baseline,
            )

        # Advance the cursor so already-rendered events are not replayed.
        self._transcript_render_cursor = len(self.state.events)
        self._print_transcript_hint()
|
|
1186
|
+
|
|
1187
|
+
def _capture_event(self, ev: dict[str, Any], received_at: datetime | None = None) -> None:
|
|
544
1188
|
"""Capture a deep copy of SSE events for transcript replay."""
|
|
545
1189
|
try:
|
|
546
1190
|
captured = json.loads(json.dumps(ev))
|
|
@@ -557,6 +1201,8 @@ class RichStreamRenderer:
|
|
|
557
1201
|
captured["received_at"] = repr(received_at)
|
|
558
1202
|
|
|
559
1203
|
self.state.events.append(captured)
|
|
1204
|
+
if self._transcript_mode_enabled:
|
|
1205
|
+
self._transcript_render_cursor = len(self.state.events)
|
|
560
1206
|
|
|
561
1207
|
def get_aggregated_output(self) -> str:
|
|
562
1208
|
"""Return the concatenated assistant output collected so far."""
|
|
@@ -566,16 +1212,7 @@ class RichStreamRenderer:
|
|
|
566
1212
|
"""Return captured SSE events."""
|
|
567
1213
|
return list(self.state.events)
|
|
568
1214
|
|
|
569
|
-
def
|
|
570
|
-
self, task_id: str | None, context_id: str | None
|
|
571
|
-
) -> None:
|
|
572
|
-
"""Insert thinking gap if needed."""
|
|
573
|
-
# Implementation would track thinking states
|
|
574
|
-
pass
|
|
575
|
-
|
|
576
|
-
def _ensure_tool_panel(
|
|
577
|
-
self, name: str, args: Any, task_id: str, context_id: str
|
|
578
|
-
) -> str:
|
|
1215
|
+
def _ensure_tool_panel(self, name: str, args: Any, task_id: str, context_id: str) -> str:
|
|
579
1216
|
"""Ensure a tool panel exists and return its ID."""
|
|
580
1217
|
formatted_title = format_tool_title(name)
|
|
581
1218
|
is_delegation = is_delegation_tool(name)
|
|
@@ -595,15 +1232,10 @@ class RichStreamRenderer:
|
|
|
595
1232
|
# Add Args section once
|
|
596
1233
|
if args:
|
|
597
1234
|
try:
|
|
598
|
-
args_content = (
|
|
599
|
-
"**Args:**\n```json\n"
|
|
600
|
-
+ json.dumps(args, indent=2)
|
|
601
|
-
+ "\n```\n\n"
|
|
602
|
-
)
|
|
1235
|
+
args_content = "**Args:**\n```json\n" + json.dumps(args, indent=2) + "\n```\n\n"
|
|
603
1236
|
except Exception:
|
|
604
1237
|
args_content = f"**Args:**\n{args}\n\n"
|
|
605
1238
|
self.tool_panels[tool_sid]["chunks"].append(args_content)
|
|
606
|
-
self.tool_order.append(tool_sid)
|
|
607
1239
|
|
|
608
1240
|
return tool_sid
|
|
609
1241
|
|
|
@@ -614,8 +1246,13 @@ class RichStreamRenderer:
|
|
|
614
1246
|
tool_name: str,
|
|
615
1247
|
tool_args: Any,
|
|
616
1248
|
_tool_sid: str,
|
|
1249
|
+
*,
|
|
1250
|
+
tracked_step: Step | None = None,
|
|
617
1251
|
) -> Step | None:
|
|
618
1252
|
"""Start or get a step for a tool."""
|
|
1253
|
+
if tracked_step is not None:
|
|
1254
|
+
return tracked_step
|
|
1255
|
+
|
|
619
1256
|
if is_delegation_tool(tool_name):
|
|
620
1257
|
st = self.steps.start_or_get(
|
|
621
1258
|
task_id=task_id,
|
|
@@ -635,9 +1272,7 @@ class RichStreamRenderer:
|
|
|
635
1272
|
|
|
636
1273
|
# Record server start time for this step if available
|
|
637
1274
|
if st and self.stream_processor.server_elapsed_time is not None:
|
|
638
|
-
self._step_server_start_times[st.step_id] =
|
|
639
|
-
self.stream_processor.server_elapsed_time
|
|
640
|
-
)
|
|
1275
|
+
self._step_server_start_times[st.step_id] = self.stream_processor.server_elapsed_time
|
|
641
1276
|
|
|
642
1277
|
return st
|
|
643
1278
|
|
|
@@ -651,26 +1286,18 @@ class RichStreamRenderer:
|
|
|
651
1286
|
"""Process additional tool calls to avoid duplicates."""
|
|
652
1287
|
for call_name, call_args, _ in tool_calls_info or []:
|
|
653
1288
|
if call_name and call_name != tool_name:
|
|
654
|
-
self._process_single_tool_call(
|
|
655
|
-
call_name, call_args, task_id, context_id
|
|
656
|
-
)
|
|
1289
|
+
self._process_single_tool_call(call_name, call_args, task_id, context_id)
|
|
657
1290
|
|
|
658
|
-
def _process_single_tool_call(
|
|
659
|
-
self, call_name: str, call_args: Any, task_id: str, context_id: str
|
|
660
|
-
) -> None:
|
|
1291
|
+
def _process_single_tool_call(self, call_name: str, call_args: Any, task_id: str, context_id: str) -> None:
|
|
661
1292
|
"""Process a single additional tool call."""
|
|
662
1293
|
self._ensure_tool_panel(call_name, call_args, task_id, context_id)
|
|
663
1294
|
|
|
664
1295
|
st2 = self._create_step_for_tool_call(call_name, call_args, task_id, context_id)
|
|
665
1296
|
|
|
666
1297
|
if self.stream_processor.server_elapsed_time is not None and st2:
|
|
667
|
-
self._step_server_start_times[st2.step_id] =
|
|
668
|
-
self.stream_processor.server_elapsed_time
|
|
669
|
-
)
|
|
1298
|
+
self._step_server_start_times[st2.step_id] = self.stream_processor.server_elapsed_time
|
|
670
1299
|
|
|
671
|
-
def _create_step_for_tool_call(
|
|
672
|
-
self, call_name: str, call_args: Any, task_id: str, context_id: str
|
|
673
|
-
) -> Any:
|
|
1300
|
+
def _create_step_for_tool_call(self, call_name: str, call_args: Any, task_id: str, context_id: str) -> Any:
|
|
674
1301
|
"""Create appropriate step for tool call."""
|
|
675
1302
|
if is_delegation_tool(call_name):
|
|
676
1303
|
return self.steps.start_or_get(
|
|
@@ -689,9 +1316,7 @@ class RichStreamRenderer:
|
|
|
689
1316
|
args=call_args,
|
|
690
1317
|
)
|
|
691
1318
|
|
|
692
|
-
def _detect_tool_completion(
|
|
693
|
-
self, metadata: dict, content: str
|
|
694
|
-
) -> tuple[bool, str | None, Any]:
|
|
1319
|
+
def _detect_tool_completion(self, metadata: dict, content: str) -> tuple[bool, str | None, Any]:
|
|
695
1320
|
"""Detect if a tool has completed and return completion info."""
|
|
696
1321
|
tool_info = metadata.get("tool_info", {}) if isinstance(metadata, dict) else {}
|
|
697
1322
|
|
|
@@ -701,18 +1326,14 @@ class RichStreamRenderer:
|
|
|
701
1326
|
# content like "Completed google_serper"
|
|
702
1327
|
tname = content.replace("Completed ", "").strip()
|
|
703
1328
|
if tname:
|
|
704
|
-
output = (
|
|
705
|
-
tool_info.get("output") if tool_info.get("name") == tname else None
|
|
706
|
-
)
|
|
1329
|
+
output = tool_info.get("output") if tool_info.get("name") == tname else None
|
|
707
1330
|
return True, tname, output
|
|
708
1331
|
elif metadata.get("status") == "finished" and tool_info.get("name"):
|
|
709
1332
|
return True, tool_info.get("name"), tool_info.get("output")
|
|
710
1333
|
|
|
711
1334
|
return False, None, None
|
|
712
1335
|
|
|
713
|
-
def _get_tool_session_id(
|
|
714
|
-
self, finished_tool_name: str, task_id: str, context_id: str
|
|
715
|
-
) -> str:
|
|
1336
|
+
def _get_tool_session_id(self, finished_tool_name: str, task_id: str, context_id: str) -> str:
|
|
716
1337
|
"""Generate tool session ID."""
|
|
717
1338
|
return f"tool_{finished_tool_name}_{task_id}_{context_id}"
|
|
718
1339
|
|
|
@@ -742,7 +1363,7 @@ class RichStreamRenderer:
|
|
|
742
1363
|
meta["duration_seconds"] = dur
|
|
743
1364
|
meta["server_finished_at"] = (
|
|
744
1365
|
self.stream_processor.server_elapsed_time
|
|
745
|
-
if isinstance(self.stream_processor.server_elapsed_time, int
|
|
1366
|
+
if isinstance(self.stream_processor.server_elapsed_time, (int, float))
|
|
746
1367
|
else None
|
|
747
1368
|
)
|
|
748
1369
|
meta["finished_at"] = monotonic()
|
|
@@ -752,9 +1373,7 @@ class RichStreamRenderer:
|
|
|
752
1373
|
) -> None:
|
|
753
1374
|
"""Add tool output to panel metadata."""
|
|
754
1375
|
if finished_tool_output is not None:
|
|
755
|
-
meta["chunks"].append(
|
|
756
|
-
self._format_output_block(finished_tool_output, finished_tool_name)
|
|
757
|
-
)
|
|
1376
|
+
meta["chunks"].append(self._format_output_block(finished_tool_output, finished_tool_name))
|
|
758
1377
|
meta["output"] = finished_tool_output
|
|
759
1378
|
|
|
760
1379
|
def _mark_panel_as_finished(self, meta: dict[str, Any], tool_sid: str) -> None:
|
|
@@ -784,9 +1403,7 @@ class RichStreamRenderer:
|
|
|
784
1403
|
self._mark_panel_as_finished(meta, tool_sid)
|
|
785
1404
|
self._add_tool_output_to_panel(meta, finished_tool_output, finished_tool_name)
|
|
786
1405
|
|
|
787
|
-
def _get_step_duration(
|
|
788
|
-
self, finished_tool_name: str, task_id: str, context_id: str
|
|
789
|
-
) -> float | None:
|
|
1406
|
+
def _get_step_duration(self, finished_tool_name: str, task_id: str, context_id: str) -> float | None:
|
|
790
1407
|
"""Get step duration from tool panels."""
|
|
791
1408
|
tool_sid = f"tool_{finished_tool_name}_{task_id}_{context_id}"
|
|
792
1409
|
return self.tool_panels.get(tool_sid, {}).get("duration_seconds")
|
|
@@ -833,8 +1450,13 @@ class RichStreamRenderer:
|
|
|
833
1450
|
finished_tool_output: Any,
|
|
834
1451
|
task_id: str,
|
|
835
1452
|
context_id: str,
|
|
1453
|
+
*,
|
|
1454
|
+
tracked_step: Step | None = None,
|
|
836
1455
|
) -> None:
|
|
837
1456
|
"""Finish the corresponding step for a completed tool."""
|
|
1457
|
+
if tracked_step is not None:
|
|
1458
|
+
return
|
|
1459
|
+
|
|
838
1460
|
step_duration = self._get_step_duration(finished_tool_name, task_id, context_id)
|
|
839
1461
|
|
|
840
1462
|
if is_delegation_tool(finished_tool_name):
|
|
@@ -856,9 +1478,7 @@ class RichStreamRenderer:
|
|
|
856
1478
|
|
|
857
1479
|
def _should_create_snapshot(self, tool_sid: str) -> bool:
|
|
858
1480
|
"""Check if a snapshot should be created."""
|
|
859
|
-
return self.cfg.append_finished_snapshots and not self.tool_panels.get(
|
|
860
|
-
tool_sid, {}
|
|
861
|
-
).get("snapshot_printed")
|
|
1481
|
+
return self.cfg.append_finished_snapshots and not self.tool_panels.get(tool_sid, {}).get("snapshot_printed")
|
|
862
1482
|
|
|
863
1483
|
def _get_snapshot_title(self, meta: dict[str, Any], finished_tool_name: str) -> str:
|
|
864
1484
|
"""Get the title for the snapshot."""
|
|
@@ -866,7 +1486,7 @@ class RichStreamRenderer:
|
|
|
866
1486
|
|
|
867
1487
|
# Add elapsed time to title
|
|
868
1488
|
dur = meta.get("duration_seconds")
|
|
869
|
-
if isinstance(dur, int
|
|
1489
|
+
if isinstance(dur, (int, float)):
|
|
870
1490
|
elapsed_str = self._format_snapshot_duration(dur)
|
|
871
1491
|
adjusted_title = f"{adjusted_title} · {elapsed_str}"
|
|
872
1492
|
|
|
@@ -903,9 +1523,7 @@ class RichStreamRenderer:
|
|
|
903
1523
|
|
|
904
1524
|
return body_text
|
|
905
1525
|
|
|
906
|
-
def _create_snapshot_panel(
|
|
907
|
-
self, adjusted_title: str, body_text: str, finished_tool_name: str
|
|
908
|
-
) -> Any:
|
|
1526
|
+
def _create_snapshot_panel(self, adjusted_title: str, body_text: str, finished_tool_name: str) -> Any:
|
|
909
1527
|
"""Create the snapshot panel."""
|
|
910
1528
|
return create_tool_panel(
|
|
911
1529
|
title=adjusted_title,
|
|
@@ -920,9 +1538,7 @@ class RichStreamRenderer:
|
|
|
920
1538
|
self.console.print(snapshot_panel)
|
|
921
1539
|
self.tool_panels[tool_sid]["snapshot_printed"] = True
|
|
922
1540
|
|
|
923
|
-
def _create_tool_snapshot(
|
|
924
|
-
self, finished_tool_name: str, task_id: str, context_id: str
|
|
925
|
-
) -> None:
|
|
1541
|
+
def _create_tool_snapshot(self, finished_tool_name: str, task_id: str, context_id: str) -> None:
|
|
926
1542
|
"""Create and print a snapshot for a finished tool."""
|
|
927
1543
|
tool_sid = f"tool_{finished_tool_name}_{task_id}_{context_id}"
|
|
928
1544
|
|
|
@@ -936,9 +1552,7 @@ class RichStreamRenderer:
|
|
|
936
1552
|
body_text = "".join(meta.get("chunks") or [])
|
|
937
1553
|
body_text = self._clamp_snapshot_body(body_text)
|
|
938
1554
|
|
|
939
|
-
snapshot_panel = self._create_snapshot_panel(
|
|
940
|
-
adjusted_title, body_text, finished_tool_name
|
|
941
|
-
)
|
|
1555
|
+
snapshot_panel = self._create_snapshot_panel(adjusted_title, body_text, finished_tool_name)
|
|
942
1556
|
|
|
943
1557
|
self._print_and_mark_snapshot(tool_sid, snapshot_panel)
|
|
944
1558
|
|
|
@@ -949,24 +1563,29 @@ class RichStreamRenderer:
|
|
|
949
1563
|
tool_args: Any,
|
|
950
1564
|
_tool_out: Any,
|
|
951
1565
|
tool_calls_info: list[tuple[str, Any, Any]],
|
|
1566
|
+
*,
|
|
1567
|
+
tracked_step: Step | None = None,
|
|
952
1568
|
) -> None:
|
|
953
1569
|
"""Handle agent step event."""
|
|
954
1570
|
metadata = event.get("metadata", {})
|
|
955
|
-
task_id = event.get("task_id")
|
|
956
|
-
context_id = event.get("context_id")
|
|
1571
|
+
task_id = event.get("task_id") or metadata.get("task_id")
|
|
1572
|
+
context_id = event.get("context_id") or metadata.get("context_id")
|
|
957
1573
|
content = event.get("content", "")
|
|
958
1574
|
|
|
959
1575
|
# Create steps and panels for the primary tool
|
|
960
1576
|
if tool_name:
|
|
961
|
-
tool_sid = self._ensure_tool_panel(
|
|
962
|
-
|
|
1577
|
+
tool_sid = self._ensure_tool_panel(tool_name, tool_args, task_id, context_id)
|
|
1578
|
+
self._start_tool_step(
|
|
1579
|
+
task_id,
|
|
1580
|
+
context_id,
|
|
1581
|
+
tool_name,
|
|
1582
|
+
tool_args,
|
|
1583
|
+
tool_sid,
|
|
1584
|
+
tracked_step=tracked_step,
|
|
963
1585
|
)
|
|
964
|
-
self._start_tool_step(task_id, context_id, tool_name, tool_args, tool_sid)
|
|
965
1586
|
|
|
966
1587
|
# Handle additional tool calls
|
|
967
|
-
self._process_additional_tool_calls(
|
|
968
|
-
tool_calls_info, tool_name, task_id, context_id
|
|
969
|
-
)
|
|
1588
|
+
self._process_additional_tool_calls(tool_calls_info, tool_name, task_id, context_id)
|
|
970
1589
|
|
|
971
1590
|
# Check for tool completion
|
|
972
1591
|
(
|
|
@@ -976,11 +1595,13 @@ class RichStreamRenderer:
|
|
|
976
1595
|
) = self._detect_tool_completion(metadata, content)
|
|
977
1596
|
|
|
978
1597
|
if is_tool_finished and finished_tool_name:
|
|
979
|
-
self._finish_tool_panel(
|
|
980
|
-
finished_tool_name, finished_tool_output, task_id, context_id
|
|
981
|
-
)
|
|
1598
|
+
self._finish_tool_panel(finished_tool_name, finished_tool_output, task_id, context_id)
|
|
982
1599
|
self._finish_tool_step(
|
|
983
|
-
finished_tool_name,
|
|
1600
|
+
finished_tool_name,
|
|
1601
|
+
finished_tool_output,
|
|
1602
|
+
task_id,
|
|
1603
|
+
context_id,
|
|
1604
|
+
tracked_step=tracked_step,
|
|
984
1605
|
)
|
|
985
1606
|
self._create_tool_snapshot(finished_tool_name, task_id, context_id)
|
|
986
1607
|
|
|
@@ -1030,9 +1651,7 @@ class RichStreamRenderer:
|
|
|
1030
1651
|
|
|
1031
1652
|
def _get_analysis_progress_info(self) -> dict[str, Any]:
|
|
1032
1653
|
total_steps = len(self.steps.order)
|
|
1033
|
-
completed_steps = sum(
|
|
1034
|
-
1 for sid in self.steps.order if is_step_finished(self.steps.by_id[sid])
|
|
1035
|
-
)
|
|
1654
|
+
completed_steps = sum(1 for sid in self.steps.order if is_step_finished(self.steps.by_id[sid]))
|
|
1036
1655
|
current_step = None
|
|
1037
1656
|
for sid in self.steps.order:
|
|
1038
1657
|
if not is_step_finished(self.steps.by_id[sid]):
|
|
@@ -1040,13 +1659,11 @@ class RichStreamRenderer:
|
|
|
1040
1659
|
break
|
|
1041
1660
|
# Prefer server elapsed time when available
|
|
1042
1661
|
elapsed = 0.0
|
|
1043
|
-
if isinstance(self.stream_processor.server_elapsed_time, int
|
|
1662
|
+
if isinstance(self.stream_processor.server_elapsed_time, (int, float)):
|
|
1044
1663
|
elapsed = float(self.stream_processor.server_elapsed_time)
|
|
1045
1664
|
elif self._started_at is not None:
|
|
1046
1665
|
elapsed = monotonic() - self._started_at
|
|
1047
|
-
progress_percent = (
|
|
1048
|
-
int((completed_steps / total_steps) * 100) if total_steps else 0
|
|
1049
|
-
)
|
|
1666
|
+
progress_percent = int((completed_steps / total_steps) * 100) if total_steps else 0
|
|
1050
1667
|
return {
|
|
1051
1668
|
"total_steps": total_steps,
|
|
1052
1669
|
"completed_steps": completed_steps,
|
|
@@ -1100,29 +1717,42 @@ class RichStreamRenderer:
|
|
|
1100
1717
|
def _format_step_status(self, step: Step) -> str:
|
|
1101
1718
|
"""Format step status with elapsed time or duration."""
|
|
1102
1719
|
if is_step_finished(step):
|
|
1103
|
-
|
|
1104
|
-
return LESS_THAN_1MS
|
|
1105
|
-
elif step.duration_ms >= 1000:
|
|
1106
|
-
return f"[{step.duration_ms / 1000:.2f}s]"
|
|
1107
|
-
elif step.duration_ms > 0:
|
|
1108
|
-
return f"[{step.duration_ms}ms]"
|
|
1109
|
-
return LESS_THAN_1MS
|
|
1720
|
+
return self._format_finished_badge(step)
|
|
1110
1721
|
else:
|
|
1111
1722
|
# Calculate elapsed time for running steps
|
|
1112
1723
|
elapsed = self._calculate_step_elapsed_time(step)
|
|
1113
|
-
if elapsed >= 1:
|
|
1724
|
+
if elapsed >= 0.1:
|
|
1114
1725
|
return f"[{elapsed:.2f}s]"
|
|
1115
|
-
ms = int(elapsed * 1000)
|
|
1116
|
-
|
|
1726
|
+
ms = int(round(elapsed * 1000))
|
|
1727
|
+
if ms <= 0:
|
|
1728
|
+
return ""
|
|
1729
|
+
return f"[{ms}ms]"
|
|
1730
|
+
|
|
1731
|
+
def _format_finished_badge(self, step: Step) -> str:
|
|
1732
|
+
"""Compose duration badge for finished steps including source tagging."""
|
|
1733
|
+
if getattr(step, "duration_unknown", False) is True:
|
|
1734
|
+
payload = "??s"
|
|
1735
|
+
else:
|
|
1736
|
+
duration_ms = step.duration_ms
|
|
1737
|
+
if duration_ms is None:
|
|
1738
|
+
payload = "<1ms"
|
|
1739
|
+
elif duration_ms < 0:
|
|
1740
|
+
payload = "<1ms"
|
|
1741
|
+
elif duration_ms >= 100:
|
|
1742
|
+
payload = f"{duration_ms / 1000:.2f}s"
|
|
1743
|
+
elif duration_ms > 0:
|
|
1744
|
+
payload = f"{duration_ms}ms"
|
|
1745
|
+
else:
|
|
1746
|
+
payload = "<1ms"
|
|
1747
|
+
|
|
1748
|
+
return f"[{payload}]"
|
|
1117
1749
|
|
|
1118
1750
|
def _calculate_step_elapsed_time(self, step: Step) -> float:
|
|
1119
1751
|
"""Calculate elapsed time for a running step."""
|
|
1120
1752
|
server_elapsed = self.stream_processor.server_elapsed_time
|
|
1121
1753
|
server_start = self._step_server_start_times.get(step.step_id)
|
|
1122
1754
|
|
|
1123
|
-
if isinstance(server_elapsed, int
|
|
1124
|
-
server_start, int | float
|
|
1125
|
-
):
|
|
1755
|
+
if isinstance(server_elapsed, (int, float)) and isinstance(server_start, (int, float)):
|
|
1126
1756
|
return max(0.0, float(server_elapsed) - float(server_start))
|
|
1127
1757
|
|
|
1128
1758
|
try:
|
|
@@ -1136,6 +1766,21 @@ class RichStreamRenderer:
|
|
|
1136
1766
|
return step.name
|
|
1137
1767
|
return "thinking..." if step.kind == "agent" else f"{step.kind} step"
|
|
1138
1768
|
|
|
1769
|
+
def _resolve_step_label(self, step: Step) -> str:
|
|
1770
|
+
"""Return the display label for a step with sensible fallbacks."""
|
|
1771
|
+
raw_label = getattr(step, "display_label", None)
|
|
1772
|
+
label = raw_label.strip() if isinstance(raw_label, str) else ""
|
|
1773
|
+
if label:
|
|
1774
|
+
return normalise_display_label(label)
|
|
1775
|
+
|
|
1776
|
+
if not (step.name or "").strip():
|
|
1777
|
+
return "Unknown step detail"
|
|
1778
|
+
|
|
1779
|
+
icon = self._get_step_icon(step.kind)
|
|
1780
|
+
base_name = self._get_step_display_name(step)
|
|
1781
|
+
fallback = " ".join(part for part in (icon, base_name) if part).strip()
|
|
1782
|
+
return normalise_display_label(fallback)
|
|
1783
|
+
|
|
1139
1784
|
def _check_parallel_tools(self) -> dict[tuple[str | None, str | None], list]:
|
|
1140
1785
|
"""Check for parallel running tools."""
|
|
1141
1786
|
running_by_ctx: dict[tuple[str | None, str | None], list] = {}
|
|
@@ -1158,69 +1803,331 @@ class RichStreamRenderer:
|
|
|
1158
1803
|
def _compose_step_renderable(
|
|
1159
1804
|
self,
|
|
1160
1805
|
step: Step,
|
|
1161
|
-
|
|
1806
|
+
branch_state: tuple[bool, ...],
|
|
1162
1807
|
) -> Any:
|
|
1163
|
-
"""Compose a single renderable for the steps panel."""
|
|
1164
|
-
|
|
1165
|
-
|
|
1166
|
-
|
|
1167
|
-
|
|
1168
|
-
if (
|
|
1169
|
-
not finished
|
|
1170
|
-
and step.kind == "tool"
|
|
1171
|
-
and self._is_parallel_tool(step, running_by_ctx)
|
|
1172
|
-
):
|
|
1173
|
-
status_br = status_br.replace("]", " 🔄]")
|
|
1808
|
+
"""Compose a single renderable for the hierarchical steps panel."""
|
|
1809
|
+
prefix = build_connector_prefix(branch_state)
|
|
1810
|
+
text_line = self._build_step_text_line(step, prefix)
|
|
1811
|
+
renderables = self._wrap_step_text(step, text_line)
|
|
1174
1812
|
|
|
1175
|
-
|
|
1176
|
-
|
|
1177
|
-
|
|
1178
|
-
|
|
1179
|
-
|
|
1180
|
-
|
|
1813
|
+
args_renderable = self._build_args_renderable(step, prefix)
|
|
1814
|
+
if args_renderable is not None:
|
|
1815
|
+
renderables.append(args_renderable)
|
|
1816
|
+
|
|
1817
|
+
return self._collapse_renderables(renderables)
|
|
1818
|
+
|
|
1819
|
+
def _build_step_text_line(
|
|
1820
|
+
self,
|
|
1821
|
+
step: Step,
|
|
1822
|
+
prefix: str,
|
|
1823
|
+
) -> Text:
|
|
1824
|
+
"""Create the textual portion of a step renderable."""
|
|
1825
|
+
text_line = Text()
|
|
1826
|
+
text_line.append(prefix, style="dim")
|
|
1827
|
+
text_line.append(self._resolve_step_label(step))
|
|
1828
|
+
|
|
1829
|
+
status_badge = self._format_step_status(step)
|
|
1830
|
+
self._append_status_badge(text_line, step, status_badge)
|
|
1831
|
+
self._append_state_glyph(text_line, step)
|
|
1832
|
+
return text_line
|
|
1833
|
+
|
|
1834
|
+
def _append_status_badge(self, text_line: Text, step: Step, status_badge: str) -> None:
|
|
1835
|
+
"""Append the formatted status badge when available."""
|
|
1836
|
+
glyph_key = getattr(step, "status_icon", None)
|
|
1837
|
+
glyph = glyph_for_status(glyph_key)
|
|
1838
|
+
|
|
1839
|
+
if status_badge:
|
|
1181
1840
|
text_line.append(" ")
|
|
1182
|
-
text_line.append(
|
|
1183
|
-
|
|
1184
|
-
|
|
1841
|
+
text_line.append(status_badge, style="cyan")
|
|
1842
|
+
|
|
1843
|
+
if glyph:
|
|
1844
|
+
text_line.append(" ")
|
|
1845
|
+
style = self._status_icon_style(glyph_key)
|
|
1846
|
+
if style:
|
|
1847
|
+
text_line.append(glyph, style=style)
|
|
1848
|
+
else:
|
|
1849
|
+
text_line.append(glyph)
|
|
1850
|
+
|
|
1851
|
+
def _append_state_glyph(self, text_line: Text, step: Step) -> None:
|
|
1852
|
+
"""Append glyph/failure markers in a single place."""
|
|
1853
|
+
failure_reason = (step.failure_reason or "").strip()
|
|
1854
|
+
if failure_reason:
|
|
1855
|
+
text_line.append(f" {failure_reason}")
|
|
1856
|
+
|
|
1857
|
+
@staticmethod
|
|
1858
|
+
def _status_icon_style(icon_key: str | None) -> str | None:
|
|
1859
|
+
"""Return style for a given status icon."""
|
|
1860
|
+
if not icon_key:
|
|
1861
|
+
return None
|
|
1862
|
+
return STATUS_ICON_STYLES.get(icon_key)
|
|
1863
|
+
|
|
1864
|
+
def _wrap_step_text(self, step: Step, text_line: Text) -> list[Any]:
|
|
1865
|
+
"""Return the base text, optionally decorated with a trailing spinner."""
|
|
1866
|
+
if getattr(step, "status", None) == "running":
|
|
1867
|
+
spinner = self._step_spinners.get(step.step_id)
|
|
1868
|
+
if spinner is None:
|
|
1869
|
+
spinner = Spinner("dots", style="dim")
|
|
1870
|
+
self._step_spinners[step.step_id] = spinner
|
|
1871
|
+
return [TrailingSpinnerLine(text_line, spinner)]
|
|
1872
|
+
|
|
1873
|
+
self._step_spinners.pop(step.step_id, None)
|
|
1874
|
+
return [text_line]
|
|
1875
|
+
|
|
1876
|
+
def _collapse_renderables(self, renderables: list[Any]) -> Any:
|
|
1877
|
+
"""Collapse a list of renderables into a single object."""
|
|
1878
|
+
if not renderables:
|
|
1879
|
+
return None
|
|
1880
|
+
|
|
1881
|
+
if len(renderables) == 1:
|
|
1882
|
+
return renderables[0]
|
|
1185
1883
|
|
|
1186
|
-
|
|
1187
|
-
|
|
1884
|
+
return Group(*renderables)
|
|
1885
|
+
|
|
1886
|
+
def _build_args_renderable(self, step: Step, prefix: str) -> Text | Group | None:
|
|
1887
|
+
"""Build a dimmed argument line for tool or agent steps."""
|
|
1888
|
+
if step.kind not in {"tool", "delegate", "agent"}:
|
|
1889
|
+
return None
|
|
1890
|
+
if step.kind == "agent" and step.parent_id:
|
|
1891
|
+
return None
|
|
1892
|
+
formatted_args = self._format_step_args(step)
|
|
1893
|
+
if not formatted_args:
|
|
1894
|
+
return None
|
|
1895
|
+
if isinstance(formatted_args, list):
|
|
1896
|
+
return self._build_arg_list(prefix, formatted_args)
|
|
1897
|
+
|
|
1898
|
+
args_text = Text()
|
|
1899
|
+
args_text.append(prefix, style="dim")
|
|
1900
|
+
args_text.append(" " * 5)
|
|
1901
|
+
args_text.append(formatted_args, style="dim")
|
|
1902
|
+
return args_text
|
|
1903
|
+
|
|
1904
|
+
def _build_arg_list(self, prefix: str, formatted_args: list[str | tuple[int, str]]) -> Group | None:
|
|
1905
|
+
"""Render multi-line argument entries preserving indentation."""
|
|
1906
|
+
arg_lines: list[Text] = []
|
|
1907
|
+
for indent_level, text_value in self._iter_arg_entries(formatted_args):
|
|
1908
|
+
arg_text = Text()
|
|
1909
|
+
arg_text.append(prefix, style="dim")
|
|
1910
|
+
arg_text.append(" " * 5)
|
|
1911
|
+
arg_text.append(" " * (indent_level * 2))
|
|
1912
|
+
arg_text.append(text_value, style="dim")
|
|
1913
|
+
arg_lines.append(arg_text)
|
|
1914
|
+
if not arg_lines:
|
|
1915
|
+
return None
|
|
1916
|
+
return Group(*arg_lines)
|
|
1917
|
+
|
|
1918
|
+
@staticmethod
|
|
1919
|
+
def _iter_arg_entries(
|
|
1920
|
+
formatted_args: list[str | tuple[int, str]],
|
|
1921
|
+
) -> Iterable[tuple[int, str]]:
|
|
1922
|
+
"""Yield normalized indentation/value pairs for argument entries."""
|
|
1923
|
+
for value in formatted_args:
|
|
1924
|
+
if isinstance(value, tuple) and len(value) == 2:
|
|
1925
|
+
indent_level, text_value = value
|
|
1926
|
+
yield indent_level, str(text_value)
|
|
1927
|
+
else:
|
|
1928
|
+
yield 0, str(value)
|
|
1929
|
+
|
|
1930
|
+
def _format_step_args(self, step: Step) -> str | list[str] | list[tuple[int, str]] | None:
|
|
1931
|
+
"""Return a printable representation of tool arguments."""
|
|
1932
|
+
args = getattr(step, "args", None)
|
|
1933
|
+
if args is None:
|
|
1934
|
+
return None
|
|
1935
|
+
|
|
1936
|
+
if isinstance(args, dict):
|
|
1937
|
+
return self._format_dict_args(args, step=step)
|
|
1188
1938
|
|
|
1189
|
-
|
|
1190
|
-
|
|
1939
|
+
if isinstance(args, (list, tuple)):
|
|
1940
|
+
return self._safe_pretty_args(list(args))
|
|
1941
|
+
|
|
1942
|
+
if isinstance(args, (str, int, float)):
|
|
1943
|
+
return self._stringify_args(args)
|
|
1944
|
+
|
|
1945
|
+
return None
|
|
1946
|
+
|
|
1947
|
+
def _format_dict_args(self, args: dict[str, Any], *, step: Step) -> str | list[str] | list[tuple[int, str]] | None:
|
|
1948
|
+
"""Format dictionary arguments with guardrails."""
|
|
1949
|
+
if not args:
|
|
1950
|
+
return None
|
|
1951
|
+
|
|
1952
|
+
masked_args = self._redact_arg_payload(args)
|
|
1953
|
+
|
|
1954
|
+
if self._should_collapse_single_query(step):
|
|
1955
|
+
single_query = self._extract_single_query_arg(masked_args)
|
|
1956
|
+
if single_query:
|
|
1957
|
+
return single_query
|
|
1958
|
+
|
|
1959
|
+
return self._format_dict_arg_lines(masked_args)
|
|
1960
|
+
|
|
1961
|
+
@staticmethod
|
|
1962
|
+
def _extract_single_query_arg(args: dict[str, Any]) -> str | None:
|
|
1963
|
+
"""Return a trimmed query argument when it is the only entry."""
|
|
1964
|
+
if len(args) != 1:
|
|
1965
|
+
return None
|
|
1966
|
+
key, value = next(iter(args.items()))
|
|
1967
|
+
if key != "query" or not isinstance(value, str):
|
|
1968
|
+
return None
|
|
1969
|
+
stripped = value.strip()
|
|
1970
|
+
return stripped or None
|
|
1971
|
+
|
|
1972
|
+
@staticmethod
|
|
1973
|
+
def _redact_arg_payload(args: dict[str, Any]) -> dict[str, Any]:
|
|
1974
|
+
"""Apply best-effort masking before rendering arguments."""
|
|
1975
|
+
try:
|
|
1976
|
+
cleaned = redact_sensitive(args)
|
|
1977
|
+
return cleaned if isinstance(cleaned, dict) else args
|
|
1978
|
+
except Exception:
|
|
1979
|
+
return args
|
|
1980
|
+
|
|
1981
|
+
@staticmethod
|
|
1982
|
+
def _should_collapse_single_query(step: Step) -> bool:
|
|
1983
|
+
"""Return True when we should display raw query text."""
|
|
1984
|
+
if step.kind == "agent":
|
|
1985
|
+
return True
|
|
1986
|
+
if step.kind == "delegate":
|
|
1987
|
+
return True
|
|
1988
|
+
return False
|
|
1989
|
+
|
|
1990
|
+
def _format_dict_arg_lines(self, args: dict[str, Any]) -> list[tuple[int, str]] | None:
|
|
1991
|
+
"""Render dictionary arguments as nested YAML-style lines."""
|
|
1992
|
+
lines: list[tuple[int, str]] = []
|
|
1993
|
+
for raw_key, value in args.items():
|
|
1994
|
+
key = str(raw_key)
|
|
1995
|
+
lines.extend(self._format_nested_entry(key, value, indent=0))
|
|
1996
|
+
return lines or None
|
|
1997
|
+
|
|
1998
|
+
def _format_nested_entry(self, key: str, value: Any, indent: int) -> list[tuple[int, str]]:
|
|
1999
|
+
"""Format a mapping entry recursively."""
|
|
2000
|
+
lines: list[tuple[int, str]] = []
|
|
2001
|
+
|
|
2002
|
+
if isinstance(value, dict):
|
|
2003
|
+
if value:
|
|
2004
|
+
lines.append((indent, f"{key}:"))
|
|
2005
|
+
lines.extend(self._format_nested_mapping(value, indent + 1))
|
|
2006
|
+
else:
|
|
2007
|
+
lines.append((indent, f"{key}: {{}}"))
|
|
2008
|
+
return lines
|
|
2009
|
+
|
|
2010
|
+
if isinstance(value, (list, tuple, set)):
|
|
2011
|
+
seq_lines = self._format_sequence_entries(list(value), indent + 1)
|
|
2012
|
+
if seq_lines:
|
|
2013
|
+
lines.append((indent, f"{key}:"))
|
|
2014
|
+
lines.extend(seq_lines)
|
|
2015
|
+
else:
|
|
2016
|
+
lines.append((indent, f"{key}: []"))
|
|
2017
|
+
return lines
|
|
2018
|
+
|
|
2019
|
+
formatted_value = self._format_arg_value(value)
|
|
2020
|
+
if formatted_value is not None:
|
|
2021
|
+
lines.append((indent, f"{key}: {formatted_value}"))
|
|
2022
|
+
return lines
|
|
2023
|
+
|
|
2024
|
+
def _format_nested_mapping(self, mapping: dict[str, Any], indent: int) -> list[tuple[int, str]]:
|
|
2025
|
+
"""Format nested dictionary values."""
|
|
2026
|
+
nested_lines: list[tuple[int, str]] = []
|
|
2027
|
+
for raw_key, value in mapping.items():
|
|
2028
|
+
key = str(raw_key)
|
|
2029
|
+
nested_lines.extend(self._format_nested_entry(key, value, indent))
|
|
2030
|
+
return nested_lines
|
|
2031
|
+
|
|
2032
|
+
def _format_sequence_entries(self, sequence: list[Any], indent: int) -> list[tuple[int, str]]:
|
|
2033
|
+
"""Format list/tuple/set values with YAML-style bullets."""
|
|
2034
|
+
if not sequence:
|
|
2035
|
+
return []
|
|
2036
|
+
|
|
2037
|
+
lines: list[tuple[int, str]] = []
|
|
2038
|
+
for item in sequence:
|
|
2039
|
+
lines.extend(self._format_sequence_item(item, indent))
|
|
2040
|
+
return lines
|
|
2041
|
+
|
|
2042
|
+
def _format_sequence_item(self, item: Any, indent: int) -> list[tuple[int, str]]:
|
|
2043
|
+
"""Format a single list entry."""
|
|
2044
|
+
if isinstance(item, dict):
|
|
2045
|
+
return self._format_dict_sequence_item(item, indent)
|
|
2046
|
+
|
|
2047
|
+
if isinstance(item, (list, tuple, set)):
|
|
2048
|
+
return self._format_nested_sequence_item(list(item), indent)
|
|
2049
|
+
|
|
2050
|
+
formatted = self._format_arg_value(item)
|
|
2051
|
+
if formatted is not None:
|
|
2052
|
+
return [(indent, f"- {formatted}")]
|
|
2053
|
+
return []
|
|
2054
|
+
|
|
2055
|
+
def _format_dict_sequence_item(self, mapping: dict[str, Any], indent: int) -> list[tuple[int, str]]:
|
|
2056
|
+
"""Format a dictionary entry within a list."""
|
|
2057
|
+
child_lines = self._format_nested_mapping(mapping, indent + 1)
|
|
2058
|
+
if child_lines:
|
|
2059
|
+
return self._prepend_sequence_prefix(child_lines, indent)
|
|
2060
|
+
return [(indent, "- {}")]
|
|
2061
|
+
|
|
2062
|
+
def _format_nested_sequence_item(self, sequence: list[Any], indent: int) -> list[tuple[int, str]]:
|
|
2063
|
+
"""Format a nested sequence entry within a list."""
|
|
2064
|
+
child_lines = self._format_sequence_entries(sequence, indent + 1)
|
|
2065
|
+
if child_lines:
|
|
2066
|
+
return self._prepend_sequence_prefix(child_lines, indent)
|
|
2067
|
+
return [(indent, "- []")]
|
|
2068
|
+
|
|
2069
|
+
@staticmethod
|
|
2070
|
+
def _prepend_sequence_prefix(child_lines: list[tuple[int, str]], indent: int) -> list[tuple[int, str]]:
|
|
2071
|
+
"""Attach a sequence bullet to the first child line."""
|
|
2072
|
+
_, first_text = child_lines[0]
|
|
2073
|
+
prefixed: list[tuple[int, str]] = [(indent, f"- {first_text}")]
|
|
2074
|
+
prefixed.extend(child_lines[1:])
|
|
2075
|
+
return prefixed
|
|
2076
|
+
|
|
2077
|
+
def _format_arg_value(self, value: Any) -> str | None:
|
|
2078
|
+
"""Format a single argument value with per-value truncation."""
|
|
2079
|
+
if value is None:
|
|
2080
|
+
return "null"
|
|
2081
|
+
if isinstance(value, (bool, int, float)):
|
|
2082
|
+
return json.dumps(value, ensure_ascii=False)
|
|
2083
|
+
if isinstance(value, str):
|
|
2084
|
+
return self._format_string_arg_value(value)
|
|
2085
|
+
return _truncate_display(str(value), limit=ARGS_VALUE_MAX_LEN)
|
|
2086
|
+
|
|
2087
|
+
@staticmethod
|
|
2088
|
+
def _format_string_arg_value(value: str) -> str:
|
|
2089
|
+
"""Return a trimmed, quoted representation of a string argument."""
|
|
2090
|
+
sanitised = value.replace("\n", " ").strip()
|
|
2091
|
+
sanitised = sanitised.replace('"', '\\"')
|
|
2092
|
+
trimmed = _truncate_display(sanitised, limit=ARGS_VALUE_MAX_LEN)
|
|
2093
|
+
return f'"{trimmed}"'
|
|
2094
|
+
|
|
2095
|
+
@staticmethod
|
|
2096
|
+
def _safe_pretty_args(args: dict[str, Any]) -> str | None:
|
|
2097
|
+
"""Defensively format argument dictionaries."""
|
|
2098
|
+
try:
|
|
2099
|
+
return pretty_args(args, max_len=160)
|
|
2100
|
+
except Exception:
|
|
2101
|
+
return str(args)
|
|
2102
|
+
|
|
2103
|
+
@staticmethod
|
|
2104
|
+
def _stringify_args(args: Any) -> str | None:
|
|
2105
|
+
"""Format non-dictionary argument payloads."""
|
|
2106
|
+
text = str(args).strip()
|
|
2107
|
+
if not text:
|
|
2108
|
+
return None
|
|
2109
|
+
return _truncate_display(text)
|
|
1191
2110
|
|
|
1192
2111
|
def _render_steps_text(self) -> Any:
|
|
1193
2112
|
"""Render the steps panel content."""
|
|
1194
2113
|
if not (self.steps.order or self.steps.children):
|
|
1195
2114
|
return Text("No steps yet", style="dim")
|
|
1196
2115
|
|
|
1197
|
-
running_by_ctx = self._check_parallel_tools()
|
|
1198
2116
|
renderables: list[Any] = []
|
|
1199
|
-
for
|
|
1200
|
-
|
|
1201
|
-
|
|
2117
|
+
for step_id, branch_state in self.steps.iter_tree():
|
|
2118
|
+
step = self.steps.by_id.get(step_id)
|
|
2119
|
+
if not step:
|
|
2120
|
+
continue
|
|
2121
|
+
renderable = self._compose_step_renderable(step, branch_state)
|
|
2122
|
+
if renderable is not None:
|
|
2123
|
+
renderables.append(renderable)
|
|
1202
2124
|
|
|
1203
2125
|
if not renderables:
|
|
1204
2126
|
return Text("No steps yet", style="dim")
|
|
1205
2127
|
|
|
1206
2128
|
return Group(*renderables)
|
|
1207
2129
|
|
|
1208
|
-
def
|
|
1209
|
-
"""Check if a finished panel should be skipped."""
|
|
1210
|
-
if status != "finished":
|
|
1211
|
-
return False
|
|
1212
|
-
|
|
1213
|
-
if getattr(self.cfg, "append_finished_snapshots", False):
|
|
1214
|
-
return True
|
|
1215
|
-
|
|
1216
|
-
return (
|
|
1217
|
-
not self.state.finalizing_ui
|
|
1218
|
-
and sid not in self.stream_processor.current_event_finished_panels
|
|
1219
|
-
)
|
|
1220
|
-
|
|
1221
|
-
def _update_final_duration(
|
|
1222
|
-
self, duration: float | None, *, overwrite: bool = False
|
|
1223
|
-
) -> None:
|
|
2130
|
+
def _update_final_duration(self, duration: float | None, *, overwrite: bool = False) -> None:
|
|
1224
2131
|
"""Store formatted duration for eventual final panels."""
|
|
1225
2132
|
if duration is None:
|
|
1226
2133
|
return
|
|
@@ -1240,20 +2147,7 @@ class RichStreamRenderer:
|
|
|
1240
2147
|
|
|
1241
2148
|
self.state.final_duration_seconds = duration_val
|
|
1242
2149
|
self.state.final_duration_text = self._format_elapsed_time(duration_val)
|
|
1243
|
-
|
|
1244
|
-
def _calculate_elapsed_time(self, meta: dict[str, Any]) -> str:
|
|
1245
|
-
"""Calculate elapsed time string for running tools."""
|
|
1246
|
-
server_elapsed = self.stream_processor.server_elapsed_time
|
|
1247
|
-
server_start = meta.get("server_started_at")
|
|
1248
|
-
|
|
1249
|
-
if isinstance(server_elapsed, int | float) and isinstance(
|
|
1250
|
-
server_start, int | float
|
|
1251
|
-
):
|
|
1252
|
-
elapsed = max(0.0, float(server_elapsed) - float(server_start))
|
|
1253
|
-
else:
|
|
1254
|
-
elapsed = max(0.0, monotonic() - (meta.get("started_at") or 0.0))
|
|
1255
|
-
|
|
1256
|
-
return self._format_elapsed_time(elapsed)
|
|
2150
|
+
self._apply_root_duration(duration_val)
|
|
1257
2151
|
|
|
1258
2152
|
def _format_elapsed_time(self, elapsed: float) -> str:
|
|
1259
2153
|
"""Format elapsed time as a readable string."""
|
|
@@ -1264,110 +2158,10 @@ class RichStreamRenderer:
|
|
|
1264
2158
|
else:
|
|
1265
2159
|
return "<1ms"
|
|
1266
2160
|
|
|
1267
|
-
def _calculate_finished_duration(self, meta: dict[str, Any]) -> str | None:
|
|
1268
|
-
"""Calculate duration string for finished tools."""
|
|
1269
|
-
dur = meta.get("duration_seconds")
|
|
1270
|
-
if isinstance(dur, int | float):
|
|
1271
|
-
return self._format_elapsed_time(dur)
|
|
1272
|
-
|
|
1273
|
-
try:
|
|
1274
|
-
server_now = self.stream_processor.server_elapsed_time
|
|
1275
|
-
server_start = meta.get("server_started_at")
|
|
1276
|
-
if isinstance(server_now, int | float) and isinstance(
|
|
1277
|
-
server_start, int | float
|
|
1278
|
-
):
|
|
1279
|
-
dur = max(0.0, float(server_now) - float(server_start))
|
|
1280
|
-
elif meta.get("started_at") is not None:
|
|
1281
|
-
dur = max(0.0, float(monotonic() - meta.get("started_at")))
|
|
1282
|
-
except Exception:
|
|
1283
|
-
dur = None
|
|
1284
|
-
|
|
1285
|
-
return self._format_elapsed_time(dur) if isinstance(dur, int | float) else None
|
|
1286
|
-
|
|
1287
|
-
def _process_running_tool_panel(
|
|
1288
|
-
self,
|
|
1289
|
-
title: str,
|
|
1290
|
-
meta: dict[str, Any],
|
|
1291
|
-
body: str,
|
|
1292
|
-
*,
|
|
1293
|
-
include_spinner: bool = False,
|
|
1294
|
-
) -> tuple[str, str] | tuple[str, str, str | None]:
|
|
1295
|
-
"""Process a running tool panel."""
|
|
1296
|
-
elapsed_str = self._calculate_elapsed_time(meta)
|
|
1297
|
-
adjusted_title = f"{title} · {elapsed_str}"
|
|
1298
|
-
chip = f"⏱ {elapsed_str}"
|
|
1299
|
-
spinner_message: str | None = None
|
|
1300
|
-
|
|
1301
|
-
if not body.strip():
|
|
1302
|
-
body = ""
|
|
1303
|
-
spinner_message = f"{title} running... {elapsed_str}"
|
|
1304
|
-
else:
|
|
1305
|
-
body = f"{body}\n\n{chip}"
|
|
1306
|
-
|
|
1307
|
-
if include_spinner:
|
|
1308
|
-
return adjusted_title, body, spinner_message
|
|
1309
|
-
return adjusted_title, body
|
|
1310
|
-
|
|
1311
|
-
def _process_finished_tool_panel(self, title: str, meta: dict[str, Any]) -> str:
|
|
1312
|
-
"""Process a finished tool panel."""
|
|
1313
|
-
duration_str = self._calculate_finished_duration(meta)
|
|
1314
|
-
return f"{title} · {duration_str}" if duration_str else title
|
|
1315
|
-
|
|
1316
|
-
def _create_tool_panel_for_session(
|
|
1317
|
-
self, sid: str, meta: dict[str, Any]
|
|
1318
|
-
) -> AIPPanel | None:
|
|
1319
|
-
"""Create a single tool panel for the session."""
|
|
1320
|
-
title = meta.get("title") or "Tool"
|
|
1321
|
-
status = meta.get("status") or "running"
|
|
1322
|
-
chunks = meta.get("chunks") or []
|
|
1323
|
-
is_delegation = bool(meta.get("is_delegation"))
|
|
1324
|
-
|
|
1325
|
-
if self._should_skip_finished_panel(sid, status):
|
|
1326
|
-
return None
|
|
1327
|
-
|
|
1328
|
-
body = "".join(chunks)
|
|
1329
|
-
adjusted_title = title
|
|
1330
|
-
|
|
1331
|
-
spinner_message: str | None = None
|
|
1332
|
-
|
|
1333
|
-
if status == "running":
|
|
1334
|
-
adjusted_title, body, spinner_message = self._process_running_tool_panel(
|
|
1335
|
-
title, meta, body, include_spinner=True
|
|
1336
|
-
)
|
|
1337
|
-
elif status == "finished":
|
|
1338
|
-
adjusted_title = self._process_finished_tool_panel(title, meta)
|
|
1339
|
-
|
|
1340
|
-
return create_tool_panel(
|
|
1341
|
-
title=adjusted_title,
|
|
1342
|
-
content=body,
|
|
1343
|
-
status=status,
|
|
1344
|
-
theme=self.cfg.theme,
|
|
1345
|
-
is_delegation=is_delegation,
|
|
1346
|
-
spinner_message=spinner_message,
|
|
1347
|
-
)
|
|
1348
|
-
|
|
1349
|
-
def _render_tool_panels(self) -> list[AIPPanel]:
|
|
1350
|
-
"""Render tool execution output panels."""
|
|
1351
|
-
if not getattr(self.cfg, "show_delegate_tool_panels", False):
|
|
1352
|
-
return []
|
|
1353
|
-
panels: list[AIPPanel] = []
|
|
1354
|
-
for sid in self.tool_order:
|
|
1355
|
-
meta = self.tool_panels.get(sid) or {}
|
|
1356
|
-
panel = self._create_tool_panel_for_session(sid, meta)
|
|
1357
|
-
if panel:
|
|
1358
|
-
panels.append(panel)
|
|
1359
|
-
|
|
1360
|
-
return panels
|
|
1361
|
-
|
|
1362
2161
|
def _format_dict_or_list_output(self, output_value: dict | list) -> str:
|
|
1363
2162
|
"""Format dict/list output as pretty JSON."""
|
|
1364
2163
|
try:
|
|
1365
|
-
return (
|
|
1366
|
-
self.OUTPUT_PREFIX
|
|
1367
|
-
+ "```json\n"
|
|
1368
|
-
+ json.dumps(output_value, indent=2)
|
|
1369
|
-
+ "\n```\n"
|
|
1370
|
-
)
|
|
2164
|
+
return self.OUTPUT_PREFIX + "```json\n" + json.dumps(output_value, indent=2) + "\n```\n"
|
|
1371
2165
|
except Exception:
|
|
1372
2166
|
return self.OUTPUT_PREFIX + str(output_value) + "\n"
|
|
1373
2167
|
|
|
@@ -1391,12 +2185,7 @@ class RichStreamRenderer:
|
|
|
1391
2185
|
"""Format string that looks like JSON."""
|
|
1392
2186
|
try:
|
|
1393
2187
|
parsed = json.loads(output)
|
|
1394
|
-
return (
|
|
1395
|
-
self.OUTPUT_PREFIX
|
|
1396
|
-
+ "```json\n"
|
|
1397
|
-
+ json.dumps(parsed, indent=2)
|
|
1398
|
-
+ "\n```\n"
|
|
1399
|
-
)
|
|
2188
|
+
return self.OUTPUT_PREFIX + "```json\n" + json.dumps(parsed, indent=2) + "\n```\n"
|
|
1400
2189
|
except Exception:
|
|
1401
2190
|
return self.OUTPUT_PREFIX + output + "\n"
|
|
1402
2191
|
|
|
@@ -1406,9 +2195,7 @@ class RichStreamRenderer:
|
|
|
1406
2195
|
s = self._clean_sub_agent_prefix(s, tool_name)
|
|
1407
2196
|
|
|
1408
2197
|
# If looks like JSON, pretty print it
|
|
1409
|
-
if (s.startswith("{") and s.endswith("}")) or (
|
|
1410
|
-
s.startswith("[") and s.endswith("]")
|
|
1411
|
-
):
|
|
2198
|
+
if (s.startswith("{") and s.endswith("}")) or (s.startswith("[") and s.endswith("]")):
|
|
1412
2199
|
return self._format_json_string_output(s)
|
|
1413
2200
|
|
|
1414
2201
|
return self.OUTPUT_PREFIX + s + "\n"
|
|
@@ -1422,7 +2209,7 @@ class RichStreamRenderer:
|
|
|
1422
2209
|
|
|
1423
2210
|
def _format_output_block(self, output_value: Any, tool_name: str | None) -> str:
|
|
1424
2211
|
"""Format an output value for panel display."""
|
|
1425
|
-
if isinstance(output_value, dict
|
|
2212
|
+
if isinstance(output_value, (dict, list)):
|
|
1426
2213
|
return self._format_dict_or_list_output(output_value)
|
|
1427
2214
|
elif isinstance(output_value, str):
|
|
1428
2215
|
return self._format_string_output(output_value, tool_name)
|