glaip-sdk 0.0.1b10__py3-none-any.whl → 0.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. glaip_sdk/__init__.py +2 -2
  2. glaip_sdk/_version.py +51 -0
  3. glaip_sdk/cli/commands/agents.py +201 -109
  4. glaip_sdk/cli/commands/configure.py +29 -87
  5. glaip_sdk/cli/commands/init.py +16 -7
  6. glaip_sdk/cli/commands/mcps.py +73 -153
  7. glaip_sdk/cli/commands/tools.py +185 -49
  8. glaip_sdk/cli/main.py +30 -27
  9. glaip_sdk/cli/utils.py +126 -13
  10. glaip_sdk/client/__init__.py +54 -2
  11. glaip_sdk/client/agents.py +175 -237
  12. glaip_sdk/client/base.py +62 -2
  13. glaip_sdk/client/mcps.py +63 -20
  14. glaip_sdk/client/tools.py +95 -28
  15. glaip_sdk/config/constants.py +10 -3
  16. glaip_sdk/exceptions.py +13 -0
  17. glaip_sdk/models.py +20 -4
  18. glaip_sdk/utils/__init__.py +116 -18
  19. glaip_sdk/utils/client_utils.py +284 -0
  20. glaip_sdk/utils/rendering/__init__.py +1 -0
  21. glaip_sdk/utils/rendering/formatting.py +211 -0
  22. glaip_sdk/utils/rendering/models.py +53 -0
  23. glaip_sdk/utils/rendering/renderer/__init__.py +38 -0
  24. glaip_sdk/utils/rendering/renderer/base.py +827 -0
  25. glaip_sdk/utils/rendering/renderer/config.py +33 -0
  26. glaip_sdk/utils/rendering/renderer/console.py +54 -0
  27. glaip_sdk/utils/rendering/renderer/debug.py +82 -0
  28. glaip_sdk/utils/rendering/renderer/panels.py +123 -0
  29. glaip_sdk/utils/rendering/renderer/progress.py +118 -0
  30. glaip_sdk/utils/rendering/renderer/stream.py +198 -0
  31. glaip_sdk/utils/rendering/steps.py +168 -0
  32. glaip_sdk/utils/run_renderer.py +22 -1086
  33. {glaip_sdk-0.0.1b10.dist-info → glaip_sdk-0.0.3.dist-info}/METADATA +9 -37
  34. glaip_sdk-0.0.3.dist-info/RECORD +40 -0
  35. glaip_sdk/cli/config.py +0 -592
  36. glaip_sdk/utils.py +0 -167
  37. glaip_sdk-0.0.1b10.dist-info/RECORD +0 -28
  38. {glaip_sdk-0.0.1b10.dist-info → glaip_sdk-0.0.3.dist-info}/WHEEL +0 -0
  39. {glaip_sdk-0.0.1b10.dist-info → glaip_sdk-0.0.3.dist-info}/entry_points.txt +0 -0
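
Note on the reorganization: the monolithic glaip_sdk/utils/run_renderer.py (+22 −1086 above) has been split into the glaip_sdk.utils.rendering package, with the renderer class itself now living in glaip_sdk/utils/rendering/renderer/base.py. A minimal import sketch under that assumption (module paths and constructor signature are taken from the diff below; whether the package also re-exports these names at a higher level is not confirmed here):

    from rich.console import Console

    # Paths follow the new module layout shown in the file list above; adjust if
    # the package re-exports these names elsewhere.
    from glaip_sdk.utils.rendering.renderer.base import RichStreamRenderer
    from glaip_sdk.utils.rendering.renderer.config import RendererConfig

    renderer = RichStreamRenderer(console=Console(), cfg=RendererConfig(), verbose=False)

The full diff of that new module follows.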
glaip_sdk/utils/rendering/renderer/base.py (new file)
@@ -0,0 +1,827 @@
+"""Base renderer class that orchestrates all rendering components.
+
+Authors:
+    Raymond Christopher (raymond.christopher@gdplabs.id)
+"""
+
+from __future__ import annotations
+
+import json
+import logging
+from dataclasses import dataclass
+from time import monotonic
+from typing import Any
+
+from rich.console import Console as RichConsole
+from rich.console import Group
+from rich.live import Live
+from rich.markdown import Markdown
+from rich.panel import Panel
+from rich.text import Text
+
+from glaip_sdk.utils.rendering.formatting import (
+    format_main_title,
+    get_spinner_char,
+    is_step_finished,
+)
+from glaip_sdk.utils.rendering.models import RunStats
+from glaip_sdk.utils.rendering.renderer.config import RendererConfig
+from glaip_sdk.utils.rendering.renderer.panels import (
+    create_final_panel,
+    create_main_panel,
+    create_tool_panel,
+)
+from glaip_sdk.utils.rendering.renderer.progress import (
+    format_elapsed_time,
+    format_tool_title,
+    format_working_indicator,
+    get_spinner,
+    is_delegation_tool,
+)
+from glaip_sdk.utils.rendering.renderer.stream import StreamProcessor
+from glaip_sdk.utils.rendering.steps import StepManager
+
+from .debug import render_debug_event
+
+# Configure logger
+logger = logging.getLogger("glaip_sdk.run_renderer")
+
+
+@dataclass
+class RendererState:
+    """Internal state for the renderer."""
+
+    buffer: list[str] = None
+    final_text: str = ""
+    streaming_started_at: float | None = None
+    printed_final_panel: bool = False
+    finalizing_ui: bool = False
+
+    def __post_init__(self):
+        if self.buffer is None:
+            self.buffer = []
+
+
+class RichStreamRenderer:
+    """Live, modern terminal renderer for agent execution with rich visual output."""
+
+    def __init__(
+        self,
+        console=None,
+        *,
+        cfg: RendererConfig | None = None,
+        verbose: bool = False,
+    ):
+        """Initialize the renderer.
+
+        Args:
+            console: Rich console instance
+            cfg: Renderer configuration
+            verbose: Whether to enable verbose mode
+        """
+        self.console = console or RichConsole()
+        self.cfg = cfg or RendererConfig()
+        self.verbose = verbose
+
+        # Initialize components
+        self.stream_processor = StreamProcessor()
+        self.state = RendererState()
+
+        # Initialize step manager and other state
+        self.steps = StepManager()
+        # Live display instance (single source of truth)
+        self.live: Live | None = None
+
+        # Context and tool tracking
+        self.context_order: list[str] = []
+        self.context_parent: dict[str, str] = {}
+        self.tool_order: list[str] = []
+        self.context_panels: dict[str, list[str]] = {}
+        self.context_meta: dict[str, dict[str, Any]] = {}
+        self.tool_panels: dict[str, dict[str, Any]] = {}
+
+        # Timing
+        self._started_at: float | None = None
+
+        # Header/text
+        self.header_text: str = ""
+        # Track per-step server start times for accurate elapsed labels
+        self._step_server_start_times: dict[str, float] = {}
+
+    def on_start(self, meta: dict[str, Any]):
+        """Handle renderer start event."""
+        if self.cfg.live:
+            # Defer creating Live to _ensure_live so tests and prod both work
+            pass
+
+        # Set up initial state
+        self._started_at = monotonic()
+        self.stream_processor.streaming_started_at = self._started_at
+
+        # Print compact header and user request (parity with old renderer)
+        try:
+            parts: list[str] = ["🤖"]
+            agent_name = meta.get("agent_name", "agent")
+            if agent_name:
+                parts.append(agent_name)
+            model = meta.get("model", "")
+            if model:
+                parts.extend(["•", model])
+            run_id = meta.get("run_id", "")
+            if run_id:
+                parts.extend(["•", run_id])
+            self.header_text = " ".join(parts)
+            if self.header_text:
+                try:
+                    # Use a rule-like header for readability
+                    self.console.rule(self.header_text)
+                except Exception:
+                    self.console.print(self.header_text)
+
+            query = (
+                meta.get("input_message") or meta.get("query") or meta.get("message")
+            )
+            if query:
+                self.console.print(
+                    Panel(
+                        Markdown(f"**Query:** {query}"),
+                        title="User Request",
+                        border_style="yellow",
+                        padding=(0, 1),
+                    )
+                )
+        except Exception:
+            # Non-fatal: header is nice-to-have
+            pass
+
+    def on_event(self, ev: dict[str, Any]) -> None:
+        """Handle streaming events from the backend."""
+        # Reset event tracking
+        self.stream_processor.reset_event_tracking()
+
+        # Track streaming start time
+        if self.state.streaming_started_at is None:
+            self.state.streaming_started_at = monotonic()
+
+        # Extract event metadata
+        metadata = self.stream_processor.extract_event_metadata(ev)
+        kind = metadata["kind"]
+        context_id = metadata["context_id"]
+        content = metadata["content"]
+
+        # Render debug event panel if verbose mode is enabled
+        if self.verbose:
+            render_debug_event(ev, self.console, self.state.streaming_started_at)
+
+        # Update timing
+        self.stream_processor.update_timing(context_id)
+
+        # Handle different event types
+        if kind == "status":
+            # Status events
+            status = ev.get("status")
+            if status == "streaming_started":
+                self.state.streaming_started_at = monotonic()
+            return
+
+        elif kind == "content":
+            # Content streaming events
+            if content:
+                self.state.buffer.append(content)
+                self._ensure_live()
+            return
+
+        elif kind == "final_response":
+            # Final response events
+            if content:
+                self.state.buffer.append(content)
+                self.state.final_text = content
+                self._ensure_live()
+
+            # In verbose mode, show the final result in a panel
+            if self.verbose and content and content.strip():
+                final_panel = create_final_panel(content, theme=self.cfg.theme)
+                self.console.print(final_panel)
+                self.state.printed_final_panel = True
+            return
+
+        elif kind in {"agent_step", "agent_thinking_step"}:
+            # Agent step events
+            # Note: Thinking gaps are primarily a visual aid. Keep minimal here.
+
+            # Extract tool information
+            tool_name, tool_args, tool_out, tool_calls_info = (
+                self.stream_processor.parse_tool_calls(ev)
+            )
+
+            # Track tools and sub-agents
+            self.stream_processor.track_tools_and_agents(
+                tool_name, tool_calls_info, is_delegation_tool
+            )
+
+            # Handle tool execution
+            self._handle_agent_step(ev, tool_name, tool_args, tool_out, tool_calls_info)
+
+            # Update live display
+            self._ensure_live()
+
+    def on_complete(self, stats: RunStats):
+        """Handle completion event."""
+        self.state.finalizing_ui = True
+
+        # Mark any running steps as finished to avoid lingering spinners
+        try:
+            for st in list(self.steps.by_id.values()):
+                if not is_step_finished(st):
+                    st.finish(None)
+        except Exception:
+            pass
+
+        # Mark unfinished tool panels as finished
+        try:
+            for _sid, meta in list(self.tool_panels.items()):
+                if meta.get("status") != "finished":
+                    meta["status"] = "finished"
+        except Exception:
+            pass
+
+        # Final refresh
+        self._ensure_live()
+
+        # Stop live display
+        if self.live:
+            self.live.stop()
+            self.live = None
+
+    def _ensure_live(self):
+        """Ensure live display is updated."""
+        # Lazily create Live if needed
+        if self.live is None and self.cfg.live:
+            try:
+                self.live = Live(
+                    console=self.console,
+                    refresh_per_second=1 / self.cfg.refresh_debounce,
+                    transient=not self.cfg.persist_live,
+                )
+                self.live.start()
+            except Exception:
+                self.live = None
+
+        if self.live:
+            panels = [self._render_main_panel()]
+            steps_renderable = self._render_steps_text()
+            panels.append(Panel(steps_renderable, title="Steps", border_style="blue"))
+            panels.extend(self._render_tool_panels())
+            self.live.update(Group(*panels))
+
+    def _render_main_panel(self):
+        """Render the main content panel."""
+        body = "".join(self.state.buffer).strip()
+        # Dynamic title with spinner + elapsed/hints
+        title = self._format_enhanced_main_title()
+        return create_main_panel(body, title, self.cfg.theme)
+
+    def _maybe_insert_thinking_gap(self, task_id: str | None, context_id: str | None):
+        """Insert thinking gap if needed."""
+        # Implementation would track thinking states
+        pass
+
+    def _handle_agent_step(
+        self,
+        event: dict[str, Any],
+        tool_name: str | None,
+        tool_args: Any,
+        tool_out: Any,
+        tool_calls_info: list,
+    ):
+        """Handle agent step event."""
+        metadata = event.get("metadata", {})
+        task_id = event.get("task_id")
+        context_id = event.get("context_id")
+        content = event.get("content", "")
+
+        def ensure_tool_panel(name: str, args: Any) -> str:
+            formatted_title = format_tool_title(name)
+            is_delegation = is_delegation_tool(name)
+            tool_sid = f"tool_{name}_{task_id}_{context_id}"
+            if tool_sid not in self.tool_panels:
+                self.tool_panels[tool_sid] = {
+                    "title": formatted_title,
+                    "status": "running",
+                    "started_at": monotonic(),
+                    "server_started_at": self.stream_processor.server_elapsed_time,
+                    "chunks": [],
+                    "args": args or {},
+                    "output": None,
+                    "is_delegation": is_delegation,
+                }
+                # Add Args section once
+                if args:
+                    try:
+                        args_content = (
+                            "**Args:**\n```json\n"
+                            + json.dumps(args, indent=2)
+                            + "\n```\n\n"
+                        )
+                    except Exception:
+                        args_content = f"**Args:**\n{args}\n\n"
+                    self.tool_panels[tool_sid]["chunks"].append(args_content)
+                self.tool_order.append(tool_sid)
+            return tool_sid
+
+        # Create steps and panels for the primary tool
+        if tool_name:
+            tool_sid = ensure_tool_panel(tool_name, tool_args)
+            # Start or get a step for this tool
+            if is_delegation_tool(tool_name):
+                st = self.steps.start_or_get(
+                    task_id=task_id,
+                    context_id=context_id,
+                    kind="delegate",
+                    name=tool_name,
+                    args=tool_args,
+                )
+            else:
+                st = self.steps.start_or_get(
+                    task_id=task_id,
+                    context_id=context_id,
+                    kind="tool",
+                    name=tool_name,
+                    args=tool_args,
+                )
+            # Record server start time for this step if available
+            if st and self.stream_processor.server_elapsed_time is not None:
+                self._step_server_start_times[st.step_id] = (
+                    self.stream_processor.server_elapsed_time
+                )
+
+        # Handle additional tool calls (avoid duplicates)
+        for call_name, call_args, _ in tool_calls_info or []:
+            if call_name and call_name != tool_name:
+                ensure_tool_panel(call_name, call_args)
+                if is_delegation_tool(call_name):
+                    st2 = self.steps.start_or_get(
+                        task_id=task_id,
+                        context_id=context_id,
+                        kind="delegate",
+                        name=call_name,
+                        args=call_args,
+                    )
+                else:
+                    st2 = self.steps.start_or_get(
+                        task_id=task_id,
+                        context_id=context_id,
+                        kind="tool",
+                        name=call_name,
+                        args=call_args,
+                    )
+                if self.stream_processor.server_elapsed_time is not None and st2:
+                    self._step_server_start_times[st2.step_id] = (
+                        self.stream_processor.server_elapsed_time
+                    )
+
+        # Check completion status hints
+        tool_info = metadata.get("tool_info", {}) if isinstance(metadata, dict) else {}
+        is_tool_finished = False
+        finished_tool_name: str | None = None
+        finished_tool_output: Any = None
+
+        if tool_info.get("status") == "finished" and tool_info.get("name"):
+            is_tool_finished = True
+            finished_tool_name = tool_info.get("name")
+            finished_tool_output = tool_info.get("output")
+        elif content and isinstance(content, str) and content.startswith("Completed "):
+            # content like "Completed google_serper"
+            tname = content.replace("Completed ", "").strip()
+            if tname:
+                is_tool_finished = True
+                finished_tool_name = tname
+                if tool_info.get("name") == tname:
+                    finished_tool_output = tool_info.get("output")
+        elif metadata.get("status") == "finished" and tool_info.get("name"):
+            is_tool_finished = True
+            finished_tool_name = tool_info.get("name")
+            finished_tool_output = tool_info.get("output")
+
+        if is_tool_finished and finished_tool_name:
+            # Update panel
+            tool_sid = f"tool_{finished_tool_name}_{task_id}_{context_id}"
+            if tool_sid in self.tool_panels:
+                meta = self.tool_panels[tool_sid]
+                prev_status = meta.get("status")
+                meta["status"] = "finished"
+                # Compute and store duration for finished panel
+                if prev_status != "finished":
+                    try:
+                        server_now = self.stream_processor.server_elapsed_time
+                        server_start = meta.get("server_started_at")
+                        dur = None
+                        if isinstance(server_now, int | float) and isinstance(
+                            server_start, int | float
+                        ):
+                            dur = max(0.0, float(server_now) - float(server_start))
+                        elif meta.get("started_at") is not None:
+                            dur = max(0.0, float(monotonic() - meta.get("started_at")))
+                        if dur is not None:
+                            meta["duration_seconds"] = dur
+                            meta["server_finished_at"] = (
+                                server_now
+                                if isinstance(server_now, int | float)
+                                else None
+                            )
+                            meta["finished_at"] = monotonic()
+                    except Exception:
+                        pass
+
+                if finished_tool_output is not None:
+                    meta["chunks"].append(
+                        self._format_output_block(
+                            finished_tool_output, finished_tool_name
+                        )
+                    )
+                    meta["output"] = finished_tool_output
+                # Ensure this finished panel is visible in this frame
+                self.stream_processor.current_event_finished_panels.add(tool_sid)
+
+            # Finish corresponding step, pass duration to match panel title
+            step_duration = None
+            try:
+                step_duration = self.tool_panels.get(tool_sid, {}).get(
+                    "duration_seconds"
+                )
+            except Exception:
+                step_duration = None
+            if is_delegation_tool(finished_tool_name):
+                self.steps.finish(
+                    task_id=task_id,
+                    context_id=context_id,
+                    kind="delegate",
+                    name=finished_tool_name,
+                    output=finished_tool_output,
+                    duration_raw=step_duration,
+                )
+            else:
+                self.steps.finish(
+                    task_id=task_id,
+                    context_id=context_id,
+                    kind="tool",
+                    name=finished_tool_name,
+                    output=finished_tool_output,
+                    duration_raw=step_duration,
+                )
+
+            # Append a truncated snapshot to scrollback so user can freely scroll
+            try:
+                if self.cfg.append_finished_snapshots and not self.tool_panels.get(
+                    tool_sid, {}
+                ).get("snapshot_printed"):
+                    # Build title with elapsed if available
+                    adjusted_title = meta.get("title") or finished_tool_name
+                    dur = meta.get("duration_seconds")
+                    if isinstance(dur, int | float):
+                        elapsed_str = (
+                            f"{dur:.2f}s"
+                            if dur >= 1
+                            else (
+                                f"{int(dur * 1000)}ms"
+                                if int(dur * 1000) > 0
+                                else "<1ms"
+                            )
+                        )
+                        adjusted_title = f"{adjusted_title} · {elapsed_str}"
+
+                    # Compose body from chunks and clamp
+                    body_text = "".join(meta.get("chunks") or [])
+                    # Clamp by lines then by chars
+                    max_lines = int(self.cfg.snapshot_max_lines or 0) or 60
+                    lines = body_text.splitlines()
+                    if len(lines) > max_lines:
+                        lines = lines[:max_lines] + ["… (truncated)"]
+                        body_text = "\n".join(lines)
+                    max_chars = int(self.cfg.snapshot_max_chars or 0) or 4000
+                    if len(body_text) > max_chars:
+                        body_text = body_text[: max_chars - 12] + "\n… (truncated)"
+
+                    snapshot_panel = create_tool_panel(
+                        title=adjusted_title,
+                        content=body_text or "(no output)",
+                        status="finished",
+                        theme=self.cfg.theme,
+                        is_delegation=is_delegation_tool(finished_tool_name),
+                    )
+                    # Print as a snapshot entry; when Live is active this prints above the live area
+                    self.console.print(snapshot_panel)
+                    # Guard so we don't print snapshot twice for repeated finish events
+                    self.tool_panels[tool_sid]["snapshot_printed"] = True
+            except Exception:
+                pass
+
+    def _spinner(self) -> str:
+        """Return spinner character."""
+        return get_spinner()
+
+    def _format_working_indicator(self, started_at: float | None) -> str:
+        """Format working indicator."""
+        return format_working_indicator(
+            started_at,
+            self.stream_processor.server_elapsed_time,
+            self.state.streaming_started_at,
+        )
+
+    def close(self) -> None:
+        """Gracefully stop any live rendering and release resources."""
+        try:
+            if self.live:
+                try:
+                    self.live.stop()
+                finally:
+                    self.live = None
+        except Exception:
+            pass
+
+    def __del__(self):
+        try:
+            if self.live:
+                self.live.stop()
+        except Exception:
+            pass
+
+    def _get_analysis_progress_info(self) -> dict[str, Any]:
+        total_steps = len(self.steps.order)
+        completed_steps = sum(
+            1 for sid in self.steps.order if is_step_finished(self.steps.by_id[sid])
+        )
+        current_step = None
+        for sid in self.steps.order:
+            if not is_step_finished(self.steps.by_id[sid]):
+                current_step = sid
+                break
+        # Prefer server elapsed time when available
+        elapsed = 0.0
+        if isinstance(self.stream_processor.server_elapsed_time, int | float):
+            try:
+                elapsed = float(self.stream_processor.server_elapsed_time)
+            except Exception:
+                elapsed = 0.0
+        elif self._started_at is not None:
+            try:
+                elapsed = monotonic() - self._started_at
+            except Exception:
+                elapsed = 0.0
+        progress_percent = (
+            int((completed_steps / total_steps) * 100) if total_steps else 0
+        )
+        return {
+            "total_steps": total_steps,
+            "completed_steps": completed_steps,
+            "current_step": current_step,
+            "progress_percent": progress_percent,
+            "elapsed_time": elapsed,
+            "has_running_steps": self._has_running_steps(),
+        }
+
+    def _format_enhanced_main_title(self) -> str:
+        base = format_main_title(
+            header_text=self.header_text,
+            has_running_steps=self._has_running_steps(),
+            get_spinner_char=get_spinner_char,
+        )
+        # Add elapsed time and subtle progress hints for long operations
+        info = self._get_analysis_progress_info()
+        elapsed = info.get("elapsed_time", 0.0)
+        if elapsed and elapsed > 0:
+            base += f" · {format_elapsed_time(elapsed)}"
+        if info.get("total_steps", 0) > 1 and info.get("has_running_steps"):
+            if elapsed > 60:
+                base += " 🐌"
+            elif elapsed > 30:
+                base += " ⚠️"
+        return base
+
+    # Modern interface only — no legacy helper shims below
+
+    def _refresh(self, force: bool | None = None) -> None:
+        # In the modular renderer, refreshing simply updates the live group
+        self._ensure_live()
+
+    def _has_running_steps(self) -> bool:
+        """Check if any steps are still running."""
+        for _sid, st in self.steps.by_id.items():
+            if not is_step_finished(st):
+                return True
+        return False
+
+    def _render_steps_text(self) -> Text:
+        """Render the steps panel content."""
+        if not (self.steps.order or self.steps.children):
+            return Text("No steps yet", style="dim")
+
+        # Track running tools by task/context to annotate parallelism
+        running_by_ctx: dict[tuple[str | None, str | None], list] = {}
+        for sid in self.steps.order:
+            st = self.steps.by_id[sid]
+            if st.kind == "tool" and not is_step_finished(st):
+                key = (st.task_id, st.context_id)
+                running_by_ctx.setdefault(key, []).append(st)
+
+        lines: list[str] = []
+        for sid in self.steps.order:
+            st = self.steps.by_id[sid]
+            # Determine elapsed/status label
+            if is_step_finished(st):
+                if st.duration_ms is None:
+                    status_br = "[<1ms]"
+                elif st.duration_ms >= 1000:
+                    status_br = f"[{st.duration_ms/1000:.2f}s]"
+                elif st.duration_ms > 0:
+                    status_br = f"[{st.duration_ms}ms]"
+                else:
+                    status_br = "[<1ms]"
+            else:
+                # Prefer server timing when we have a server start timestamp
+                server_elapsed = self.stream_processor.server_elapsed_time
+                server_start = self._step_server_start_times.get(st.step_id)
+                if isinstance(server_elapsed, int | float) and isinstance(
+                    server_start, int | float
+                ):
+                    elapsed = max(0.0, float(server_elapsed) - float(server_start))
+                else:
+                    try:
+                        elapsed = max(0.0, float(monotonic() - st.started_at))
+                    except Exception:
+                        elapsed = 0.0
+                # Standardized elapsed label without "Working..."
+                if elapsed >= 1:
+                    status_br = f"[{elapsed:.2f}s]"
+                else:
+                    ms = int(elapsed * 1000)
+                    status_br = f"[{ms}ms]" if ms > 0 else "[<1ms]"
+
+            display_name = (
+                st.name
+                if st.name and st.name != "step"
+                else ("thinking..." if st.kind == "agent" else f"{st.kind} step")
+            )
+            tail = " ✓" if is_step_finished(st) else ""
+
+            # Parallel indicator for running tools
+            parallel_indicator = ""
+            if st.kind == "tool" and not is_step_finished(st):
+                key = (st.task_id, st.context_id)
+                if len(running_by_ctx.get(key, [])) > 1:
+                    parallel_indicator = " 🔄"
+                    status_br = status_br.replace("]", f"{parallel_indicator}]")
+
+            # Icon prefix (simple mapping)
+            if st.kind == "tool":
+                icon = "⚙️"
+            elif st.kind == "delegate":
+                icon = "🤝"
+            elif st.kind == "agent":
+                icon = "🧠"
+            else:
+                icon = ""
+
+            lines.append(f"{icon} {display_name} {status_br}{tail}")
+
+        return Text("\n".join(lines), style="dim")
+
+    def _render_tool_panels(self) -> list[Panel]:
+        """Render tool execution output panels."""
+        panels: list[Panel] = []
+        for sid in self.tool_order:
+            meta = self.tool_panels.get(sid) or {}
+            title = meta.get("title") or "Tool"
+            status = meta.get("status") or "running"
+            chunks = meta.get("chunks") or []
+            is_delegation = bool(meta.get("is_delegation"))
+
+            # Finished panels visibility rules
+            if status == "finished":
+                if getattr(self.cfg, "append_finished_snapshots", False):
+                    # When snapshots are enabled, don't also render finished panels in the live area
+                    # (prevents duplicates both mid-run and at the end)
+                    continue
+                if (
+                    not self.state.finalizing_ui
+                    and sid not in self.stream_processor.current_event_finished_panels
+                ):
+                    continue
+
+            body = "".join(chunks)
+            adjusted_title = title
+            if status == "running":
+                # Prefer server-based elapsed from when this tool panel started
+                server_elapsed = self.stream_processor.server_elapsed_time
+                server_start = meta.get("server_started_at")
+                if isinstance(server_elapsed, int | float) and isinstance(
+                    server_start, int | float
+                ):
+                    elapsed = max(0.0, float(server_elapsed) - float(server_start))
+                else:
+                    try:
+                        elapsed = max(
+                            0.0, monotonic() - (meta.get("started_at") or 0.0)
+                        )
+                    except Exception:
+                        elapsed = 0.0
+                elapsed_str = (
+                    f"{elapsed:.2f}s"
+                    if elapsed >= 1
+                    else (
+                        f"{int(elapsed * 1000)}ms"
+                        if int(elapsed * 1000) > 0
+                        else "<1ms"
+                    )
+                )
+                # Add a small elapsed hint to the title and panel body (standardized)
+                adjusted_title = f"{title} · {elapsed_str}"
+                chip = f"⏱ {elapsed_str}"
+                if not body:
+                    body = chip
+                else:
+                    body = f"{body}\n\n{chip}"
+            elif status == "finished":
+                # Use stored duration if present; otherwise try to compute once more
+                dur = meta.get("duration_seconds")
+                if not isinstance(dur, int | float):
+                    try:
+                        server_now = self.stream_processor.server_elapsed_time
+                        server_start = meta.get("server_started_at")
+                        if isinstance(server_now, int | float) and isinstance(
+                            server_start, int | float
+                        ):
+                            dur = max(0.0, float(server_now) - float(server_start))
+                        elif meta.get("started_at") is not None:
+                            dur = max(0.0, float(monotonic() - meta.get("started_at")))
+                    except Exception:
+                        dur = None
+                if isinstance(dur, int | float):
+                    elapsed_str = (
+                        f"{dur:.2f}s"
+                        if dur >= 1
+                        else (f"{int(dur * 1000)}ms" if int(dur * 1000) > 0 else "<1ms")
+                    )
+                    adjusted_title = f"{title} · {elapsed_str}"
+
+            panels.append(
+                create_tool_panel(
+                    title=adjusted_title,
+                    content=body or "Processing...",
+                    status=status,
+                    theme=self.cfg.theme,
+                    is_delegation=is_delegation,
+                )
+            )
+
+        return panels
+
+    def _format_output_block(self, output_value: Any, tool_name: str | None) -> str:
+        """Format an output value for panel display."""
+        # If dict/list -> pretty JSON
+        if isinstance(output_value, dict | list):
+            try:
+                return (
+                    "**Output:**\n```json\n"
+                    + json.dumps(output_value, indent=2)
+                    + "\n```\n"
+                )
+            except Exception:
+                pass
+
+        if isinstance(output_value, str):
+            s = output_value.strip()
+            # Clean sub-agent name prefix like "[research_compiler_agent_testing] "
+            try:
+                if tool_name and is_delegation_tool(tool_name):
+                    sub = tool_name
+                    if tool_name.startswith("delegate_to_"):
+                        sub = tool_name.replace("delegate_to_", "")
+                    elif tool_name.startswith("delegate_"):
+                        sub = tool_name.replace("delegate_", "")
+                    prefix = f"[{sub}]"
+                    if s.startswith(prefix):
+                        s = s[len(prefix) :].lstrip()
+            except Exception:
+                pass
+            # If looks like JSON, pretty print it
+            if (s.startswith("{") and s.endswith("}")) or (
+                s.startswith("[") and s.endswith("]")
+            ):
+                try:
+                    parsed = json.loads(s)
+                    return (
+                        "**Output:**\n```json\n"
+                        + json.dumps(parsed, indent=2)
+                        + "\n```\n"
+                    )
+                except Exception:
+                    pass
+            return "**Output:**\n" + s + "\n"
+
+        try:
+            return "**Output:**\n" + json.dumps(output_value, indent=2) + "\n"
+        except Exception:
+            return "**Output:**\n" + str(output_value) + "\n"
+
+    # No legacy surface helpers are exposed; use modern interfaces only
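
RichStreamRenderer drives a three-phase lifecycle: on_start prints the header and user-request panel, on_event folds streamed backend events into the live Rich display, and on_complete finalizes steps and stops the Live view. A minimal driver sketch follows, with the caveat that the event dictionaries are invented for illustration (their real shape is interpreted by StreamProcessor.extract_event_metadata, which is not part of this diff) and that RunStats is assumed to be constructible without arguments:

    from glaip_sdk.utils.rendering.models import RunStats
    from glaip_sdk.utils.rendering.renderer.base import RichStreamRenderer

    renderer = RichStreamRenderer(verbose=False)
    renderer.on_start(
        {
            "agent_name": "demo-agent",      # keys read by on_start, values illustrative
            "model": "gpt-4o",
            "run_id": "run-123",
            "input_message": "Summarise the 0.0.3 release",
        }
    )
    try:
        # Hypothetical events; real keys come from the backend stream and are
        # decoded by StreamProcessor, not shown in this file.
        for ev in [{"content": "Working on it…"}, {"content": "Done."}]:
            renderer.on_event(ev)
    finally:
        renderer.on_complete(RunStats())  # assumes RunStats() needs no required fields
        renderer.close()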