glaip-sdk 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- glaip_sdk/_version.py +1 -3
- glaip_sdk/branding.py +2 -6
- glaip_sdk/cli/agent_config.py +2 -6
- glaip_sdk/cli/auth.py +11 -30
- glaip_sdk/cli/commands/agents.py +45 -107
- glaip_sdk/cli/commands/configure.py +12 -36
- glaip_sdk/cli/commands/mcps.py +26 -63
- glaip_sdk/cli/commands/models.py +2 -4
- glaip_sdk/cli/commands/tools.py +22 -35
- glaip_sdk/cli/commands/update.py +3 -8
- glaip_sdk/cli/config.py +1 -3
- glaip_sdk/cli/display.py +4 -12
- glaip_sdk/cli/io.py +8 -14
- glaip_sdk/cli/main.py +10 -30
- glaip_sdk/cli/mcp_validators.py +5 -15
- glaip_sdk/cli/pager.py +3 -9
- glaip_sdk/cli/parsers/json_input.py +11 -22
- glaip_sdk/cli/resolution.py +3 -9
- glaip_sdk/cli/rich_helpers.py +1 -3
- glaip_sdk/cli/slash/agent_session.py +5 -10
- glaip_sdk/cli/slash/prompt.py +3 -10
- glaip_sdk/cli/slash/session.py +46 -95
- glaip_sdk/cli/transcript/cache.py +6 -19
- glaip_sdk/cli/transcript/capture.py +6 -20
- glaip_sdk/cli/transcript/launcher.py +1 -3
- glaip_sdk/cli/transcript/viewer.py +11 -40
- glaip_sdk/cli/update_notifier.py +165 -21
- glaip_sdk/cli/utils.py +33 -84
- glaip_sdk/cli/validators.py +11 -12
- glaip_sdk/client/_agent_payloads.py +10 -30
- glaip_sdk/client/agents.py +33 -63
- glaip_sdk/client/base.py +77 -35
- glaip_sdk/client/mcps.py +1 -3
- glaip_sdk/client/run_rendering.py +6 -14
- glaip_sdk/client/tools.py +8 -24
- glaip_sdk/client/validators.py +20 -48
- glaip_sdk/exceptions.py +1 -3
- glaip_sdk/models.py +14 -33
- glaip_sdk/payload_schemas/agent.py +1 -3
- glaip_sdk/utils/agent_config.py +4 -14
- glaip_sdk/utils/client_utils.py +7 -21
- glaip_sdk/utils/display.py +2 -6
- glaip_sdk/utils/general.py +1 -3
- glaip_sdk/utils/import_export.py +3 -9
- glaip_sdk/utils/rendering/formatting.py +2 -5
- glaip_sdk/utils/rendering/models.py +2 -6
- glaip_sdk/utils/rendering/renderer/__init__.py +1 -3
- glaip_sdk/utils/rendering/renderer/base.py +63 -189
- glaip_sdk/utils/rendering/renderer/debug.py +4 -14
- glaip_sdk/utils/rendering/renderer/panels.py +1 -3
- glaip_sdk/utils/rendering/renderer/progress.py +3 -11
- glaip_sdk/utils/rendering/renderer/stream.py +7 -19
- glaip_sdk/utils/rendering/renderer/toggle.py +1 -3
- glaip_sdk/utils/rendering/step_tree_state.py +1 -3
- glaip_sdk/utils/rendering/steps.py +29 -83
- glaip_sdk/utils/resource_refs.py +4 -13
- glaip_sdk/utils/serialization.py +14 -46
- glaip_sdk/utils/validation.py +4 -4
- {glaip_sdk-0.1.0.dist-info → glaip_sdk-0.1.2.dist-info}/METADATA +1 -1
- glaip_sdk-0.1.2.dist-info/RECORD +82 -0
- glaip_sdk-0.1.0.dist-info/RECORD +0 -82
- {glaip_sdk-0.1.0.dist-info → glaip_sdk-0.1.2.dist-info}/WHEEL +0 -0
- {glaip_sdk-0.1.0.dist-info → glaip_sdk-0.1.2.dist-info}/entry_points.txt +0 -0
@@ -219,12 +219,8 @@ class RichStreamRenderer:
         self._transcript_mode_enabled: bool = False
         self._transcript_render_cursor: int = 0
         self.transcript_controller: Any | None = None
-        self._transcript_hint_message = (
-            "[dim]Transcript view · Press Ctrl+T to return to the summary.[/dim]"
-        )
-        self._summary_hint_message = (
-            "[dim]Press Ctrl+T to inspect raw transcript events.[/dim]"
-        )
+        self._transcript_hint_message = "[dim]Transcript view · Press Ctrl+T to return to the summary.[/dim]"
+        self._summary_hint_message = "[dim]Press Ctrl+T to inspect raw transcript events.[/dim]"
         self._summary_hint_printed_once: bool = False
         self._transcript_hint_printed_once: bool = False
         self._transcript_header_printed: bool = False
@@ -245,9 +241,7 @@ class RichStreamRenderer:
 
         meta_payload = meta or {}
         self.steps.set_root_agent(meta_payload.get("agent_id"))
-        self._root_agent_friendly = self._humanize_agent_slug(
-            meta_payload.get("agent_name")
-        )
+        self._root_agent_friendly = self._humanize_agent_slug(meta_payload.get("agent_name"))
         self._root_query = _truncate_display(
             meta_payload.get("input_message")
             or meta_payload.get("query")
@@ -383,9 +377,7 @@ class RichStreamRenderer:
 
         return received_at
 
-    def _sync_stream_start(
-        self, ev: dict[str, Any], received_at: datetime | None
-    ) -> None:
+    def _sync_stream_start(self, ev: dict[str, Any], received_at: datetime | None) -> None:
         """Ensure renderer and stream processor share a streaming baseline."""
         baseline = self.state.streaming_started_at
         if baseline is None:
@@ -442,9 +434,7 @@ class RichStreamRenderer:
         self.state.buffer.append(content)
         self._ensure_live()
 
-    def _handle_final_response_event(
-        self, content: str, metadata: dict[str, Any]
-    ) -> None:
+    def _handle_final_response_event(self, content: str, metadata: dict[str, Any]) -> None:
         """Handle final response events."""
         if content:
             self.state.buffer.append(content)
@@ -464,17 +454,12 @@ class RichStreamRenderer:
     def _normalise_finished_icons(self) -> None:
         """Ensure finished steps do not keep spinner icons."""
         for step in self.steps.by_id.values():
-            if (
-                getattr(step, "status", None) == "finished"
-                and getattr(step, "status_icon", None) == "spinner"
-            ):
+            if getattr(step, "status", None) == "finished" and getattr(step, "status_icon", None) == "spinner":
                 step.status_icon = "success"
             if getattr(step, "status", None) != "running":
                 self._step_spinners.pop(step.step_id, None)
 
-    def _handle_agent_step_event(
-        self, ev: dict[str, Any], metadata: dict[str, Any]
-    ) -> None:
+    def _handle_agent_step_event(self, ev: dict[str, Any], metadata: dict[str, Any]) -> None:
         """Handle agent step events."""
         # Extract tool information
         (
@@ -498,9 +483,7 @@ class RichStreamRenderer:
         self._maybe_attach_root_query(tracked_step)
 
         # Track tools and sub-agents for transcript/debug context
-        self.stream_processor.track_tools_and_agents(
-            tool_name, tool_calls_info, is_delegation_tool
-        )
+        self.stream_processor.track_tools_and_agents(tool_name, tool_calls_info, is_delegation_tool)
 
         # Handle tool execution
         self._handle_agent_step(
@@ -517,13 +500,7 @@ class RichStreamRenderer:
 
     def _maybe_attach_root_query(self, step: Step | None) -> None:
         """Attach the user query to the root agent step for display."""
-        if (
-            not step
-            or self._root_query_attached
-            or not self._root_query
-            or step.kind != "agent"
-            or step.parent_id
-        ):
+        if not step or self._root_query_attached or not self._root_query or step.kind != "agent" or step.parent_id:
             return
 
         args = dict(getattr(step, "args", {}) or {})
@@ -531,9 +508,7 @@ class RichStreamRenderer:
         step.args = args
         self._root_query_attached = True
 
-    def _record_step_server_start(
-        self, step: Step | None, payload: dict[str, Any]
-    ) -> None:
+    def _record_step_server_start(self, step: Step | None, payload: dict[str, Any]) -> None:
         """Store server-provided start times for elapsed calculations."""
         if not step:
             return
@@ -542,29 +517,21 @@ class RichStreamRenderer:
             return
         self._step_server_start_times.setdefault(step.step_id, float(server_time))
 
-    def _maybe_override_root_agent_label(
-        self, step: Step | None, payload: dict[str, Any]
-    ) -> None:
+    def _maybe_override_root_agent_label(self, step: Step | None, payload: dict[str, Any]) -> None:
         """Ensure the root agent row uses the human-friendly name and shows the ID."""
         if not step or step.kind != "agent" or step.parent_id:
             return
-        friendly = self._root_agent_friendly or self._humanize_agent_slug(
-            (payload or {}).get("agent_name")
-        )
+        friendly = self._root_agent_friendly or self._humanize_agent_slug((payload or {}).get("agent_name"))
         if not friendly:
             return
         agent_identifier = step.name or step.step_id
         if not agent_identifier:
             return
-        step.display_label = normalise_display_label(
-            f"{ICON_AGENT} {friendly} ({agent_identifier})"
-        )
+        step.display_label = normalise_display_label(f"{ICON_AGENT} {friendly} ({agent_identifier})")
         if not self._root_agent_step_id:
             self._root_agent_step_id = step.step_id
 
-    def _update_thinking_timeline(
-        self, step: Step | None, payload: dict[str, Any]
-    ) -> None:
+    def _update_thinking_timeline(self, step: Step | None, payload: dict[str, Any]) -> None:
         """Maintain deterministic thinking spans for each agent/delegate scope."""
         if not self.cfg.render_thinking or not step:
             return
@@ -609,9 +576,7 @@ class RichStreamRenderer:
             start_monotonic=now_monotonic,
         )
 
-        is_anchor_finished = status_hint in FINISHED_STATUS_HINTS or (
-            not status_hint and is_step_finished(step)
-        )
+        is_anchor_finished = status_hint in FINISHED_STATUS_HINTS or (not status_hint and is_step_finished(step))
         if is_anchor_finished:
             scope.anchor_finished_at = server_time or scope.anchor_finished_at
             self._finish_scope_thinking(scope, server_time, now_monotonic)
@@ -641,13 +606,9 @@ class RichStreamRenderer:
         if not parent_scope or parent_scope.closed:
             return
         if is_finished:
-            self._mark_child_finished(
-                parent_scope, child_step.step_id, server_time, now_monotonic
-            )
+            self._mark_child_finished(parent_scope, child_step.step_id, server_time, now_monotonic)
         else:
-            self._mark_child_running(
-                parent_scope, child_step, server_time, now_monotonic
-            )
+            self._mark_child_running(parent_scope, child_step, server_time, now_monotonic)
 
     def _update_child_thinking(
         self,
@@ -666,9 +627,7 @@ class RichStreamRenderer:
         if not scope or scope.closed or step.kind == "thinking":
             return
 
-        is_finish_event = status_hint in FINISHED_STATUS_HINTS or (
-            not status_hint and is_step_finished(step)
-        )
+        is_finish_event = status_hint in FINISHED_STATUS_HINTS or (not status_hint and is_step_finished(step))
         if is_finish_event:
             self._mark_child_finished(scope, step.step_id, server_time, now_monotonic)
         else:
@@ -967,7 +926,8 @@ class RichStreamRenderer:
         if self._transcript_mode_enabled:
             try:
                 self.console.print(
-                    "[dim]Run finished. Press Ctrl+T to return to the summary view or stay here to inspect events. Use the post-run viewer for export.[/dim]"
+                    "[dim]Run finished. Press Ctrl+T to return to the summary view or stay here to inspect events. "
+                    "Use the post-run viewer for export.[/dim]"
                 )
             except Exception:
                 pass
@@ -1172,9 +1132,7 @@ class RichStreamRenderer:
         if self._transcript_enabled_message_printed:
             return
         try:
-            self.console.print(
-                "[dim]Transcript mode enabled — streaming raw transcript events.[/dim]"
-            )
+            self.console.print("[dim]Transcript mode enabled — streaming raw transcript events.[/dim]")
         except Exception:
             pass
         else:
@@ -1226,9 +1184,7 @@ class RichStreamRenderer:
         self._transcript_render_cursor = len(self.state.events)
         self._print_transcript_hint()
 
-    def _capture_event(
-        self, ev: dict[str, Any], received_at: datetime | None = None
-    ) -> None:
+    def _capture_event(self, ev: dict[str, Any], received_at: datetime | None = None) -> None:
         """Capture a deep copy of SSE events for transcript replay."""
         try:
             captured = json.loads(json.dumps(ev))
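Note: `_capture_event` (above) snapshots each SSE payload with a `json.loads(json.dumps(ev))` round trip, which yields a detached deep copy but only works for JSON-serializable events. A standalone sketch of the same pattern (names are illustrative, not the SDK's API):

import json

def capture_event(events: list, ev: dict) -> None:
    # The JSON round trip produces a deep, detached copy of the event,
    # but silently assumes every value is JSON-serializable.
    try:
        events.append(json.loads(json.dumps(ev)))
    except (TypeError, ValueError):
        # Non-serializable payloads (datetimes, sets, ...) would land here;
        # the renderer wraps the same call in a try block.
        pass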
@@ -1256,9 +1212,7 @@ class RichStreamRenderer:
         """Return captured SSE events."""
         return list(self.state.events)
 
-    def _ensure_tool_panel(
-        self, name: str, args: Any, task_id: str, context_id: str
-    ) -> str:
+    def _ensure_tool_panel(self, name: str, args: Any, task_id: str, context_id: str) -> str:
         """Ensure a tool panel exists and return its ID."""
         formatted_title = format_tool_title(name)
         is_delegation = is_delegation_tool(name)
@@ -1278,11 +1232,7 @@ class RichStreamRenderer:
         # Add Args section once
         if args:
             try:
-                args_content = (
-                    "**Args:**\n```json\n"
-                    + json.dumps(args, indent=2)
-                    + "\n```\n\n"
-                )
+                args_content = "**Args:**\n```json\n" + json.dumps(args, indent=2) + "\n```\n\n"
             except Exception:
                 args_content = f"**Args:**\n{args}\n\n"
             self.tool_panels[tool_sid]["chunks"].append(args_content)
@@ -1322,9 +1272,7 @@ class RichStreamRenderer:
 
         # Record server start time for this step if available
        if st and self.stream_processor.server_elapsed_time is not None:
-            self._step_server_start_times[st.step_id] = (
-                self.stream_processor.server_elapsed_time
-            )
+            self._step_server_start_times[st.step_id] = self.stream_processor.server_elapsed_time
 
         return st
 
@@ -1338,26 +1286,18 @@ class RichStreamRenderer:
         """Process additional tool calls to avoid duplicates."""
         for call_name, call_args, _ in tool_calls_info or []:
             if call_name and call_name != tool_name:
-                self._process_single_tool_call(
-                    call_name, call_args, task_id, context_id
-                )
+                self._process_single_tool_call(call_name, call_args, task_id, context_id)
 
-    def _process_single_tool_call(
-        self, call_name: str, call_args: Any, task_id: str, context_id: str
-    ) -> None:
+    def _process_single_tool_call(self, call_name: str, call_args: Any, task_id: str, context_id: str) -> None:
         """Process a single additional tool call."""
         self._ensure_tool_panel(call_name, call_args, task_id, context_id)
 
         st2 = self._create_step_for_tool_call(call_name, call_args, task_id, context_id)
 
         if self.stream_processor.server_elapsed_time is not None and st2:
-            self._step_server_start_times[st2.step_id] = (
-                self.stream_processor.server_elapsed_time
-            )
+            self._step_server_start_times[st2.step_id] = self.stream_processor.server_elapsed_time
 
-    def _create_step_for_tool_call(
-        self, call_name: str, call_args: Any, task_id: str, context_id: str
-    ) -> Any:
+    def _create_step_for_tool_call(self, call_name: str, call_args: Any, task_id: str, context_id: str) -> Any:
         """Create appropriate step for tool call."""
         if is_delegation_tool(call_name):
             return self.steps.start_or_get(
@@ -1376,9 +1316,7 @@ class RichStreamRenderer:
             args=call_args,
         )
 
-    def _detect_tool_completion(
-        self, metadata: dict, content: str
-    ) -> tuple[bool, str | None, Any]:
+    def _detect_tool_completion(self, metadata: dict, content: str) -> tuple[bool, str | None, Any]:
         """Detect if a tool has completed and return completion info."""
         tool_info = metadata.get("tool_info", {}) if isinstance(metadata, dict) else {}
 
@@ -1388,18 +1326,14 @@ class RichStreamRenderer:
             # content like "Completed google_serper"
             tname = content.replace("Completed ", "").strip()
             if tname:
-                output = (
-                    tool_info.get("output") if tool_info.get("name") == tname else None
-                )
+                output = tool_info.get("output") if tool_info.get("name") == tname else None
                 return True, tname, output
         elif metadata.get("status") == "finished" and tool_info.get("name"):
             return True, tool_info.get("name"), tool_info.get("output")
 
         return False, None, None
 
-    def _get_tool_session_id(
-        self, finished_tool_name: str, task_id: str, context_id: str
-    ) -> str:
+    def _get_tool_session_id(self, finished_tool_name: str, task_id: str, context_id: str) -> str:
         """Generate tool session ID."""
         return f"tool_{finished_tool_name}_{task_id}_{context_id}"
 
@@ -1429,7 +1363,7 @@ class RichStreamRenderer:
         meta["duration_seconds"] = dur
         meta["server_finished_at"] = (
             self.stream_processor.server_elapsed_time
-            if isinstance(self.stream_processor.server_elapsed_time, int | float)
+            if isinstance(self.stream_processor.server_elapsed_time, (int, float))
             else None
         )
         meta["finished_at"] = monotonic()
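Several hunks in this release replace `isinstance(x, int | float)` with `isinstance(x, (int, float))`. The two spellings are equivalent at runtime, but passing a union type to isinstance only works on Python 3.10+, so the tuple form is the more portable choice (the motivation is not stated in the diff; this is an inference). A minimal sketch:

def as_seconds(value):
    # Tuple form: accepted on every Python 3 version.
    if isinstance(value, (int, float)):
        return float(value)
    # The union form, isinstance(value, int | float), raises TypeError before Python 3.10.
    return None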
@@ -1439,9 +1373,7 @@ class RichStreamRenderer:
     ) -> None:
         """Add tool output to panel metadata."""
         if finished_tool_output is not None:
-            meta["chunks"].append(
-                self._format_output_block(finished_tool_output, finished_tool_name)
-            )
+            meta["chunks"].append(self._format_output_block(finished_tool_output, finished_tool_name))
             meta["output"] = finished_tool_output
 
     def _mark_panel_as_finished(self, meta: dict[str, Any], tool_sid: str) -> None:
@@ -1471,9 +1403,7 @@ class RichStreamRenderer:
         self._mark_panel_as_finished(meta, tool_sid)
         self._add_tool_output_to_panel(meta, finished_tool_output, finished_tool_name)
 
-    def _get_step_duration(
-        self, finished_tool_name: str, task_id: str, context_id: str
-    ) -> float | None:
+    def _get_step_duration(self, finished_tool_name: str, task_id: str, context_id: str) -> float | None:
         """Get step duration from tool panels."""
         tool_sid = f"tool_{finished_tool_name}_{task_id}_{context_id}"
         return self.tool_panels.get(tool_sid, {}).get("duration_seconds")
@@ -1548,9 +1478,7 @@ class RichStreamRenderer:
 
     def _should_create_snapshot(self, tool_sid: str) -> bool:
         """Check if a snapshot should be created."""
-        return self.cfg.append_finished_snapshots and not self.tool_panels.get(
-            tool_sid, {}
-        ).get("snapshot_printed")
+        return self.cfg.append_finished_snapshots and not self.tool_panels.get(tool_sid, {}).get("snapshot_printed")
 
     def _get_snapshot_title(self, meta: dict[str, Any], finished_tool_name: str) -> str:
         """Get the title for the snapshot."""
@@ -1558,7 +1486,7 @@ class RichStreamRenderer:
 
         # Add elapsed time to title
         dur = meta.get("duration_seconds")
-        if isinstance(dur, int | float):
+        if isinstance(dur, (int, float)):
             elapsed_str = self._format_snapshot_duration(dur)
             adjusted_title = f"{adjusted_title} · {elapsed_str}"
 
@@ -1595,9 +1523,7 @@ class RichStreamRenderer:
 
         return body_text
 
-    def _create_snapshot_panel(
-        self, adjusted_title: str, body_text: str, finished_tool_name: str
-    ) -> Any:
+    def _create_snapshot_panel(self, adjusted_title: str, body_text: str, finished_tool_name: str) -> Any:
         """Create the snapshot panel."""
         return create_tool_panel(
             title=adjusted_title,
@@ -1612,9 +1538,7 @@ class RichStreamRenderer:
         self.console.print(snapshot_panel)
         self.tool_panels[tool_sid]["snapshot_printed"] = True
 
-    def _create_tool_snapshot(
-        self, finished_tool_name: str, task_id: str, context_id: str
-    ) -> None:
+    def _create_tool_snapshot(self, finished_tool_name: str, task_id: str, context_id: str) -> None:
         """Create and print a snapshot for a finished tool."""
         tool_sid = f"tool_{finished_tool_name}_{task_id}_{context_id}"
 
@@ -1628,9 +1552,7 @@ class RichStreamRenderer:
         body_text = "".join(meta.get("chunks") or [])
         body_text = self._clamp_snapshot_body(body_text)
 
-        snapshot_panel = self._create_snapshot_panel(
-            adjusted_title, body_text, finished_tool_name
-        )
+        snapshot_panel = self._create_snapshot_panel(adjusted_title, body_text, finished_tool_name)
 
         self._print_and_mark_snapshot(tool_sid, snapshot_panel)
 
@@ -1652,9 +1574,7 @@ class RichStreamRenderer:
 
         # Create steps and panels for the primary tool
         if tool_name:
-            tool_sid = self._ensure_tool_panel(
-                tool_name, tool_args, task_id, context_id
-            )
+            tool_sid = self._ensure_tool_panel(tool_name, tool_args, task_id, context_id)
             self._start_tool_step(
                 task_id,
                 context_id,
@@ -1665,9 +1585,7 @@ class RichStreamRenderer:
             )
 
         # Handle additional tool calls
-        self._process_additional_tool_calls(
-            tool_calls_info, tool_name, task_id, context_id
-        )
+        self._process_additional_tool_calls(tool_calls_info, tool_name, task_id, context_id)
 
         # Check for tool completion
         (
@@ -1677,9 +1595,7 @@ class RichStreamRenderer:
         ) = self._detect_tool_completion(metadata, content)
 
         if is_tool_finished and finished_tool_name:
-            self._finish_tool_panel(
-                finished_tool_name, finished_tool_output, task_id, context_id
-            )
+            self._finish_tool_panel(finished_tool_name, finished_tool_output, task_id, context_id)
             self._finish_tool_step(
                 finished_tool_name,
                 finished_tool_output,
@@ -1735,9 +1651,7 @@ class RichStreamRenderer:
 
     def _get_analysis_progress_info(self) -> dict[str, Any]:
         total_steps = len(self.steps.order)
-        completed_steps = sum(
-            1 for sid in self.steps.order if is_step_finished(self.steps.by_id[sid])
-        )
+        completed_steps = sum(1 for sid in self.steps.order if is_step_finished(self.steps.by_id[sid]))
         current_step = None
         for sid in self.steps.order:
             if not is_step_finished(self.steps.by_id[sid]):
@@ -1745,13 +1659,11 @@ class RichStreamRenderer:
                 break
         # Prefer server elapsed time when available
         elapsed = 0.0
-        if isinstance(self.stream_processor.server_elapsed_time, int | float):
+        if isinstance(self.stream_processor.server_elapsed_time, (int, float)):
             elapsed = float(self.stream_processor.server_elapsed_time)
         elif self._started_at is not None:
             elapsed = monotonic() - self._started_at
-        progress_percent = (
-            int((completed_steps / total_steps) * 100) if total_steps else 0
-        )
+        progress_percent = int((completed_steps / total_steps) * 100) if total_steps else 0
         return {
             "total_steps": total_steps,
             "completed_steps": completed_steps,
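`_get_analysis_progress_info` (above) reduces to a guarded percentage plus an elapsed-time fallback; a standalone sketch of the same arithmetic (names are illustrative):

from time import monotonic

def progress_percent(completed: int, total: int) -> int:
    # Guard the division so an empty step list reports 0% instead of raising.
    return int((completed / total) * 100) if total else 0

def elapsed_seconds(server_elapsed, started_at):
    # Prefer the server-reported elapsed time; otherwise fall back to the local monotonic clock.
    if isinstance(server_elapsed, (int, float)):
        return float(server_elapsed)
    return monotonic() - started_at if started_at is not None else 0.0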
@@ -1840,9 +1752,7 @@ class RichStreamRenderer:
         server_elapsed = self.stream_processor.server_elapsed_time
         server_start = self._step_server_start_times.get(step.step_id)
 
-        if isinstance(server_elapsed, int | float) and isinstance(
-            server_start, int | float
-        ):
+        if isinstance(server_elapsed, (int, float)) and isinstance(server_start, (int, float)):
             return max(0.0, float(server_elapsed) - float(server_start))
 
         try:
@@ -1921,9 +1831,7 @@ class RichStreamRenderer:
         self._append_state_glyph(text_line, step)
         return text_line
 
-    def _append_status_badge(
-        self, text_line: Text, step: Step, status_badge: str
-    ) -> None:
+    def _append_status_badge(self, text_line: Text, step: Step, status_badge: str) -> None:
         """Append the formatted status badge when available."""
         glyph_key = getattr(step, "status_icon", None)
         glyph = glyph_for_status(glyph_key)
@@ -1993,9 +1901,7 @@ class RichStreamRenderer:
         args_text.append(formatted_args, style="dim")
         return args_text
 
-    def _build_arg_list(
-        self, prefix: str, formatted_args: list[str | tuple[int, str]]
-    ) -> Group | None:
+    def _build_arg_list(self, prefix: str, formatted_args: list[str | tuple[int, str]]) -> Group | None:
         """Render multi-line argument entries preserving indentation."""
         arg_lines: list[Text] = []
         for indent_level, text_value in self._iter_arg_entries(formatted_args):
@@ -2021,9 +1927,7 @@ class RichStreamRenderer:
         else:
             yield 0, str(value)
 
-    def _format_step_args(
-        self, step: Step
-    ) -> str | list[str] | list[tuple[int, str]] | None:
+    def _format_step_args(self, step: Step) -> str | list[str] | list[tuple[int, str]] | None:
         """Return a printable representation of tool arguments."""
         args = getattr(step, "args", None)
         if args is None:
@@ -2040,9 +1944,7 @@ class RichStreamRenderer:
 
         return None
 
-    def _format_dict_args(
-        self, args: dict[str, Any], *, step: Step
-    ) -> str | list[str] | list[tuple[int, str]] | None:
+    def _format_dict_args(self, args: dict[str, Any], *, step: Step) -> str | list[str] | list[tuple[int, str]] | None:
         """Format dictionary arguments with guardrails."""
         if not args:
             return None
@@ -2085,9 +1987,7 @@ class RichStreamRenderer:
             return True
         return False
 
-    def _format_dict_arg_lines(
-        self, args: dict[str, Any]
-    ) -> list[tuple[int, str]] | None:
+    def _format_dict_arg_lines(self, args: dict[str, Any]) -> list[tuple[int, str]] | None:
         """Render dictionary arguments as nested YAML-style lines."""
         lines: list[tuple[int, str]] = []
         for raw_key, value in args.items():
@@ -2095,9 +1995,7 @@ class RichStreamRenderer:
             lines.extend(self._format_nested_entry(key, value, indent=0))
         return lines or None
 
-    def _format_nested_entry(
-        self, key: str, value: Any, indent: int
-    ) -> list[tuple[int, str]]:
+    def _format_nested_entry(self, key: str, value: Any, indent: int) -> list[tuple[int, str]]:
         """Format a mapping entry recursively."""
         lines: list[tuple[int, str]] = []
 
@@ -2123,9 +2021,7 @@ class RichStreamRenderer:
         lines.append((indent, f"{key}: {formatted_value}"))
         return lines
 
-    def _format_nested_mapping(
-        self, mapping: dict[str, Any], indent: int
-    ) -> list[tuple[int, str]]:
+    def _format_nested_mapping(self, mapping: dict[str, Any], indent: int) -> list[tuple[int, str]]:
         """Format nested dictionary values."""
         nested_lines: list[tuple[int, str]] = []
         for raw_key, value in mapping.items():
@@ -2133,9 +2029,7 @@ class RichStreamRenderer:
             nested_lines.extend(self._format_nested_entry(key, value, indent))
         return nested_lines
 
-    def _format_sequence_entries(
-        self, sequence: list[Any], indent: int
-    ) -> list[tuple[int, str]]:
+    def _format_sequence_entries(self, sequence: list[Any], indent: int) -> list[tuple[int, str]]:
         """Format list/tuple/set values with YAML-style bullets."""
         if not sequence:
             return []
@@ -2158,18 +2052,14 @@ class RichStreamRenderer:
             return [(indent, f"- {formatted}")]
         return []
 
-    def _format_dict_sequence_item(
-        self, mapping: dict[str, Any], indent: int
-    ) -> list[tuple[int, str]]:
+    def _format_dict_sequence_item(self, mapping: dict[str, Any], indent: int) -> list[tuple[int, str]]:
         """Format a dictionary entry within a list."""
         child_lines = self._format_nested_mapping(mapping, indent + 1)
         if child_lines:
             return self._prepend_sequence_prefix(child_lines, indent)
         return [(indent, "- {}")]
 
-    def _format_nested_sequence_item(
-        self, sequence: list[Any], indent: int
-    ) -> list[tuple[int, str]]:
+    def _format_nested_sequence_item(self, sequence: list[Any], indent: int) -> list[tuple[int, str]]:
         """Format a nested sequence entry within a list."""
         child_lines = self._format_sequence_entries(sequence, indent + 1)
         if child_lines:
@@ -2177,9 +2067,7 @@ class RichStreamRenderer:
         return [(indent, "- []")]
 
     @staticmethod
-    def _prepend_sequence_prefix(
-        child_lines: list[tuple[int, str]], indent: int
-    ) -> list[tuple[int, str]]:
+    def _prepend_sequence_prefix(child_lines: list[tuple[int, str]], indent: int) -> list[tuple[int, str]]:
         """Attach a sequence bullet to the first child line."""
         _, first_text = child_lines[0]
         prefixed: list[tuple[int, str]] = [(indent, f"- {first_text}")]
@@ -2239,9 +2127,7 @@ class RichStreamRenderer:
 
         return Group(*renderables)
 
-    def _update_final_duration(
-        self, duration: float | None, *, overwrite: bool = False
-    ) -> None:
+    def _update_final_duration(self, duration: float | None, *, overwrite: bool = False) -> None:
         """Store formatted duration for eventual final panels."""
         if duration is None:
             return
@@ -2275,12 +2161,7 @@ class RichStreamRenderer:
     def _format_dict_or_list_output(self, output_value: dict | list) -> str:
         """Format dict/list output as pretty JSON."""
         try:
-            return (
-                self.OUTPUT_PREFIX
-                + "```json\n"
-                + json.dumps(output_value, indent=2)
-                + "\n```\n"
-            )
+            return self.OUTPUT_PREFIX + "```json\n" + json.dumps(output_value, indent=2) + "\n```\n"
         except Exception:
             return self.OUTPUT_PREFIX + str(output_value) + "\n"
 
@@ -2304,12 +2185,7 @@ class RichStreamRenderer:
         """Format string that looks like JSON."""
         try:
             parsed = json.loads(output)
-            return (
-                self.OUTPUT_PREFIX
-                + "```json\n"
-                + json.dumps(parsed, indent=2)
-                + "\n```\n"
-            )
+            return self.OUTPUT_PREFIX + "```json\n" + json.dumps(parsed, indent=2) + "\n```\n"
         except Exception:
             return self.OUTPUT_PREFIX + output + "\n"
 
@@ -2319,9 +2195,7 @@ class RichStreamRenderer:
         s = self._clean_sub_agent_prefix(s, tool_name)
 
         # If looks like JSON, pretty print it
-        if (s.startswith("{") and s.endswith("}")) or (
-            s.startswith("[") and s.endswith("]")
-        ):
+        if (s.startswith("{") and s.endswith("}")) or (s.startswith("[") and s.endswith("]")):
             return self._format_json_string_output(s)
 
         return self.OUTPUT_PREFIX + s + "\n"
@@ -2335,7 +2209,7 @@ class RichStreamRenderer:
 
     def _format_output_block(self, output_value: Any, tool_name: str | None) -> str:
         """Format an output value for panel display."""
-        if isinstance(output_value, dict | list):
+        if isinstance(output_value, (dict, list)):
            return self._format_dict_or_list_output(output_value)
         elif isinstance(output_value, str):
             return self._format_string_output(output_value, tool_name)
@@ -34,16 +34,10 @@ def _coerce_datetime(value: Any) -> datetime | None:
     return None
 
 
-def _parse_event_timestamp(
-    event: dict[str, Any], received_ts: datetime | None = None
-) -> datetime | None:
+def _parse_event_timestamp(event: dict[str, Any], received_ts: datetime | None = None) -> datetime | None:
     """Resolve the most accurate timestamp available for the event."""
     if received_ts is not None:
-        return (
-            received_ts
-            if received_ts.tzinfo
-            else received_ts.replace(tzinfo=timezone.utc)
-        )
+        return received_ts if received_ts.tzinfo else received_ts.replace(tzinfo=timezone.utc)
 
     ts_value = event.get("timestamp") or (event.get("metadata") or {}).get("timestamp")
     return _coerce_datetime(ts_value)
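`_parse_event_timestamp` (above) treats a naive `received_ts` as already being in UTC and only attaches the timezone rather than converting; a small standalone sketch of that convention (illustrative, not the SDK's code):

from datetime import datetime, timezone

def ensure_utc(ts: datetime) -> datetime:
    # Attach UTC to naive datetimes without shifting the clock value;
    # timezone-aware datetimes pass through unchanged.
    return ts if ts.tzinfo else ts.replace(tzinfo=timezone.utc)

print(ensure_utc(datetime(2024, 1, 1, 12, 0)))  # 2024-01-01 12:00:00+00:00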
@@ -86,9 +80,7 @@ def _get_event_metadata(event: dict[str, Any]) -> tuple[str, str | None]:
     return sse_kind, status_str
 
 
-def _build_debug_title(
-    sse_kind: str, status_str: str | None, ts_ms: str, rel: float
-) -> str:
+def _build_debug_title(sse_kind: str, status_str: str | None, ts_ms: str, rel: float) -> str:
     """Build the debug event title."""
     if status_str:
         return f"SSE: {sse_kind} — {status_str} @ {ts_ms} (+{rel:.2f}s)"
@@ -104,9 +96,7 @@ def _dejson_value(obj: Any) -> Any:
         return [_dejson_value(x) for x in obj]
     if isinstance(obj, str):
         s = obj.strip()
-        if (s.startswith("{") and s.endswith("}")) or (
-            s.startswith("[") and s.endswith("]")
-        ):
+        if (s.startswith("{") and s.endswith("}")) or (s.startswith("[") and s.endswith("]")):
             try:
                 return _dejson_value(json.loads(s))
             except Exception:
@@ -128,9 +128,7 @@ def create_context_panel(
     )
 
 
-def create_final_panel(
-    content: str, title: str = "Final Result", theme: str = "dark"
-) -> AIPPanel:
+def create_final_panel(content: str, title: str = "Final Result", theme: str = "dark") -> AIPPanel:
     """Create a final result panel.
 
     Args: