glaip-sdk 0.2.1__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- glaip_sdk/_version.py +8 -0
- glaip_sdk/branding.py +13 -0
- glaip_sdk/cli/commands/agents.py +180 -39
- glaip_sdk/cli/commands/mcps.py +44 -18
- glaip_sdk/cli/commands/models.py +11 -5
- glaip_sdk/cli/commands/tools.py +35 -16
- glaip_sdk/cli/commands/transcripts.py +8 -0
- glaip_sdk/cli/constants.py +38 -0
- glaip_sdk/cli/context.py +8 -0
- glaip_sdk/cli/display.py +34 -19
- glaip_sdk/cli/main.py +14 -7
- glaip_sdk/cli/masking.py +8 -33
- glaip_sdk/cli/pager.py +9 -10
- glaip_sdk/cli/slash/agent_session.py +57 -20
- glaip_sdk/cli/slash/prompt.py +8 -0
- glaip_sdk/cli/slash/remote_runs_controller.py +566 -0
- glaip_sdk/cli/slash/session.py +341 -46
- glaip_sdk/cli/slash/tui/__init__.py +9 -0
- glaip_sdk/cli/slash/tui/remote_runs_app.py +632 -0
- glaip_sdk/cli/transcript/viewer.py +232 -32
- glaip_sdk/cli/update_notifier.py +2 -2
- glaip_sdk/cli/utils.py +266 -35
- glaip_sdk/cli/validators.py +5 -6
- glaip_sdk/client/__init__.py +2 -1
- glaip_sdk/client/_agent_payloads.py +30 -0
- glaip_sdk/client/agent_runs.py +147 -0
- glaip_sdk/client/agents.py +186 -22
- glaip_sdk/client/main.py +23 -6
- glaip_sdk/client/mcps.py +2 -4
- glaip_sdk/client/run_rendering.py +66 -0
- glaip_sdk/client/tools.py +2 -3
- glaip_sdk/config/constants.py +11 -0
- glaip_sdk/models/__init__.py +56 -0
- glaip_sdk/models/agent_runs.py +117 -0
- glaip_sdk/rich_components.py +58 -2
- glaip_sdk/utils/client_utils.py +13 -0
- glaip_sdk/utils/export.py +143 -0
- glaip_sdk/utils/import_export.py +6 -9
- glaip_sdk/utils/rendering/__init__.py +122 -1
- glaip_sdk/utils/rendering/renderer/base.py +3 -7
- glaip_sdk/utils/rendering/renderer/debug.py +0 -1
- glaip_sdk/utils/rendering/renderer/stream.py +4 -12
- glaip_sdk/utils/rendering/steps.py +1 -0
- glaip_sdk/utils/resource_refs.py +26 -15
- glaip_sdk/utils/serialization.py +16 -0
- {glaip_sdk-0.2.1.dist-info → glaip_sdk-0.3.0.dist-info}/METADATA +24 -2
- glaip_sdk-0.3.0.dist-info/RECORD +94 -0
- glaip_sdk-0.2.1.dist-info/RECORD +0 -86
- {glaip_sdk-0.2.1.dist-info → glaip_sdk-0.3.0.dist-info}/WHEEL +0 -0
- {glaip_sdk-0.2.1.dist-info → glaip_sdk-0.3.0.dist-info}/entry_points.txt +0 -0
|
@@ -0,0 +1,117 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Agent run models for AIP SDK.
|
|
3
|
+
|
|
4
|
+
Authors:
|
|
5
|
+
Raymond Christopher (raymond.christopher@gdplabs.id)
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from datetime import datetime, timedelta
|
|
9
|
+
from typing import Any, Literal
|
|
10
|
+
from uuid import UUID
|
|
11
|
+
|
|
12
|
+
from pydantic import BaseModel, Field, field_validator, model_validator
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
# Type alias for SSE event dictionaries
|
|
16
|
+
RunOutputChunk = dict[str, Any]
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class RunSummary(BaseModel):
    """A single agent run as shown in list/table views (metadata only)."""

    id: UUID
    agent_id: UUID
    run_type: Literal["manual", "schedule"]
    schedule_id: UUID | None = None
    status: Literal["started", "success", "failed", "cancelled", "aborted", "unavailable"]
    started_at: datetime
    completed_at: datetime | None = None
    input: str | None = None
    config: dict[str, Any] | None = None
    created_at: datetime
    updated_at: datetime

    @field_validator("completed_at")
    @classmethod
    def validate_completed_after_started(cls, v: datetime | None, info) -> datetime | None:
        """Reject a completed_at timestamp that precedes started_at."""
        # started_at is absent from info.data when its own validation failed;
        # in that case there is nothing meaningful to compare against.
        started_at = info.data.get("started_at")
        if v is not None and started_at is not None and v < started_at:
            raise ValueError("completed_at must be after started_at")
        return v

    def duration(self) -> timedelta | None:
        """Return the elapsed time from started_at to completed_at.

        Returns:
            The duration as a timedelta when the run has completed, None otherwise.
        """
        if self.completed_at is None:
            return None
        return self.completed_at - self.started_at

    def duration_formatted(self) -> str:
        """Return the run duration formatted as HH:MM:SS.

        Returns:
            Formatted duration string, or "—" when the run has not completed.
        """
        elapsed = self.duration()
        if elapsed is None:
            return "—"
        hours, remainder = divmod(int(elapsed.total_seconds()), 3600)
        minutes, seconds = divmod(remainder, 60)
        return f"{hours:02d}:{minutes:02d}:{seconds:02d}"

    def input_preview(self, max_length: int = 120) -> str:
        """Return a single-line preview of the input suitable for table display.

        Args:
            max_length: Maximum length of the preview string.

        Returns:
            Whitespace-collapsed, truncated input string, or "—" when input
            is None or empty.
        """
        if not self.input:
            return "—"
        # Collapse all runs of whitespace (including newlines) to single spaces.
        collapsed = " ".join(self.input.split())
        if len(collapsed) <= max_length:
            return collapsed
        return collapsed[:max_length] + "…"
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
class RunsPage(BaseModel):
    """One page of run summaries returned by the paginated list endpoint."""

    data: list[RunSummary]
    total: int = Field(ge=0)
    page: int = Field(ge=1)
    limit: int = Field(ge=1, le=100)
    has_next: bool
    has_prev: bool

    @model_validator(mode="after")
    def validate_pagination_consistency(self) -> "RunsPage":
        """Ensure the has_next flag agrees with page, limit and total."""
        # A further page can only exist when rows beyond page * limit remain.
        consumed = self.page * self.limit
        if self.has_next and consumed >= self.total:
            raise ValueError("has_next inconsistency: page * limit must be < total when has_next is True")
        return self
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
class RunWithOutput(RunSummary):
    """A RunSummary augmented with the complete SSE event stream for detail views."""

    # Full ordered list of SSE event dicts captured for the run.
    output: list[RunOutputChunk] = Field(default_factory=list)

    @field_validator("output", mode="before")
    @classmethod
    def normalize_output(cls, v: Any) -> list[RunOutputChunk]:
        """Coerce a null output payload from the API into an empty event list."""
        return [] if v is None else v
|
glaip_sdk/rich_components.py
CHANGED
|
@@ -1,10 +1,15 @@
|
|
|
1
|
-
"""Custom Rich components with copy-friendly defaults.
|
|
1
|
+
"""Custom Rich components with copy-friendly defaults.
|
|
2
|
+
|
|
3
|
+
Authors:
|
|
4
|
+
Raymond Christopher (raymond.christopher@gdplabs.id)
|
|
5
|
+
"""
|
|
2
6
|
|
|
3
7
|
from __future__ import annotations
|
|
4
8
|
|
|
5
9
|
from rich import box
|
|
6
10
|
from rich.panel import Panel
|
|
7
11
|
from rich.table import Table
|
|
12
|
+
from rich.text import Text
|
|
8
13
|
|
|
9
14
|
|
|
10
15
|
class AIPPanel(Panel):
|
|
@@ -66,4 +71,55 @@ class AIPGrid(Table):
|
|
|
66
71
|
)
|
|
67
72
|
|
|
68
73
|
|
|
69
|
-
|
|
74
|
+
class RemoteRunsTable(AIPTable):
    """Rich Table for displaying remote agent runs with pagination support."""

    def __init__(self, *args, **kwargs):
        """Initialize RemoteRunsTable with the fixed set of run columns.

        Args:
            *args: Positional arguments forwarded to AIPTable.
            **kwargs: Keyword arguments forwarded to AIPTable.
        """
        kwargs.setdefault("row_styles", ("dim", "none"))
        kwargs.setdefault("show_header", True)
        super().__init__(*args, **kwargs)
        # Narrow selection-gutter column, then the fixed-width data columns.
        self.add_column("", width=2, no_wrap=True)
        for header, style, width in (
            ("Run UUID", "cyan", 36),
            ("Type", "yellow", 8),
            ("Status", "magenta", 12),
            ("Started (UTC)", "dim", 20),
            ("Completed (UTC)", "dim", 20),
            ("Duration", "green", 10),
        ):
            self.add_column(header, style=style, width=width, no_wrap=True)
        # The preview column is the only one allowed to truncate with an ellipsis.
        self.add_column("Input Preview", style="white", width=40, overflow="ellipsis")

    def add_run_row(
        self,
        run_uuid: str,
        run_type: str,
        status: str,
        started: str,
        completed: str,
        duration: str,
        preview: str,
        *,
        selected: bool = False,
    ) -> None:
        """Append a run row with optional selection styling."""
        if selected:
            marker = Text("› ", style="bold bright_cyan")
            highlight = "reverse"
        else:
            marker = Text(" ")
            highlight = None
        cells = (marker, run_uuid, run_type, status, started, completed, duration, preview)
        self.add_row(*cells, style=highlight)
|
|
123
|
+
|
|
124
|
+
|
|
125
|
+
__all__ = ["AIPPanel", "AIPTable", "AIPGrid", "RemoteRunsTable"]
|
glaip_sdk/utils/client_utils.py
CHANGED
|
@@ -426,6 +426,19 @@ def _prepare_stream_entry(
|
|
|
426
426
|
)
|
|
427
427
|
|
|
428
428
|
|
|
429
|
+
def add_kwargs_to_payload(payload: dict[str, Any], kwargs: dict[str, Any], excluded_keys: set[str]) -> None:
    """Merge kwargs into payload in place, skipping the excluded keys.

    Args:
        payload: Payload dictionary to update.
        kwargs: Keyword arguments to add.
        excluded_keys: Keys to exclude from kwargs.
    """
    payload.update((key, value) for key, value in kwargs.items() if key not in excluded_keys)
|
|
440
|
+
|
|
441
|
+
|
|
429
442
|
def prepare_multipart_data(message: str, files: list[str | BinaryIO]) -> MultipartData:
|
|
430
443
|
"""Prepare multipart form data for file uploads.
|
|
431
444
|
|
|
@@ -0,0 +1,143 @@
|
|
|
1
|
+
#!/usr/bin/env python3
|
|
2
|
+
"""Export utilities for remote agent run transcripts.
|
|
3
|
+
|
|
4
|
+
Authors:
|
|
5
|
+
Raymond Christopher (raymond.christopher@gdplabs.id)
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import json
|
|
9
|
+
from datetime import datetime
|
|
10
|
+
from pathlib import Path
|
|
11
|
+
from typing import Any
|
|
12
|
+
|
|
13
|
+
from glaip_sdk.models.agent_runs import RunWithOutput, RunOutputChunk
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def export_remote_transcript_jsonl(
    run: RunWithOutput,
    destination: Path,
    *,
    overwrite: bool = False,
    agent_name: str | None = None,
    model: str | None = None,
) -> Path:
    """Export a remote run transcript to JSONL format compatible with local transcript viewers.

    Args:
        run: RunWithOutput instance to export.
        destination: Target file path for the JSONL export.
        overwrite: Whether to overwrite an existing file.
        agent_name: Optional agent name for metadata.
        model: Optional model name for metadata (extracted from run.config if not provided).

    Returns:
        Path to the exported file.

    Raises:
        FileExistsError: If destination exists and overwrite is False.
        OSError: If the file cannot be written.
    """
    if destination.exists() and not overwrite:
        raise FileExistsError(f"File already exists: {destination}")

    # Create any missing parent directories before writing.
    destination.parent.mkdir(parents=True, exist_ok=True)

    resolved_model = model or _extract_model(run)
    final_text = _extract_final_output(run.output) or ""

    nested_meta = _build_meta_payload(run, agent_name, resolved_model)
    meta_record = _build_meta_record(run, agent_name, resolved_model, final_text, nested_meta)

    _write_jsonl_file(destination, meta_record, run.output)

    return destination
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
def _build_meta_payload(run: RunWithOutput, agent_name: str | None, model_name: str | None) -> dict[str, Any]:
    """Assemble the nested metadata dictionary embedded in the meta record."""
    payload: dict[str, Any] = {
        "agent_name": agent_name,
        "model": model_name,
        "input_message": run.input,
        "status": run.status,
        "run_type": run.run_type,
    }
    # Optional / derived fields: UUIDs become strings, missing values become null.
    payload["schedule_id"] = str(run.schedule_id) if run.schedule_id else None
    payload["config"] = run.config or {}
    payload["created_at"] = run.created_at.isoformat() if run.created_at else None
    payload["updated_at"] = run.updated_at.isoformat() if run.updated_at else None
    payload["event_count"] = len(run.output)
    return payload
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def _build_meta_record(
    run: RunWithOutput,
    agent_name: str | None,
    model_name: str | None,
    final_output_text: str,
    meta_payload: dict[str, Any],
) -> dict[str, Any]:
    """Assemble the top-level meta record written as the first JSONL entry."""

    def _iso(value):
        # Timestamps serialize to ISO-8601 strings; missing values become null.
        return value.isoformat() if value else None

    record: dict[str, Any] = {
        "type": "meta",
        "run_id": str(run.id),
        "agent_id": str(run.agent_id),
        "agent_name": agent_name,
        "model": model_name,
        "created_at": _iso(run.created_at),
        "default_output": final_output_text,
        "final_output": final_output_text,
        "server_run_id": str(run.id),
        "started_at": _iso(run.started_at),
        "finished_at": _iso(run.completed_at),
        "meta": meta_payload,
        "source": "remote_history",
    }
    # Back-compat fields used by older tooling
    record.update(
        run_type=run.run_type,
        schedule_id=str(run.schedule_id) if run.schedule_id else None,
        status=run.status,
        input=run.input,
        config=run.config or {},
        updated_at=_iso(run.updated_at),
    )
    return record
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
def _write_jsonl_file(destination: Path, meta_record: dict[str, Any], events: list[RunOutputChunk]) -> None:
    """Write the JSONL file with meta and event records.

    The meta record is written first, followed by each SSE event wrapped as
    {"type": "event", "event": ...}.

    Args:
        destination: Target file path; the parent directory must already exist.
        meta_record: Record describing the run, written as the first entry.
        events: SSE event dicts to append after the meta record.
    """
    records: list[dict[str, Any]] = [meta_record]
    records.extend({"type": "event", "event": event} for event in events)

    # NOTE(review): indent=2 produces multi-line pretty-printed records with a
    # blank line between them — not strict one-record-per-line JSONL.
    # Presumably the local transcript viewer expects this layout; confirm
    # before changing.
    with destination.open("w", encoding="utf-8") as fh:
        for idx, record in enumerate(records):
            json.dump(record, fh, ensure_ascii=False, indent=2, default=_json_default)
            fh.write("\n")
            # Blank separator line between records, but not after the last one.
            if idx != len(records) - 1:
                fh.write("\n")
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
def _extract_model(run: RunWithOutput) -> str | None:
    """Best-effort extraction of the model name from run metadata.

    Checks config["model"] first, then falls back to config["llm"]["model"].

    Args:
        run: Run whose config is inspected.

    Returns:
        The model name when a string value is found, otherwise None.
    """
    config = run.config or {}
    if not isinstance(config, dict):
        return None
    # Fix: the original `config.get("llm", {}).get("model")` raised
    # AttributeError when config["llm"] existed but was not a dict (e.g. a
    # plain model-name string). Guard the nested lookup instead.
    llm_config = config.get("llm")
    fallback = llm_config.get("model") if isinstance(llm_config, dict) else None
    model = config.get("model") or fallback
    return model if isinstance(model, str) else None
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
def _extract_final_output(events: list[RunOutputChunk]) -> str | None:
    """Return the final response content from the event stream.

    Scans the events newest-first and returns the content of the first chunk
    that both carries content and is flagged as the final response.
    """
    for chunk in reversed(events):
        is_final = chunk.get("event_type") == "final_response" or chunk.get("final")
        content = chunk.get("content")
        if content and is_final:
            return str(content)
    return None
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
def _json_default(obj: Any) -> Any:
|
|
140
|
+
"""JSON serializer for datetime objects."""
|
|
141
|
+
if isinstance(obj, datetime):
|
|
142
|
+
return obj.isoformat()
|
|
143
|
+
raise TypeError(f"Type {type(obj)} not serializable")
|
glaip_sdk/utils/import_export.py
CHANGED
|
@@ -9,8 +9,10 @@ Authors:
|
|
|
9
9
|
|
|
10
10
|
from typing import Any
|
|
11
11
|
|
|
12
|
+
from glaip_sdk.utils.resource_refs import _extract_id_from_item
|
|
12
13
|
|
|
13
|
-
|
|
14
|
+
|
|
15
|
+
def extract_ids_from_export(items: list[Any]) -> list[str]:
|
|
14
16
|
"""Extract IDs from export format (list of dicts with id/name fields).
|
|
15
17
|
|
|
16
18
|
This function is similar to `extract_ids` in `resource_refs.py` but differs in behavior:
|
|
@@ -36,14 +38,9 @@ def extract_ids_from_export(items: list[Any]) -> list[str]: # pylint: disable=d
|
|
|
36
38
|
|
|
37
39
|
ids = []
|
|
38
40
|
for item in items:
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
ids.append(str(item.id))
|
|
43
|
-
elif isinstance(item, dict) and "id" in item:
|
|
44
|
-
ids.append(str(item["id"]))
|
|
45
|
-
# Skip items without ID (don't convert to string)
|
|
46
|
-
# Note: This differs from extract_ids() in resource_refs.py which converts all items to strings
|
|
41
|
+
extracted = _extract_id_from_item(item, skip_missing=True)
|
|
42
|
+
if extracted is not None:
|
|
43
|
+
ids.append(extracted)
|
|
47
44
|
|
|
48
45
|
return ids
|
|
49
46
|
|
|
@@ -1 +1,122 @@
|
|
|
1
|
-
"""Rendering utilities package (formatting, models, steps, debug).
|
|
1
|
+
"""Rendering utilities package (formatting, models, steps, debug).
|
|
2
|
+
|
|
3
|
+
Authors:
|
|
4
|
+
Raymond Christopher (raymond.christopher@gdplabs.id)
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
from datetime import datetime
|
|
8
|
+
from typing import Any
|
|
9
|
+
|
|
10
|
+
from rich.console import Console
|
|
11
|
+
|
|
12
|
+
from glaip_sdk.models.agent_runs import RunWithOutput
|
|
13
|
+
from glaip_sdk.utils.rendering.renderer.debug import render_debug_event
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
def _parse_event_received_timestamp(event: dict[str, Any]) -> datetime | None:
|
|
17
|
+
"""Parse received_at timestamp from SSE event.
|
|
18
|
+
|
|
19
|
+
Args:
|
|
20
|
+
event: SSE event dictionary
|
|
21
|
+
|
|
22
|
+
Returns:
|
|
23
|
+
Parsed datetime or None if not available
|
|
24
|
+
"""
|
|
25
|
+
received_at = event.get("received_at")
|
|
26
|
+
if not received_at:
|
|
27
|
+
return None
|
|
28
|
+
|
|
29
|
+
if isinstance(received_at, datetime):
|
|
30
|
+
return received_at
|
|
31
|
+
|
|
32
|
+
if isinstance(received_at, str):
|
|
33
|
+
try:
|
|
34
|
+
# Try ISO format first
|
|
35
|
+
return datetime.fromisoformat(received_at.replace("Z", "+00:00"))
|
|
36
|
+
except ValueError:
|
|
37
|
+
try:
|
|
38
|
+
# Try common formats
|
|
39
|
+
return datetime.strptime(received_at, "%Y-%m-%dT%H:%M:%S.%fZ")
|
|
40
|
+
except ValueError:
|
|
41
|
+
return None
|
|
42
|
+
|
|
43
|
+
return None
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def render_remote_sse_transcript(
    run: RunWithOutput,
    console: Console,
    *,
    show_metadata: bool = True,
) -> None:
    """Render remote SSE transcript events for a RunWithOutput.

    Args:
        run: RunWithOutput instance containing events.
        console: Rich console to render to.
        show_metadata: Whether to print the run metadata summary first.
    """
    if show_metadata:
        console.print(f"[bold]Run: {run.id}[/bold]")
        schedule_line = f"Schedule ID: {run.schedule_id}" if run.schedule_id else "Schedule: —"
        detail_lines = [
            f"Agent: {run.agent_id}",
            f"Status: {run.status}",
            f"Type: {run.run_type}",
            schedule_line,
            f"Started: {run.started_at.isoformat()}",
        ]
        if run.completed_at:
            detail_lines.append(f"Completed: {run.completed_at.isoformat()}")
        detail_lines.append(f"Duration: {run.duration_formatted()}")
        for line in detail_lines:
            console.print(f"[dim]{line}[/dim]")
        console.print()

    if not run.output:
        console.print("[dim]No SSE events available for this run.[/dim]")
        return

    console.print("[bold]SSE Events[/bold]")
    console.print("[dim]────────────────────────────────────────────────────────[/dim]")

    # The first event that carries a timestamp anchors relative timing for
    # everything that follows.
    first_ts: datetime | None = None
    for chunk in run.output:
        received_ts = _parse_event_received_timestamp(chunk)
        if first_ts is None and received_ts is not None:
            first_ts = received_ts
        render_debug_event(
            chunk,
            console,
            received_ts=received_ts,
            baseline_ts=first_ts,
        )

    console.print()
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
class RemoteSSETranscriptRenderer:
    """Thin object wrapper around render_remote_sse_transcript."""

    def __init__(self, console: Console | None = None):
        """Create the renderer.

        Args:
            console: Rich console instance; a default Console is created when None.
        """
        self.console = console or Console()

    def render(self, run: RunWithOutput, *, show_metadata: bool = True) -> None:
        """Render a remote run transcript to this renderer's console.

        Args:
            run: RunWithOutput instance to render.
            show_metadata: Whether to show the run metadata summary.
        """
        render_remote_sse_transcript(run, self.console, show_metadata=show_metadata)
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
__all__ = [
|
|
120
|
+
"render_remote_sse_transcript",
|
|
121
|
+
"RemoteSSETranscriptRenderer",
|
|
122
|
+
]
|
|
@@ -491,13 +491,9 @@ class RichStreamRenderer:
|
|
|
491
491
|
|
|
492
492
|
def _handle_agent_step_event(self, ev: dict[str, Any], metadata: dict[str, Any]) -> None:
|
|
493
493
|
"""Handle agent step events."""
|
|
494
|
-
# Extract tool information
|
|
495
|
-
(
|
|
496
|
-
|
|
497
|
-
tool_args,
|
|
498
|
-
tool_out,
|
|
499
|
-
tool_calls_info,
|
|
500
|
-
) = self.stream_processor.parse_tool_calls(ev)
|
|
494
|
+
# Extract tool information using stream processor
|
|
495
|
+
tool_calls_result = self.stream_processor.parse_tool_calls(ev)
|
|
496
|
+
tool_name, tool_args, tool_out, tool_calls_info = tool_calls_result
|
|
501
497
|
|
|
502
498
|
payload = metadata.get("metadata") or {}
|
|
503
499
|
|
|
@@ -129,21 +129,13 @@ class StreamProcessor:
|
|
|
129
129
|
metadata = event.get("metadata", {})
|
|
130
130
|
|
|
131
131
|
# Try primary extraction method
|
|
132
|
-
(
|
|
133
|
-
|
|
134
|
-
tool_args,
|
|
135
|
-
tool_out,
|
|
136
|
-
tool_calls_info,
|
|
137
|
-
) = self._extract_metadata_tool_calls(metadata)
|
|
132
|
+
tool_calls_result = self._extract_metadata_tool_calls(metadata)
|
|
133
|
+
tool_name, tool_args, tool_out, tool_calls_info = tool_calls_result
|
|
138
134
|
|
|
139
135
|
# Fallback to nested metadata.tool_info (newer schema)
|
|
140
136
|
if not tool_calls_info:
|
|
141
|
-
(
|
|
142
|
-
|
|
143
|
-
tool_args,
|
|
144
|
-
tool_out,
|
|
145
|
-
tool_calls_info,
|
|
146
|
-
) = self._extract_tool_calls_from_metadata(metadata)
|
|
137
|
+
fallback_result = self._extract_tool_calls_from_metadata(metadata)
|
|
138
|
+
tool_name, tool_args, tool_out, tool_calls_info = fallback_result
|
|
147
139
|
|
|
148
140
|
return tool_name, tool_args, tool_out, tool_calls_info
|
|
149
141
|
|
|
@@ -998,6 +998,7 @@ class StepManager:
|
|
|
998
998
|
def _coerce_server_time(value: Any) -> float | None:
|
|
999
999
|
"""Convert a raw SSE time payload into a float if possible."""
|
|
1000
1000
|
# Reuse the implementation from base renderer
|
|
1001
|
+
# Import here to avoid circular dependency
|
|
1001
1002
|
from glaip_sdk.utils.rendering.renderer.base import RichStreamRenderer
|
|
1002
1003
|
|
|
1003
1004
|
return RichStreamRenderer._coerce_server_time(value)
|
glaip_sdk/utils/resource_refs.py
CHANGED
|
@@ -8,7 +8,6 @@ Authors:
|
|
|
8
8
|
Raymond Christopher (raymond.christopher@gdplabs.id)
|
|
9
9
|
"""
|
|
10
10
|
|
|
11
|
-
# pylint: disable=duplicate-code
|
|
12
11
|
import re
|
|
13
12
|
from typing import Any
|
|
14
13
|
from uuid import UUID
|
|
@@ -30,7 +29,29 @@ def is_uuid(value: str) -> bool:
|
|
|
30
29
|
return False
|
|
31
30
|
|
|
32
31
|
|
|
33
|
-
def
|
|
32
|
+
def _extract_id_from_item(item: Any, *, skip_missing: bool = False) -> str | None:
|
|
33
|
+
"""Extract ID from a single item.
|
|
34
|
+
|
|
35
|
+
Args:
|
|
36
|
+
item: Item that may be a string, object with .id, or dict with "id" key.
|
|
37
|
+
skip_missing: If True, return None for items without IDs. If False, convert to string.
|
|
38
|
+
|
|
39
|
+
Returns:
|
|
40
|
+
Extracted ID as string, or None if skip_missing=True and no ID found.
|
|
41
|
+
"""
|
|
42
|
+
if isinstance(item, str):
|
|
43
|
+
return item
|
|
44
|
+
if hasattr(item, "id"):
|
|
45
|
+
return str(item.id)
|
|
46
|
+
if isinstance(item, dict) and "id" in item:
|
|
47
|
+
return str(item["id"])
|
|
48
|
+
if skip_missing:
|
|
49
|
+
return None
|
|
50
|
+
# Fallback: convert to string
|
|
51
|
+
return str(item)
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def extract_ids(items: list[str | Any] | None) -> list[str]:
|
|
34
55
|
"""Extract IDs from a list of objects or strings.
|
|
35
56
|
|
|
36
57
|
This function unifies the behavior between CLI and SDK layers, always
|
|
@@ -50,19 +71,9 @@ def extract_ids(items: list[str | Any] | None) -> list[str]: # pylint: disable=
|
|
|
50
71
|
if not items:
|
|
51
72
|
return []
|
|
52
73
|
|
|
53
|
-
|
|
54
|
-
for item in items
|
|
55
|
-
|
|
56
|
-
ids.append(item)
|
|
57
|
-
elif hasattr(item, "id"):
|
|
58
|
-
ids.append(str(item.id))
|
|
59
|
-
elif isinstance(item, dict) and "id" in item:
|
|
60
|
-
ids.append(str(item["id"]))
|
|
61
|
-
else:
|
|
62
|
-
# Fallback: convert to string
|
|
63
|
-
ids.append(str(item))
|
|
64
|
-
|
|
65
|
-
return ids
|
|
74
|
+
# Extract IDs from all items, converting non-ID items to strings
|
|
75
|
+
extracted_ids = [_extract_id_from_item(item, skip_missing=False) for item in items]
|
|
76
|
+
return [id_val for id_val in extracted_ids if id_val is not None]
|
|
66
77
|
|
|
67
78
|
|
|
68
79
|
def extract_names(items: list[str | Any] | None) -> list[str]:
|
glaip_sdk/utils/serialization.py
CHANGED
|
@@ -88,9 +88,20 @@ def write_yaml(file_path: Path, data: dict[str, Any]) -> None:
|
|
|
88
88
|
|
|
89
89
|
# Custom YAML dumper for user-friendly instruction formatting
|
|
90
90
|
class LiteralString(str):
|
|
91
|
+
"""String subclass for YAML literal block scalar formatting."""
|
|
92
|
+
|
|
91
93
|
pass
|
|
92
94
|
|
|
93
95
|
def literal_string_representer(dumper: yaml.Dumper, data: "LiteralString") -> yaml.nodes.Node:
|
|
96
|
+
"""YAML representer for LiteralString to use literal block scalar style.
|
|
97
|
+
|
|
98
|
+
Args:
|
|
99
|
+
dumper: YAML dumper instance.
|
|
100
|
+
data: LiteralString instance to represent.
|
|
101
|
+
|
|
102
|
+
Returns:
|
|
103
|
+
YAML node with literal block scalar style for multiline strings.
|
|
104
|
+
"""
|
|
94
105
|
# Use literal block scalar (|) for multiline strings to preserve formatting
|
|
95
106
|
if "\n" in data:
|
|
96
107
|
return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|")
|
|
@@ -239,6 +250,11 @@ def _iter_public_attribute_names(resource: Any) -> Iterable[str]:
|
|
|
239
250
|
names: list[str] = []
|
|
240
251
|
|
|
241
252
|
def _collect(candidates: Iterable[str] | None) -> None:
|
|
253
|
+
"""Collect unique candidate attribute names.
|
|
254
|
+
|
|
255
|
+
Args:
|
|
256
|
+
candidates: Iterable of candidate attribute names.
|
|
257
|
+
"""
|
|
242
258
|
for candidate in candidates or ():
|
|
243
259
|
if candidate not in seen:
|
|
244
260
|
seen.add(candidate)
|