glaip-sdk 0.0.1b10__py3-none-any.whl → 0.0.3__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- glaip_sdk/__init__.py +2 -2
- glaip_sdk/_version.py +51 -0
- glaip_sdk/cli/commands/agents.py +201 -109
- glaip_sdk/cli/commands/configure.py +29 -87
- glaip_sdk/cli/commands/init.py +16 -7
- glaip_sdk/cli/commands/mcps.py +73 -153
- glaip_sdk/cli/commands/tools.py +185 -49
- glaip_sdk/cli/main.py +30 -27
- glaip_sdk/cli/utils.py +126 -13
- glaip_sdk/client/__init__.py +54 -2
- glaip_sdk/client/agents.py +175 -237
- glaip_sdk/client/base.py +62 -2
- glaip_sdk/client/mcps.py +63 -20
- glaip_sdk/client/tools.py +95 -28
- glaip_sdk/config/constants.py +10 -3
- glaip_sdk/exceptions.py +13 -0
- glaip_sdk/models.py +20 -4
- glaip_sdk/utils/__init__.py +116 -18
- glaip_sdk/utils/client_utils.py +284 -0
- glaip_sdk/utils/rendering/__init__.py +1 -0
- glaip_sdk/utils/rendering/formatting.py +211 -0
- glaip_sdk/utils/rendering/models.py +53 -0
- glaip_sdk/utils/rendering/renderer/__init__.py +38 -0
- glaip_sdk/utils/rendering/renderer/base.py +827 -0
- glaip_sdk/utils/rendering/renderer/config.py +33 -0
- glaip_sdk/utils/rendering/renderer/console.py +54 -0
- glaip_sdk/utils/rendering/renderer/debug.py +82 -0
- glaip_sdk/utils/rendering/renderer/panels.py +123 -0
- glaip_sdk/utils/rendering/renderer/progress.py +118 -0
- glaip_sdk/utils/rendering/renderer/stream.py +198 -0
- glaip_sdk/utils/rendering/steps.py +168 -0
- glaip_sdk/utils/run_renderer.py +22 -1086
- {glaip_sdk-0.0.1b10.dist-info → glaip_sdk-0.0.3.dist-info}/METADATA +9 -37
- glaip_sdk-0.0.3.dist-info/RECORD +40 -0
- glaip_sdk/cli/config.py +0 -592
- glaip_sdk/utils.py +0 -167
- glaip_sdk-0.0.1b10.dist-info/RECORD +0 -28
- {glaip_sdk-0.0.1b10.dist-info → glaip_sdk-0.0.3.dist-info}/WHEEL +0 -0
- {glaip_sdk-0.0.1b10.dist-info → glaip_sdk-0.0.3.dist-info}/entry_points.txt +0 -0

glaip_sdk/utils/client_utils.py
@@ -0,0 +1,284 @@
+#!/usr/bin/env python3
+"""Utility functions for AIP SDK clients.
+
+This module contains generic utility functions that can be reused across
+different client types (agents, tools, etc.).
+
+Authors:
+    Raymond Christopher (raymond.christopher@gdplabs.id)
+"""
+
+import logging
+from contextlib import ExitStack
+from pathlib import Path
+from typing import Any, BinaryIO
+
+import httpx
+
+from glaip_sdk.exceptions import AgentTimeoutError
+
+# Set up module-level logger
+logger = logging.getLogger("glaip_sdk.client_utils")
+
+
+class MultipartData:
+    """Container for multipart form data with automatic file handle cleanup."""
+
+    def __init__(self, data: dict[str, Any], files: list[tuple[str, Any]]):
+        """Initialize multipart data container.
+
+        Args:
+            data: Form data dictionary
+            files: List of file tuples for multipart form
+        """
+        self.data = data
+        self.files = files
+        self._exit_stack = ExitStack()
+
+    def close(self):
+        """Close all opened file handles."""
+        self._exit_stack.close()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.close()
+
+
+def extract_ids(items: list[str | Any] | None) -> list[str] | None:
+    """Extract IDs from a list of objects or strings.
+
+    Args:
+        items: List of items that may be strings, objects with .id, or other types
+
+    Returns:
+        List of extracted IDs, or None if items is empty/None
+    """
+    if not items:
+        return None
+
+    ids = []
+    for item in items:
+        if isinstance(item, str):
+            ids.append(item)
+        elif hasattr(item, "id"):
+            ids.append(item.id)
+        else:
+            # Fallback: convert to string
+            ids.append(str(item))
+
+    return ids
+
+
+def create_model_instances(
+    data: list[dict] | None, model_class: type, client: Any
+) -> list[Any]:
+    """Create model instances from API data with client association.
+
+    This is a common pattern used across different clients (agents, tools, mcps)
+    to create model instances and associate them with the client.
+
+    Args:
+        data: List of dictionaries from API response
+        model_class: The model class to instantiate
+        client: The client instance to associate with models
+
+    Returns:
+        List of model instances with client association
+    """
+    if not data:
+        return []
+
+    return [model_class(**item_data)._set_client(client) for item_data in data]
+
+
+def find_by_name(
+    items: list[Any], name: str, case_sensitive: bool = False
+) -> list[Any]:
+    """Filter items by name with optional case sensitivity.
+
+    This is a common pattern used across different clients for client-side
+    filtering when the backend doesn't support name query parameters.
+
+    Args:
+        items: List of items to filter
+        name: Name to search for
+        case_sensitive: Whether the search should be case sensitive
+
+    Returns:
+        Filtered list of items matching the name
+    """
+    if not name:
+        return items
+
+    if case_sensitive:
+        return [item for item in items if name in item.name]
+    else:
+        return [item for item in items if name.lower() in item.name.lower()]
+
+
+def iter_sse_events(
+    response: httpx.Response, timeout_seconds: float = None, agent_name: str = None
+):
+    """Iterate over Server-Sent Events with proper parsing.
+
+    Args:
+        response: HTTP response object with streaming content
+        timeout_seconds: Timeout duration in seconds (for error messages)
+        agent_name: Agent name (for error messages)
+
+    Yields:
+        Dictionary with event data, type, and ID
+
+    Raises:
+        AgentTimeoutError: When agent execution times out
+        httpx.TimeoutException: When general timeout occurs
+        Exception: For other unexpected errors
+    """
+    buf = []
+    event_type = None
+    event_id = None
+
+    try:
+        for raw in response.iter_lines():
+            line = raw.decode("utf-8") if isinstance(raw, bytes) else raw
+            if line is None:
+                continue
+
+            # Normalize CRLF and treat whitespace-only as blank
+            line = line.rstrip("\r")
+
+            if not line.strip():  # instead of: if line == ""
+                if buf:
+                    data = "\n".join(buf)
+                    yield {
+                        "event": event_type or "message",
+                        "id": event_id,
+                        "data": data,
+                    }
+                    buf, event_type, event_id = [], None, None
+                continue
+
+            if line.startswith(":"):  # comment
+                continue
+            if line.startswith("data:"):
+                data_line = line[5:].lstrip()
+
+                # Optional: handle sentinel end markers gracefully
+                if data_line.strip() == "[DONE]":
+                    if buf:
+                        data = "\n".join(buf)
+                        yield {
+                            "event": event_type or "message",
+                            "id": event_id,
+                            "data": data,
+                        }
+                    return
+
+                buf.append(data_line)
+            elif line.startswith("event:"):
+                event_type = line[6:].strip() or None
+            elif line.startswith("id:"):
+                event_id = line[3:].strip() or None
+
+        # Flush any remaining data
+        if buf:
+            yield {
+                "event": event_type or "message",
+                "id": event_id,
+                "data": "\n".join(buf),
+            }
+    except httpx.ReadTimeout as e:
+        logger.error(f"Read timeout during streaming: {e}")
+        logger.error("This usually indicates the backend is taking too long to respond")
+        logger.error(
+            "Consider increasing the timeout value or checking backend performance"
+        )
+        # Raise a more user-friendly timeout error
+        raise AgentTimeoutError(
+            timeout_seconds or 30.0,  # Default to 30s if not provided
+            agent_name,
+        )
+    except httpx.TimeoutException as e:
+        logger.error(f"General timeout during streaming: {e}")
+        # Also convert general timeout to agent timeout for consistency
+        raise AgentTimeoutError(timeout_seconds or 30.0, agent_name)
+    except httpx.StreamClosed as e:
+        logger.error(f"Stream closed unexpectedly during streaming: {e}")
+        logger.error("This may indicate a backend issue or network problem")
+        logger.error("The response stream was closed before all data could be read")
+        raise
+    except httpx.ConnectError as e:
+        logger.error(f"Connection error during streaming: {e}")
+        logger.error("Check your network connection and backend availability")
+        raise
+    except Exception as e:
+        logger.error(f"Unexpected error during streaming: {e}")
+        logger.error(f"Error type: {type(e).__name__}")
+        # Log additional context if available
+        if hasattr(e, "__cause__") and e.__cause__:
+            logger.error(f"Caused by: {e.__cause__}")
+        raise
+
+
+def prepare_multipart_data(message: str, files: list[str | BinaryIO]) -> MultipartData:
+    """Prepare multipart form data for file uploads.
+
+    Args:
+        message: Text message to include with the upload
+        files: List of file paths or file-like objects
+
+    Returns:
+        MultipartData object with automatic file handle cleanup
+
+    Raises:
+        FileNotFoundError: When a file path doesn't exist
+        ValueError: When a file object is invalid
+    """
+    # Backend expects 'input' for the main prompt. Keep 'message' for
+    # backward-compatibility with any legacy handlers.
+    form_data = {"input": message, "message": message, "stream": True}
+    file_list = []
+
+    with ExitStack() as stack:
+        multipart_data = MultipartData(form_data, [])
+        multipart_data._exit_stack = stack
+
+        for file_item in files:
+            if isinstance(file_item, str):
+                # File path - let httpx stream the file handle
+                file_path = Path(file_item)
+                if not file_path.exists():
+                    raise FileNotFoundError(f"File not found: {file_item}")
+
+                # Open file and register for cleanup
+                fh = stack.enter_context(open(file_path, "rb"))
+                file_list.append(
+                    (
+                        "files",
+                        (
+                            file_path.name,
+                            fh,
+                            "application/octet-stream",
+                        ),
+                    )
+                )
+            else:
+                # File-like object
+                if hasattr(file_item, "name"):
+                    filename = getattr(file_item, "name", "file")
+                else:
+                    filename = "file"
+
+                if hasattr(file_item, "read"):
+                    # For file-like objects, we need to read them since httpx expects bytes
+                    file_content = file_item.read()
+                    file_list.append(
+                        ("files", (filename, file_content, "application/octet-stream"))
+                    )
+                else:
+                    raise ValueError(f"Invalid file object: {file_item}")
+
+        multipart_data.files = file_list
+        return multipart_data
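
The two helpers above are designed to work together: prepare_multipart_data builds the httpx payload for an agent run and iter_sse_events parses the streamed reply. A minimal usage sketch follows; the endpoint URL, agent name, and file name are illustrative assumptions, not part of this diff.

import httpx

from glaip_sdk.utils.client_utils import iter_sse_events, prepare_multipart_data

RUN_URL = "https://aip.example.com/agents/demo-agent/run"  # hypothetical endpoint, not from the SDK

with prepare_multipart_data("Summarise the attached file", ["report.pdf"]) as mp:
    with httpx.Client(timeout=30.0) as client:
        with client.stream("POST", RUN_URL, data=mp.data, files=mp.files) as response:
            # Each parsed SSE event is a dict with "event", "id", and "data" keys.
            for event in iter_sse_events(response, timeout_seconds=30.0, agent_name="demo-agent"):
                print(event["event"], event["data"])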

glaip_sdk/utils/rendering/__init__.py
@@ -0,0 +1 @@
+"""Rendering utilities package (formatting, models, steps, debug)."""

glaip_sdk/utils/rendering/formatting.py
@@ -0,0 +1,211 @@
+"""Formatting helpers for renderer.
+
+Authors:
+    Raymond Christopher (raymond.christopher@gdplabs.id)
+"""
+
+from __future__ import annotations
+
+import re
+import time
+
+# Constants for argument formatting
+DEFAULT_ARGS_MAX_LEN = 100
+IMPORTANT_PARAMETER_KEYS = [
+    "model",
+    "temperature",
+    "max_tokens",
+    "top_p",
+    "frequency_penalty",
+    "presence_penalty",
+    "query",
+    "url",
+]
+SECRET_VALUE_PATTERNS = [
+    re.compile(r"sk-[a-zA-Z0-9]{20,}"),  # OpenAI API keys (at least 20 chars)
+    re.compile(r"ya29\.[a-zA-Z0-9_-]+"),  # Google OAuth tokens
+    re.compile(r"ghp_[a-zA-Z0-9]{20,}"),  # GitHub tokens (at least 20 chars)
+    re.compile(r"gho_[a-zA-Z0-9]{20,}"),  # GitHub tokens (at least 20 chars)
+    re.compile(r"ghu_[a-zA-Z0-9]{20,}"),  # GitHub tokens (at least 20 chars)
+    re.compile(r"ghs_[a-zA-Z0-9]{20,}"),  # GitHub tokens (at least 20 chars)
+    re.compile(r"ghr_[a-zA-Z0-9]{20,}"),  # GitHub tokens (at least 20 chars)
+    re.compile(r"eyJ[a-zA-Z0-9_-]+\.[a-zA-Z0-9_-]+\.[a-zA-Z0-9_-]+"),  # JWT tokens
+]
+SENSITIVE_PATTERNS = re.compile(
+    r"password\s*[:=]\s*[^\s,}]+|secret\s*[:=]\s*[^\s,}]+|token\s*[:=]\s*[^\s,}]+|key\s*[:=]\s*[^\s,}]+|api_key\s*[:=]\s*[^\s,}]+|^password$|^secret$|^token$|^key$|^api_key$",
+    re.IGNORECASE,
+)
+
+
+def _truncate_string(s: str, max_len: int) -> str:
+    """Truncate a string to a maximum length."""
+    if len(s) <= max_len:
+        return s
+    return s[: max_len - 3] + "…"
+
+
+def mask_secrets_in_string(text: str) -> str:
+    """Mask sensitive information in a string."""
+    result = text
+    for pattern in SECRET_VALUE_PATTERNS:
+        result = re.sub(pattern, "••••••", result)
+    return result
+
+
+def redact_sensitive(text: str | dict | list) -> str | dict | list:
+    """Redact sensitive information in a string, dict, or list."""
+    if isinstance(text, dict):
+        # Recursively process dictionary values
+        result = {}
+        for key, value in text.items():
+            # Check if the key itself is sensitive
+            key_lower = key.lower()
+            if any(
+                sensitive in key_lower
+                for sensitive in ["password", "secret", "token", "key", "api_key"]
+            ):
+                result[key] = "••••••"
+            elif isinstance(value, dict | list):
+                result[key] = redact_sensitive(value)
+            elif isinstance(value, str):
+                result[key] = redact_sensitive(value)
+            else:
+                result[key] = value
+        return result
+    elif isinstance(text, list):
+        # Recursively process list items
+        return [redact_sensitive(item) for item in text]
+    elif isinstance(text, str):
+        # Process string - first mask secrets, then redact sensitive patterns
+        result = text
+        # First mask secrets
+        for pattern in SECRET_VALUE_PATTERNS:
+            result = re.sub(pattern, "••••••", result)
+        # Then redact sensitive patterns
+        result = re.sub(
+            SENSITIVE_PATTERNS,
+            lambda m: m.group(0).split("=")[0] + "=••••••",
+            result,
+        )
+        return result
+    else:
+        return text
+
+
+def pretty_args(args: dict | None, max_len: int = DEFAULT_ARGS_MAX_LEN) -> str:
+    """Format arguments in a pretty way."""
+    if not args:
+        return "{}"
+
+    # Mask secrets first by recursively processing the structure
+    try:
+        masked_args = redact_sensitive(args)
+    except Exception:
+        # Fallback to original args if redact_sensitive fails
+        masked_args = args
+
+    # Convert to JSON string and truncate if needed
+    try:
+        import json
+
+        args_str = json.dumps(masked_args, ensure_ascii=False)
+        return _truncate_string(args_str, max_len)
+    except (TypeError, ValueError, Exception):
+        # Fallback to string representation if JSON serialization fails
+        args_str = str(masked_args)
+        return _truncate_string(args_str, max_len)
+
+
+def pretty_out(output: any, max_len: int = DEFAULT_ARGS_MAX_LEN) -> str:
+    """Format output in a pretty way."""
+    if output is None:
+        return "None"
+
+    if isinstance(output, str):
+        # Mask secrets in string output
+        masked_output = mask_secrets_in_string(output)
+        # Strip leading/trailing whitespace but preserve internal spacing
+        masked_output = masked_output.strip()
+        # Replace newlines with spaces to preserve formatting
+        masked_output = masked_output.replace("\n", " ")
+        return _truncate_string(masked_output, max_len)
+
+    # For other types, convert to string and truncate
+    output_str = str(output)
+    return _truncate_string(output_str, max_len)
+
+
+def get_spinner_char() -> str:
+    frames = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏"
+    return frames[int(time.time() * 10) % len(frames)]
+
+
+def get_step_icon(step_kind: str) -> str:
+    """Get the appropriate icon for a step kind."""
+    if step_kind == "tool":
+        return "⚙️"  # Gear emoji for tool
+    if step_kind == "delegate":
+        return "🤝"  # Handshake for delegate
+    if step_kind == "agent":
+        return "🧠"  # Brain emoji for agent
+    return ""
+
+
+def is_step_finished(step) -> bool:
+    """Check if a step is finished.
+
+    Args:
+        step: The step object to check
+
+    Returns:
+        True if the step status is "finished", False otherwise
+    """
+    return getattr(step, "status", None) == "finished"
+
+
+def format_main_title(
+    header_text: str, has_running_steps: bool, get_spinner_char: callable
+) -> str:
+    """Generate the main panel title with dynamic status indicators.
+
+    Args:
+        header_text: The header text from the renderer
+        has_running_steps: Whether there are running steps
+        get_spinner_char: Function to get spinner character
+
+    Returns:
+        A formatted title string showing the agent name and status.
+    """
+    # base name
+    name = (header_text or "").strip() or "Assistant"
+    # strip leading rule emojis if present
+    name = name.replace("—", " ").strip()
+    # spinner if still working
+    mark = "✓" if not has_running_steps else get_spinner_char()
+    return f"{name} {mark}"
+
+
+def print_header_once(
+    console, text: str, last_header: str, rules_enabled: bool, style: str | None = None
+) -> str:
+    """Print header text only when it changes to avoid duplicate output.
+
+    Args:
+        console: Rich console instance
+        text: The header text to display
+        last_header: The last header text that was printed
+        rules_enabled: Whether header rules are enabled
+        style: Optional Rich style for the header rule
+
+    Returns:
+        The updated last_header value
+    """
+    if not rules_enabled:
+        return text
+    if text and text != last_header:
+        try:
+            console.rule(text, style=style)
+        except Exception:
+            console.print(text)
+        return text
+    return last_header
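
A short sketch of how these formatting helpers behave, using illustrative values; the masking follows the SECRET_VALUE_PATTERNS and SENSITIVE_PATTERNS defined above.

from glaip_sdk.utils.rendering.formatting import (
    format_main_title,
    get_spinner_char,
    pretty_args,
    pretty_out,
)

# Sensitive keys are replaced with bullets before the args are rendered.
print(pretty_args({"query": "weather in Jakarta", "api_key": "sk-abcdefghijklmnopqrstuvwx"}))
# -> {"query": "weather in Jakarta", "api_key": "••••••"}

# Multi-line tool output is flattened to a single line and truncated to 100 chars by default.
print(pretty_out("line one\nline two"))
# -> line one line two

# Panel titles get a check mark when idle and a spinner frame while steps run.
print(format_main_title("Weather Agent", has_running_steps=False, get_spinner_char=get_spinner_char))
# -> Weather Agent ✓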

glaip_sdk/utils/rendering/models.py
@@ -0,0 +1,53 @@
+"""Rendering utilities.
+
+Authors:
+    Raymond Christopher (raymond.christopher@gdplabs.id)
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass, field
+from time import monotonic
+from typing import Any
+
+
+@dataclass(slots=True)
+class Step:
+    step_id: str
+    kind: str  # "tool" | "delegate" | "agent"
+    name: str
+    status: str = "running"
+    args: dict = field(default_factory=dict)
+    output: str = ""
+    parent_id: str | None = None
+    task_id: str | None = None
+    context_id: str | None = None
+    started_at: float = field(default_factory=monotonic)
+    duration_ms: int | None = None
+
+    def finish(self, duration_raw: float | None):
+        if isinstance(duration_raw, int | float) and duration_raw > 0:
+            # Use provided duration if it's a positive number (even if very small)
+            self.duration_ms = round(float(duration_raw) * 1000)
+        else:
+            # Calculate from started_at if duration_raw is None, negative, or zero
+            self.duration_ms = int((monotonic() - self.started_at) * 1000)
+        self.status = "finished"
+
+
+@dataclass(slots=True)
+class RunStats:
+    started_at: float = field(default_factory=monotonic)
+    finished_at: float | None = None
+    usage: dict[str, Any] = field(default_factory=dict)
+
+    def stop(self) -> None:
+        self.finished_at = monotonic()
+
+    @property
+    def duration_s(self) -> float | None:
+        return (
+            None
+            if self.finished_at is None
+            else round(self.finished_at - self.started_at, 2)
+        )
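
For orientation, a brief sketch of the Step and RunStats lifecycle as the renderer drives it; the identifiers and durations below are illustrative.

from glaip_sdk.utils.rendering.models import RunStats, Step

stats = RunStats()
step = Step(step_id="step-1", kind="tool", name="web_search", args={"query": "AIP SDK"})

# ... the tool executes ...
step.finish(duration_raw=0.42)        # backend-reported duration in seconds -> 420 ms
print(step.status, step.duration_ms)  # finished 420

stats.stop()
print(stats.duration_s)               # wall-clock seconds, rounded to 2 decimals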

glaip_sdk/utils/rendering/renderer/__init__.py
@@ -0,0 +1,38 @@
+"""Renderer package for modular streaming output.
+
+This package provides modular components for rendering agent execution streams,
+with clean separation of concerns between configuration, console handling,
+debug output, panel rendering, progress tracking, and event routing.
+"""
+
+from .base import RichStreamRenderer
+from .config import RendererConfig
+from .console import CapturingConsole
+from .debug import render_debug_event
+from .panels import (
+    create_context_panel,
+    create_final_panel,
+    create_main_panel,
+    create_tool_panel,
+)
+from .progress import (
+    format_tool_title,
+    is_delegation_tool,
+)
+from .stream import StreamProcessor
+
+__all__ = [
+    # Main classes
+    "RichStreamRenderer",
+    "RendererConfig",
+    "CapturingConsole",
+    "StreamProcessor",
+    # Key functions
+    "render_debug_event",
+    "create_main_panel",
+    "create_tool_panel",
+    "create_context_panel",
+    "create_final_panel",
+    "format_tool_title",
+    "is_delegation_tool",
+]
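
With run_renderer.py reduced to a thin shim (+22/-1086 in the summary above), downstream code now imports the renderer pieces from this package. Only the re-exported names appear in this hunk; constructor signatures live in base.py and config.py, which are not shown, so the sketch below sticks to the import surface.

# New public import surface of the renderer package; instantiation details are
# defined in base.py and config.py and are not reproduced in this diff.
from glaip_sdk.utils.rendering.renderer import (
    CapturingConsole,
    RendererConfig,
    RichStreamRenderer,
    StreamProcessor,
    create_final_panel,
    render_debug_event,
)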