glaip-sdk 0.0.16__py3-none-any.whl → 0.0.18__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- glaip_sdk/__init__.py +1 -1
- glaip_sdk/branding.py +1 -1
- glaip_sdk/cli/commands/agents.py +10 -10
- glaip_sdk/cli/commands/configure.py +7 -2
- glaip_sdk/cli/commands/mcps.py +18 -19
- glaip_sdk/cli/commands/tools.py +19 -13
- glaip_sdk/cli/display.py +5 -4
- glaip_sdk/cli/rich_helpers.py +29 -0
- glaip_sdk/cli/slash/prompt.py +32 -17
- glaip_sdk/cli/slash/session.py +54 -25
- glaip_sdk/cli/utils.py +6 -6
- glaip_sdk/client/agents.py +581 -88
- glaip_sdk/client/main.py +10 -2
- glaip_sdk/client/tools.py +34 -3
- glaip_sdk/config/constants.py +1 -1
- glaip_sdk/utils/agent_config.py +49 -26
- glaip_sdk/utils/rendering/formatting.py +50 -29
- glaip_sdk/utils/rendering/renderer/base.py +156 -70
- {glaip_sdk-0.0.16.dist-info → glaip_sdk-0.0.18.dist-info}/METADATA +2 -2
- {glaip_sdk-0.0.16.dist-info → glaip_sdk-0.0.18.dist-info}/RECORD +22 -21
- {glaip_sdk-0.0.16.dist-info → glaip_sdk-0.0.18.dist-info}/WHEEL +0 -0
- {glaip_sdk-0.0.16.dist-info → glaip_sdk-0.0.18.dist-info}/entry_points.txt +0 -0
glaip_sdk/client/main.py
CHANGED
@@ -42,6 +42,10 @@ class Client(BaseClient):
         """Create a new agent."""
         return self.agents.create_agent(**kwargs)
 
+    def create_agent_from_file(self, *args, **kwargs) -> Agent:
+        """Create a new agent from a JSON or YAML configuration file."""
+        return self.agents.create_agent_from_file(*args, **kwargs)
+
     def list_agents(
         self,
         agent_type: str | None = None,
@@ -86,6 +90,10 @@ class Client(BaseClient):
         """Update an existing agent."""
         return self.agents.update_agent(agent_id, **kwargs)
 
+    def update_agent_from_file(self, agent_id: str, *args, **kwargs) -> Agent:
+        """Update an existing agent using a JSON or YAML configuration file."""
+        return self.agents.update_agent_from_file(agent_id, *args, **kwargs)
+
     def delete_agent(self, agent_id: str) -> bool:
         """Delete an agent."""
         return self.agents.delete_agent(agent_id)
@@ -150,9 +158,9 @@ class Client(BaseClient):
         """Get tool script content."""
         return self.tools.get_tool_script(tool_id)
 
-    def update_tool_via_file(self, tool_id: str, file_path: str) -> Tool:
+    def update_tool_via_file(self, tool_id: str, file_path: str, **kwargs) -> Tool:
         """Update tool via file."""
-        return self.tools.update_tool_via_file(tool_id, file_path)
+        return self.tools.update_tool_via_file(tool_id, file_path, **kwargs)
 
     # MCPs
     def create_mcp(self, **kwargs) -> MCP:
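The file-based helpers added to `Client` simply delegate to the agent and tool sub-clients. A minimal usage sketch; the `Client()` construction, the top-level import path, and the positional file-path argument name are assumptions, since the wrappers only forward `*args`/`**kwargs`:

```python
from glaip_sdk import Client  # top-level import path assumed

client = Client()  # assumes connection settings come from the environment

# New in 0.0.18: create or update an agent from a JSON/YAML config file.
agent = client.create_agent_from_file("agent.yaml")
agent = client.update_agent_from_file(agent.id, "agent.yaml")  # `.id` attribute assumed

# update_tool_via_file now forwards extra metadata kwargs to ToolClient.
tool = client.update_tool_via_file(
    tool_id="TOOL_ID",              # hypothetical tool id
    file_path="my_tool.py",
    description="Updated tool description",
)
```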
glaip_sdk/client/tools.py
CHANGED
@@ -241,7 +241,15 @@ class ToolClient(BaseClient):
         self, update_data: dict[str, Any], kwargs: dict[str, Any]
     ) -> None:
         """Handle additional kwargs in update payload."""
-        excluded_keys = {
+        excluded_keys = {
+            "tags",
+            "framework",
+            "version",
+            "type",
+            "tool_type",
+            "name",
+            "description",
+        }
         for key, value in kwargs.items():
             if key not in excluded_keys:
                 update_data[key] = value
@@ -269,9 +277,19 @@
         - Handles metadata updates properly
         """
         # Prepare the update payload with current values as defaults
+        type_override = kwargs.pop("type", None)
+        if type_override is None:
+            type_override = kwargs.pop("tool_type", None)
+        current_type = (
+            type_override
+            or getattr(current_tool, "tool_type", None)
+            or getattr(current_tool, "type", None)
+            or DEFAULT_TOOL_TYPE
+        )
+
         update_data = {
             "name": name if name is not None else current_tool.name,
-            "type":
+            "type": current_type,
             "framework": kwargs.get(
                 "framework", getattr(current_tool, "framework", DEFAULT_TOOL_FRAMEWORK)
             ),
@@ -476,6 +494,19 @@
         # Validate file exists
         self._validate_and_read_file(file_path)
 
+        # Fetch current metadata to ensure required fields are preserved
+        current_tool = self.get_tool_by_id(tool_id)
+
+        payload_kwargs = kwargs.copy()
+        name = payload_kwargs.pop("name", None)
+        description = payload_kwargs.pop("description", None)
+        update_payload = self._build_update_payload(
+            current_tool=current_tool,
+            name=name,
+            description=description,
+            **payload_kwargs,
+        )
+
         try:
             # Prepare multipart upload
             with open(file_path, "rb") as fb:
@@ -491,7 +522,7 @@
                 "PUT",
                 TOOLS_UPLOAD_BY_ID_ENDPOINT_FMT.format(tool_id=tool_id),
                 files=files,
-                data=
+                data=update_payload,
             )
 
         return Tool(**response)._set_client(self)
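The behavioural fix here is that `type` is no longer dropped from the update payload: an explicit `type` or `tool_type` kwarg wins, then the current tool's own value, then the SDK default. A standalone sketch of that precedence (the function name and the `"custom"` fallback are illustrative; the real code uses `DEFAULT_TOOL_TYPE` from the SDK constants, whose value is not shown in this diff):

```python
from typing import Any


def resolve_tool_type(kwargs: dict[str, Any], current_tool: Any, default: str = "custom") -> str:
    """Mirror of the payload precedence: `type` kwarg > `tool_type` kwarg >
    the tool's existing tool_type/type attribute > the default."""
    type_override = kwargs.pop("type", None)
    if type_override is None:
        type_override = kwargs.pop("tool_type", None)
    return (
        type_override
        or getattr(current_tool, "tool_type", None)
        or getattr(current_tool, "type", None)
        or default
    )
```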
glaip_sdk/config/constants.py
CHANGED
glaip_sdk/utils/agent_config.py
CHANGED
@@ -134,25 +134,51 @@ def normalize_agent_config_for_import(
     if not isinstance(agent_config, dict):
         return normalized_data
 
-    #
+    # Apply normalization based on priority order
     if cli_model:
-
-        normalized_data["model"] = cli_model
-        return normalized_data
+        return _apply_cli_model_override(normalized_data, cli_model)
 
-    # Priority 2: language_model_id already exists - clean up agent_config
     if normalized_data.get("language_model_id"):
-
-
-
-
-
-
-
-
+        return _cleanup_existing_language_model(normalized_data, agent_config)
+
+    return _extract_lm_from_agent_config(normalized_data, agent_config)
+
+
+def _apply_cli_model_override(normalized_data: dict, cli_model: str) -> dict:
+    """Apply CLI model override (highest priority)."""
+    normalized_data["model"] = cli_model
+    return normalized_data
+
+
+def _cleanup_existing_language_model(normalized_data: dict, agent_config: dict) -> dict:
+    """Clean up agent_config when language_model_id already exists."""
+    # Remove LM identity keys from agent_config since language_model_id takes precedence
+    lm_keys_to_remove = {"lm_provider", "lm_name", "lm_base_url"}
+    for key in lm_keys_to_remove:
+        agent_config.pop(key, None)
+    normalized_data["agent_config"] = agent_config
+    return normalized_data
+
+
+def _extract_lm_from_agent_config(normalized_data: dict, agent_config: dict) -> dict:
+    """Extract LM settings from agent_config (lowest priority)."""
+    extracted_lm = _extract_lm_settings(agent_config)
+
+    if not extracted_lm:
         return normalized_data
 
-    #
+    # Add extracted LM settings to top level
+    normalized_data.update(extracted_lm)
+
+    # Create sanitized agent_config (remove extracted LM settings but keep memory)
+    sanitized_config = _sanitize_agent_config(agent_config)
+    normalized_data["agent_config"] = sanitized_config
+
+    return normalized_data
+
+
+def _extract_lm_settings(agent_config: dict) -> dict[str, Any]:
+    """Extract LM settings from agent_config."""
     extracted_lm = {}
 
     # Extract lm_name if present
@@ -163,19 +189,16 @@ def normalize_agent_config_for_import(
     if "lm_provider" in agent_config:
         extracted_lm["lm_provider"] = agent_config["lm_provider"]
 
-
-    if extracted_lm:
-        # Add extracted LM settings to top level
-        normalized_data.update(extracted_lm)
+    return extracted_lm
 
-    # Create sanitized agent_config (remove extracted LM settings but keep memory)
-    sanitized_config = agent_config.copy()
 
-
-
-
-        sanitized_config.pop(key, None)
+def _sanitize_agent_config(agent_config: dict) -> dict:
+    """Create sanitized agent_config by removing LM identity keys."""
+    sanitized_config = agent_config.copy()
 
-
+    # Remove LM identity keys but preserve memory and other settings
+    lm_keys_to_remove = {"lm_provider", "lm_name", "lm_base_url"}
+    for key in lm_keys_to_remove:
+        sanitized_config.pop(key, None)
 
-    return
+    return sanitized_config
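The refactor preserves the original three-level priority for resolving the language model during import. A condensed, illustrative restatement of that order (function and variable names here are hypothetical; only the key names mirror the diff):

```python
def resolve_language_model(normalized: dict, agent_config: dict, cli_model: str | None) -> dict:
    """Hypothetical condensation of the normalize_agent_config_for_import helpers."""
    lm_keys = {"lm_provider", "lm_name", "lm_base_url"}

    if cli_model:                                # 1) explicit CLI model wins outright
        normalized["model"] = cli_model
        return normalized

    if normalized.get("language_model_id"):      # 2) an existing LM id wins; drop conflicting keys
        for key in lm_keys:
            agent_config.pop(key, None)
        normalized["agent_config"] = agent_config
        return normalized

    # 3) otherwise hoist LM settings to the top level and keep the rest (e.g. memory)
    extracted = {k: agent_config[k] for k in ("lm_name", "lm_provider") if k in agent_config}
    if extracted:
        normalized.update(extracted)
        normalized["agent_config"] = {k: v for k, v in agent_config.items() if k not in lm_keys}
    return normalized
```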
glaip_sdk/utils/rendering/formatting.py
CHANGED
@@ -57,41 +57,62 @@ def mask_secrets_in_string(text: str) -> str:
 def redact_sensitive(text: str | dict | list) -> str | dict | list:
     """Redact sensitive information in a string, dict, or list."""
     if isinstance(text, dict):
-
-        result = {}
-        for key, value in text.items():
-            # Check if the key itself is sensitive
-            key_lower = key.lower()
-            if any(
-                sensitive in key_lower
-                for sensitive in ["password", "secret", "token", "key", "api_key"]
-            ):
-                result[key] = "••••••"
-            elif isinstance(value, dict | list) or isinstance(value, str):
-                result[key] = redact_sensitive(value)
-            else:
-                result[key] = value
-        return result
+        return _redact_dict_values(text)
     elif isinstance(text, list):
-
-        return [redact_sensitive(item) for item in text]
+        return _redact_list_items(text)
     elif isinstance(text, str):
-
-        result = text
-        # First mask secrets
-        for pattern in SECRET_VALUE_PATTERNS:
-            result = re.sub(pattern, "••••••", result)
-        # Then redact sensitive patterns
-        result = re.sub(
-            SENSITIVE_PATTERNS,
-            lambda m: m.group(0).split("=")[0] + "=••••••",
-            result,
-        )
-        return result
+        return _redact_string_content(text)
     else:
         return text
 
 
+def _redact_dict_values(text: dict) -> dict:
+    """Recursively process dictionary values and redact sensitive keys."""
+    result = {}
+    for key, value in text.items():
+        if _is_sensitive_key(key):
+            result[key] = "••••••"
+        elif _should_recurse_redaction(value):
+            result[key] = redact_sensitive(value)
+        else:
+            result[key] = value
+    return result
+
+
+def _redact_list_items(text: list) -> list:
+    """Recursively process list items."""
+    return [redact_sensitive(item) for item in text]
+
+
+def _redact_string_content(text: str) -> str:
+    """Process string - first mask secrets, then redact sensitive patterns."""
+    result = text
+    # First mask secrets
+    for pattern in SECRET_VALUE_PATTERNS:
+        result = re.sub(pattern, "••••••", result)
+    # Then redact sensitive patterns
+    result = re.sub(
+        SENSITIVE_PATTERNS,
+        lambda m: m.group(0).split("=")[0] + "=••••••",
+        result,
+    )
+    return result
+
+
+def _is_sensitive_key(key: str) -> bool:
+    """Check if a key contains sensitive information."""
+    key_lower = key.lower()
+    return any(
+        sensitive in key_lower
+        for sensitive in ["password", "secret", "token", "key", "api_key"]
+    )
+
+
+def _should_recurse_redaction(value: Any) -> bool:
+    """Check if a value should be recursively processed."""
+    return isinstance(value, dict | list) or isinstance(value, str)
+
+
 def pretty_args(args: dict | None, max_len: int = DEFAULT_ARGS_MAX_LEN) -> str:
     """Format arguments in a pretty way."""
     if not args:
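The split into helpers leaves the observable behaviour of `redact_sensitive` unchanged: values under sensitive key names are masked and nested containers are recursed into. A hypothetical before/after pair illustrating the expected shape of the result (values invented; non-string scalars are assumed untouched and the string patterns are assumed not to match these plain values):

```python
payload = {
    "api_key": "sk-live-123456",
    "config": {"password": "hunter2", "timeout": 30},
    "endpoints": [{"token": "abc123", "retries": 3}],
}

# Expected result of redact_sensitive(payload): sensitive keys fully masked,
# nested dicts/lists recursed into, plain numbers left as-is.
expected = {
    "api_key": "••••••",
    "config": {"password": "••••••", "timeout": 30},
    "endpoints": [{"token": "••••••", "retries": 3}],
}
```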
glaip_sdk/utils/rendering/renderer/base.py
CHANGED
@@ -45,16 +45,21 @@ from glaip_sdk.utils.rendering.steps import StepManager
 # Configure logger
 logger = logging.getLogger("glaip_sdk.run_renderer")
 
+# Constants
+LESS_THAN_1MS = "[<1ms]"
+
 
 @dataclass
 class RendererState:
     """Internal state for the renderer."""
 
-    buffer: list[str] = None
+    buffer: list[str] | None = None
     final_text: str = ""
     streaming_started_at: float | None = None
     printed_final_panel: bool = False
     finalizing_ui: bool = False
+    final_duration_seconds: float | None = None
+    final_duration_text: str | None = None
 
     def __post_init__(self) -> None:
         """Initialize renderer state after dataclass creation.
@@ -212,59 +217,76 @@ class RichStreamRenderer:
 
         # Handle different event types
         if kind == "status":
-
-            status = ev.get("status")
-            if status == "streaming_started":
-                self.state.streaming_started_at = monotonic()
-            return
-
+            self._handle_status_event(ev)
         elif kind == "content":
-
-            if content:
-                self.state.buffer.append(content)
-            self._ensure_live()
-            return
-
+            self._handle_content_event(content)
         elif kind == "final_response":
-
-
-
-
-
+            self._handle_final_response_event(content, metadata)
+        elif kind in {"agent_step", "agent_thinking_step"}:
+            self._handle_agent_step_event(ev)
+        else:
+            # Update live display for unhandled events
+            self._ensure_live()
 
-
-
-
-
-
-            return
+    def _handle_status_event(self, ev: dict[str, Any]) -> None:
+        """Handle status events."""
+        status = ev.get("status")
+        if status == "streaming_started":
+            self.state.streaming_started_at = monotonic()
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-        self.stream_processor.track_tools_and_agents(
-            tool_name, tool_calls_info, is_delegation_tool
-        )
+    def _handle_content_event(self, content: str) -> None:
+        """Handle content streaming events."""
+        if content:
+            self.state.buffer.append(content)
+        self._ensure_live()
+
+    def _handle_final_response_event(
+        self, content: str, metadata: dict[str, Any]
+    ) -> None:
+        """Handle final response events."""
+        if content:
+            self.state.buffer.append(content)
+            self.state.final_text = content
 
-
-        self.
+        meta_payload = metadata.get("metadata") or {}
+        self._update_final_duration(meta_payload.get("time"))
+
+        self._ensure_live()
+
+        # In verbose mode, show the final result in a panel
+        if self.verbose and content.strip():
+            final_panel = create_final_panel(
+                content,
+                title=self._final_panel_title(),
+                theme=self.cfg.theme,
+            )
+            self.console.print(final_panel)
+            self.state.printed_final_panel = True
+
+    def _handle_agent_step_event(self, ev: dict[str, Any]) -> None:
+        """Handle agent step events."""
+        # Extract tool information
+        (
+            tool_name,
+            tool_args,
+            tool_out,
+            tool_calls_info,
+        ) = self.stream_processor.parse_tool_calls(ev)
+
+        # Track tools and sub-agents
+        self.stream_processor.track_tools_and_agents(
+            tool_name, tool_calls_info, is_delegation_tool
+        )
+
+        # Handle tool execution
+        self._handle_agent_step(ev, tool_name, tool_args, tool_out, tool_calls_info)
 
         # Update live display
         self._ensure_live()
 
     def _finish_running_steps(self) -> None:
         """Mark any running steps as finished to avoid lingering spinners."""
-        for st in
+        for st in self.steps.by_id.values():
             if not is_step_finished(st):
                 st.finish(None)
 
@@ -289,14 +311,31 @@
         if self.verbose and not self.state.printed_final_panel:
             body = ("".join(self.state.buffer) or "").strip()
             if body:
-                final_panel = create_final_panel(
+                final_panel = create_final_panel(
+                    body,
+                    title=self._final_panel_title(),
+                    theme=self.cfg.theme,
+                )
                 self.console.print(final_panel)
                 self.state.printed_final_panel = True
 
-    def on_complete(self,
+    def on_complete(self, stats: RunStats) -> None:
         """Handle completion event."""
         self.state.finalizing_ui = True
 
+        if isinstance(stats, RunStats):
+            duration = None
+            try:
+                if stats.finished_at is not None and stats.started_at is not None:
+                    duration = max(
+                        0.0, float(stats.finished_at) - float(stats.started_at)
+                    )
+            except Exception:
+                duration = None
+
+            if duration is not None:
+                self._update_final_duration(duration, overwrite=True)
+
         # Mark any running steps as finished to avoid lingering spinners
         self._finish_running_steps()
 
@@ -394,15 +433,23 @@
         if not self.verbose:
             final_content = (self.state.final_text or "").strip()
             if final_content:
+                title = self._final_panel_title()
                 return create_final_panel(
                     final_content,
-                    title=
+                    title=title,
                     theme=self.cfg.theme,
                 )
         # Dynamic title with spinner + elapsed/hints
         title = self._format_enhanced_main_title()
         return create_main_panel(body, title, self.cfg.theme)
 
+    def _final_panel_title(self) -> str:
+        """Compose title for the final result panel including duration."""
+        title = "Final Result"
+        if self.state.final_duration_text:
+            title = f"{title} · {self.state.final_duration_text}"
+        return title
+
     def apply_verbosity(self, verbose: bool) -> None:
         """Update verbose behaviour at runtime."""
         if self.verbose == verbose:
@@ -507,27 +554,43 @@
         """Process additional tool calls to avoid duplicates."""
         for call_name, call_args, _ in tool_calls_info or []:
             if call_name and call_name != tool_name:
-                self.
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+                self._process_single_tool_call(
+                    call_name, call_args, task_id, context_id
+                )
+
+    def _process_single_tool_call(
+        self, call_name: str, call_args: Any, task_id: str, context_id: str
+    ) -> None:
+        """Process a single additional tool call."""
+        self._ensure_tool_panel(call_name, call_args, task_id, context_id)
+
+        st2 = self._create_step_for_tool_call(call_name, call_args, task_id, context_id)
+
+        if self.stream_processor.server_elapsed_time is not None and st2:
+            self._step_server_start_times[st2.step_id] = (
+                self.stream_processor.server_elapsed_time
+            )
+
+    def _create_step_for_tool_call(
+        self, call_name: str, call_args: Any, task_id: str, context_id: str
+    ) -> Any:
+        """Create appropriate step for tool call."""
+        if is_delegation_tool(call_name):
+            return self.steps.start_or_get(
+                task_id=task_id,
+                context_id=context_id,
+                kind="delegate",
+                name=call_name,
+                args=call_args,
+            )
+        else:
+            return self.steps.start_or_get(
+                task_id=task_id,
+                context_id=context_id,
+                kind="tool",
+                name=call_name,
+                args=call_args,
+            )
 
     def _detect_tool_completion(
         self, metadata: dict, content: str
@@ -941,19 +1004,19 @@
         """Format step status with elapsed time or duration."""
         if is_step_finished(step):
             if step.duration_ms is None:
-                return
+                return LESS_THAN_1MS
             elif step.duration_ms >= 1000:
                 return f"[{step.duration_ms / 1000:.2f}s]"
             elif step.duration_ms > 0:
                 return f"[{step.duration_ms}ms]"
-            return
+            return LESS_THAN_1MS
         else:
             # Calculate elapsed time for running steps
             elapsed = self._calculate_step_elapsed_time(step)
             if elapsed >= 1:
                 return f"[{elapsed:.2f}s]"
             ms = int(elapsed * 1000)
-            return f"[{ms}ms]" if ms > 0 else
+            return f"[{ms}ms]" if ms > 0 else LESS_THAN_1MS
 
     def _calculate_step_elapsed_time(self, step: Step) -> float:
         """Calculate elapsed time for a running step."""
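The status label now falls back to the shared `LESS_THAN_1MS` constant instead of an inline literal. Restated as a small standalone sketch of the finished-step formatting rules (the function name is hypothetical):

```python
LESS_THAN_1MS = "[<1ms]"


def format_finished_duration(duration_ms: int | None) -> str:
    """Finished-step branch of _format_step_status, restated."""
    if duration_ms is None:
        return LESS_THAN_1MS                     # no measurement recorded
    if duration_ms >= 1000:
        return f"[{duration_ms / 1000:.2f}s]"    # e.g. 2345 -> "[2.35s]"
    if duration_ms > 0:
        return f"[{duration_ms}ms]"              # e.g. 42 -> "[42ms]"
    return LESS_THAN_1MS                         # 0 ms rounds down to "<1ms"
```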
@@ -1024,6 +1087,29 @@
             and sid not in self.stream_processor.current_event_finished_panels
         )
 
+    def _update_final_duration(
+        self, duration: float | None, *, overwrite: bool = False
+    ) -> None:
+        """Store formatted duration for eventual final panels."""
+        if duration is None:
+            return
+
+        try:
+            duration_val = max(0.0, float(duration))
+        except Exception:
+            return
+
+        existing = self.state.final_duration_seconds
+
+        if not overwrite and existing is not None:
+            return
+
+        if overwrite and existing is not None:
+            duration_val = max(existing, duration_val)
+
+        self.state.final_duration_seconds = duration_val
+        self.state.final_duration_text = self._format_elapsed_time(duration_val)
+
     def _calculate_elapsed_time(self, meta: dict[str, Any]) -> str:
         """Calculate elapsed time string for running tools."""
         server_elapsed = self.stream_processor.server_elapsed_time
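Taken together, the duration plumbing works as follows: the streamed metadata's `time` value is stored first, later non-overwrite calls are ignored, and `on_complete` may overwrite with the client-side `RunStats` figure but never shrinks what was already recorded. A hedged walkthrough with invented numbers:

```python
def update_duration(new, existing, overwrite=False):
    """Hypothetical standalone restatement of _update_final_duration's rules."""
    if new is None:
        return existing
    new = max(0.0, float(new))
    if not overwrite and existing is not None:
        return existing                  # first recorded value wins
    if overwrite and existing is not None:
        new = max(existing, new)         # overwrite never shrinks the duration
    return new


duration = None
duration = update_duration(3.2, duration)                  # -> 3.2 (metadata "time")
duration = update_duration(1.0, duration)                  # -> 3.2 (ignored, already set)
duration = update_duration(3.5, duration, overwrite=True)  # -> 3.5 (RunStats, keeps the max)
```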
{glaip_sdk-0.0.16.dist-info → glaip_sdk-0.0.18.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: glaip-sdk
-Version: 0.0.16
+Version: 0.0.18
 Summary: Python SDK for GL AIP (GDP Labs AI Agent Package) - Simplified CLI Design
 License: MIT
 Author: Raymond Christopher
@@ -22,7 +22,7 @@ Requires-Dist: readchar (>=4.2.1,<5.0.0)
 Requires-Dist: rich (>=13.0.0)
 Description-Content-Type: text/markdown
 
-# GL AIP
+# GL AIP — GDP Labs AI Agents Package
 
 [](https://www.python.org/downloads/)
 [](https://github.com/psf/black)