glaip-sdk 0.0.15__py3-none-any.whl → 0.0.17__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- glaip_sdk/__init__.py +1 -1
- glaip_sdk/branding.py +28 -2
- glaip_sdk/cli/commands/agents.py +36 -27
- glaip_sdk/cli/commands/configure.py +46 -52
- glaip_sdk/cli/commands/mcps.py +19 -22
- glaip_sdk/cli/commands/tools.py +19 -13
- glaip_sdk/cli/config.py +42 -0
- glaip_sdk/cli/display.py +97 -30
- glaip_sdk/cli/main.py +141 -124
- glaip_sdk/cli/mcp_validators.py +2 -2
- glaip_sdk/cli/pager.py +3 -2
- glaip_sdk/cli/parsers/json_input.py +2 -2
- glaip_sdk/cli/resolution.py +12 -10
- glaip_sdk/cli/rich_helpers.py +29 -0
- glaip_sdk/cli/slash/agent_session.py +7 -0
- glaip_sdk/cli/slash/prompt.py +21 -2
- glaip_sdk/cli/slash/session.py +15 -21
- glaip_sdk/cli/update_notifier.py +8 -2
- glaip_sdk/cli/utils.py +115 -58
- glaip_sdk/client/_agent_payloads.py +504 -0
- glaip_sdk/client/agents.py +633 -559
- glaip_sdk/client/base.py +92 -20
- glaip_sdk/client/main.py +14 -0
- glaip_sdk/client/run_rendering.py +275 -0
- glaip_sdk/config/constants.py +4 -1
- glaip_sdk/exceptions.py +15 -0
- glaip_sdk/models.py +5 -0
- glaip_sdk/payload_schemas/__init__.py +19 -0
- glaip_sdk/payload_schemas/agent.py +87 -0
- glaip_sdk/rich_components.py +12 -0
- glaip_sdk/utils/client_utils.py +12 -0
- glaip_sdk/utils/import_export.py +2 -2
- glaip_sdk/utils/rendering/formatting.py +5 -0
- glaip_sdk/utils/rendering/models.py +22 -0
- glaip_sdk/utils/rendering/renderer/base.py +9 -1
- glaip_sdk/utils/rendering/renderer/panels.py +0 -1
- glaip_sdk/utils/rendering/steps.py +59 -0
- glaip_sdk/utils/serialization.py +24 -3
- {glaip_sdk-0.0.15.dist-info → glaip_sdk-0.0.17.dist-info}/METADATA +2 -2
- glaip_sdk-0.0.17.dist-info/RECORD +73 -0
- glaip_sdk-0.0.15.dist-info/RECORD +0 -67
- {glaip_sdk-0.0.15.dist-info → glaip_sdk-0.0.17.dist-info}/WHEEL +0 -0
- {glaip_sdk-0.0.15.dist-info → glaip_sdk-0.0.17.dist-info}/entry_points.txt +0 -0
glaip_sdk/client/agents.py
CHANGED
@@ -5,20 +5,30 @@ Authors:
     Raymond Christopher (raymond.christopher@gdplabs.id)
 """
 
-import io
 import json
 import logging
-from collections.abc import AsyncGenerator
-from
+from collections.abc import AsyncGenerator, Callable, Iterator, Mapping
+from os import PathLike
+from pathlib import Path
 from typing import Any, BinaryIO
 
 import httpx
-from rich.console import Console as _Console
 
+from glaip_sdk.client._agent_payloads import (
+    AgentCreateRequest,
+    AgentListParams,
+    AgentListResult,
+    AgentUpdateRequest,
+)
 from glaip_sdk.client.base import BaseClient
+from glaip_sdk.client.mcps import MCPClient
+from glaip_sdk.client.run_rendering import (
+    AgentRunRenderingManager,
+    compute_timeout_seconds,
+)
+from glaip_sdk.client.tools import ToolClient
 from glaip_sdk.config.constants import (
     DEFAULT_AGENT_FRAMEWORK,
-    DEFAULT_AGENT_PROVIDER,
     DEFAULT_AGENT_RUN_TIMEOUT,
     DEFAULT_AGENT_TYPE,
     DEFAULT_AGENT_VERSION,
@@ -26,17 +36,21 @@ from glaip_sdk.config.constants import (
 )
 from glaip_sdk.exceptions import NotFoundError
 from glaip_sdk.models import Agent
+from glaip_sdk.payload_schemas.agent import list_server_only_fields
+from glaip_sdk.utils.agent_config import normalize_agent_config_for_import
 from glaip_sdk.utils.client_utils import (
     aiter_sse_events,
     create_model_instances,
-    extract_ids,
     find_by_name,
-    iter_sse_events,
     prepare_multipart_data,
 )
-from glaip_sdk.utils.
+from glaip_sdk.utils.import_export import (
+    convert_export_to_import_format,
+    merge_import_with_cli_args,
+)
 from glaip_sdk.utils.rendering.renderer import RichStreamRenderer
-from glaip_sdk.utils.
+from glaip_sdk.utils.resource_refs import is_uuid
+from glaip_sdk.utils.serialization import load_resource_from_file
 from glaip_sdk.utils.validation import validate_agent_instruction
 
 # API endpoints
@@ -48,6 +62,145 @@ SSE_CONTENT_TYPE = "text/event-stream"
 # Set up module-level logger
 logger = logging.getLogger("glaip_sdk.agents")
 
+_SERVER_ONLY_IMPORT_FIELDS = set(list_server_only_fields()) | {"success", "message"}
+_MERGED_SEQUENCE_FIELDS = ("tools", "agents", "mcps")
+
+
+def _normalise_sequence(value: Any) -> list[Any] | None:
+    """Normalise optional sequence inputs to plain lists."""
+    if value is None:
+        return None
+    if isinstance(value, list):
+        return value
+    if isinstance(value, (tuple, set)):
+        return list(value)
+    return [value]
+
+
+def _normalise_sequence_fields(mapping: dict[str, Any]) -> None:
+    """Normalise merged sequence fields in-place."""
+    for field in _MERGED_SEQUENCE_FIELDS:
+        if field in mapping:
+            normalised = _normalise_sequence(mapping[field])
+            if normalised is not None:
+                mapping[field] = normalised
+
+
+def _merge_override_maps(
+    base_values: Mapping[str, Any],
+    extra_values: Mapping[str, Any],
+) -> dict[str, Any]:
+    """Merge override mappings while normalising sequence fields."""
+    merged: dict[str, Any] = {}
+    for source in (base_values, extra_values):
+        for key, value in source.items():
+            if value is None:
+                continue
+            merged[key] = (
+                _normalise_sequence(value) if key in _MERGED_SEQUENCE_FIELDS else value
+            )
+    return merged
+
+
+def _split_known_and_extra(
+    payload: Mapping[str, Any],
+    known_fields: Mapping[str, Any],
+) -> tuple[dict[str, Any], dict[str, Any]]:
+    """Split payload mapping into known request fields and extras."""
+    known: dict[str, Any] = {}
+    extras: dict[str, Any] = {}
+    for key, value in payload.items():
+        if value is None:
+            continue
+        if key in known_fields:
+            known[key] = value
+        else:
+            extras[key] = value
+    return known, extras
+
+
+def _load_agent_file_payload(
+    file_path: Path, *, model_override: str | None
+) -> dict[str, Any]:
+    """Load agent configuration from disk and normalise legacy fields."""
+    if not file_path.exists():
+        raise FileNotFoundError(f"Agent configuration file not found: {file_path}")
+    if not file_path.is_file():
+        raise ValueError(f"Agent configuration path must point to a file: {file_path}")
+
+    raw_data = load_resource_from_file(file_path)
+    if not isinstance(raw_data, Mapping):
+        raise ValueError("Agent configuration file must contain a mapping/object.")
+
+    payload = convert_export_to_import_format(dict(raw_data))
+    payload = normalize_agent_config_for_import(payload, model_override)
+
+    for field in _SERVER_ONLY_IMPORT_FIELDS:
+        payload.pop(field, None)
+
+    return payload
+
+
+def _prepare_import_payload(
+    file_path: Path,
+    overrides: Mapping[str, Any],
+    *,
+    drop_model_fields: bool = False,
+) -> dict[str, Any]:
+    """Prepare merged payload from file contents and explicit overrides."""
+    overrides_dict = dict(overrides)
+
+    raw_definition = load_resource_from_file(file_path)
+    original_refs = {
+        "tools": list(raw_definition.get("tools") or []),
+        "agents": list(raw_definition.get("agents") or []),
+        "mcps": list(raw_definition.get("mcps") or []),
+    }
+
+    base_payload = _load_agent_file_payload(
+        file_path, model_override=overrides_dict.get("model")
+    )
+
+    cli_args = {
+        key: overrides_dict.get(key)
+        for key in (
+            "name",
+            "instruction",
+            "model",
+            "tools",
+            "agents",
+            "mcps",
+            "timeout",
+        )
+        if overrides_dict.get(key) is not None
+    }
+
+    for field in _MERGED_SEQUENCE_FIELDS:
+        if field in cli_args:
+            cli_args[field] = tuple(_normalise_sequence(cli_args[field]) or [])
+
+    merged = merge_import_with_cli_args(base_payload, cli_args)
+
+    additional = {
+        key: value
+        for key, value in overrides_dict.items()
+        if value is not None and key not in cli_args
+    }
+    merged.update(additional)
+
+    if drop_model_fields:
+        if overrides_dict.get("language_model_id") is None:
+            merged.pop("language_model_id", None)
+        if overrides_dict.get("provider") is None:
+            merged.pop("provider", None)
+
+    merged.setdefault("_tool_refs", original_refs["tools"])
+    merged.setdefault("_agent_refs", original_refs["agents"])
+    merged.setdefault("_mcp_refs", original_refs["mcps"])
+
+    _normalise_sequence_fields(merged)
+    return merged
+
 
 class AgentClient(BaseClient):
     """Client for agent operations."""
@@ -65,44 +218,53 @@ class AgentClient(BaseClient):
             **kwargs: Additional arguments for standalone initialization
         """
         super().__init__(parent_client=parent_client, **kwargs)
+        self._renderer_manager = AgentRunRenderingManager(logger)
+        self._tool_client: ToolClient | None = None
+        self._mcp_client: MCPClient | None = None
 
     def list_agents(
         self,
-
-
-
-
-        sync_langflow_agents: bool = False,
-    ) -> list[Agent]:
-        """List agents with optional filtering.
+        query: AgentListParams | None = None,
+        **kwargs: Any,
+    ) -> AgentListResult:
+        """List agents with optional filtering and pagination support.
 
         Args:
-
-
-            name: Filter by partial name match (case-insensitive)
-            version: Filter by exact version match
-            sync_langflow_agents: Sync with LangFlow server before listing (only applies when agent_type=langflow)
-
-        Returns:
-            List of agents matching the filters
+            query: Query parameters for filtering agents. If None, uses kwargs to create query.
+            **kwargs: Individual filter parameters for backward compatibility.
         """
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        if query is not None and kwargs:
+            # Both query object and individual parameters provided
+            raise ValueError(
+                "Provide either `query` or individual filter arguments, not both."
+            )
+
+        if query is None:
+            # Create query from individual parameters for backward compatibility
+            query = AgentListParams(**kwargs)
+
+        params = query.to_query_params()
+        envelope = self._request_with_envelope(
+            "GET",
+            AGENTS_ENDPOINT,
+            params=params if params else None,
+        )
+
+        if not isinstance(envelope, dict):
+            envelope = {"data": envelope}
+
+        data_payload = envelope.get("data") or []
+        items = create_model_instances(data_payload, Agent, self)
+
+        return AgentListResult(
+            items=items,
+            total=envelope.get("total"),
+            page=envelope.get("page"),
+            limit=envelope.get("limit"),
+            has_next=envelope.get("has_next"),
+            has_prev=envelope.get("has_prev"),
+            message=envelope.get("message"),
+        )
 
     def sync_langflow_agents(
         self,
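
For orientation, a minimal usage sketch of the reworked listing API shown in the hunk above. Parameter and field names follow the diff; the agents_client instance, how it is constructed, and the example filter value are assumptions.

    from glaip_sdk.client._agent_payloads import AgentListParams

    # agents_client is an AgentClient instance; construction depends on your setup.
    result = agents_client.list_agents(query=AgentListParams(name="support"))
    # Backward-compatible form: agents_client.list_agents(name="support")

    for agent in result:  # AgentListResult iterates over its items (see find_agents below)
        print(agent.name)
    print(result.total, result.page, result.has_next)  # pagination metadata from the envelope
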
@@ -151,329 +313,423 @@ class AgentClient(BaseClient):
 
     def find_agents(self, name: str | None = None) -> list[Agent]:
         """Find agents by name."""
-
-
-            params["name"] = name
-
-        data = self._request("GET", AGENTS_ENDPOINT, params=params)
-        agents = create_model_instances(data, Agent, self)
+        result = self.list_agents(name=name)
+        agents = list(result)
         if name is None:
             return agents
         return find_by_name(agents, name, case_sensitive=False)
 
-
-
-
-
-
-
-
-
-
-    ) -> dict[str, Any]:
-        """Build payload for agent creation with proper LM selection and metadata handling.
-
-        CENTRALIZED PAYLOAD BUILDING LOGIC:
-        - LM exclusivity: Uses language_model_id if provided, otherwise provider/model_name
-        - Always includes required backend metadata
-        - Preserves mem0 keys in agent_config
-        - Handles tool/agent ID extraction from objects
-
-        Args:
-            name: Agent name
-            instruction: Agent instruction
-            model: Language model name (used when language_model_id not provided)
-            tools: List of tools to attach
-            agents: List of sub-agents to attach
-            timeout: Agent execution timeout
-            **kwargs: Additional parameters (language_model_id, agent_config, etc.)
-
-        Returns:
-            Complete payload dictionary for agent creation
-        """
-        # Prepare the creation payload with required fields
-        payload: dict[str, Any] = {
-            "name": name.strip(),
-            "instruction": instruction.strip(),
-            "type": DEFAULT_AGENT_TYPE,
-            "framework": DEFAULT_AGENT_FRAMEWORK,
-            "version": DEFAULT_AGENT_VERSION,
-        }
-
-        # Language model selection with exclusivity:
-        # Priority: language_model_id (if provided) > provider/model_name (fallback)
-        if kwargs.get("language_model_id"):
-            # Use language_model_id - defer to kwargs update below
-            pass
-        else:
-            # Use provider/model_name fallback
-            payload["provider"] = DEFAULT_AGENT_PROVIDER
-            payload["model_name"] = model or DEFAULT_MODEL
-
-        # Include execution timeout if provided
-        if timeout is not None:
-            payload["timeout"] = str(timeout)
-
-        # Ensure minimum required metadata for visibility
-        if "metadata" not in kwargs:
-            kwargs["metadata"] = {}
-        if "type" not in kwargs["metadata"]:
-            kwargs["metadata"]["type"] = "custom"
+    # ------------------------------------------------------------------ #
+    # Renderer delegation helpers
+    # ------------------------------------------------------------------ #
+    def _get_renderer_manager(self) -> AgentRunRenderingManager:
+        manager = getattr(self, "_renderer_manager", None)
+        if manager is None:
+            manager = AgentRunRenderingManager(logger)
+            self._renderer_manager = manager
+        return manager
 
-
-
-
+    def _create_renderer(
+        self, renderer: RichStreamRenderer | str | None, **kwargs: Any
+    ) -> RichStreamRenderer:
+        manager = self._get_renderer_manager()
+        verbose = kwargs.get("verbose", False)
+        if isinstance(renderer, RichStreamRenderer) or hasattr(renderer, "on_start"):
+            return renderer  # type: ignore[return-value]
+        return manager.create_renderer(renderer, verbose=verbose)
 
-
-
-
-
-
+    def _process_stream_events(
+        self,
+        stream_response: httpx.Response,
+        renderer: RichStreamRenderer,
+        timeout_seconds: float,
+        agent_name: str | None,
+        meta: dict[str, Any],
+    ) -> tuple[str, dict[str, Any], float | None, float | None]:
+        manager = self._get_renderer_manager()
+        return manager.process_stream_events(
+            stream_response,
+            renderer,
+            timeout_seconds,
+            agent_name,
+            meta,
+        )
 
-
-
+    def _finalize_renderer(
+        self,
+        renderer: RichStreamRenderer,
+        final_text: str,
+        stats_usage: dict[str, Any],
+        started_monotonic: float | None,
+        finished_monotonic: float | None,
+    ) -> str:
+        manager = self._get_renderer_manager()
+        return manager.finalize_renderer(
+            renderer,
+            final_text,
+            stats_usage,
+            started_monotonic,
+            finished_monotonic,
+        )
 
-
+    def _get_tool_client(self) -> ToolClient:
+        if self._tool_client is None:
+            self._tool_client = ToolClient(parent_client=self)
+        return self._tool_client
 
-    def
-        self
-
-
-        return {
-            "name": name if name is not None else current_agent.name,
-            "instruction": instruction
-            if instruction is not None
-            else current_agent.instruction,
-            "type": DEFAULT_AGENT_TYPE,  # Required by backend
-            "framework": DEFAULT_AGENT_FRAMEWORK,  # Required by backend
-            "version": DEFAULT_AGENT_VERSION,  # Required by backend
-        }
+    def _get_mcp_client(self) -> MCPClient:
+        if self._mcp_client is None:
+            self._mcp_client = MCPClient(parent_client=self)
+        return self._mcp_client
 
-    def
+    def _normalise_reference_entry(
         self,
-
-
-
-
-
-        if
-
-
-
-
-
-
+        entry: Any,
+        fallback_iter: Iterator[Any] | None,
+    ) -> tuple[str | None, str | None]:
+        entry_id: str | None = None
+        entry_name: str | None = None
+
+        if isinstance(entry, str):
+            if is_uuid(entry):
+                entry_id = entry
+            else:
+                entry_name = entry
+        elif isinstance(entry, dict):
+            entry_id = entry.get("id")
+            entry_name = entry.get("name")
         else:
-
-            self._set_language_model_from_current_agent(update_data, current_agent)
+            entry_name = str(entry)
 
-
-
-    ) -> None:
-        """Set language model from current agent config or use defaults."""
-        if hasattr(current_agent, "agent_config") and current_agent.agent_config:
-            agent_config = current_agent.agent_config
-            if "lm_provider" in agent_config:
-                update_data["provider"] = agent_config["lm_provider"]
-            if "lm_name" in agent_config:
-                update_data["model_name"] = agent_config["lm_name"]
-        else:
-            # Default fallback values
-            update_data["provider"] = DEFAULT_AGENT_PROVIDER
-            update_data["model_name"] = DEFAULT_MODEL
+        if entry_name or fallback_iter is None:
+            return entry_id, entry_name
 
-
-
-
-
-
-
-    ) -> None:
-        """Handle tools and agents with proper ID extraction."""
-        # Handle tools
-        if tools is not None:
-            tool_ids = extract_ids(tools)
-            update_data["tools"] = tool_ids if tool_ids else []
-        else:
-            update_data["tools"] = self._extract_current_tool_ids(current_agent)
+        try:
+            ref = next(fallback_iter)
+        except StopIteration:
+            ref = None
+        if isinstance(ref, dict):
+            entry_name = ref.get("name") or entry_name
 
-
-        if agents is not None:
-            agent_ids = extract_ids(agents)
-            update_data["agents"] = agent_ids if agent_ids else []
-        else:
-            update_data["agents"] = self._extract_current_agent_ids(current_agent)
-
-    def _extract_current_tool_ids(self, current_agent: "Agent") -> list[str]:
-        """Extract tool IDs from current agent."""
-        if current_agent.tools:
-            return [
-                tool["id"] if isinstance(tool, dict) else tool
-                for tool in current_agent.tools
-            ]
-        return []
-
-    def _extract_current_agent_ids(self, current_agent: "Agent") -> list[str]:
-        """Extract agent IDs from current agent."""
-        if current_agent.agents:
-            return [
-                agent["id"] if isinstance(agent, dict) else agent
-                for agent in current_agent.agents
-            ]
-        return []
+        return entry_id, entry_name
 
-    def
+    def _resolve_resource_ids(
         self,
-
-
-
-
-
-
-
-
-
+        items: list[Any] | None,
+        references: list[Any] | None,
+        *,
+        fetch_by_id: Callable[[str], Any],
+        find_by_name: Callable[[str], list[Any]],
+        label: str,
+        plural_label: str | None = None,
+    ) -> list[str] | None:
+        if not items:
+            return None
+
+        if references is None:
+            return [self._coerce_reference_value(entry) for entry in items]
+
+        singular = label
+        plural = plural_label or f"{label}s"
+        fallback_iter = iter(references or [])
+
+        return [
+            self._resolve_single_resource(
+                entry,
+                fallback_iter,
+                fetch_by_id,
+                find_by_name,
+                singular,
+                plural,
             )
-
-
-
-
-        # Default agent_config
-        update_data["agent_config"] = {
-            "lm_provider": DEFAULT_AGENT_PROVIDER,
-            "lm_name": DEFAULT_MODEL,
-            "lm_hyperparameters": {"temperature": 0.0},
-        }
-
-        # Clean LM keys from agent_config to prevent conflicts
-        self._clean_agent_config_lm_keys(update_data)
-
-    def _merge_agent_configs(self, current_agent: "Agent", new_config: dict) -> dict:
-        """Merge current agent config with new config."""
-        if hasattr(current_agent, "agent_config") and current_agent.agent_config:
-            merged_config = current_agent.agent_config.copy()
-            merged_config.update(new_config)
-            return merged_config
-        return new_config
-
-    def _clean_agent_config_lm_keys(self, update_data: dict[str, Any]) -> None:
-        """Remove LM keys from agent_config to prevent conflicts."""
-        if "agent_config" in update_data and isinstance(
-            update_data["agent_config"], dict
-        ):
-            agent_config = update_data["agent_config"]
-            lm_keys_to_remove = {
-                "lm_provider",
-                "lm_name",
-                "lm_base_url",
-                "lm_hyperparameters",
-            }
-            for key in lm_keys_to_remove:
-                agent_config.pop(key, None)
-
-    def _finalize_update_payload(
+            for entry in items
+        ]
+
+    def _resolve_single_resource(
         self,
-
-
-
-
-
-
-
-
+        entry: Any,
+        fallback_iter: Iterator[Any] | None,
+        fetch_by_id: Callable[[str], Any],
+        find_by_name: Callable[[str], list[Any]],
+        singular: str,
+        plural: str,
+    ) -> str:
+        entry_id, entry_name = self._normalise_reference_entry(entry, fallback_iter)
 
-
-
-
-
-
+        validated_id = self._validate_resource_id(fetch_by_id, entry_id)
+        if validated_id:
+            return validated_id
+        if entry_id and entry_name is None:
+            return entry_id
 
-
+        if entry_name:
+            resolved, success = self._resolve_resource_by_name(
+                find_by_name, entry_name, singular, plural
+            )
+            return resolved if success else entry_name
+
+        raise ValueError(f"{singular} references must include a valid ID or name.")
+
+    @staticmethod
+    def _coerce_reference_value(entry: Any) -> str:
+        if isinstance(entry, dict):
+            if entry.get("id"):
+                return str(entry["id"])
+            if entry.get("name"):
+                return str(entry["name"])
+        return str(entry)
+
+    @staticmethod
+    def _validate_resource_id(
+        fetch_by_id: Callable[[str], Any], candidate_id: str | None
+    ) -> str | None:
+        if not candidate_id:
+            return None
+        try:
+            fetch_by_id(candidate_id)
+        except Exception:
+            return None
+        return candidate_id
+
+    @staticmethod
+    def _resolve_resource_by_name(
+        find_by_name: Callable[[str], list[Any]],
+        entry_name: str,
+        singular: str,
+        plural: str,
+    ) -> tuple[str, bool]:
+        try:
+            matches = find_by_name(entry_name)
+        except Exception:
+            return entry_name, False
 
-
-
-
-
-
-
-
-
-
+        if not matches:
+            raise ValueError(
+                f"{singular} '{entry_name}' not found in current workspace."
+            )
+        if len(matches) > 1:
+            exact = [
+                m
+                for m in matches
+                if getattr(m, "name", "").lower() == entry_name.lower()
+            ]
+            if len(exact) == 1:
+                matches = exact
+            else:
+                raise ValueError(
+                    f"Multiple {plural} named '{entry_name}'. Please disambiguate."
+                )
+        return str(matches[0].id), True
 
-
-
-
-
-
-
+    def _resolve_tool_ids(
+        self,
+        tools: list[Any] | None,
+        references: list[Any] | None = None,
+    ) -> list[str] | None:
+        tool_client = self._get_tool_client()
+        return self._resolve_resource_ids(
+            tools,
+            references,
+            fetch_by_id=tool_client.get_tool_by_id,
+            find_by_name=tool_client.find_tools,
+            label="Tool",
+            plural_label="tools",
+        )
 
-
-
+    def _resolve_agent_ids(
+        self,
+        agents: list[Any] | None,
+        references: list[Any] | None = None,
+    ) -> list[str] | None:
+        return self._resolve_resource_ids(
+            agents,
+            references,
+            fetch_by_id=self.get_agent_by_id,
+            find_by_name=self.find_agents,
+            label="Agent",
+            plural_label="agents",
+        )
 
-
-
-
-
-
-
-
+    def _resolve_mcp_ids(
+        self,
+        mcps: list[Any] | None,
+        references: list[Any] | None = None,
+    ) -> list[str] | None:
+        mcp_client = self._get_mcp_client()
+        return self._resolve_resource_ids(
+            mcps,
+            references,
+            fetch_by_id=mcp_client.get_mcp_by_id,
+            find_by_name=mcp_client.find_mcps,
+            label="MCP",
+            plural_label="MCPs",
+        )
 
-
-
-
-
+    def _create_agent_from_payload(self, payload: Mapping[str, Any]) -> "Agent":
+        """Create an agent using a fully prepared payload mapping."""
+        known, extras = _split_known_and_extra(
+            payload, AgentCreateRequest.__dataclass_fields__
         )
 
-
-
-
-
+        name = known.pop("name", None)
+        instruction = known.pop("instruction", None)
+        if not name or not str(name).strip():
+            raise ValueError("Agent name cannot be empty or whitespace")
+        if not instruction or not str(instruction).strip():
+            raise ValueError("Agent instruction cannot be empty or whitespace")
+
+        validated_instruction = validate_agent_instruction(str(instruction))
+        _normalise_sequence_fields(known)
+
+        resolved_model = known.pop("model", None) or DEFAULT_MODEL
+        tool_refs = extras.pop("_tool_refs", None)
+        agent_refs = extras.pop("_agent_refs", None)
+        mcp_refs = extras.pop("_mcp_refs", None)
+
+        tools_raw = known.pop("tools", None)
+        agents_raw = known.pop("agents", None)
+        mcps_raw = known.pop("mcps", None)
+
+        resolved_tools = self._resolve_tool_ids(tools_raw, tool_refs)
+        resolved_agents = self._resolve_agent_ids(agents_raw, agent_refs)
+        resolved_mcps = self._resolve_mcp_ids(mcps_raw, mcp_refs)
+
+        final_extras = {**known, **extras}
+        final_extras.setdefault("model", resolved_model)
+
+        request = AgentCreateRequest(
+            name=str(name).strip(),
+            instruction=validated_instruction,
+            model=resolved_model,
+            language_model_id=known.pop("language_model_id", None),
+            provider=known.pop("provider", None),
+            model_name=known.pop("model_name", None),
+            agent_type=known.pop("agent_type", known.pop("type", DEFAULT_AGENT_TYPE)),
+            framework=known.pop("framework", None) or DEFAULT_AGENT_FRAMEWORK,
+            version=known.pop("version", None) or DEFAULT_AGENT_VERSION,
+            account_id=known.pop("account_id", None),
+            description=known.pop("description", None),
+            metadata=known.pop("metadata", None),
+            tools=resolved_tools,
+            agents=resolved_agents,
+            mcps=resolved_mcps,
+            tool_configs=known.pop("tool_configs", None),
+            agent_config=known.pop("agent_config", None),
+            timeout=known.pop("timeout", None) or DEFAULT_AGENT_RUN_TIMEOUT,
+            a2a_profile=known.pop("a2a_profile", None),
+            extras=final_extras,
+        )
 
-
-
-        self._handle_agent_config(update_data, current_agent, agent_config)
+        payload_dict = request.to_payload()
+        payload_dict.setdefault("model", resolved_model)
 
-
-
+        full_agent_data = self._post_then_fetch(
+            id_key="id",
+            post_endpoint=AGENTS_ENDPOINT,
+            get_endpoint_fmt=f"{AGENTS_ENDPOINT}{{id}}",
+            json=payload_dict,
+        )
+        return Agent(**full_agent_data)._set_client(self)
 
     def create_agent(
         self,
-        name: str,
-        instruction: str,
-        model: str =
+        name: str | None = None,
+        instruction: str | None = None,
+        model: str | None = None,
         tools: list[str | Any] | None = None,
         agents: list[str | Any] | None = None,
-        timeout: int =
+        timeout: int | None = None,
+        *,
+        file: str | PathLike[str] | None = None,
+        mcps: list[str | Any] | None = None,
+        tool_configs: Mapping[str, Any] | None = None,
         **kwargs: Any,
     ) -> "Agent":
-        """Create a new agent."""
-
-
-
+        """Create a new agent, optionally loading configuration from a file."""
+        base_overrides = {
+            "name": name,
+            "instruction": instruction,
+            "model": model,
+            "tools": tools,
+            "agents": agents,
+            "timeout": timeout,
+            "mcps": mcps,
+            "tool_configs": tool_configs,
+        }
+        overrides = _merge_override_maps(base_overrides, kwargs)
 
-
-
-
-
-
-
-
-
-            tools=tools,
-            agents=agents,
-            timeout=timeout,
-            **kwargs,
-        )
+        if file is not None:
+            payload = _prepare_import_payload(
+                Path(file).expanduser(), overrides, drop_model_fields=True
+            )
+            if overrides.get("model") is None:
+                payload.pop("model", None)
+        else:
+            payload = overrides
 
-
-
-
-
-
-
+        return self._create_agent_from_payload(payload)
+
+    def create_agent_from_file(  # pragma: no cover - thin compatibility wrapper
+        self,
+        file_path: str | PathLike[str],
+        **overrides: Any,
+    ) -> "Agent":
+        """Backward-compatible helper to create an agent from a configuration file."""
+        return self.create_agent(file=file_path, **overrides)
+
+    def _update_agent_from_payload(
+        self,
+        agent_id: str,
+        current_agent: Agent,
+        payload: Mapping[str, Any],
+    ) -> "Agent":
+        """Update an agent using a prepared payload mapping."""
+        known, extras = _split_known_and_extra(
+            payload, AgentUpdateRequest.__dataclass_fields__
+        )
+        _normalise_sequence_fields(known)
+
+        tool_refs = extras.pop("_tool_refs", None)
+        agent_refs = extras.pop("_agent_refs", None)
+        mcp_refs = extras.pop("_mcp_refs", None)
+
+        tools_value = known.pop("tools", None)
+        agents_value = known.pop("agents", None)
+        mcps_value = known.pop("mcps", None)
+
+        if tools_value is not None:
+            tools_value = self._resolve_tool_ids(tools_value, tool_refs)
+        if agents_value is not None:
+            agents_value = self._resolve_agent_ids(agents_value, agent_refs)
+        if mcps_value is not None:
+            mcps_value = self._resolve_mcp_ids(mcps_value, mcp_refs)  # pragma: no cover
+
+        request = AgentUpdateRequest(
+            name=known.pop("name", None),
+            instruction=known.pop("instruction", None),
+            description=known.pop("description", None),
+            model=known.pop("model", None),
+            language_model_id=known.pop("language_model_id", None),
+            provider=known.pop("provider", None),
+            model_name=known.pop("model_name", None),
+            agent_type=known.pop("agent_type", known.pop("type", None)),
+            framework=known.pop("framework", None),
+            version=known.pop("version", None),
+            account_id=known.pop("account_id", None),
+            metadata=known.pop("metadata", None),
+            tools=tools_value,
+            tool_configs=known.pop("tool_configs", None),
+            agents=agents_value,
+            mcps=mcps_value,
+            agent_config=known.pop("agent_config", None),
+            a2a_profile=known.pop("a2a_profile", None),
+            extras={**known, **extras},
         )
-
+
+        payload_dict = request.to_payload(current_agent)
+
+        response = self._request("PUT", f"/agents/{agent_id}", json=payload_dict)
+        return Agent(**response)._set_client(self)
 
     def update_agent(
         self,
@@ -481,24 +737,42 @@ class AgentClient(BaseClient):
         name: str | None = None,
         instruction: str | None = None,
         model: str | None = None,
+        *,
+        file: str | PathLike[str] | None = None,
+        tools: list[str | Any] | None = None,
+        agents: list[str | Any] | None = None,
+        mcps: list[str | Any] | None = None,
         **kwargs: Any,
     ) -> "Agent":
         """Update an existing agent."""
-
-
+        base_overrides = {
+            "name": name,
+            "instruction": instruction,
+            "model": model,
+            "tools": tools,
+            "agents": agents,
+            "mcps": mcps,
+        }
+        overrides = _merge_override_maps(base_overrides, kwargs)
 
-
-
-
-
-
-
-            **kwargs,
-        )
+        if file is not None:
+            payload = _prepare_import_payload(
+                Path(file).expanduser(), overrides, drop_model_fields=True
+            )
+        else:
+            payload = overrides
 
-
-
-
+        current_agent = self.get_agent_by_id(agent_id)
+        return self._update_agent_from_payload(agent_id, current_agent, payload)
+
+    def update_agent_from_file(  # pragma: no cover - thin compatibility wrapper
+        self,
+        agent_id: str,
+        file_path: str | PathLike[str],
+        **overrides: Any,
+    ) -> "Agent":
+        """Backward-compatible helper to update an agent from a configuration file."""
+        return self.update_agent(agent_id, file=file_path, **overrides)
 
     def delete_agent(self, agent_id: str) -> None:
         """Delete an agent."""
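
A brief sketch of the file-based create/update flow added above. Method and keyword names follow the diff; the file path, example values, and the agents_client instance are illustrative assumptions.

    # Tool/agent/MCP references may be IDs or names; names are resolved against the workspace.
    agent = agents_client.create_agent(
        name="support-bot",
        instruction="Answer support questions politely.",
        tools=["calculator"],
    )

    # Create or update from an exported configuration file, overriding selected fields.
    copy = agents_client.create_agent(file="agent-export.json", name="support-bot-copy")
    agents_client.update_agent(copy.id, file="agent-export.json", model="gpt-4o")
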
@@ -562,197 +836,6 @@ class AgentClient(BaseClient):
         execution_timeout = kwargs.get("timeout", DEFAULT_AGENT_RUN_TIMEOUT)
         return request_timeout, execution_timeout
 
-    def _create_renderer(
-        self, renderer: RichStreamRenderer | None, **kwargs: Any
-    ) -> RichStreamRenderer:
-        """Create appropriate renderer based on configuration."""
-        if isinstance(renderer, RichStreamRenderer):
-            return renderer
-
-        verbose = kwargs.get("verbose", False)
-
-        if isinstance(renderer, str):
-            if renderer == "silent":
-                return self._create_silent_renderer()
-            elif renderer == "minimal":
-                return self._create_minimal_renderer()
-            else:
-                return self._create_default_renderer(verbose)
-        elif verbose:
-            return self._create_verbose_renderer()
-        else:
-            return self._create_default_renderer(verbose)
-
-    def _create_silent_renderer(self) -> RichStreamRenderer:
-        """Create a silent renderer that suppresses all output."""
-        silent_config = RendererConfig(
-            live=False,
-            persist_live=False,
-            show_delegate_tool_panels=False,
-            render_thinking=False,
-        )
-        return RichStreamRenderer(
-            console=_Console(file=io.StringIO(), force_terminal=False),
-            cfg=silent_config,
-            verbose=False,
-        )
-
-    def _create_minimal_renderer(self) -> RichStreamRenderer:
-        """Create a minimal renderer with basic output."""
-        minimal_config = RendererConfig(
-            live=False,
-            persist_live=False,
-            show_delegate_tool_panels=False,
-            render_thinking=False,
-        )
-        return RichStreamRenderer(
-            console=_Console(),
-            cfg=minimal_config,
-            verbose=False,
-        )
-
-    def _create_verbose_renderer(self) -> RichStreamRenderer:
-        """Create a verbose renderer for detailed output."""
-        verbose_config = RendererConfig(
-            theme="dark",
-            style="debug",
-            live=False,
-            show_delegate_tool_panels=True,
-            append_finished_snapshots=False,
-        )
-        return RichStreamRenderer(
-            console=_Console(),
-            cfg=verbose_config,
-            verbose=True,
-        )
-
-    def _create_default_renderer(self, verbose: bool) -> RichStreamRenderer:
-        """Create the default renderer."""
-        if verbose:
-            return self._create_verbose_renderer()
-        else:
-            default_config = RendererConfig(show_delegate_tool_panels=True)
-            return RichStreamRenderer(console=_Console(), cfg=default_config)
-
-    def _initialize_stream_metadata(self, kwargs: dict[str, Any]) -> dict[str, Any]:
-        """Initialize stream metadata."""
-        return {
-            "agent_name": kwargs.get("agent_name", ""),
-            "model": kwargs.get("model"),
-            "run_id": None,
-            "input_message": "",  # Will be set from kwargs if available
-        }
-
-    def _capture_request_id(
-        self,
-        stream_response: httpx.Response,
-        meta: dict[str, Any],
-        renderer: RichStreamRenderer,
-    ) -> None:
-        """Capture request ID from response headers."""
-        req_id = stream_response.headers.get(
-            "x-request-id"
-        ) or stream_response.headers.get("x-run-id")
-        if req_id:
-            meta["run_id"] = req_id
-            renderer.on_start(meta)
-
-    def _should_start_timer(self, ev: dict[str, Any]) -> bool:
-        """Check if timer should be started for this event."""
-        return "content" in ev or "status" in ev or ev.get("metadata")
-
-    def _handle_content_event(self, ev: dict[str, Any], final_text: str) -> str:
-        """Handle content events."""
-        content = ev.get("content", "")
-        if not content.startswith("Artifact received:"):
-            return content
-        return final_text
-
-    def _handle_usage_event(
-        self, ev: dict[str, Any], stats_usage: dict[str, Any]
-    ) -> None:
-        """Handle usage events."""
-        stats_usage.update(ev.get("usage") or {})
-
-    def _handle_run_info_event(
-        self, ev: dict[str, Any], meta: dict[str, Any], renderer: RichStreamRenderer
-    ) -> None:
-        """Handle run info events."""
-        if ev.get("model"):
-            meta["model"] = ev["model"]
-            renderer.on_start(meta)
-        if ev.get("run_id"):
-            meta["run_id"] = ev["run_id"]
-            renderer.on_start(meta)
-
-    def _process_single_event(
-        self,
-        event: dict[str, Any],
-        renderer: RichStreamRenderer,
-        final_text: str,
-        stats_usage: dict[str, Any],
-        meta: dict[str, Any],
-    ) -> tuple[str, dict[str, Any]]:
-        """Process a single streaming event."""
-        try:
-            ev = json.loads(event["data"])
-        except json.JSONDecodeError:
-            logger.debug("Non-JSON SSE fragment skipped")
-            return final_text, stats_usage
-
-        kind = (ev.get("metadata") or {}).get("kind")
-        renderer.on_event(ev)
-
-        # Skip artifacts from content accumulation
-        if kind == "artifact":
-            return final_text, stats_usage
-
-        # Handle different event types
-        if kind == "final_response" and ev.get("content"):
-            final_text = ev.get("content", "")
-        elif ev.get("content"):
-            final_text = self._handle_content_event(ev, final_text)
-        elif kind == "usage":
-            self._handle_usage_event(ev, stats_usage)
-        elif kind == "run_info":
-            self._handle_run_info_event(ev, meta, renderer)
-
-        return final_text, stats_usage
-
-    def _process_stream_events(
-        self,
-        stream_response: httpx.Response,
-        renderer: RichStreamRenderer,
-        timeout_seconds: float,
-        agent_name: str | None,
-        kwargs: dict[str, Any],
-    ) -> tuple[str, dict[str, Any], float | None, float | None]:
-        """Process streaming events and accumulate response."""
-        final_text = ""
-        stats_usage = {}
-        started_monotonic = None
-        finished_monotonic = None
-
-        meta = self._initialize_stream_metadata(kwargs)
-        self._capture_request_id(stream_response, meta, renderer)
-
-        for event in iter_sse_events(stream_response, timeout_seconds, agent_name):
-            # Start timer at first meaningful event
-            if started_monotonic is None:
-                try:
-                    ev = json.loads(event["data"])
-                    if self._should_start_timer(ev):
-                        started_monotonic = monotonic()
-                except json.JSONDecodeError:
-                    pass
-
-            final_text, stats_usage = self._process_single_event(
-                event, renderer, final_text, stats_usage, meta
-            )
-
-        finished_monotonic = monotonic()
-        return final_text, stats_usage, started_monotonic, finished_monotonic
-
     def run_agent(
         self,
         agent_id: str,
@@ -764,7 +847,6 @@ class AgentClient(BaseClient):
         **kwargs,
     ) -> str:
         """Run an agent with a message, streaming via a renderer."""
-        # Prepare request payload and headers
         (
             payload,
             data_payload,
@@ -773,20 +855,18 @@ class AgentClient(BaseClient):
             multipart_data,
         ) = self._prepare_sync_request_data(message, files, tty, **kwargs)
 
-
-
+        render_manager = self._get_renderer_manager()
+        verbose = kwargs.get("verbose", False)
+        r = self._create_renderer(renderer, verbose=verbose)
+        meta = render_manager.build_initial_metadata(agent_id, message, kwargs)
+        render_manager.start_renderer(r, meta)
 
-
-
-
-
-            "run_id": None,
-            "input_message": message,
-        }
-        r.on_start(meta)
+        final_text = ""
+        stats_usage: dict[str, Any] = {}
+        started_monotonic: float | None = None
+        finished_monotonic: float | None = None
 
         try:
-            # Make streaming request
             response = self.http_client.stream(
                 "POST",
                 f"/agents/{agent_id}/run",
@@ -799,8 +879,7 @@ class AgentClient(BaseClient):
             with response as stream_response:
                 stream_response.raise_for_status()
 
-
-                timeout_seconds = kwargs.get("timeout", DEFAULT_AGENT_RUN_TIMEOUT)
+                timeout_seconds = compute_timeout_seconds(kwargs)
                 agent_name = kwargs.get("agent_name")
 
                 (
@@ -809,7 +888,11 @@ class AgentClient(BaseClient):
                     started_monotonic,
                     finished_monotonic,
                 ) = self._process_stream_events(
-                    stream_response,
+                    stream_response,
+                    r,
+                    timeout_seconds,
+                    agent_name,
+                    meta,
                 )
 
         except KeyboardInterrupt:
@@ -823,25 +906,16 @@ class AgentClient(BaseClient):
             finally:
                 raise
         finally:
-            # Ensure cleanup
             if multipart_data:
                 multipart_data.close()
 
-
-
-
-
-
-
-
-        if hasattr(r, "state") and hasattr(r.state, "buffer"):
-            rendered_text = "".join(r.state.buffer)
-        else:
-            rendered_text = ""
-
-        final_payload = final_text or rendered_text or "No response content received."
-        r.on_complete(st)
-        return final_payload
+        return self._finalize_renderer(
+            r,
+            final_text,
+            stats_usage,
+            started_monotonic,
+            finished_monotonic,
+        )
 
     def _prepare_request_data(
         self,