agentfield-0.1.22rc2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentfield/__init__.py +66 -0
- agentfield/agent.py +3569 -0
- agentfield/agent_ai.py +1125 -0
- agentfield/agent_cli.py +386 -0
- agentfield/agent_field_handler.py +494 -0
- agentfield/agent_mcp.py +534 -0
- agentfield/agent_registry.py +29 -0
- agentfield/agent_server.py +1185 -0
- agentfield/agent_utils.py +269 -0
- agentfield/agent_workflow.py +323 -0
- agentfield/async_config.py +278 -0
- agentfield/async_execution_manager.py +1227 -0
- agentfield/client.py +1447 -0
- agentfield/connection_manager.py +280 -0
- agentfield/decorators.py +527 -0
- agentfield/did_manager.py +337 -0
- agentfield/dynamic_skills.py +304 -0
- agentfield/execution_context.py +255 -0
- agentfield/execution_state.py +453 -0
- agentfield/http_connection_manager.py +429 -0
- agentfield/litellm_adapters.py +140 -0
- agentfield/logger.py +249 -0
- agentfield/mcp_client.py +204 -0
- agentfield/mcp_manager.py +340 -0
- agentfield/mcp_stdio_bridge.py +550 -0
- agentfield/memory.py +723 -0
- agentfield/memory_events.py +489 -0
- agentfield/multimodal.py +173 -0
- agentfield/multimodal_response.py +403 -0
- agentfield/pydantic_utils.py +227 -0
- agentfield/rate_limiter.py +280 -0
- agentfield/result_cache.py +441 -0
- agentfield/router.py +190 -0
- agentfield/status.py +70 -0
- agentfield/types.py +710 -0
- agentfield/utils.py +26 -0
- agentfield/vc_generator.py +464 -0
- agentfield/vision.py +198 -0
- agentfield-0.1.22rc2.dist-info/METADATA +102 -0
- agentfield-0.1.22rc2.dist-info/RECORD +42 -0
- agentfield-0.1.22rc2.dist-info/WHEEL +5 -0
- agentfield-0.1.22rc2.dist-info/top_level.txt +1 -0
agentfield/client.py
ADDED
@@ -0,0 +1,1447 @@
import asyncio
import datetime
import importlib
import random
import sys
import time
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING, Union

import requests

from .types import (
    AgentStatus,
    CompactDiscoveryResponse,
    DiscoveryResponse,
    DiscoveryResult,
    HeartbeatData,
    WebhookConfig,
)
from .async_config import AsyncConfig
from .execution_state import ExecutionStatus
from .result_cache import ResultCache
from .async_execution_manager import AsyncExecutionManager
from .logger import get_logger
from .status import normalize_status
from .execution_context import generate_run_id

httpx = None  # type: ignore


# Python 3.8 compatibility: asyncio.to_thread was added in Python 3.9
if sys.version_info >= (3, 9):
    from asyncio import to_thread as _to_thread
else:
    async def _to_thread(func, *args, **kwargs):
        """Compatibility shim for asyncio.to_thread on Python 3.8."""
        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(None, lambda: func(*args, **kwargs))


def _ensure_httpx(force_reload: bool = False):
    """Load httpx lazily, allowing tests to monkeypatch the module."""
    global httpx

    if not force_reload and httpx is not None:
        return httpx

    try:
        module = importlib.import_module("httpx")
    except ImportError:
        httpx = None
    else:
        httpx = module

    return httpx


if TYPE_CHECKING:  # pragma: no cover - imported for type hints only
    import httpx  # noqa: F401


# Prime optional dependency cache at import time when available
_ensure_httpx()

# Set up logger for this module
logger = get_logger(__name__)

SUCCESS_STATUSES = {ExecutionStatus.SUCCEEDED.value}
FAILURE_STATUSES = {
    ExecutionStatus.FAILED.value,
    ExecutionStatus.CANCELLED.value,
    ExecutionStatus.TIMEOUT.value,
}


@dataclass
class _Submission:
    execution_id: str
    run_id: str
    target: str
    status: str
    target_type: Optional[str] = None


class AgentFieldClient:
    def __init__(
        self,
        base_url: str = "http://localhost:8080",
        api_key: Optional[str] = None,
        async_config: Optional[AsyncConfig] = None,
    ):
        self.base_url = base_url
        self.api_base = f"{base_url}/api/v1"
        self.api_key = api_key

        # Async execution components
        self.async_config = async_config or AsyncConfig()
        self._async_execution_manager: Optional[AsyncExecutionManager] = None
        self._async_http_client: Optional["httpx.AsyncClient"] = None
        self._async_http_client_lock: Optional[asyncio.Lock] = None
        self._result_cache = ResultCache(self.async_config)
        self._latest_event_stream_headers: Dict[str, str] = {}
        self._current_workflow_context = None

    def _generate_id(self, prefix: str) -> str:
        timestamp = datetime.datetime.utcnow().strftime("%Y%m%d_%H%M%S")
        random_suffix = f"{random.getrandbits(32):08x}"
        return f"{prefix}_{timestamp}_{random_suffix}"

    def _build_event_stream_headers(
        self, source_headers: Optional[Dict[str, str]]
    ) -> Dict[str, str]:
        """Return headers that should be forwarded to the SSE event stream."""

        headers = dict(source_headers or {})
        if not headers:
            return {}

        allowed = {"authorization", "cookie"}
        event_headers: Dict[str, str] = {}
        for key, value in headers.items():
            if value is None:
                continue
            lower = key.lower()
            if lower.startswith("x-") or lower in allowed:
                event_headers[key] = value
        return event_headers

    def _sanitize_header_values(
        self, headers: Dict[str, Any]
    ) -> Dict[str, str]:
        """Ensure all header values are concrete strings for requests/httpx."""

        sanitized: Dict[str, str] = {}
        for key, value in headers.items():
            if value is None:
                continue
            if isinstance(value, bytes):
                sanitized[key] = value.decode("utf-8", errors="replace")
            elif isinstance(value, str):
                sanitized[key] = value
            else:
                sanitized[key] = str(value)
        return sanitized

    def _get_auth_headers(self) -> Dict[str, str]:
        """Return auth headers if configured."""
        if not self.api_key:
            return {}
        return {"X-API-Key": self.api_key}

    def _get_headers_with_context(
        self, headers: Optional[Dict[str, str]] = None
    ) -> Dict[str, str]:
        """Merge caller headers with the active workflow context headers."""

        merged = self._get_auth_headers()
        merged.update(headers or {})
        context = getattr(self, "_current_workflow_context", None)
        if context and hasattr(context, "to_headers"):
            try:
                context_headers = context.to_headers()
            except Exception:
                context_headers = {}
            for key, value in (context_headers or {}).items():
                merged.setdefault(key, value)
        return merged

    def _maybe_update_event_stream_headers(
        self, source_headers: Optional[Dict[str, str]]
    ) -> None:
        """Update stored SSE headers and propagate to the manager when enabled."""

        if not self.async_config.enable_event_stream:
            return

        new_headers = self._build_event_stream_headers(source_headers)

        if (
            not new_headers
            and source_headers is None
            and self._current_workflow_context
        ):
            try:
                context_headers = self._current_workflow_context.to_headers()
            except Exception:
                context_headers = {}
            new_headers = self._build_event_stream_headers(context_headers)

        if new_headers:
            self._latest_event_stream_headers = new_headers
        elif source_headers is None and not self._latest_event_stream_headers:
            # No headers from context yet; keep empty state.
            self._latest_event_stream_headers = {}

        if self._async_execution_manager is not None:
            self._async_execution_manager.set_event_stream_headers(
                self._latest_event_stream_headers
            )

    def discover_capabilities(
        self,
        *,
        agent: Optional[str] = None,
        node_id: Optional[str] = None,
        agent_ids: Optional[List[str]] = None,
        node_ids: Optional[List[str]] = None,
        reasoner: Optional[str] = None,
        skill: Optional[str] = None,
        tags: Optional[List[str]] = None,
        include_input_schema: Optional[bool] = None,
        include_output_schema: Optional[bool] = None,
        include_descriptions: Optional[bool] = None,
        include_examples: Optional[bool] = None,
        format: str = "json",
        health_status: Optional[str] = None,
        limit: Optional[int] = None,
        offset: Optional[int] = None,
        headers: Optional[Dict[str, str]] = None,
    ) -> DiscoveryResult:
        """
        Query the control plane discovery API.
        """

        fmt = (format or "json").lower()
        params: Dict[str, str] = {"format": fmt}

        def _dedupe(values: Optional[List[str]]) -> List[str]:
            if not values:
                return []
            seen = set()
            out: List[str] = []
            for value in values:
                if not value or value in seen:
                    continue
                seen.add(value)
                out.append(value)
            return out

        combined_agent_ids = _dedupe(
            ([agent] if agent else [])
            + ([node_id] if node_id else [])
            + (agent_ids or [])
            + (node_ids or [])
        )

        if len(combined_agent_ids) == 1:
            params["agent"] = combined_agent_ids[0]
        elif len(combined_agent_ids) > 1:
            params["agent_ids"] = ",".join(combined_agent_ids)

        if reasoner:
            params["reasoner"] = reasoner
        if skill:
            params["skill"] = skill
        if tags:
            params["tags"] = ",".join(_dedupe(tags))

        if include_input_schema is not None:
            params["include_input_schema"] = str(bool(include_input_schema)).lower()
        if include_output_schema is not None:
            params["include_output_schema"] = str(bool(include_output_schema)).lower()
        if include_descriptions is not None:
            params["include_descriptions"] = str(bool(include_descriptions)).lower()
        if include_examples is not None:
            params["include_examples"] = str(bool(include_examples)).lower()
        if health_status:
            params["health_status"] = health_status.lower()
        if limit is not None:
            params["limit"] = str(limit)
        if offset is not None:
            params["offset"] = str(offset)

        request_headers = self._get_headers_with_context(headers)
        request_headers["Accept"] = (
            "application/xml" if fmt == "xml" else "application/json"
        )
        sanitized_headers = self._sanitize_header_values(request_headers)

        response = requests.get(
            f"{self.api_base}/discovery/capabilities",
            params=params,
            headers=sanitized_headers,
            timeout=self.async_config.polling_timeout,
        )
        response.raise_for_status()

        raw_body = response.text
        if fmt == "xml":
            return DiscoveryResult(format=fmt, raw=raw_body, xml=raw_body)

        payload = response.json()
        if fmt == "compact":
            compact = CompactDiscoveryResponse.from_dict(payload)
            return DiscoveryResult(
                format=fmt, raw=raw_body, compact=compact, json=None
            )

        json_payload = DiscoveryResponse.from_dict(payload)
        return DiscoveryResult(format="json", raw=raw_body, json=json_payload)

    async def get_async_http_client(self) -> "httpx.AsyncClient":
        """Lazily create and return a shared httpx.AsyncClient."""
        current_module = sys.modules.get("httpx")
        reload_needed = httpx is None or current_module is not httpx
        httpx_module = _ensure_httpx(force_reload=reload_needed)
        if httpx_module is None:
            raise RuntimeError("httpx is required for async HTTP operations")

        if self._async_http_client and not getattr(
            self._async_http_client, "is_closed", False
        ):
            return self._async_http_client

        if self._async_http_client_lock is None:
            self._async_http_client_lock = asyncio.Lock()

        async with self._async_http_client_lock:
            if self._async_http_client and not getattr(
                self._async_http_client, "is_closed", False
            ):
                return self._async_http_client

            client_kwargs = {
                "headers": {
                    "User-Agent": "AgentFieldSDK/1.0",
                    "Accept": "application/json",
                }
            }

            limits_factory = getattr(httpx_module, "Limits", None)
            if limits_factory:
                client_kwargs["limits"] = limits_factory(
                    max_connections=self.async_config.connection_pool_size,
                    max_keepalive_connections=self.async_config.connection_pool_per_host,
                )

            timeout_factory = getattr(httpx_module, "Timeout", None)
            if timeout_factory:
                client_kwargs["timeout"] = timeout_factory(10.0, connect=5.0)
            else:
                client_kwargs["timeout"] = 10.0

            try:
                self._async_http_client = httpx_module.AsyncClient(**client_kwargs)
            except TypeError:
                # Test doubles may not accept keyword arguments
                self._async_http_client = httpx_module.AsyncClient()
                headers = client_kwargs.get("headers")
                if headers and hasattr(self._async_http_client, "headers"):
                    try:
                        self._async_http_client.headers.update(headers)
                    except Exception:
                        pass

            return self._async_http_client

    async def _async_request(self, method: str, url: str, **kwargs):
        """Perform an HTTP request using the shared async client with sync fallback."""
        # Inject API key into headers if available
        if self.api_key:
            if "headers" not in kwargs:
                kwargs["headers"] = {}
            if "X-API-Key" not in kwargs["headers"]:
                kwargs["headers"]["X-API-Key"] = self.api_key

        try:
            client = await self.get_async_http_client()
        except RuntimeError:
            return await _to_thread(self._sync_request, method, url, **kwargs)

        return await client.request(method, url, **kwargs)

    @staticmethod
    def _sync_request(method: str, url: str, **kwargs):
        """Blocking HTTP request helper used when httpx is unavailable."""
        # DIAGNOSTIC: Add request size logging
        if "json" in kwargs:
            import json

            json_size = len(json.dumps(kwargs["json"]).encode("utf-8"))
            logger.debug(
                f"🔍 SYNC_REQUEST: Making {method} request to {url} with JSON payload size: {json_size} bytes"
            )

        # Configure session with proper settings for large payloads
        session = requests.Session()

        # Configure adapter with larger buffer sizes for handling large JSON responses
        from requests.adapters import HTTPAdapter
        from urllib3.util.retry import Retry

        # Create custom adapter with larger buffer sizes
        adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=0.3))
        session.mount("http://", adapter)
        session.mount("https://", adapter)

        # Set default headers if not provided
        if "headers" not in kwargs:
            kwargs["headers"] = {}

        # Ensure proper content type for JSON requests
        if "json" in kwargs and "Content-Type" not in kwargs["headers"]:
            kwargs["headers"]["Content-Type"] = "application/json"

        # Add User-Agent if not present
        if "User-Agent" not in kwargs["headers"]:
            kwargs["headers"]["User-Agent"] = "AgentFieldSDK/1.0"

        # DIAGNOSTIC: Log request details
        logger.debug(f"🔍 SYNC_REQUEST: Headers: {kwargs.get('headers', {})}")

        # Configure stream=False to ensure we read the full response
        # This prevents truncation issues with large JSON responses
        if "stream" not in kwargs:
            kwargs["stream"] = False

        try:
            response = session.request(method, url, **kwargs)

            # DIAGNOSTIC: Log response details
            logger.debug(
                f"🔍 SYNC_RESPONSE: Status {response.status_code}, Content-Length: {response.headers.get('Content-Length', 'unknown')}"
            )

            # Check if response might be truncated
            content_length = response.headers.get("Content-Length")
            if content_length and len(response.content) != int(content_length):
                logger.error(
                    f"🚨 RESPONSE_TRUNCATION: Expected {content_length} bytes, got {len(response.content)} bytes"
                )

            # Check for exactly 4096 bytes which indicates truncation
            if len(response.content) == 4096:
                logger.error(
                    "🚨 POSSIBLE_TRUNCATION: Response is exactly 4096 bytes - likely truncated!"
                )

            return response
        finally:
            session.close()

    async def aclose(self) -> None:
        """Close shared resources such as async HTTP clients and managers."""
        if self._async_execution_manager is not None:
            try:
                await self._async_execution_manager.stop()
            finally:
                self._async_execution_manager = None

        if self._async_http_client is not None:
            try:
                await self._async_http_client.aclose()
            finally:
                self._async_http_client = None
                self._async_http_client_lock = None

    def register_node(self, node_data: Dict[str, Any]) -> Dict[str, Any]:
        """Register agent node with AgentField server"""
        response = requests.post(
            f"{self.api_base}/nodes/register",
            json=node_data,
            headers=self._get_auth_headers(),
        )
        response.raise_for_status()  # Raise an exception for bad status codes
        return response.json()

    def update_health(
        self, node_id: str, health_data: Dict[str, Any]
    ) -> Dict[str, Any]:
        """Update node health status"""
        response = requests.put(
            f"{self.api_base}/nodes/{node_id}/health",
            json=health_data,
            headers=self._get_auth_headers(),
        )
        response.raise_for_status()  # Raise an exception for bad status codes
        return response.json()

    def get_nodes(self) -> Dict[str, Any]:
        """Get all registered nodes"""
        response = requests.get(
            f"{self.api_base}/nodes",
            headers=self._get_auth_headers(),
        )
        response.raise_for_status()  # Raise an exception for bad status codes
        return response.json()

    def _apply_vc_metadata(
        self, registration_data: Dict[str, Any], vc_metadata: Optional[Dict[str, Any]]
    ) -> None:
        """Attach VC metadata to the registration payload if supplied."""
        if not vc_metadata:
            return

        metadata = registration_data.setdefault("metadata", {})
        custom_section = metadata.setdefault("custom", {})
        custom_section["vc_generation"] = vc_metadata

    async def register_agent(
        self,
        node_id: str,
        reasoners: List[dict],
        skills: List[dict],
        base_url: str,
        discovery: Optional[Dict[str, Any]] = None,
        vc_metadata: Optional[Dict[str, Any]] = None,
    ) -> Tuple[bool, Optional[Dict[str, Any]]]:
        """Register or update agent information with AgentField server."""
        try:
            registration_data = {
                "id": node_id,
                "team_id": "default",
                "base_url": base_url,
                "version": "1.0.0",
                "reasoners": reasoners,
                "skills": skills,
                "communication_config": {
                    "protocols": ["http"],
                    "websocket_endpoint": "",
                    "heartbeat_interval": "5s",
                },
                "health_status": "healthy",
                "last_heartbeat": datetime.datetime.now().isoformat() + "Z",
                "registered_at": datetime.datetime.now().isoformat() + "Z",
                "features": {
                    "ab_testing": False,
                    "advanced_metrics": False,
                    "compliance": False,
                    "audit_logging": False,
                    "role_based_access": False,
                    "experimental": {},
                },
                "metadata": {
                    "deployment": {
                        "environment": "development",
                        "platform": "python",
                        "region": "local",
                        "tags": {"sdk_version": "1.0.0", "language": "python"},
                    },
                    "performance": {"latency_ms": 0, "throughput_ps": 0},
                    "custom": {},
                },
            }

            if discovery:
                registration_data["callback_discovery"] = discovery

            self._apply_vc_metadata(registration_data, vc_metadata)

            response = await self._async_request(
                "POST",
                f"{self.api_base}/nodes/register",
                json=registration_data,
                headers=self._get_auth_headers(),
                timeout=30.0,
            )
            payload: Optional[Dict[str, Any]] = None
            if hasattr(response, "json"):
                try:
                    payload = response.json()
                except Exception:
                    payload = None

            if response.status_code not in (200, 201):
                return False, payload

            return True, payload

        except Exception:
            # self.logger.error(f"Failed to register agent: {e}")
            return False, None

    async def execute(
        self,
        target: str,
        input_data: Dict[str, Any],
        headers: Optional[Dict[str, str]] = None,
    ) -> Dict[str, Any]:
        """
        Execute a reasoner or skill via the durable execution gateway.

        The public signature remains unchanged, but internally we now submit the
        execution, poll for completion with adaptive backoff, and return the final
        result once the worker finishes processing.
        """

        execution_headers = self._prepare_execution_headers(headers)
        submission = await self._submit_execution_async(
            target, input_data, execution_headers
        )
        status_payload = await self._await_execution_async(
            submission, execution_headers
        )
        result_value, metadata = self._format_execution_result(
            submission, status_payload
        )
        return self._build_execute_response(
            submission, status_payload, result_value, metadata
        )

    def execute_sync(
        self,
        target: str,
        input_data: Dict[str, Any],
        headers: Optional[Dict[str, str]] = None,
    ) -> Dict[str, Any]:
        """
        Blocking version of execute used by synchronous callers.
        """

        execution_headers = self._prepare_execution_headers(headers)
        submission = self._submit_execution_sync(target, input_data, execution_headers)
        status_payload = self._await_execution_sync(submission, execution_headers)
        result_value, metadata = self._format_execution_result(
            submission, status_payload
        )
        return self._build_execute_response(
            submission, status_payload, result_value, metadata
        )
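
Usage note (a minimal sketch, not part of client.py): the submit-and-poll flow implemented by execute()/execute_sync() above, assuming an AgentField server at the default base_url. The target "my_node.summarize" and its input are hypothetical; only fields built by _build_execute_response (status, result) are read.

import asyncio
from agentfield.client import AgentFieldClient

async def main():
    client = AgentFieldClient(base_url="http://localhost:8080")
    # Submits the execution, polls with adaptive backoff, returns the final payload
    outcome = await client.execute("my_node.summarize", {"text": "hello world"})
    print(outcome["status"], outcome["result"])
    await client.aclose()

asyncio.run(main())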

    def _prepare_execution_headers(
        self, headers: Optional[Dict[str, str]]
    ) -> Dict[str, str]:
        merged_headers = self._get_headers_with_context(headers)

        final_headers: Dict[str, str] = {"Content-Type": "application/json"}
        final_headers.update(merged_headers)

        run_id = final_headers.get("X-Run-ID") or final_headers.get("x-run-id")
        if not run_id:
            final_headers["X-Run-ID"] = generate_run_id()
        else:
            final_headers["X-Run-ID"] = run_id

        # Ensure parent execution header casing is consistent if provided
        parent_execution = final_headers.pop("x-parent-execution-id", None)
        if parent_execution and parent_execution.strip():
            final_headers["X-Parent-Execution-ID"] = parent_execution.strip()

        session_id = final_headers.pop("x-session-id", None)
        if session_id:
            final_headers["X-Session-ID"] = session_id

        actor_id = final_headers.pop("x-actor-id", None)
        if actor_id:
            final_headers["X-Actor-ID"] = actor_id

        sanitized_headers = self._sanitize_header_values(final_headers)
        self._maybe_update_event_stream_headers(sanitized_headers)
        return sanitized_headers

    def _submit_execution_sync(
        self,
        target: str,
        input_data: Dict[str, Any],
        headers: Dict[str, str],
    ) -> _Submission:
        payload = {"input": input_data}
        try:
            response = requests.post(
                f"{self.api_base}/execute/async/{target}",
                json=payload,
                headers=headers,
                timeout=self.async_config.polling_timeout,
            )
        except requests.RequestException as exc:
            raise RuntimeError(f"Failed to submit execution: {exc}") from exc
        response.raise_for_status()
        body = response.json()
        return self._parse_submission(body, headers, target)

    async def _submit_execution_async(
        self,
        target: str,
        input_data: Dict[str, Any],
        headers: Dict[str, str],
    ) -> _Submission:
        payload = {"input": input_data}
        response = await self._async_request(
            "POST",
            f"{self.api_base}/execute/async/{target}",
            json=payload,
            headers=headers,
            timeout=self.async_config.polling_timeout,
        )
        response.raise_for_status()
        body = response.json()
        return self._parse_submission(body, headers, target)

    def _parse_submission(
        self,
        body: Dict[str, Any],
        headers: Dict[str, str],
        target: str,
    ) -> _Submission:
        execution_id = body.get("execution_id")
        run_id = body.get("run_id") or headers.get("X-Run-ID")
        status = (body.get("status") or "pending").lower()
        target_type = body.get("type") or body.get("target_type")

        if not execution_id or not run_id:
            raise RuntimeError("Execution submission missing identifiers")

        return _Submission(
            execution_id=execution_id,
            run_id=run_id,
            target=target,
            status=status,
            target_type=target_type,
        )

    def _await_execution_sync(
        self,
        submission: _Submission,
        headers: Dict[str, str],
    ) -> Dict[str, Any]:
        cached = self._result_cache.get_execution_result(submission.execution_id)
        if cached is not None:
            return {
                "result": cached,
                "status": "succeeded",
                "run_id": submission.run_id,
            }

        interval = max(self.async_config.initial_poll_interval, 0.25)
        start = time.time()

        while True:
            response = requests.get(
                f"{self.api_base}/executions/{submission.execution_id}",
                headers=headers,
                timeout=self.async_config.polling_timeout,
            )
            response.raise_for_status()
            payload = response.json()
            normalized_status = normalize_status(payload.get("status"))
            payload["status"] = normalized_status

            if normalized_status in SUCCESS_STATUSES:
                return payload

            if normalized_status in FAILURE_STATUSES:
                if not payload.get("error_message") and payload.get("error"):
                    payload["error_message"] = payload["error"]
                return payload

            if (time.time() - start) > self.async_config.max_execution_timeout:
                raise TimeoutError(
                    f"Execution {submission.execution_id} exceeded timeout"
                )

            time.sleep(self._next_poll_interval(interval))
            interval = min(interval * 2, self.async_config.max_poll_interval)

    async def _await_execution_async(
        self,
        submission: _Submission,
        headers: Dict[str, str],
    ) -> Dict[str, Any]:
        cached = self._result_cache.get_execution_result(submission.execution_id)
        if cached is not None:
            return {
                "result": cached,
                "status": "succeeded",
                "run_id": submission.run_id,
            }

        interval = max(self.async_config.initial_poll_interval, 0.25)
        start = time.time()

        while True:
            response = await self._async_request(
                "GET",
                f"{self.api_base}/executions/{submission.execution_id}",
                headers=headers,
                timeout=self.async_config.polling_timeout,
            )
            response.raise_for_status()
            payload = response.json()
            normalized_status = normalize_status(payload.get("status"))
            payload["status"] = normalized_status

            if normalized_status in SUCCESS_STATUSES:
                return payload

            if normalized_status in FAILURE_STATUSES:
                if not payload.get("error_message") and payload.get("error"):
                    payload["error_message"] = payload["error"]
                return payload

            if (time.time() - start) > self.async_config.max_execution_timeout:
                raise TimeoutError(
                    f"Execution {submission.execution_id} exceeded timeout"
                )

            await asyncio.sleep(self._next_poll_interval(interval))
            interval = min(interval * 2, self.async_config.max_poll_interval)

    def _format_execution_result(
        self,
        submission: _Submission,
        payload: Dict[str, Any],
    ) -> Tuple[Any, Dict[str, Any]]:
        result_value = payload.get("result")
        if result_value is None:
            result_value = payload

        normalized_status = normalize_status(payload.get("status"))
        target = payload.get("target") or submission.target
        node_id = payload.get("node_id")
        if not node_id and target and "." in target:
            node_id = target.split(".", 1)[0]

        metadata = {
            "execution_id": submission.execution_id,
            "run_id": payload.get("run_id") or submission.run_id,
            "status": normalized_status,
            "target": target,
            "type": payload.get("type") or submission.target_type,
            "duration_ms": payload.get("duration_ms") or payload.get("duration"),
            "started_at": payload.get("started_at"),
            "completed_at": payload.get("completed_at"),
            "node_id": node_id,
            "error_message": payload.get("error_message") or payload.get("error"),
        }

        if metadata.get("completed_at"):
            metadata["timestamp"] = metadata["completed_at"]
        elif metadata.get("started_at"):
            metadata["timestamp"] = metadata["started_at"]
        else:
            metadata["timestamp"] = datetime.datetime.utcnow().isoformat()

        # Cache successful results for reuse
        if normalized_status in SUCCESS_STATUSES:
            try:
                self._result_cache.set_execution_result(
                    submission.execution_id, result_value
                )
            except Exception:
                logger.debug("Failed to cache execution result", exc_info=True)

        return result_value, {k: v for k, v in metadata.items() if v is not None}

    def _build_execute_response(
        self,
        submission: _Submission,
        payload: Dict[str, Any],
        result_value: Any,
        metadata: Dict[str, Any],
    ) -> Dict[str, Any]:
        normalized_status = normalize_status(metadata.get("status"))
        error_message = metadata.get("error_message")

        if normalized_status in SUCCESS_STATUSES:
            response_result = result_value
        elif normalized_status in FAILURE_STATUSES:
            response_result = None
        else:
            response_result = result_value

        response = {
            "execution_id": metadata.get("execution_id"),
            "run_id": metadata.get("run_id"),
            "node_id": metadata.get("node_id"),
            "type": metadata.get("type"),
            "target": metadata.get("target") or submission.target,
            "status": normalized_status,
            "duration_ms": metadata.get("duration_ms"),
            "timestamp": metadata.get("timestamp")
            or datetime.datetime.utcnow().isoformat(),
            "result": response_result,
            "error_message": error_message,
            "cost": payload.get("cost"),
        }

        return response

    def _next_poll_interval(self, current: float) -> float:
        jitter = random.uniform(0.8, 1.2)
        return max(0.05, min(current * jitter, self.async_config.max_poll_interval))
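
Usage note (a standalone sketch, not part of client.py): the polling loops above double the interval after each attempt, capped at max_poll_interval, and _next_poll_interval adds roughly +/-20% jitter. The config values below (0.25s initial, 5.0s cap) are assumptions for illustration.

import random

initial, cap = 0.25, 5.0  # assumed initial_poll_interval / max_poll_interval
interval, waits = initial, []
for _ in range(6):
    jitter = random.uniform(0.8, 1.2)
    waits.append(max(0.05, min(interval * jitter, cap)))  # mirrors _next_poll_interval
    interval = min(interval * 2, cap)  # mirrors the doubling step in the poll loops
print(waits)  # roughly 0.25, 0.5, 1.0, 2.0, 4.0, 5.0 before jitter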

    async def send_enhanced_heartbeat(
        self, node_id: str, heartbeat_data: HeartbeatData
    ) -> bool:
        """
        Send enhanced heartbeat with status and MCP information to AgentField server.

        Args:
            node_id: The agent node ID
            heartbeat_data: Enhanced heartbeat data with status and MCP info

        Returns:
            True if heartbeat was successful, False otherwise
        """
        try:
            headers = {"Content-Type": "application/json"}
            headers.update(self._get_auth_headers())
            response = await self._async_request(
                "POST",
                f"{self.api_base}/nodes/{node_id}/heartbeat",
                json=heartbeat_data.to_dict(),
                headers=headers,
                timeout=5.0,
            )
            response.raise_for_status()
            return True
        except Exception:
            return False

    def send_enhanced_heartbeat_sync(
        self, node_id: str, heartbeat_data: HeartbeatData
    ) -> bool:
        """
        Synchronous version of enhanced heartbeat for compatibility.

        Args:
            node_id: The agent node ID
            heartbeat_data: Enhanced heartbeat data with status and MCP info

        Returns:
            True if heartbeat was successful, False otherwise
        """
        try:
            headers = {"Content-Type": "application/json"}
            headers.update(self._get_auth_headers())
            response = requests.post(
                f"{self.api_base}/nodes/{node_id}/heartbeat",
                json=heartbeat_data.to_dict(),
                headers=headers,
                timeout=5.0,
            )
            response.raise_for_status()
            return True
        except Exception:
            return False

    async def notify_graceful_shutdown(self, node_id: str) -> bool:
        """
        Notify AgentField server that the agent is shutting down gracefully.

        Args:
            node_id: The agent node ID

        Returns:
            True if notification was successful, False otherwise
        """
        try:
            headers = {"Content-Type": "application/json"}
            headers.update(self._get_auth_headers())
            response = await self._async_request(
                "POST",
                f"{self.api_base}/nodes/{node_id}/shutdown",
                headers=headers,
                timeout=5.0,
            )
            response.raise_for_status()
            return True
        except Exception:
            return False

    def notify_graceful_shutdown_sync(self, node_id: str) -> bool:
        """
        Synchronous version of graceful shutdown notification.

        Args:
            node_id: The agent node ID

        Returns:
            True if notification was successful, False otherwise
        """
        try:
            headers = {"Content-Type": "application/json"}
            headers.update(self._get_auth_headers())
            response = requests.post(
                f"{self.api_base}/nodes/{node_id}/shutdown",
                headers=headers,
                timeout=5.0,
            )
            response.raise_for_status()
            return True
        except Exception:
            return False

    async def register_agent_with_status(
        self,
        node_id: str,
        reasoners: List[dict],
        skills: List[dict],
        base_url: str,
        status: AgentStatus = AgentStatus.STARTING,
        discovery: Optional[Dict[str, Any]] = None,
        suppress_errors: bool = False,
        vc_metadata: Optional[Dict[str, Any]] = None,
    ) -> Tuple[bool, Optional[Dict[str, Any]]]:
        """Register agent with immediate status reporting for fast lifecycle."""
        try:
            registration_data = {
                "id": node_id,
                "team_id": "default",
                "base_url": base_url,
                "version": "1.0.0",
                "reasoners": reasoners,
                "skills": skills,
                "lifecycle_status": status.value,
                "communication_config": {
                    "protocols": ["http"],
                    "websocket_endpoint": "",
                    "heartbeat_interval": "2s",
                },
                "health_status": "healthy",
                "last_heartbeat": datetime.datetime.now().isoformat() + "Z",
                "registered_at": datetime.datetime.now().isoformat() + "Z",
                "features": {
                    "ab_testing": False,
                    "advanced_metrics": False,
                    "compliance": False,
                    "audit_logging": False,
                    "role_based_access": False,
                    "experimental": {},
                },
                "metadata": {
                    "deployment": {
                        "environment": "development",
                        "platform": "python",
                        "region": "local",
                        "tags": {"sdk_version": "1.0.0", "language": "python"},
                    },
                    "performance": {"latency_ms": 0, "throughput_ps": 0},
                    "custom": {},
                },
            }

            if discovery:
                registration_data["callback_discovery"] = discovery

            self._apply_vc_metadata(registration_data, vc_metadata)

            response = await self._async_request(
                "POST",
                f"{self.api_base}/nodes/register",
                json=registration_data,
                headers=self._get_auth_headers(),
                timeout=10.0,
            )

            payload: Optional[Dict[str, Any]] = None
            try:
                if getattr(response, "content", None):
                    payload = response.json()
            except Exception:
                payload = None

            if response.status_code not in (200, 201):
                if not suppress_errors:
                    logger.error(
                        "Fast lifecycle registration failed with status %s",
                        response.status_code,
                    )
                    logger.error(
                        f"Response text: {getattr(response, 'text', '<none>')}"
                    )
                else:
                    logger.debug(
                        "Fast lifecycle registration failed with status %s",
                        response.status_code,
                    )
                return False, payload

            logger.debug(f"Agent {node_id} registered successfully")
            return True, payload

        except Exception as e:
            if not suppress_errors:
                logger.error(
                    f"Agent registration failed for {node_id}: {type(e).__name__}: {e}"
                )
            else:
                logger.debug(
                    f"Agent registration failed for {node_id}: {type(e).__name__}"
                )
            return False, None

    # Async Execution Methods

    async def _get_async_execution_manager(self) -> AsyncExecutionManager:
        """
        Get or create the async execution manager instance.

        Returns:
            AsyncExecutionManager: Active async execution manager
        """
        if self._async_execution_manager is None:
            self._async_execution_manager = AsyncExecutionManager(
                base_url=self.base_url, config=self.async_config
            )
            await self._async_execution_manager.start()
            self._maybe_update_event_stream_headers(None)

        return self._async_execution_manager

    async def execute_async(
        self,
        target: str,
        input_data: Dict[str, Any],
        headers: Optional[Dict[str, str]] = None,
        timeout: Optional[float] = None,
        webhook: Optional[Union[WebhookConfig, Dict[str, Any]]] = None,
    ) -> str:
        """
        Submit an async execution and return execution_id.

        Args:
            target: Target in format 'node_id.reasoner_name' or 'node_id.skill_name'
            input_data: Input data for the reasoner/skill
            headers: Optional headers to include (will be merged with context headers)
            timeout: Optional execution timeout (uses config default if None)
            webhook: Optional webhook registration (dict or WebhookConfig)

        Returns:
            str: Execution ID for tracking the execution

        Raises:
            RuntimeError: If async execution is disabled or at capacity
            aiohttp.ClientError: For HTTP-related errors
        """
        if not self.async_config.enable_async_execution:
            raise RuntimeError("Async execution is disabled in configuration")

        try:
            final_headers = self._prepare_execution_headers(headers)

            # Get async execution manager and submit
            manager = await self._get_async_execution_manager()
            execution_id = await manager.submit_execution(
                target=target,
                input_data=input_data,
                headers=final_headers,
                timeout=timeout,
                webhook=webhook,
            )

            logger.debug(
                f"Submitted async execution {execution_id[:8]}... for target {target}"
            )
            return execution_id

        except Exception as e:
            logger.error(f"Failed to submit async execution for target {target}: {e}")

            # Fallback to sync execution if enabled
            if self.async_config.fallback_to_sync:
                logger.warn(f"Falling back to sync execution for target {target}")
                try:
                    await self.execute(target, input_data, headers)
                    # Create a synthetic execution ID for consistency
                    synthetic_id = self._generate_id("sync")
                    logger.debug(
                        f"Sync fallback completed with synthetic ID {synthetic_id[:8]}..."
                    )
                    return synthetic_id
                except Exception as sync_error:
                    logger.error(f"Sync fallback also failed: {sync_error}")
                    raise e
            else:
                raise
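
Usage note (a minimal sketch, not part of client.py): the fire-and-forget path offered by execute_async() together with the status and result helpers defined further below. It assumes enable_async_execution is true in the client's AsyncConfig; the target and input are hypothetical.

import asyncio
from agentfield.client import AgentFieldClient

async def main():
    client = AgentFieldClient()
    # Submit without blocking; an execution_id comes back immediately
    execution_id = await client.execute_async("my_node.classify", {"text": "hi"})
    print(await client.poll_execution_status(execution_id))
    # Block until the execution reaches a terminal state (or times out)
    result = await client.wait_for_execution_result(execution_id, timeout=60.0)
    print(result)
    await client.aclose()

asyncio.run(main())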

    async def poll_execution_status(
        self, execution_id: str
    ) -> Optional[Dict[str, Any]]:
        """
        Poll single execution status with connection reuse.

        Args:
            execution_id: Execution ID to poll

        Returns:
            Optional[Dict]: Execution status dictionary or None if not found

        Raises:
            RuntimeError: If async execution is disabled
            aiohttp.ClientError: For HTTP-related errors
        """
        if not self.async_config.enable_async_execution:
            raise RuntimeError("Async execution is disabled in configuration")

        try:
            manager = await self._get_async_execution_manager()
            status = await manager.get_execution_status(execution_id)

            if status:
                logger.debug(
                    f"Polled status for execution {execution_id[:8]}...: {status.get('status')}"
                )
            else:
                logger.debug(f"Execution {execution_id[:8]}... not found")

            return status

        except Exception as e:
            logger.error(
                f"Failed to poll execution status for {execution_id[:8]}...: {e}"
            )
            raise

    async def batch_check_statuses(
        self, execution_ids: List[str]
    ) -> Dict[str, Optional[Dict[str, Any]]]:
        """
        Check multiple execution statuses efficiently.

        Args:
            execution_ids: List of execution IDs to check

        Returns:
            Dict[str, Optional[Dict]]: Mapping of execution_id to status dict

        Raises:
            RuntimeError: If async execution is disabled
            ValueError: If execution_ids list is empty
        """
        if not self.async_config.enable_async_execution:
            raise RuntimeError("Async execution is disabled in configuration")

        if not execution_ids:
            raise ValueError("execution_ids list cannot be empty")

        try:
            manager = await self._get_async_execution_manager()
            results = {}

            # Use batch processing if enabled and list is large enough
            if (
                self.async_config.enable_batch_polling and len(execution_ids) >= 2
            ):  # Use batch for 2+ executions
                # Process in batches
                batch_size = self.async_config.batch_size
                for i in range(0, len(execution_ids), batch_size):
                    batch_ids = execution_ids[i : i + batch_size]

                    # Get statuses for this batch
                    for exec_id in batch_ids:
                        status = await manager.get_execution_status(exec_id)
                        results[exec_id] = status

                    logger.debug(f"Batch checked {len(batch_ids)} execution statuses")
            else:
                # Process individually
                for exec_id in execution_ids:
                    status = await manager.get_execution_status(exec_id)
                    results[exec_id] = status

                logger.debug(
                    f"Individually checked {len(execution_ids)} execution statuses"
                )

            return results

        except Exception as e:
            logger.error(f"Failed to batch check execution statuses: {e}")
            raise
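
Usage note (illustrative sketch, not part of client.py): checking several in-flight executions at once with batch_check_statuses(); the target name and inputs are placeholders and async execution is assumed to be enabled.

import asyncio
from agentfield.client import AgentFieldClient

async def main():
    client = AgentFieldClient()
    ids = [await client.execute_async("my_node.echo", {"n": i}) for i in range(3)]
    statuses = await client.batch_check_statuses(ids)
    for exec_id, status in statuses.items():
        print(exec_id[:8], (status or {}).get("status"))
    await client.aclose()

asyncio.run(main())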

    async def wait_for_execution_result(
        self, execution_id: str, timeout: Optional[float] = None
    ) -> Any:
        """
        Wait for execution completion with polling.

        Args:
            execution_id: Execution ID to wait for
            timeout: Optional timeout override (uses config default if None)

        Returns:
            Any: Execution result

        Raises:
            RuntimeError: If async execution is disabled or execution fails
            TimeoutError: If execution times out
            KeyError: If execution_id is not found
        """
        if not self.async_config.enable_async_execution:
            raise RuntimeError("Async execution is disabled in configuration")

        try:
            manager = await self._get_async_execution_manager()
            result = await manager.wait_for_result(execution_id, timeout)

            logger.debug(f"Execution {execution_id[:8]}... completed successfully")
            return result

        except Exception as e:
            logger.error(
                f"Failed to wait for execution result {execution_id[:8]}...: {e}"
            )
            raise

    async def cancel_async_execution(
        self, execution_id: str, reason: Optional[str] = None
    ) -> bool:
        """
        Cancel an active async execution.

        Args:
            execution_id: Execution ID to cancel
            reason: Optional cancellation reason

        Returns:
            bool: True if execution was cancelled, False if not found or already terminal

        Raises:
            RuntimeError: If async execution is disabled
        """
        if not self.async_config.enable_async_execution:
            raise RuntimeError("Async execution is disabled in configuration")

        try:
            manager = await self._get_async_execution_manager()
            cancelled = await manager.cancel_execution(execution_id, reason)

            if cancelled:
                logger.debug(
                    f"Cancelled execution {execution_id[:8]}... - {reason or 'No reason provided'}"
                )
            else:
                logger.debug(
                    f"Could not cancel execution {execution_id[:8]}... (not found or already terminal)"
                )

            return cancelled

        except Exception as e:
            logger.error(f"Failed to cancel execution {execution_id[:8]}...: {e}")
            raise

    async def list_async_executions(
        self, status_filter: Optional[str] = None, limit: Optional[int] = None
    ) -> List[Dict[str, Any]]:
        """
        List async executions with optional filtering.

        Args:
            status_filter: Optional status to filter by ('pending', 'queued', 'running', 'succeeded', 'failed', etc.)
            limit: Optional limit on number of results

        Returns:
            List[Dict]: List of execution status dictionaries

        Raises:
            RuntimeError: If async execution is disabled
        """
        if not self.async_config.enable_async_execution:
            raise RuntimeError("Async execution is disabled in configuration")

        try:
            manager = await self._get_async_execution_manager()

            # Convert string status to ExecutionStatus enum if provided
            status_enum = None
            if status_filter:
                try:
                    status_enum = ExecutionStatus(status_filter.lower())
                except ValueError:
                    logger.warning(f"Invalid status filter: {status_filter}")
                    return []

            executions = await manager.list_executions(status_enum, limit)
            logger.debug(f"Listed {len(executions)} async executions")

            return executions

        except Exception as e:
            logger.error(f"Failed to list async executions: {e}")
            raise

    async def get_async_execution_metrics(self) -> Dict[str, Any]:
        """
        Get comprehensive metrics for async execution manager.

        Returns:
            Dict[str, Any]: Metrics dictionary with execution statistics

        Raises:
            RuntimeError: If async execution is disabled
        """
        if not self.async_config.enable_async_execution:
            raise RuntimeError("Async execution is disabled in configuration")

        try:
            if self._async_execution_manager is None:
                return {
                    "manager_started": False,
                    "message": "Async execution manager not yet initialized",
                }

            metrics = self._async_execution_manager.get_metrics()
            logger.debug("Retrieved async execution metrics")

            return metrics

        except Exception as e:
            logger.error(f"Failed to get async execution metrics: {e}")
            raise

    async def cleanup_async_executions(self) -> int:
        """
        Manually trigger cleanup of completed executions.

        Returns:
            int: Number of executions cleaned up

        Raises:
            RuntimeError: If async execution is disabled
        """
        if not self.async_config.enable_async_execution:
            raise RuntimeError("Async execution is disabled in configuration")

        try:
            if self._async_execution_manager is None:
                return 0

            cleanup_count = (
                await self._async_execution_manager.cleanup_completed_executions()
            )
            logger.debug(f"Cleaned up {cleanup_count} completed async executions")

            return cleanup_count

        except Exception as e:
            logger.error(f"Failed to cleanup async executions: {e}")
            raise

    async def close_async_execution_manager(self) -> None:
        """
        Close the async execution manager and cleanup resources.

        This should be called when the AgentFieldClient is no longer needed
        to ensure proper cleanup of background tasks and connections.
        """
        if self._async_execution_manager is not None:
            try:
                await self._async_execution_manager.stop()
                self._async_execution_manager = None
                logger.debug("Async execution manager closed successfully")
            except Exception as e:
                logger.error(f"Error closing async execution manager: {e}")
                raise
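
Usage note (a hedged end-to-end sketch tying the client lifecycle together, not shipped in the wheel): the node ID, reasoner list, and registration base_url below are assumptions for illustration only.

import asyncio
from agentfield.client import AgentFieldClient

async def main():
    client = AgentFieldClient(base_url="http://localhost:8080", api_key="dev-key")
    ok, payload = await client.register_agent(
        node_id="demo_node",
        reasoners=[{"name": "summarize"}],  # hypothetical reasoner definition
        skills=[],
        base_url="http://localhost:9000",  # hypothetical agent callback URL
    )
    if ok:
        outcome = await client.execute("demo_node.summarize", {"text": "hello"})
        print(outcome["status"])
    await client.aclose()  # releases the shared httpx client and execution manager

asyncio.run(main())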