agent0_sdk-1.4.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent0_sdk/__init__.py +57 -0
- agent0_sdk/core/agent.py +1187 -0
- agent0_sdk/core/contracts.py +547 -0
- agent0_sdk/core/endpoint_crawler.py +330 -0
- agent0_sdk/core/feedback_manager.py +1052 -0
- agent0_sdk/core/indexer.py +1837 -0
- agent0_sdk/core/ipfs_client.py +357 -0
- agent0_sdk/core/models.py +303 -0
- agent0_sdk/core/oasf_validator.py +98 -0
- agent0_sdk/core/sdk.py +1005 -0
- agent0_sdk/core/subgraph_client.py +853 -0
- agent0_sdk/core/transaction_handle.py +71 -0
- agent0_sdk/core/value_encoding.py +91 -0
- agent0_sdk/core/web3_client.py +399 -0
- agent0_sdk/taxonomies/all_domains.json +1565 -0
- agent0_sdk/taxonomies/all_skills.json +1030 -0
- agent0_sdk-1.4.0.dist-info/METADATA +403 -0
- agent0_sdk-1.4.0.dist-info/RECORD +21 -0
- agent0_sdk-1.4.0.dist-info/WHEEL +5 -0
- agent0_sdk-1.4.0.dist-info/licenses/LICENSE +22 -0
- agent0_sdk-1.4.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,1837 @@
"""
Agent indexer for discovery and search functionality.

ARCHITECTURAL PURPOSE:
======================

The indexer serves as the unified entry point for all discovery and search operations
(agents AND feedback), not merely a thin wrapper around SubgraphClient. While currently
it delegates most queries to the subgraph, it is designed to be the foundation for:

1. SEMANTIC/VECTOR SEARCH: Future integration with embeddings and vector databases
   for semantic search across agent descriptions, feedback text, and capabilities.

2. HYBRID SEARCH: Combining subgraph queries (structured data) with vector similarity
   (semantic understanding) for richer discovery experiences.

3. LOCAL INDEXING: Optional local caching and indexing for offline-capable applications
   or performance optimization.

4. SEARCH OPTIMIZATION: Advanced filtering, ranking, and relevance scoring that goes
   beyond simple subgraph queries.

5. MULTI-SOURCE AGGREGATION: Combining data from subgraph, blockchain direct queries,
   and IPFS to provide complete agent/feedback information.
"""

from __future__ import annotations

import asyncio
import json
import logging
import time
import aiohttp
from typing import Any, Dict, List, Optional, Union
from datetime import datetime

from .models import (
    AgentId, ChainId, Address, URI, Timestamp,
    AgentSummary, Feedback, SearchParams, SearchFeedbackParams
)
from .web3_client import Web3Client

logger = logging.getLogger(__name__)


class AgentIndexer:
    """Indexer for agent discovery and search."""

    def __init__(
        self,
        web3_client: Web3Client,
        store: Optional[Any] = None,
        embeddings: Optional[Any] = None,
        subgraph_client: Optional[Any] = None,
        identity_registry: Optional[Any] = None,
        subgraph_url_overrides: Optional[Dict[int, str]] = None,
    ):
        """Initialize indexer with optional subgraph URL overrides for multiple chains."""
        self.web3_client = web3_client
        self.store = store or self._create_default_store()
        self.embeddings = embeddings or self._create_default_embeddings()
        self.subgraph_client = subgraph_client
        self.identity_registry = identity_registry
        self.subgraph_url_overrides = subgraph_url_overrides or {}
        self._agent_cache = {}  # Cache for agent data
        self._cache_timestamp = 0
        self._cache_ttl = 7 * 24 * 60 * 60  # 1 week cache TTL (604800 seconds)
        self._http_cache = {}  # Cache for HTTP content
        self._http_cache_ttl = 60 * 60  # 1 hour cache TTL for HTTP content

        # Cache for subgraph clients (one per chain)
        self._subgraph_client_cache: Dict[int, Any] = {}

        # If default subgraph_client provided, cache it for current chain
        if self.subgraph_client:
            self._subgraph_client_cache[self.web3_client.chain_id] = self.subgraph_client
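
    # Illustrative sketch (editor's addition, not part of the released file): wiring
    # the indexer with a per-chain subgraph override. The URL and chain id below are
    # placeholders, not values shipped with the SDK.
    #
    #   indexer = AgentIndexer(
    #       web3_client,
    #       subgraph_url_overrides={1: "https://example.com/subgraphs/agent0-mainnet"},
    #   )
    #   # Agent data is then cached for a week (604800 s) and HTTP bodies for an hour.
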
    def _create_default_store(self) -> Dict[str, Any]:
        """Create default in-memory store."""
        return {
            "agents": {},
            "feedback": {},
            "embeddings": {},
        }

    def _create_default_embeddings(self):
        """Create default embeddings model."""
        try:
            from sentence_transformers import SentenceTransformer  # type: ignore[import-not-found]
            return SentenceTransformer('all-MiniLM-L6-v2')
        except ImportError:
            # Return None if sentence-transformers is not available
            return None

    async def _fetch_http_content(self, url: str) -> Optional[Dict[str, Any]]:
        """Fetch content from HTTP/HTTPS URL with caching."""
        # Check cache first
        current_time = time.time()
        if url in self._http_cache:
            cached_data, timestamp = self._http_cache[url]
            if current_time - timestamp < self._http_cache_ttl:
                return cached_data

        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(url, timeout=aiohttp.ClientTimeout(total=10)) as response:
                    if response.status == 200:
                        content = await response.json()
                        # Cache the result
                        self._http_cache[url] = (content, current_time)
                        return content
                    else:
                        logger.warning(f"Failed to fetch {url}: HTTP {response.status}")
                        return None
        except Exception as e:
            logger.warning(f"Error fetching HTTPS content from {url}: {e}")
            return None
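
    # Editor's note on the cache layout above (hypothetical values): each entry is a
    # (parsed_json, fetch_time) tuple, served only while
    # time.time() - fetch_time < self._http_cache_ttl (3600 s).
    #
    #   self._http_cache["https://example.com/agent.json"] = ({"name": "demo"}, 1700000000.0)
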
    def _detect_uri_type(self, uri: str) -> str:
        """Detect URI type (ipfs, https, http, unknown)."""
        if uri.startswith("ipfs://"):
            return "ipfs"
        elif uri.startswith("https://"):
            return "https"
        elif uri.startswith("http://"):
            return "http"
        elif self._is_ipfs_cid(uri):
            return "ipfs"
        else:
            return "unknown"

    def _is_ipfs_cid(self, uri: str) -> bool:
        """Check if string is an IPFS CID (without ipfs:// prefix)."""
        # Basic IPFS CID patterns:
        #   Qm...  (CIDv0, 46 characters)
        #   baf... (CIDv1, e.g. bafy/bafk/bafg/bafh/bafq/bafr/bafs/baft/bafu/bafv/bafw/bafx/bafz)

        if not uri:
            return False

        # Check for CIDv0 (Qm...)
        if uri.startswith("Qm") and len(uri) == 46:
            return True

        # Check for CIDv1 (baf...)
        # CIDv1 has variable length but typically 50+ characters
        # We'll be more lenient for shorter CIDs that start with baf
        if uri.startswith("baf") and len(uri) >= 8:
            return True

        return False
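
    # Editor's illustration of how the heuristics above classify inputs
    # (synthetic CIDs, not package data):
    #
    #   _is_ipfs_cid("Qm" + "a" * 44)    # True  - "Qm" prefix and exactly 46 chars
    #   _is_ipfs_cid("bafybeihdwdcef")   # True  - "baf" prefix, length >= 8
    #   _is_ipfs_cid("Qmshort")          # False - wrong length for CIDv0
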
    def _is_ipfs_gateway_url(self, url: str) -> bool:
        """Check if URL is an IPFS gateway URL."""
        ipfs_gateways = [
            "ipfs.io",
            "gateway.pinata.cloud",
            "cloudflare-ipfs.com",
            "dweb.link",
            "ipfs.fleek.co"
        ]
        return any(gateway in url for gateway in ipfs_gateways)

    def _convert_gateway_to_ipfs(self, url: str) -> Optional[str]:
        """Convert IPFS gateway URL to ipfs:// format."""
        if "/ipfs/" in url:
            # Extract hash from gateway URL
            parts = url.split("/ipfs/")
            if len(parts) == 2:
                hash_part = parts[1].split("/")[0]  # Remove any path after hash
                return f"ipfs://{hash_part}"
        return None
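
    # Editor's illustration (hypothetical URL): the gateway form
    #   https://ipfs.io/ipfs/bafybeihdwdcef/agent.json
    # converts to
    #   ipfs://bafybeihdwdcef
    # i.e. the first path segment after "/ipfs/" is kept and any trailing path is dropped.
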
    async def _fetch_registration_file(self, uri: str) -> Optional[Dict[str, Any]]:
        """Fetch registration file from IPFS or HTTPS."""
        uri_type = self._detect_uri_type(uri)

        if uri_type == "ipfs":
            # Normalize bare CID to ipfs:// format
            if not uri.startswith("ipfs://"):
                uri = f"ipfs://{uri}"

            # Use existing IPFS client (if available)
            # For now, return None as IPFS fetching is handled by subgraph
            return None
        elif uri_type in ["https", "http"]:
            # Check if it's an IPFS gateway URL
            if self._is_ipfs_gateway_url(uri):
                ipfs_uri = self._convert_gateway_to_ipfs(uri)
                if ipfs_uri:
                    # Try to fetch as IPFS first
                    return await self._fetch_registration_file(ipfs_uri)

            # Fetch directly from HTTPS
            return await self._fetch_http_content(uri)
        else:
            logger.warning(f"Unsupported URI type: {uri}")
            return None

    async def _fetch_feedback_file(self, uri: str) -> Optional[Dict[str, Any]]:
        """Fetch feedback file from IPFS or HTTPS."""
        uri_type = self._detect_uri_type(uri)

        if uri_type == "ipfs":
            # Normalize bare CID to ipfs:// format
            if not uri.startswith("ipfs://"):
                uri = f"ipfs://{uri}"

            # Use existing IPFS client (if available)
            # For now, return None as IPFS fetching is handled by subgraph
            return None
        elif uri_type in ["https", "http"]:
            # Check if it's an IPFS gateway URL
            if self._is_ipfs_gateway_url(uri):
                ipfs_uri = self._convert_gateway_to_ipfs(uri)
                if ipfs_uri:
                    # Try to fetch as IPFS first
                    return await self._fetch_feedback_file(ipfs_uri)

            # Fetch directly from HTTPS
            return await self._fetch_http_content(uri)
        else:
            logger.warning(f"Unsupported URI type: {uri}")
            return None

    async def refresh_agent(self, agent_id: AgentId, deep: bool = False) -> AgentSummary:
        """Refresh index for a single agent."""
        # Parse agent ID
        if ":" in agent_id:
            chain_id, token_id = agent_id.split(":", 1)
        else:
            chain_id = self.web3_client.chain_id
            token_id = agent_id

        # Get basic agent data from contract
        try:
            if self.identity_registry:
                agent_uri = self.web3_client.call_contract(
                    self.identity_registry,
                    "tokenURI",  # ERC-721 standard function name, but represents agentURI
                    int(token_id)
                )
            else:
                raise ValueError("Identity registry not available")
        except Exception as e:
            raise ValueError(f"Failed to get agent data: {e}")

        # Load registration file
        registration_data = await self._load_registration_data(agent_uri)

        # Create agent summary
        summary = self._create_agent_summary(
            chain_id=int(chain_id),
            agent_id=agent_id,
            registration_data=registration_data
        )

        # Store in index
        self.store["agents"][agent_id] = summary

        # Deep refresh if requested
        if deep:
            await self._deep_refresh_agent(summary)

        return summary

    async def refresh_agents(
        self,
        agent_ids: Optional[List[AgentId]] = None,
        concurrency: int = 8,
    ) -> List[AgentSummary]:
        """Refresh index for multiple agents."""
        if agent_ids is None:
            # Get all known agents (this would need to be implemented)
            agent_ids = list(self.store["agents"].keys())

        # Use semaphore to limit concurrency
        semaphore = asyncio.Semaphore(concurrency)

        async def refresh_single(agent_id: AgentId) -> AgentSummary:
            async with semaphore:
                return await self.refresh_agent(agent_id)

        # Execute all refreshes concurrently
        tasks = [refresh_single(agent_id) for agent_id in agent_ids]
        results = await asyncio.gather(*tasks, return_exceptions=True)

        # Filter out exceptions
        summaries = []
        for result in results:
            if isinstance(result, Exception):
                logger.warning(f"Error refreshing agent: {result}")
            else:
                summaries.append(result)

        return summaries
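
    # Editor's sketch of the bounded-concurrency pattern used above, reduced to its
    # essentials (generic names, not SDK API):
    #
    #   sem = asyncio.Semaphore(8)
    #   async def guarded(item):
    #       async with sem:                 # at most 8 refreshes in flight
    #           return await work(item)
    #   results = await asyncio.gather(*(guarded(i) for i in items), return_exceptions=True)
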
    async def _load_registration_data(self, uri: str) -> Dict[str, Any]:
        """Load registration data from URI."""
        registration_file = await self._fetch_registration_file(uri)
        if registration_file is None:
            raise ValueError(f"Failed to load registration data from: {uri}")
        return registration_file

    def _create_agent_summary(
        self,
        chain_id: int,
        agent_id: AgentId,
        registration_data: Dict[str, Any]
    ) -> AgentSummary:
        """Create agent summary from registration data."""
        # Extract endpoints
        endpoints = registration_data.get("endpoints", [])
        mcp = any(ep.get("name") == "MCP" for ep in endpoints)
        a2a = any(ep.get("name") == "A2A" for ep in endpoints)

        ens = None
        did = None
        for ep in endpoints:
            if ep.get("name") == "ENS":
                ens = ep.get("endpoint")
            elif ep.get("name") == "DID":
                did = ep.get("endpoint")

        # Extract capabilities (would need MCP/A2A crawling)
        a2a_skills = []
        mcp_tools = []
        mcp_prompts = []
        mcp_resources = []

        return AgentSummary(
            chainId=chain_id,
            agentId=agent_id,
            name=registration_data.get("name", ""),
            image=registration_data.get("image"),
            description=registration_data.get("description", ""),
            owners=[],  # Would be populated from contract
            operators=[],  # Would be populated from contract
            mcp=mcp,
            a2a=a2a,
            ens=ens,
            did=did,
            walletAddress=registration_data.get("walletAddress"),
            supportedTrusts=registration_data.get("supportedTrust", []),
            a2aSkills=a2a_skills,
            mcpTools=mcp_tools,
            mcpPrompts=mcp_prompts,
            mcpResources=mcp_resources,
            active=registration_data.get("active", True),
            extras={}
        )

    async def _deep_refresh_agent(self, summary: AgentSummary):
        """Perform deep refresh of agent capabilities."""
        # This would crawl MCP/A2A endpoints to extract capabilities
        # For now, it's a placeholder
        pass

    def get_agent(self, agent_id: AgentId) -> AgentSummary:
        """Get agent summary from index."""
        # Parse chainId from agentId
        chain_id, token_id = self._parse_agent_id(agent_id)

        # Get subgraph client for the chain
        subgraph_client = None
        full_agent_id = agent_id

        if chain_id is not None:
            subgraph_client = self._get_subgraph_client_for_chain(chain_id)
        else:
            # No chainId in agentId, use SDK's default
            # Construct full agentId format for subgraph query
            default_chain_id = self.web3_client.chain_id
            full_agent_id = f"{default_chain_id}:{token_id}"
            subgraph_client = self.subgraph_client

        # Use subgraph if available (preferred)
        if subgraph_client:
            return self._get_agent_from_subgraph(full_agent_id, subgraph_client)

        # Fallback to local cache
        if agent_id not in self.store["agents"]:
            raise ValueError(f"Agent {agent_id} not found in index")
        return self.store["agents"][agent_id]
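
    # Editor's note: agent ids are "<chainId>:<tokenId>" strings, so "1:42" resolves
    # to token 42 on chain 1, while a bare "42" falls back to the connected chain and
    # is re-prefixed before the subgraph lookup.
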
    def _get_agent_from_subgraph(self, agent_id: AgentId, subgraph_client: Optional[Any] = None) -> AgentSummary:
        """Get agent summary from subgraph."""
        # Use provided client or default
        client = subgraph_client or self.subgraph_client
        if not client:
            raise ValueError("No subgraph client available")

        try:
            agent_data = client.get_agent_by_id(agent_id)

            if agent_data is None:
                raise ValueError(f"Agent {agent_id} not found in subgraph")

            reg_file = agent_data.get('registrationFile') or {}
            if not isinstance(reg_file, dict):
                reg_file = {}

            return AgentSummary(
                chainId=int(agent_data.get('chainId', 0)),
                agentId=agent_data.get('id', agent_id),
                name=reg_file.get('name', f"Agent {agent_id}"),
                image=reg_file.get('image'),
                description=reg_file.get('description', ''),
                owners=[agent_data.get('owner', '')],
                operators=agent_data.get('operators', []),
                mcp=reg_file.get('mcpEndpoint') is not None,
                a2a=reg_file.get('a2aEndpoint') is not None,
                ens=reg_file.get('ens'),
                did=reg_file.get('did'),
                walletAddress=reg_file.get('agentWallet'),
                supportedTrusts=reg_file.get('supportedTrusts', []),
                a2aSkills=reg_file.get('a2aSkills', []),
                mcpTools=reg_file.get('mcpTools', []),
                mcpPrompts=reg_file.get('mcpPrompts', []),
                mcpResources=reg_file.get('mcpResources', []),
                active=reg_file.get('active', True),
                x402support=reg_file.get('x402Support', reg_file.get('x402support', False)),
                extras={}
            )

        except Exception as e:
            raise ValueError(f"Failed to get agent from subgraph: {e}")

    def search_agents(
        self,
        params: SearchParams,
        sort: List[str],
        page_size: int,
        cursor: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Search for agents by querying the subgraph or blockchain."""
        # Handle "all" chains shorthand
        if params.chains == "all":
            params.chains = self._get_all_configured_chains()
            logger.info(f"Expanding 'all' to configured chains: {params.chains}")

        # If chains are explicitly specified (even a single chain), use multi-chain path
        # This ensures the correct subgraph client is used for the requested chain(s)
        if params.chains and len(params.chains) > 0:
            # Validate chains are configured
            available_chains = set(self._get_all_configured_chains())
            requested_chains = set(params.chains)
            invalid_chains = requested_chains - available_chains

            if invalid_chains:
                logger.warning(
                    f"Requested chains not configured: {invalid_chains}. "
                    f"Available chains: {available_chains}"
                )
                # Filter to valid chains only
                valid_chains = list(requested_chains & available_chains)
                if not valid_chains:
                    return {
                        "items": [],
                        "nextCursor": None,
                        "meta": {
                            "chains": list(requested_chains),
                            "successfulChains": [],
                            "failedChains": list(requested_chains),
                            "error": f"No valid chains configured. Available: {list(available_chains)}"
                        }
                    }
                params.chains = valid_chains

            return asyncio.run(
                self._search_agents_across_chains(params, sort, page_size, cursor)
            )

        # Use subgraph if available (preferred)
        if self.subgraph_client:
            return self._search_agents_via_subgraph(params, sort, page_size, cursor)

        # Fallback to blockchain queries
        return self._search_agents_via_blockchain(params, sort, page_size, cursor)
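
    # Editor's sketch of a typical call (parameter names follow the code above; the
    # construction shown and all values are placeholders, not documented SDK usage):
    #
    #   params = SearchParams(chains=[1, 10], name="translator", mcp=True, active=True)
    #   page = indexer.search_agents(params, sort=["createdAt:desc"], page_size=20)
    #   page["items"], page["nextCursor"], page.get("meta")
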
    async def _search_agents_across_chains(
        self,
        params: SearchParams,
        sort: List[str],
        page_size: int,
        cursor: Optional[str] = None,
        timeout: float = 30.0,
    ) -> Dict[str, Any]:
        """
        Search agents across multiple chains in parallel.

        This method is called when params.chains contains 2+ chain IDs.
        It executes one subgraph query per chain, all in parallel using asyncio.

        Args:
            params: Search parameters
            sort: Sort specification
            page_size: Number of results per page
            cursor: Pagination cursor
            timeout: Maximum time in seconds for all chain queries (default: 30.0)

        Returns:
            {
                "items": [agent_dict, ...],
                "nextCursor": str or None,
                "meta": {
                    "chains": [chainId, ...],
                    "successfulChains": [chainId, ...],
                    "failedChains": [chainId, ...],
                    "totalResults": int,
                    "timing": {"totalMs": int}
                }
            }
        """
        start_time = time.time()

        # Step 1: Determine which chains to query
        chains_to_query = params.chains if params.chains else self._get_all_configured_chains()

        if not chains_to_query or len(chains_to_query) == 0:
            logger.warning("No chains specified or configured for multi-chain query")
            return {"items": [], "nextCursor": None, "meta": {"chains": [], "successfulChains": [], "failedChains": []}}

        # Step 2: Parse pagination cursor (if any)
        chain_cursors = self._parse_multi_chain_cursor(cursor)
        global_offset = chain_cursors.get("_global_offset", 0)

        # Step 3: Define async function for querying a single chain
        async def query_single_chain(chain_id: int) -> Dict[str, Any]:
            """Query one chain and return its results with metadata."""
            try:
                # Get subgraph client for this chain
                subgraph_client = self._get_subgraph_client_for_chain(chain_id)

                if subgraph_client is None:
                    logger.warning(f"No subgraph client available for chain {chain_id}")
                    return {
                        "chainId": chain_id,
                        "status": "unavailable",
                        "agents": [],
                        "error": f"No subgraph configured for chain {chain_id}"
                    }

                # Build WHERE clause for this chain's query
                # (reuse existing logic from _search_agents_via_subgraph)
                where_clause = {}
                reg_file_where = {}

                if params.name is not None:
                    reg_file_where["name_contains"] = params.name
                if params.active is not None:
                    reg_file_where["active"] = params.active
                if params.x402support is not None:
                    reg_file_where["x402support"] = params.x402support
                if params.mcp is not None:
                    if params.mcp:
                        reg_file_where["mcpEndpoint_not"] = None
                    else:
                        reg_file_where["mcpEndpoint"] = None
                if params.a2a is not None:
                    if params.a2a:
                        reg_file_where["a2aEndpoint_not"] = None
                    else:
                        reg_file_where["a2aEndpoint"] = None
                if params.ens is not None:
                    reg_file_where["ens"] = params.ens
                if params.did is not None:
                    reg_file_where["did"] = params.did
                if params.walletAddress is not None:
                    reg_file_where["agentWallet"] = params.walletAddress

                if reg_file_where:
                    where_clause["registrationFile_"] = reg_file_where

                # Owner filtering
                if params.owners is not None and len(params.owners) > 0:
                    normalized_owners = [owner.lower() for owner in params.owners]
                    if len(normalized_owners) == 1:
                        where_clause["owner"] = normalized_owners[0]
                    else:
                        where_clause["owner_in"] = normalized_owners

                # Operator filtering
                if params.operators is not None and len(params.operators) > 0:
                    normalized_operators = [op.lower() for op in params.operators]
                    where_clause["operators_contains"] = normalized_operators

                # Get pagination offset for this chain (not used in multi-chain, fetch all)
                skip = 0

                # Execute subgraph query
                agents = subgraph_client.get_agents(
                    where=where_clause if where_clause else None,
                    first=page_size * 3,  # Fetch extra to allow for filtering/sorting
                    skip=skip,
                    order_by=self._extract_order_by(sort),
                    order_direction=self._extract_order_direction(sort)
                )

                logger.info(f"Chain {chain_id}: fetched {len(agents)} agents")

                return {
                    "chainId": chain_id,
                    "status": "success",
                    "agents": agents,
                    "count": len(agents),
                }

            except Exception as e:
                logger.error(f"Error querying chain {chain_id}: {e}", exc_info=True)
                return {
                    "chainId": chain_id,
                    "status": "error",
                    "agents": [],
                    "error": str(e)
                }

        # Step 4: Execute all chain queries in parallel with timeout
        logger.info(f"Querying {len(chains_to_query)} chains in parallel: {chains_to_query}")
        # Wrap the coroutines in Tasks so the timeout branch below can inspect
        # .done()/.result(); bare coroutines have neither method.
        tasks = [asyncio.ensure_future(query_single_chain(chain_id)) for chain_id in chains_to_query]

        try:
            chain_results = await asyncio.wait_for(
                asyncio.gather(*tasks),
                timeout=timeout
            )
        except asyncio.TimeoutError:
            logger.error(f"Multi-chain query timed out after {timeout}s")
            # Collect results from completed tasks
            chain_results = []
            for task in tasks:
                if task.done():
                    try:
                        chain_results.append(task.result())
                    except Exception as e:
                        logger.warning(f"Task failed: {e}")
                else:
                    # Task didn't complete - mark as timeout
                    chain_results.append({
                        "chainId": None,
                        "status": "timeout",
                        "agents": [],
                        "error": f"Query timed out after {timeout}s"
                    })

        # Step 5: Extract successful results and track failures
        all_agents = []
        successful_chains = []
        failed_chains = []

        for result in chain_results:
            chain_id = result["chainId"]

            if result["status"] == "success":
                successful_chains.append(chain_id)
                all_agents.extend(result["agents"])
            else:
                failed_chains.append(chain_id)
                logger.warning(
                    f"Chain {chain_id} query failed: {result.get('error', 'Unknown error')}"
                )

        logger.info(f"Multi-chain query: {len(successful_chains)} successful, {len(failed_chains)} failed, {len(all_agents)} total agents")

        # If ALL chains failed, raise error
        if len(successful_chains) == 0:
            raise ConnectionError(
                f"All chains failed: {', '.join(str(c) for c in failed_chains)}"
            )

        # Step 6: Apply cross-chain filtering (for fields not supported by subgraph WHERE clause)
        filtered_agents = self._apply_cross_chain_filters(all_agents, params)
        logger.info(f"After cross-chain filters: {len(filtered_agents)} agents")

        # Step 7: Deduplicate if requested
        deduplicated_agents = self._deduplicate_agents_cross_chain(filtered_agents, params)
        logger.info(f"After deduplication: {len(deduplicated_agents)} agents")

        # Step 8: Sort across chains
        sorted_agents = self._sort_agents_cross_chain(deduplicated_agents, sort)
        logger.info(f"After sorting: {len(sorted_agents)} agents")

        # Step 9: Apply pagination
        start_idx = global_offset
        paginated_agents = sorted_agents[start_idx:start_idx + page_size]

        # Step 10: Convert to result format (keep as dicts, SDK will convert to AgentSummary)
        results = []
        for agent_data in paginated_agents:
            reg_file = agent_data.get('registrationFile') or {}
            if not isinstance(reg_file, dict):
                reg_file = {}

            result_agent = {
                "agentId": agent_data.get('id'),
                "chainId": agent_data.get('chainId'),
                "name": reg_file.get('name', f"Agent {agent_data.get('agentId')}"),
                "description": reg_file.get('description', ''),
                "image": reg_file.get('image'),
                "owner": agent_data.get('owner'),
                "operators": agent_data.get('operators', []),
                "mcp": reg_file.get('mcpEndpoint') is not None,
                "a2a": reg_file.get('a2aEndpoint') is not None,
                "ens": reg_file.get('ens'),
                "did": reg_file.get('did'),
                "walletAddress": reg_file.get('agentWallet'),
                "supportedTrusts": reg_file.get('supportedTrusts', []),
                "a2aSkills": reg_file.get('a2aSkills', []),
                "mcpTools": reg_file.get('mcpTools', []),
                "mcpPrompts": reg_file.get('mcpPrompts', []),
                "mcpResources": reg_file.get('mcpResources', []),
                "active": reg_file.get('active', True),
                "x402support": reg_file.get('x402Support', reg_file.get('x402support', False)),
                "totalFeedback": agent_data.get('totalFeedback', 0),
                "lastActivity": agent_data.get('lastActivity'),
                "updatedAt": agent_data.get('updatedAt'),
                "extras": {}
            }

            # Add deployedOn if deduplication was used
            if 'deployedOn' in agent_data:
                result_agent['extras']['deployedOn'] = agent_data['deployedOn']

            results.append(result_agent)

        # Step 11: Calculate next cursor
        next_cursor = None
        if len(sorted_agents) > start_idx + page_size:
            # More results available
            next_cursor = self._create_multi_chain_cursor(
                global_offset=start_idx + page_size
            )

        # Step 12: Build response with metadata
        query_time = time.time() - start_time

        return {
            "items": results,
            "nextCursor": next_cursor,
            "meta": {
                "chains": chains_to_query,
                "successfulChains": successful_chains,
                "failedChains": failed_chains,
                "totalResults": len(sorted_agents),
                "pageResults": len(results),
                "timing": {
                    "totalMs": int(query_time * 1000),
                    "averagePerChainMs": int(query_time * 1000 / len(chains_to_query)) if chains_to_query else 0,
                }
            }
        }
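
    # Editor's note on multi-chain pagination: the cursor is a global offset over the
    # merged, sorted list. With page_size=20, the first page serves sorted_agents[0:20]
    # and the next cursor encodes _global_offset=20; the exact encoding lives in
    # _parse_multi_chain_cursor/_create_multi_chain_cursor, which are not shown in
    # this excerpt.
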
    def _search_agents_via_subgraph(
        self,
        params: SearchParams,
        sort: List[str],
        page_size: int,
        cursor: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Search for agents using the subgraph."""
        # Build subgraph query filters
        where_clause = {}
        reg_file_where = {}

        if params.name is not None:
            reg_file_where["name_contains"] = params.name
        if params.active is not None:
            reg_file_where["active"] = params.active
        if params.x402support is not None:
            reg_file_where["x402support"] = params.x402support
        if params.mcp is not None:
            if params.mcp:
                reg_file_where["mcpEndpoint_not"] = None
            else:
                reg_file_where["mcpEndpoint"] = None
        if params.a2a is not None:
            if params.a2a:
                reg_file_where["a2aEndpoint_not"] = None
            else:
                reg_file_where["a2aEndpoint"] = None
        if params.ens is not None:
            reg_file_where["ens"] = params.ens
        if params.did is not None:
            reg_file_where["did"] = params.did
        if params.walletAddress is not None:
            reg_file_where["agentWallet"] = params.walletAddress

        if reg_file_where:
            where_clause["registrationFile_"] = reg_file_where

        # Owner filtering
        if params.owners is not None and len(params.owners) > 0:
            # Normalize addresses to lowercase for case-insensitive matching
            normalized_owners = [owner.lower() for owner in params.owners]
            if len(normalized_owners) == 1:
                where_clause["owner"] = normalized_owners[0]
            else:
                where_clause["owner_in"] = normalized_owners

        # Operator filtering
        if params.operators is not None and len(params.operators) > 0:
            # Normalize addresses to lowercase for case-insensitive matching
            normalized_operators = [op.lower() for op in params.operators]
            # For operators (array field), use contains to check if any operator matches
            where_clause["operators_contains"] = normalized_operators

        # Calculate pagination
        skip = 0
        if cursor:
            try:
                skip = int(cursor)
            except ValueError:
                skip = 0

        # Determine sort
        order_by = "createdAt"
        order_direction = "desc"
        if sort and len(sort) > 0:
            sort_field = sort[0].split(":")
            if len(sort_field) >= 1:
                order_by = sort_field[0]
            if len(sort_field) >= 2:
                order_direction = sort_field[1]

        try:
            agents = self.subgraph_client.get_agents(
                where=where_clause if where_clause else None,
                first=page_size,
                skip=skip,
                order_by=order_by,
                order_direction=order_direction
            )

            results = []
            for agent in agents:
                reg_file = agent.get('registrationFile') or {}
                # Ensure reg_file is a dict
                if not isinstance(reg_file, dict):
                    reg_file = {}

                agent_data = {
                    "agentId": agent.get('id'),
                    "chainId": agent.get('chainId'),
                    "name": reg_file.get('name', f"Agent {agent.get('agentId')}"),
                    "description": reg_file.get('description', ''),
                    "image": reg_file.get('image'),
                    "owner": agent.get('owner'),
                    "operators": agent.get('operators', []),
                    "mcp": reg_file.get('mcpEndpoint') is not None,
                    "a2a": reg_file.get('a2aEndpoint') is not None,
                    "ens": reg_file.get('ens'),
                    "did": reg_file.get('did'),
                    "walletAddress": reg_file.get('agentWallet'),
                    "supportedTrusts": reg_file.get('supportedTrusts', []),
                    "a2aSkills": reg_file.get('a2aSkills', []),
                    "mcpTools": reg_file.get('mcpTools', []),
                    "mcpPrompts": reg_file.get('mcpPrompts', []),
                    "mcpResources": reg_file.get('mcpResources', []),
                    "active": reg_file.get('active', True),
                    "x402support": reg_file.get('x402Support', reg_file.get('x402support', False)),
                    "totalFeedback": agent.get('totalFeedback', 0),
                    "lastActivity": agent.get('lastActivity'),
                    "updatedAt": agent.get('updatedAt'),
                    "extras": {}
                }

                if params.chains is not None:
                    if agent_data["chainId"] not in params.chains:
                        continue
                if params.supportedTrust is not None:
                    if not any(trust in agent_data["supportedTrusts"] for trust in params.supportedTrust):
                        continue

                results.append(agent_data)

            next_cursor = str(skip + len(results)) if len(results) == page_size else None
            return {"items": results, "nextCursor": next_cursor}

        except Exception as e:
            logger.warning(f"Subgraph search failed: {e}")
            return {"items": [], "nextCursor": None}
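
    # Editor's illustration of the sort specification parsed above: each entry is
    # "field" or "field:direction", so ["createdAt:desc"] orders by createdAt
    # descending, and a bare ["name"] keeps the default direction ("desc").
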
    def _search_agents_via_blockchain(
        self,
        params: SearchParams,
        sort: List[str],
        page_size: int,
        cursor: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Search for agents by querying the blockchain (fallback)."""
        return {"items": [], "nextCursor": None}

    def _apply_filters(self, agents: List[Dict[str, Any]], params: SearchParams) -> List[Dict[str, Any]]:
        """Apply search filters to agents."""
        filtered = agents

        if params.chains is not None:
            filtered = [a for a in filtered if a.get("chainId") in params.chains]

        if params.name is not None:
            filtered = [a for a in filtered if params.name.lower() in a.get("name", "").lower()]

        if params.description is not None:
            # This would use semantic search with embeddings
            filtered = [a for a in filtered if params.description.lower() in a.get("description", "").lower()]

        if params.owners is not None:
            filtered = [a for a in filtered if any(owner in params.owners for owner in a.get("owners", []))]

        if params.operators is not None:
            filtered = [a for a in filtered if any(op in params.operators for op in a.get("operators", []))]

        if params.mcp is not None:
            filtered = [a for a in filtered if a.get("mcp") == params.mcp]

        if params.a2a is not None:
            filtered = [a for a in filtered if a.get("a2a") == params.a2a]

        if params.ens is not None:
            filtered = [a for a in filtered if a.get("ens") and params.ens.lower() in a.get("ens", "").lower()]

        if params.did is not None:
            filtered = [a for a in filtered if a.get("did") == params.did]

        if params.walletAddress is not None:
            filtered = [a for a in filtered if a.get("walletAddress") == params.walletAddress]

        if params.supportedTrust is not None:
            filtered = [a for a in filtered if any(trust in params.supportedTrust for trust in a.get("supportedTrusts", []))]

        if params.a2aSkills is not None:
            filtered = [a for a in filtered if any(skill in params.a2aSkills for skill in a.get("a2aSkills", []))]

        if params.mcpTools is not None:
            filtered = [a for a in filtered if any(tool in params.mcpTools for tool in a.get("mcpTools", []))]

        if params.mcpPrompts is not None:
            filtered = [a for a in filtered if any(prompt in params.mcpPrompts for prompt in a.get("mcpPrompts", []))]

        if params.mcpResources is not None:
            filtered = [a for a in filtered if any(resource in params.mcpResources for resource in a.get("mcpResources", []))]

        if params.active is not None:
            filtered = [a for a in filtered if a.get("active") == params.active]

        if params.x402support is not None:
            filtered = [a for a in filtered if a.get("x402support") == params.x402support]

        return filtered

    def _apply_sorting(self, agents: List[AgentSummary], sort: List[str]) -> List[AgentSummary]:
        """Apply sorting to agents."""
        def sort_key(agent):
            key_values = []
            for sort_field in sort:
                field, direction = sort_field.split(":", 1)
                if hasattr(agent, field):
                    value = getattr(agent, field)
                    # Note: "desc" is honored by negation, so it only applies to
                    # numeric fields; string fields always sort ascending here.
                    if direction == "desc":
                        value = -value if isinstance(value, (int, float)) else value
                    key_values.append(value)
            return key_values

        return sorted(agents, key=sort_key)
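
    # Editor's note: _apply_filters is AND across parameters and OR within a
    # list-valued parameter, e.g. params.mcpTools=["search", "fetch"] keeps any
    # agent whose mcpTools list intersects {"search", "fetch"}.
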
    def get_feedback(
        self,
        agentId: AgentId,
        clientAddress: Address,
        feedbackIndex: int,
    ) -> Feedback:
        """Get single feedback by agent ID, client address, and index."""
        # Use subgraph if available (preferred)
        if self.subgraph_client:
            return self._get_feedback_from_subgraph(agentId, clientAddress, feedbackIndex)

        # Fallback to local store (if populated in future)
        # For now, raise error if subgraph unavailable
        feedback_id = Feedback.create_id(agentId, clientAddress, feedbackIndex)
        if feedback_id not in self.store["feedback"]:
            raise ValueError(f"Feedback {feedback_id} not found (subgraph required)")
        return self.store["feedback"][feedback_id]

    def _get_feedback_from_subgraph(
        self,
        agentId: AgentId,
        clientAddress: Address,
        feedbackIndex: int,
    ) -> Feedback:
        """Get feedback from subgraph."""
        # Normalize addresses to lowercase for consistent storage
        normalized_client_address = self.web3_client.normalize_address(clientAddress)

        # Build feedback ID in format: chainId:agentId:clientAddress:feedbackIndex
        if ":" in agentId:
            feedback_id = f"{agentId}:{normalized_client_address}:{feedbackIndex}"
        else:
            chain_id = str(self.web3_client.chain_id)
            feedback_id = f"{chain_id}:{agentId}:{normalized_client_address}:{feedbackIndex}"

        try:
            feedback_data = self.subgraph_client.get_feedback_by_id(feedback_id)

            if feedback_data is None:
                raise ValueError(f"Feedback {feedback_id} not found in subgraph")

            return self._map_subgraph_feedback_to_model(feedback_data, agentId, clientAddress, feedbackIndex)

        except Exception as e:
            raise ValueError(f"Failed to get feedback from subgraph: {e}")
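
    # Editor's illustration of the feedback id format built above:
    #   "<chainId>:<agentId>:<clientAddress>:<feedbackIndex>"
    # e.g. agent "1:42", a lowercased client address 0xabc..., and index 3 yield
    # "1:42:0xabc...:3" as the subgraph lookup key.
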
1026
|
+
def _map_subgraph_feedback_to_model(
|
|
1027
|
+
self,
|
|
1028
|
+
feedback_data: Dict[str, Any],
|
|
1029
|
+
agentId: AgentId,
|
|
1030
|
+
clientAddress: Address,
|
|
1031
|
+
feedbackIndex: int,
|
|
1032
|
+
) -> Feedback:
|
|
1033
|
+
"""Map subgraph feedback data to Feedback model."""
|
|
1034
|
+
feedback_file = feedback_data.get('feedbackFile') or {}
|
|
1035
|
+
if not isinstance(feedback_file, dict):
|
|
1036
|
+
feedback_file = {}
|
|
1037
|
+
|
|
1038
|
+
# Map responses
|
|
1039
|
+
responses_data = feedback_data.get('responses', [])
|
|
1040
|
+
answers = []
|
|
1041
|
+
for resp in responses_data:
|
|
1042
|
+
answers.append({
|
|
1043
|
+
'responder': resp.get('responder'),
|
|
1044
|
+
'responseURI': resp.get('responseURI') or resp.get('responseUri'), # Handle both old and new field names
|
|
1045
|
+
'responseHash': resp.get('responseHash'),
|
|
1046
|
+
'createdAt': resp.get('createdAt')
|
|
1047
|
+
})
|
|
1048
|
+
|
|
1049
|
+
# Map tags - tags are now strings (not bytes32)
|
|
1050
|
+
tags = []
|
|
1051
|
+
tag1 = feedback_data.get('tag1') or feedback_file.get('tag1')
|
|
1052
|
+
tag2 = feedback_data.get('tag2') or feedback_file.get('tag2')
|
|
1053
|
+
|
|
1054
|
+
# Tags are now plain strings, but handle backward compatibility with hex bytes32
|
|
1055
|
+
if tag1:
|
|
1056
|
+
if isinstance(tag1, str) and not tag1.startswith("0x"):
|
|
1057
|
+
tags.append(tag1)
|
|
1058
|
+
elif isinstance(tag1, str) and tag1.startswith("0x"):
|
|
1059
|
+
# Try to convert from hex bytes32 (old format)
|
|
1060
|
+
try:
|
|
1061
|
+
hex_bytes = bytes.fromhex(tag1[2:])
|
|
1062
|
+
tag1_str = hex_bytes.rstrip(b'\x00').decode('utf-8', errors='ignore')
|
|
1063
|
+
if tag1_str:
|
|
1064
|
+
tags.append(tag1_str)
|
|
1065
|
+
except Exception:
|
|
1066
|
+
pass # Ignore invalid hex strings
|
|
1067
|
+
|
|
1068
|
+
if tag2:
|
|
1069
|
+
if isinstance(tag2, str) and not tag2.startswith("0x"):
|
|
1070
|
+
tags.append(tag2)
|
|
1071
|
+
elif isinstance(tag2, str) and tag2.startswith("0x"):
|
|
1072
|
+
# Try to convert from hex bytes32 (old format)
|
|
1073
|
+
try:
|
|
1074
|
+
hex_bytes = bytes.fromhex(tag2[2:])
|
|
1075
|
+
tag2_str = hex_bytes.rstrip(b'\x00').decode('utf-8', errors='ignore')
|
|
1076
|
+
if tag2_str:
|
|
1077
|
+
tags.append(tag2_str)
|
|
1078
|
+
except Exception:
|
|
1079
|
+
pass # Ignore invalid hex strings
|
|
1080
|
+
|
|
1081
|
+
return Feedback(
|
|
1082
|
+
id=Feedback.create_id(agentId, clientAddress, feedbackIndex),
|
|
1083
|
+
agentId=agentId,
|
|
1084
|
+
reviewer=self.web3_client.normalize_address(clientAddress),
|
|
1085
|
+
value=float(feedback_data.get("value")) if feedback_data.get("value") is not None else None,
|
|
1086
|
+
tags=tags,
|
|
1087
|
+
text=feedback_file.get('text'),
|
|
1088
|
+
capability=feedback_file.get('capability'),
|
|
1089
|
+
context=feedback_file.get('context'),
|
|
1090
|
+
proofOfPayment={
|
|
1091
|
+
'fromAddress': feedback_file.get('proofOfPaymentFromAddress'),
|
|
1092
|
+
'toAddress': feedback_file.get('proofOfPaymentToAddress'),
|
|
1093
|
+
'chainId': feedback_file.get('proofOfPaymentChainId'),
|
|
1094
|
+
'txHash': feedback_file.get('proofOfPaymentTxHash'),
|
|
1095
|
+
} if feedback_file.get('proofOfPaymentFromAddress') else None,
|
|
1096
|
+
fileURI=feedback_data.get('feedbackURI') or feedback_data.get('feedbackUri'), # Handle both old and new field names
|
|
1097
|
+
# Prefer on-chain endpoint; fall back to off-chain file endpoint if missing
|
|
1098
|
+
endpoint=feedback_data.get('endpoint') or feedback_file.get('endpoint'),
|
|
1099
|
+
createdAt=feedback_data.get('createdAt', int(time.time())),
|
|
1100
|
+
answers=answers,
|
|
1101
|
+
isRevoked=feedback_data.get('isRevoked', False),
|
|
1102
|
+
name=feedback_file.get('name'),
|
|
1103
|
+
skill=feedback_file.get('skill'),
|
|
1104
|
+
task=feedback_file.get('task'),
|
|
1105
|
+
)
|
|
1106
|
+
|
|
1107
|
+
def search_feedback(
|
|
1108
|
+
self,
|
|
1109
|
+
agentId: Optional[AgentId] = None,
|
|
1110
|
+
clientAddresses: Optional[List[Address]] = None,
|
|
1111
|
+
tags: Optional[List[str]] = None,
|
|
1112
|
+
capabilities: Optional[List[str]] = None,
|
|
1113
|
+
skills: Optional[List[str]] = None,
|
|
1114
|
+
tasks: Optional[List[str]] = None,
|
|
1115
|
+
names: Optional[List[str]] = None,
|
|
1116
|
+
minValue: Optional[float] = None,
|
|
1117
|
+
maxValue: Optional[float] = None,
|
|
1118
|
+
include_revoked: bool = False,
|
|
1119
|
+
first: int = 100,
|
|
1120
|
+
skip: int = 0,
|
|
1121
|
+
agents: Optional[List[AgentId]] = None,
|
|
1122
|
+
) -> List[Feedback]:
|
|
1123
|
+
"""Search feedback via subgraph.
|
|
1124
|
+
|
|
1125
|
+
Backwards compatible:
|
|
1126
|
+
- Previously required `agentId`; it is now optional.
|
|
1127
|
+
|
|
1128
|
+
New:
|
|
1129
|
+
- `agents` supports searching across multiple agents.
|
|
1130
|
+
- If neither `agentId` nor `agents` is provided, subgraph search can still run using
|
|
1131
|
+
other filters (e.g., reviewers / tags).
|
|
1132
|
+
"""
|
|
1133
|
+
|
|
1134
|
+
merged_agents: Optional[List[AgentId]] = None
|
|
1135
|
+
if agents:
|
|
1136
|
+
merged_agents = list(agents)
|
|
1137
|
+
if agentId:
|
|
1138
|
+
merged_agents = (merged_agents or []) + [agentId]
|
|
1139
|
+
|
|
1140
|
+
# Determine chain/subgraph client based on first specified agent (if any)
|
|
1141
|
+
chain_id = None
|
|
1142
|
+
if merged_agents and len(merged_agents) > 0:
|
|
1143
|
+
first_agent = merged_agents[0]
|
|
1144
|
+
chain_id, token_id = self._parse_agent_id(first_agent)
|
|
1145
|
+
|
|
1146
|
+
# Get subgraph client for the chain
|
|
1147
|
+
subgraph_client = None
|
|
1148
|
+
|
|
1149
|
+
if chain_id is not None:
|
|
1150
|
+
subgraph_client = self._get_subgraph_client_for_chain(chain_id)
|
|
1151
|
+
else:
|
|
1152
|
+
# If no explicit chainId, use SDK's default subgraph client (if configured).
|
|
1153
|
+
subgraph_client = self.subgraph_client
|
|
1154
|
+
|
|
1155
|
+
# If we have agent ids but they weren't chain-prefixed, prefix them with default chain id for the subgraph.
|
|
1156
|
+
if merged_agents and chain_id is None:
|
|
1157
|
+
default_chain_id = self.web3_client.chain_id
|
|
1158
|
+
normalized: List[AgentId] = []
|
|
1159
|
+
for aid in merged_agents:
|
|
1160
|
+
if isinstance(aid, str) and ":" in aid:
|
|
1161
|
+
normalized.append(aid)
|
|
1162
|
+
else:
|
|
1163
|
+
normalized.append(f"{default_chain_id}:{int(aid)}")
|
|
1164
|
+
merged_agents = normalized
|
|
1165
|
+
elif merged_agents and chain_id is not None:
|
|
1166
|
+
# Ensure all agent ids are chain-prefixed for the chosen chain
|
|
1167
|
+
normalized = []
|
|
1168
|
+
for aid in merged_agents:
|
|
1169
|
+
if isinstance(aid, str) and ":" in aid:
|
|
1170
|
+
normalized.append(aid)
|
|
1171
|
+
else:
|
|
1172
|
+
normalized.append(f"{chain_id}:{int(aid)}")
|
|
1173
|
+
merged_agents = normalized
|
|
1174
|
+
|
|
1175
|
+
# Use subgraph if available (preferred)
|
|
1176
|
+
if subgraph_client:
|
|
1177
|
+
return self._search_feedback_subgraph(
|
|
1178
|
+
agentId=None,
|
|
1179
|
+
agents=merged_agents,
|
|
1180
|
+
clientAddresses=clientAddresses,
|
|
1181
|
+
tags=tags,
|
|
1182
|
+
capabilities=capabilities,
|
|
1183
|
+
skills=skills,
|
|
1184
|
+
tasks=tasks,
|
|
1185
|
+
names=names,
|
|
1186
|
+
minValue=minValue,
|
|
1187
|
+
maxValue=maxValue,
|
|
1188
|
+
include_revoked=include_revoked,
|
|
1189
|
+
first=first,
|
|
1190
|
+
skip=skip,
|
|
1191
|
+
subgraph_client=subgraph_client,
|
|
1192
|
+
)
|
|
1193
|
+
|
|
1194
|
+
# Fallback not implemented (would require blockchain queries)
|
|
1195
|
+
# For now, return empty if subgraph unavailable
|
|
1196
|
+
return []
|
|
1197
|
+
|
|
1198
|
+
def _search_feedback_subgraph(
|
|
1199
|
+
self,
|
|
1200
|
+
agentId: Optional[AgentId],
|
|
1201
|
+
agents: Optional[List[AgentId]],
|
|
1202
|
+
clientAddresses: Optional[List[Address]],
|
|
1203
|
+
tags: Optional[List[str]],
|
|
1204
|
+
capabilities: Optional[List[str]],
|
|
1205
|
+
skills: Optional[List[str]],
|
|
1206
|
+
tasks: Optional[List[str]],
|
|
1207
|
+
names: Optional[List[str]],
|
|
1208
|
+
minValue: Optional[float],
|
|
1209
|
+
maxValue: Optional[float],
|
|
1210
|
+
include_revoked: bool,
|
|
1211
|
+
first: int,
|
|
1212
|
+
skip: int,
|
|
1213
|
+
subgraph_client: Optional[Any] = None,
|
|
1214
|
+
) -> List[Feedback]:
|
|
1215
|
+
"""Search feedback using subgraph."""
|
|
1216
|
+
# Use provided client or default
|
|
1217
|
+
client = subgraph_client or self.subgraph_client
|
|
1218
|
+
if not client:
|
|
1219
|
+
return []
|
|
1220
|
+
|
|
1221
|
+
merged_agents: Optional[List[AgentId]] = None
|
|
1222
|
+
if agents:
|
|
1223
|
+
merged_agents = list(agents)
|
|
1224
|
+
if agentId:
|
|
1225
|
+
merged_agents = (merged_agents or []) + [agentId]
|
|
1226
|
+
|
|
1227
|
+
# Create SearchFeedbackParams
|
|
1228
|
+
params = SearchFeedbackParams(
|
|
1229
|
+
agents=merged_agents,
|
|
1230
|
+
reviewers=clientAddresses,
|
|
1231
|
+
tags=tags,
|
|
1232
|
+
capabilities=capabilities,
|
|
1233
|
+
skills=skills,
|
|
1234
|
+
tasks=tasks,
|
|
1235
|
+
names=names,
|
|
1236
|
+
minValue=minValue,
|
|
1237
|
+
maxValue=maxValue,
|
|
1238
|
+
includeRevoked=include_revoked
|
|
1239
|
+
)
|
|
1240
|
+
|
|
1241
|
+
# Query subgraph
|
|
1242
|
+
feedbacks_data = client.search_feedback(
|
|
1243
|
+
params=params,
|
|
1244
|
+
first=first,
|
|
1245
|
+
skip=skip,
|
|
1246
|
+
order_by="createdAt",
|
|
1247
|
+
order_direction="desc"
|
|
1248
|
+
)
|
|
1249
|
+
|
|
1250
|
+
# Map to Feedback objects
|
|
1251
|
+
feedbacks = []
|
|
1252
|
+
for fb_data in feedbacks_data:
|
|
1253
|
+
# Parse agentId from feedback ID
|
|
1254
|
+
feedback_id = fb_data['id']
|
|
1255
|
+
parts = feedback_id.split(':')
|
|
1256
|
+
if len(parts) >= 2:
|
|
1257
|
+
agent_id_str = f"{parts[0]}:{parts[1]}"
|
|
1258
|
+
client_addr = parts[2] if len(parts) > 2 else ""
|
|
1259
|
+
feedback_idx = int(parts[3]) if len(parts) > 3 else 1
|
|
1260
|
+
else:
|
|
1261
|
+
agent_id_str = feedback_id
|
|
1262
|
+
client_addr = ""
|
|
1263
|
+
feedback_idx = 1
|
|
1264
|
+
|
|
1265
|
+
feedback = self._map_subgraph_feedback_to_model(
|
|
1266
|
+
fb_data, agent_id_str, client_addr, feedback_idx
|
|
1267
|
+
)
|
|
1268
|
+
feedbacks.append(feedback)
|
|
1269
|
+
|
|
1270
|
+
return feedbacks
|
|
1271
|
+
    def _hexBytes32ToTags(self, tag1: str, tag2: str) -> List[str]:
        """Convert hex bytes32 tags back to strings, or return plain strings as-is.

        The subgraph now stores tags as human-readable strings (not hex),
        so this method handles both formats for backwards compatibility.
        """
        tags = []
        empty_bytes32 = "0x" + "00" * 32

        for tag in (tag1, tag2):
            if not tag or tag == empty_bytes32:
                continue
            if not tag.startswith("0x"):
                # Already a plain string (from the subgraph); use it directly
                tags.append(tag)
            else:
                # Try to convert from hex bytes32 (on-chain format)
                try:
                    decoded = bytes.fromhex(tag[2:]).rstrip(b'\x00').decode('utf-8', errors='ignore')
                    if decoded:
                        tags.append(decoded)
                except Exception:
                    pass  # Ignore invalid hex strings

        return tags

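    # Illustrative round trip (a sketch; the tag value is made up):
    #   plain = "helpful"                               # kept as-is
    #   hexed = "0x" + b"helpful".hex().ljust(64, "0")  # zero-padded bytes32
    #   _hexBytes32ToTags(hexed, plain) -> ["helpful", "helpful"]
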
    def get_reputation_summary(
        self,
        agent_id: AgentId,
        group_by: List[str],
        reviewers: Optional[List[Address]] = None,
        since: Optional[Timestamp] = None,
        until: Optional[Timestamp] = None,
        sort: Optional[List[str]] = None,
        page_size: int = 100,
        cursor: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Get reputation summary for an agent.

        Not yet implemented: this will aggregate feedback data; for now it
        returns an empty result.
        """
        return {
            "groups": [],
            "nextCursor": None
        }

    def get_reputation_map(
        self,
        agents: List[Union[AgentSummary, AgentId]],
        filters: Dict[str, Any],
        sort: List[str],
        reviewers: Optional[List[Address]] = None,
    ) -> List[Dict[str, Any]]:
        """Get reputation map for multiple agents.

        Not yet implemented: this will calculate reputation metrics for each
        agent; for now it returns an empty result.
        """
        return []

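    # Usage sketch (hypothetical agent id; `indexer` is an AgentIndexer):
    #   indexer.get_reputation_summary("11155111:42", group_by=["tag"])
    #   -> {"groups": [], "nextCursor": None}   # stub behaviour for now
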
    def _get_agent_from_blockchain(self, token_id: int, sdk) -> Optional[Dict[str, Any]]:
        """Get agent data from the blockchain."""
        try:
            # Get agent URI from the contract (via the ERC-721 tokenURI
            # function, which represents the agentURI here)
            agent_uri = self.web3_client.call_contract(
                sdk.identity_registry,
                "tokenURI",
                token_id
            )

            # Get owner
            owner = self.web3_client.call_contract(
                sdk.identity_registry,
                "ownerOf",
                token_id
            )

            # Get on-chain verified wallet (IdentityRegistry.getAgentWallet)
            wallet_address = None
            try:
                wallet_address = self.web3_client.call_contract(
                    sdk.identity_registry,
                    "getAgentWallet",
                    token_id
                )
                if wallet_address == "0x0000000000000000000000000000000000000000":
                    wallet_address = None
            except Exception:
                pass

            # Create agent ID
            agent_id = f"{sdk.chain_id}:{token_id}"

            # Try to load registration data from IPFS
            registration_data = self._load_registration_from_ipfs(agent_uri, sdk)

            if registration_data:
                # Use data from IPFS, but prefer the on-chain wallet if available
                return {
                    "agentId": agent_id,
                    "name": registration_data.get("name", f"Agent {token_id}"),
                    "description": registration_data.get("description", f"Agent registered with token ID {token_id}"),
                    "owner": owner,
                    "tokenId": token_id,
                    "agentURI": agent_uri,
                    "x402support": registration_data.get("x402Support", registration_data.get("x402support", False)),
                    "trustModels": registration_data.get("trustModels", ["reputation"]),
                    "active": registration_data.get("active", True),
                    "endpoints": registration_data.get("endpoints", []),
                    "image": registration_data.get("image"),
                    "walletAddress": wallet_address or registration_data.get("walletAddress"),
                    "metadata": registration_data.get("metadata", {})
                }
            else:
                # Fall back to basic data
                return {
                    "agentId": agent_id,
                    "name": f"Agent {token_id}",
                    "description": f"Agent registered with token ID {token_id}",
                    "owner": owner,
                    "tokenId": token_id,
                    "agentURI": agent_uri,
                    "x402support": False,
                    "trustModels": ["reputation"],
                    "active": True,
                    "endpoints": [],
                    "image": None,
                    "walletAddress": wallet_address,
                    "metadata": {}
                }
        except Exception as e:
            logger.error(f"Error loading agent {token_id}: {e}")
            return None

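    # Shape note (hypothetical values): for chain_id 11155111 and token_id 7,
    # the returned dict carries "agentId": "11155111:7"; IPFS registration
    # data fills the remaining fields whenever the agentURI resolves.
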
    def _load_registration_from_ipfs(self, token_uri: str, sdk) -> Optional[Dict[str, Any]]:
        """Load agent registration data from IPFS or an HTTP gateway."""
        try:
            import json
            import requests

            # Extract the IPFS hash from the token URI
            if token_uri.startswith("ipfs://"):
                ipfs_hash = token_uri[7:]  # Remove "ipfs://" prefix
            elif token_uri.startswith("https://") and "ipfs" in token_uri:
                # Extract hash from an IPFS gateway URL (tolerating a trailing slash)
                parts = token_uri.split("/")
                ipfs_hash = parts[-1] if parts[-1] else parts[-2]
            elif token_uri.startswith("https://"):
                # Direct HTTP URL - try to fetch it directly
                try:
                    response = requests.get(token_uri, timeout=10)
                    response.raise_for_status()
                    return response.json()
                except Exception as e:
                    logger.warning(f"Could not load HTTP data from {token_uri}: {e}")
                    return None
            else:
                return None

            # Try the local IPFS client first (if available)
            if hasattr(sdk, 'ipfs_client') and sdk.ipfs_client is not None:
                try:
                    data = sdk.ipfs_client.get(ipfs_hash)
                    if data:
                        return json.loads(data)
                except Exception as e:
                    logger.warning(f"Could not load from local IPFS for {ipfs_hash}: {e}")

            # Fall back to public IPFS HTTP gateways
            gateways = [
                f"https://ipfs.io/ipfs/{ipfs_hash}",
                f"https://gateway.pinata.cloud/ipfs/{ipfs_hash}",
                f"https://cloudflare-ipfs.com/ipfs/{ipfs_hash}",
                f"https://dweb.link/ipfs/{ipfs_hash}"
            ]

            for gateway_url in gateways:
                try:
                    response = requests.get(gateway_url, timeout=10)
                    response.raise_for_status()
                    return response.json()
                except Exception as e:
                    logger.debug(f"Could not load from {gateway_url}: {e}")
                    continue

            logger.warning(f"Could not load data for {ipfs_hash} from any source")
            return None

        except Exception as e:
            logger.warning(f"Could not parse token URI {token_uri}: {e}")
            return None

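    # URI forms handled above (the hash is a made-up example):
    #   "ipfs://QmExampleHash"                -> local node, then public gateways
    #   "https://ipfs.io/ipfs/QmExampleHash"  -> hash extracted, same path
    #   "https://example.com/agent.json"      -> fetched directly
    #   anything else                         -> None
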
    def _get_subgraph_client_for_chain(self, chain_id: int):
        """
        Get or create a SubgraphClient for a specific chain.

        Checks (in order):
        1. Client cache (already created)
        2. Subgraph URL overrides (from constructor)
        3. DEFAULT_SUBGRAPH_URLS (from contracts.py)
        4. Environment variables (SUBGRAPH_URL_<chainId>)

        Returns None if no subgraph URL is available for this chain.
        """
        # Check the cache first
        if chain_id in self._subgraph_client_cache:
            return self._subgraph_client_cache[chain_id]

        # Get the subgraph URL for this chain
        subgraph_url = self._get_subgraph_url_for_chain(chain_id)

        if subgraph_url is None:
            logger.warning(f"No subgraph URL configured for chain {chain_id}")
            return None

        # Create a new SubgraphClient
        from .subgraph_client import SubgraphClient
        client = SubgraphClient(subgraph_url)

        # Cache it for future use
        self._subgraph_client_cache[chain_id] = client

        logger.info(f"Created subgraph client for chain {chain_id}: {subgraph_url}")

        return client

    def _get_subgraph_url_for_chain(self, chain_id: int) -> Optional[str]:
        """
        Get the subgraph URL for a specific chain.

        Priority order:
        1. Constructor-provided overrides (self.subgraph_url_overrides)
        2. DEFAULT_SUBGRAPH_URLS from contracts.py
        3. Environment variable SUBGRAPH_URL_<chainId>
        4. None (not configured)

        Note that the environment variable is only consulted for chains
        without a built-in default; use constructor overrides to replace
        a default URL.
        """
        import os

        # 1. Check constructor overrides
        if chain_id in self.subgraph_url_overrides:
            return self.subgraph_url_overrides[chain_id]

        # 2. Check DEFAULT_SUBGRAPH_URLS
        from .contracts import DEFAULT_SUBGRAPH_URLS
        if chain_id in DEFAULT_SUBGRAPH_URLS:
            return DEFAULT_SUBGRAPH_URLS[chain_id]

        # 3. Check the environment variable
        env_key = f"SUBGRAPH_URL_{chain_id}"
        env_url = os.environ.get(env_key)
        if env_url:
            logger.info(f"Using subgraph URL from environment: {env_key}={env_url}")
            return env_url

        # 4. Not found
        return None

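    # Configuration sketch (hypothetical URL): exporting
    #   SUBGRAPH_URL_84532=https://example.com/subgraphs/agent0-base-sepolia
    # makes chain 84532 resolvable here, provided no built-in default covers it.
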
    def _parse_agent_id(self, agent_id: AgentId) -> tuple[Optional[int], str]:
        """
        Parse an agentId to extract the chainId and tokenId.

        Returns:
            (chain_id, token_id_str) where:
            - chain_id: int if "chainId:tokenId" format, None if just "tokenId"
            - token_id_str: the tokenId part (always present)
        """
        if ":" in agent_id:
            parts = agent_id.split(":", 1)
            try:
                chain_id = int(parts[0])
                token_id = parts[1]
                return (chain_id, token_id)
            except ValueError:
                # Invalid chainId, treat the whole string as a tokenId
                return (None, agent_id)
        return (None, agent_id)

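    # Parsing examples:
    #   "11155111:42" -> (11155111, "42")
    #   "42"          -> (None, "42")
    #   "foo:42"      -> (None, "foo:42")   # non-numeric chain part
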
    def _get_all_configured_chains(self) -> List[int]:
        """
        Get a list of all chains that have subgraphs configured.

        This is used when params.chains is None (query all available chains).
        """
        import os
        from .contracts import DEFAULT_SUBGRAPH_URLS

        chains = set()

        # Add chains from DEFAULT_SUBGRAPH_URLS
        chains.update(DEFAULT_SUBGRAPH_URLS.keys())

        # Add chains from constructor overrides
        chains.update(self.subgraph_url_overrides.keys())

        # Add chains from environment variables
        for key, value in os.environ.items():
            if key.startswith("SUBGRAPH_URL_") and value:
                try:
                    chain_id = int(key.replace("SUBGRAPH_URL_", ""))
                    chains.add(chain_id)
                except ValueError:
                    pass

        return sorted(chains)

    def _apply_cross_chain_filters(
        self,
        agents: List[Dict[str, Any]],
        params: SearchParams
    ) -> List[Dict[str, Any]]:
        """
        Apply filters that couldn't be expressed in the subgraph WHERE clause.

        Most filters are already applied by the subgraph query, but some
        (supportedTrust, mcpTools, a2aSkills, mcpPrompts, mcpResources)
        need post-processing against the registration file.
        """
        filtered = agents

        # Each entry pairs a search parameter with its registrationFile key;
        # an agent passes a filter if it matches at least one requested value.
        post_filters = [
            (params.supportedTrust, 'supportedTrusts'),
            (params.mcpTools, 'mcpTools'),
            (params.a2aSkills, 'a2aSkills'),
            (params.mcpPrompts, 'mcpPrompts'),
            (params.mcpResources, 'mcpResources'),
        ]

        for wanted, reg_key in post_filters:
            if wanted is None:
                continue
            filtered = [
                agent for agent in filtered
                if any(
                    value in agent.get('registrationFile', {}).get(reg_key, [])
                    for value in wanted
                )
            ]

        return filtered

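    # Filter semantics: OR within a single filter, AND across filters. With
    # hypothetical values, mcpTools=["search", "fetch"] keeps agents exposing
    # either tool; adding a2aSkills=["summarize"] then keeps only those that
    # also list that skill.
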
    def _deduplicate_agents_cross_chain(
        self,
        agents: List[Dict[str, Any]],
        params: SearchParams
    ) -> List[Dict[str, Any]]:
        """
        Deduplicate agents across chains (if requested).

        Strategy:
        - By default, DON'T deduplicate (agents on different chains are different entities)
        - If params.deduplicate_cross_chain=True, deduplicate by (owner, name, description)

        When deduplicating:
        - Keep the first instance encountered
        - Add a 'deployedOn' array with all chain IDs where this agent exists
        """
        # Check whether deduplication was requested
        if not params.deduplicate_cross_chain:
            return agents

        # Group agents by identity key
        seen = {}
        deduplicated = []

        for agent in agents:
            # The identity key (owner, name, description) identifies
            # "the same agent" across chains
            owner = agent.get('owner', '').lower()
            reg_file = agent.get('registrationFile', {})
            name = reg_file.get('name', '')
            description = reg_file.get('description', '')

            identity_key = (owner, name, description)

            if identity_key not in seen:
                # First time seeing this agent
                seen[identity_key] = agent

                # Record which chain it was found on
                agent['deployedOn'] = [agent['chainId']]

                deduplicated.append(agent)
            else:
                # Already seen on another chain; record this chain too
                seen[identity_key]['deployedOn'].append(agent['chainId'])

        logger.info(
            f"Deduplication: {len(agents)} agents → {len(deduplicated)} unique agents"
        )

        return deduplicated

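    # Deduplication sketch (hypothetical values): two results sharing the same
    # owner, name, and description on chains 11155111 and 84532 collapse into
    # one entry whose deployedOn becomes [11155111, 84532].
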
    def _sort_agents_cross_chain(
        self,
        agents: List[Dict[str, Any]],
        sort: List[str]
    ) -> List[Dict[str, Any]]:
        """
        Sort agents gathered from multiple chains.

        Supports sorting by:
        - createdAt (timestamp)
        - updatedAt (timestamp)
        - totalFeedback (count)
        - name (alphabetical)
        - averageValue (reputation, if available)

        Only the first sort specification is applied; each takes the form
        "field" or "field:direction".
        """
        if not sort:
            # Default: sort by createdAt descending (newest first)
            return sorted(
                agents,
                key=lambda a: a.get('createdAt', 0),
                reverse=True
            )

        # Parse the first sort specification
        sort_spec = sort[0]
        if ':' in sort_spec:
            field, direction = sort_spec.split(':', 1)
        else:
            field = sort_spec
            direction = 'desc'

        reverse = (direction.lower() == 'desc')

        # Define the sort key function
        def get_sort_key(agent: Dict[str, Any]):
            if field == 'createdAt':
                return agent.get('createdAt', 0)
            elif field == 'updatedAt':
                return agent.get('updatedAt', 0)
            elif field == 'totalFeedback':
                return agent.get('totalFeedback', 0)
            elif field == 'name':
                reg_file = agent.get('registrationFile', {})
                return reg_file.get('name', '').lower()
            elif field == 'averageValue':
                # If a reputation search was done, averageValue may be available
                return agent.get('averageValue', 0)
            else:
                logger.warning(f"Unknown sort field: {field}, defaulting to createdAt")
                return agent.get('createdAt', 0)

        return sorted(agents, key=get_sort_key, reverse=reverse)

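    # Sort spec examples: ["name:asc"] sorts alphabetically ascending;
    # ["totalFeedback"] sorts by feedback count, descending by default.
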
    def _parse_multi_chain_cursor(self, cursor: Optional[str]) -> Dict[str, int]:
        """
        Parse a multi-chain cursor into per-chain offsets.

        Cursor format (JSON):
        {
            "11155111": 50,        # Ethereum Sepolia offset
            "84532": 30,           # Base Sepolia offset
            "_global_offset": 100  # Total items returned so far
        }

        Returns:
            Dict mapping chainId (a string, since JSON object keys are
            strings) to offset; empty when there is no valid cursor.
        """
        if not cursor:
            return {}

        try:
            cursor_data = json.loads(cursor)

            # Validate the format
            if not isinstance(cursor_data, dict):
                logger.warning(f"Invalid cursor format: {cursor}, using empty")
                return {}

            return cursor_data

        except json.JSONDecodeError as e:
            logger.warning(f"Failed to parse cursor: {e}, using empty")
            return {}

    def _create_multi_chain_cursor(
        self,
        global_offset: int,
    ) -> str:
        """
        Create a multi-chain cursor for the next page.

        Args:
            global_offset: Total items returned so far

        Returns:
            JSON string cursor
        """
        cursor_data = {
            "_global_offset": global_offset
        }

        return json.dumps(cursor_data)

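    # Round trip: _create_multi_chain_cursor(100) yields
    # '{"_global_offset": 100}', which _parse_multi_chain_cursor reads
    # back as {"_global_offset": 100}.
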
    def _extract_order_by(self, sort: List[str]) -> str:
        """Extract the order_by field from a sort specification."""
        if not sort:
            return "createdAt"

        sort_spec = sort[0]
        if ':' in sort_spec:
            field, _ = sort_spec.split(':', 1)
            return field
        return sort_spec

    def _extract_order_direction(self, sort: List[str]) -> str:
        """Extract the order direction from a sort specification."""
        if not sort:
            return "desc"

        sort_spec = sort[0]
        if ':' in sort_spec:
            _, direction = sort_spec.split(':', 1)
            return direction
        return "desc"
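
    # Extraction examples: "createdAt:asc" -> order_by "createdAt" with
    # direction "asc"; a bare "name" -> order_by "name", direction "desc".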