agent0-sdk 0.2.2__py3-none-any.whl → 0.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent0_sdk/__init__.py +1 -1
- agent0_sdk/core/agent.py +303 -29
- agent0_sdk/core/contracts.py +93 -51
- agent0_sdk/core/feedback_manager.py +191 -162
- agent0_sdk/core/indexer.py +787 -37
- agent0_sdk/core/models.py +10 -21
- agent0_sdk/core/oasf_validator.py +98 -0
- agent0_sdk/core/sdk.py +238 -20
- agent0_sdk/core/subgraph_client.py +56 -17
- agent0_sdk/core/web3_client.py +184 -17
- agent0_sdk/taxonomies/all_domains.json +1565 -0
- agent0_sdk/taxonomies/all_skills.json +1030 -0
- {agent0_sdk-0.2.2.dist-info → agent0_sdk-0.5.dist-info}/METADATA +78 -6
- agent0_sdk-0.5.dist-info/RECORD +19 -0
- {agent0_sdk-0.2.2.dist-info → agent0_sdk-0.5.dist-info}/top_level.txt +0 -1
- agent0_sdk-0.2.2.dist-info/RECORD +0 -27
- tests/__init__.py +0 -1
- tests/config.py +0 -46
- tests/conftest.py +0 -22
- tests/test_feedback.py +0 -417
- tests/test_models.py +0 -224
- tests/test_real_public_servers.py +0 -103
- tests/test_registration.py +0 -267
- tests/test_registrationIpfs.py +0 -227
- tests/test_sdk.py +0 -240
- tests/test_search.py +0 -415
- tests/test_transfer.py +0 -255
- {agent0_sdk-0.2.2.dist-info → agent0_sdk-0.5.dist-info}/WHEEL +0 -0
- {agent0_sdk-0.2.2.dist-info → agent0_sdk-0.5.dist-info}/licenses/LICENSE +0 -0
agent0_sdk/core/models.py
CHANGED
@@ -7,7 +7,7 @@ from __future__ import annotations
 import json
 from dataclasses import dataclass, field
 from enum import Enum
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Union, Literal
 from datetime import datetime
 
 
@@ -28,6 +28,7 @@ class EndpointType(Enum):
     ENS = "ENS"
     DID = "DID"
     WALLET = "wallet"
+    OASF = "OASF"
 
 
 class TrustModel(Enum):
@@ -87,23 +88,8 @@ class RegistrationFile:
             }
             endpoints.append(endpoint_dict)
 
-        #
-
-        # Use stored walletChainId if available, otherwise extract from agentId
-        chain_id_for_wallet = self.walletChainId
-        if chain_id_for_wallet is None:
-            # Extract chain ID from agentId if available, otherwise use 1 as default
-            chain_id_for_wallet = 1  # Default to mainnet
-            if self.agentId and ":" in self.agentId:
-                try:
-                    chain_id_for_wallet = int(self.agentId.split(":")[1])
-                except (ValueError, IndexError):
-                    chain_id_for_wallet = 1
-
-        endpoints.append({
-            "name": "agentWallet",
-            "endpoint": f"eip155:{chain_id_for_wallet}:{self.walletAddress}"
-        })
+        # Note: agentWallet is no longer included in endpoints array.
+        # It's now a reserved on-chain metadata key managed via setAgentWallet().
 
         # Build registrations array
        registrations = []
@@ -124,7 +110,7 @@ class RegistrationFile:
            "registrations": registrations,
            "supportedTrust": [tm.value if isinstance(tm, TrustModel) else tm for tm in self.trustModels],
            "active": self.active,
-           "
+           "x402Support": self.x402support,  # Use camelCase in JSON output per spec
            "updatedAt": self.updatedAt,
        }
 
@@ -162,7 +148,7 @@ class RegistrationFile:
            endpoints=endpoints,
            trustModels=trust_models,
            active=data.get("active", False),
-           x402support=data.get("x402support", False),
+           x402support=data.get("x402Support", data.get("x402support", False)),  # Handle both camelCase and lowercase
            metadata=data.get("metadata", {}),
            updatedAt=data.get("updatedAt", int(datetime.now().timestamp())),
        )
@@ -205,6 +191,7 @@ class Feedback:
    context: Optional[Dict[str, Any]] = None
    proofOfPayment: Optional[Dict[str, Any]] = None
    fileURI: Optional[URI] = None
+   endpoint: Optional[str] = None  # Endpoint URI associated with feedback
    createdAt: Timestamp = field(default_factory=lambda: int(datetime.now().timestamp()))
    answers: List[Dict[str, Any]] = field(default_factory=list)
    isRevoked: bool = False
@@ -269,7 +256,7 @@
 @dataclass
 class SearchParams:
    """Parameters for agent search."""
-   chains: Optional[List[ChainId]] = None
+   chains: Optional[Union[List[ChainId], Literal["all"]]] = None
    name: Optional[str] = None  # case-insensitive substring
    description: Optional[str] = None  # semantic; vector distance < threshold
    owners: Optional[List[Address]] = None
@@ -286,6 +273,7 @@
    mcpResources: Optional[List[str]] = None
    active: Optional[bool] = True
    x402support: Optional[bool] = None
+   deduplicate_cross_chain: bool = False  # Deduplicate same agent across chains
 
    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary, filtering out None values."""
@@ -302,6 +290,7 @@
    skills: Optional[List[str]] = None
    tasks: Optional[List[str]] = None
    names: Optional[List[str]] = None  # MCP tool/resource/prompt names
+   endpoint: Optional[str] = None  # Filter by endpoint URI
    minScore: Optional[int] = None  # 0-100
    maxScore: Optional[int] = None  # 0-100
    includeRevoked: bool = False
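The models.py changes above are mostly additive: an OASF endpoint type, an `endpoint` field on `Feedback` and `SearchFeedbackParams`, and multi-chain search parameters. A minimal sketch of how the new `SearchParams` fields might be used, assuming the classes are imported from `agent0_sdk.core.models` (package-root re-exports are not shown in this diff) and that the remaining fields keep the defaults listed above:

```python
from agent0_sdk.core.models import EndpointType, SearchParams

# "all" is now accepted in place of an explicit chain-id list, and duplicates of the
# same agent registered on several chains can be collapsed into one result.
params = SearchParams(
    chains="all",
    name="translator",
    active=True,
    deduplicate_cross_chain=True,
)
print(params.to_dict())  # to_dict() filters out None-valued fields, per its docstring

# New endpoint type introduced in this release.
print(EndpointType.OASF.value)  # "OASF"
```

On the wire, `to_dict()` now writes `x402Support` in camelCase while `from_dict()` still accepts the older lowercase key, so registration files produced by 0.2.2 remain readable.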
agent0_sdk/core/oasf_validator.py
ADDED
@@ -0,0 +1,98 @@
+"""
+OASF taxonomy validation utilities.
+"""
+
+import json
+import os
+from pathlib import Path
+from typing import Optional
+
+# Cache for loaded taxonomy data
+_skills_cache: Optional[dict] = None
+_domains_cache: Optional[dict] = None
+
+
+def _get_taxonomy_path(filename: str) -> Path:
+    """Get the path to a taxonomy file."""
+    # Get the directory where this file is located
+    current_dir = Path(__file__).parent
+    # Go up one level to agent0_sdk, then into taxonomies
+    taxonomy_dir = current_dir.parent / "taxonomies"
+    return taxonomy_dir / filename
+
+
+def _load_skills() -> dict:
+    """Load skills taxonomy file with caching."""
+    global _skills_cache
+    if _skills_cache is None:
+        skills_path = _get_taxonomy_path("all_skills.json")
+        try:
+            with open(skills_path, "r", encoding="utf-8") as f:
+                _skills_cache = json.load(f)
+        except FileNotFoundError:
+            raise FileNotFoundError(
+                f"Skills taxonomy file not found: {skills_path}"
+            )
+        except json.JSONDecodeError as e:
+            raise ValueError(
+                f"Invalid JSON in skills taxonomy file: {e}"
+            )
+    return _skills_cache
+
+
+def _load_domains() -> dict:
+    """Load domains taxonomy file with caching."""
+    global _domains_cache
+    if _domains_cache is None:
+        domains_path = _get_taxonomy_path("all_domains.json")
+        try:
+            with open(domains_path, "r", encoding="utf-8") as f:
+                _domains_cache = json.load(f)
+        except FileNotFoundError:
+            raise FileNotFoundError(
+                f"Domains taxonomy file not found: {domains_path}"
+            )
+        except json.JSONDecodeError as e:
+            raise ValueError(
+                f"Invalid JSON in domains taxonomy file: {e}"
+            )
+    return _domains_cache
+
+
+def validate_skill(slug: str) -> bool:
+    """
+    Validate if a skill slug exists in the OASF taxonomy.
+
+    Args:
+        slug: The skill slug to validate (e.g., "natural_language_processing/natural_language_generation/summarization")
+
+    Returns:
+        True if the skill exists in the taxonomy, False otherwise
+
+    Raises:
+        FileNotFoundError: If the taxonomy file cannot be found
+        ValueError: If the taxonomy file is invalid JSON
+    """
+    skills_data = _load_skills()
+    skills = skills_data.get("skills", {})
+    return slug in skills
+
+
+def validate_domain(slug: str) -> bool:
+    """
+    Validate if a domain slug exists in the OASF taxonomy.
+
+    Args:
+        slug: The domain slug to validate (e.g., "finance_and_business/investment_services")
+
+    Returns:
+        True if the domain exists in the taxonomy, False otherwise
+
+    Raises:
+        FileNotFoundError: If the taxonomy file cannot be found
+        ValueError: If the taxonomy file is invalid JSON
+    """
+    domains_data = _load_domains()
+    domains = domains_data.get("domains", {})
+    return slug in domains
+
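The new module exposes two public helpers, `validate_skill()` and `validate_domain()`, which check a slug against the bundled taxonomy JSON files (`agent0_sdk/taxonomies/all_skills.json` and `all_domains.json`, both new in this release). A short usage sketch, assuming the module is imported directly from the file location shown above:

```python
from agent0_sdk.core.oasf_validator import validate_domain, validate_skill

# Slugs are the taxonomy paths used in the docstrings above.
print(validate_skill(
    "natural_language_processing/natural_language_generation/summarization"
))  # True, provided the slug exists in the bundled taxonomy
print(validate_domain("finance_and_business/investment_services"))

# Unknown slugs simply return False; a missing or corrupt taxonomy file raises
# FileNotFoundError or ValueError instead. Loaded files are cached per process.
print(validate_skill("not/a/real/skill"))  # False
```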
agent0_sdk/core/sdk.py
CHANGED
@@ -6,10 +6,13 @@ from __future__ import annotations
 
 import asyncio
 import json
+import logging
 import time
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Dict, List, Optional, Union, Literal
 from datetime import datetime
 
+logger = logging.getLogger(__name__)
+
 from .models import (
    AgentId, ChainId, Address, URI, Timestamp, IdemKey,
    EndpointType, TrustModel, Endpoint, RegistrationFile,
@@ -102,7 +105,8 @@ class SDK:
            web3_client=self.web3_client,
            store=indexingStore,
            embeddings=embeddings,
-           subgraph_client=self.subgraph_client
+           subgraph_client=self.subgraph_client,
+           subgraph_url_overrides=self._subgraph_urls
        )
 
        # Initialize IPFS client based on configuration
@@ -274,7 +278,12 @@ class SDK:
        return Agent(sdk=self, registration_file=registration_file)
 
    def loadAgent(self, agentId: AgentId) -> Agent:
-       """Load an existing agent (hydrates from registration file if registered).
+       """Load an existing agent (hydrates from registration file if registered).
+
+       Note: Agents can be minted with an empty token URI (e.g. IPFS flow where publish fails).
+       In that case we return a partially-hydrated Agent with an empty registration file so the
+       caller can resume publishing and set the URI later.
+       """
        # Convert agentId to string if it's an integer
        agentId = str(agentId)
 
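The new `loadAgent()` docstring describes a resume flow for agents minted with an empty token URI. A hedged sketch of that flow from the caller's side; `sdk` stands for an already-configured SDK instance, `agent_id` comes from the caller, and the `registration_file` attribute name is assumed from the `Agent(..., registration_file=...)` constructor call shown above:

```python
import logging

logging.basicConfig(level=logging.WARNING)  # surfaces the SDK's "no agentURI" warning

agent = sdk.loadAgent(agent_id)  # integers are accepted and converted with str() internally

# For an agent whose registration file was never published, loadAgent() now returns a
# partially-hydrated Agent backed by an empty RegistrationFile with agentURI set to None
# instead of failing outright.
registration = agent.registration_file  # assumed attribute, mirroring the constructor kwarg
if registration.agentURI is None:
    # Populate the registration details, then resume the IPFS publish flow
    # (registerIPFS(), as the warning message suggests) to set the URI on-chain.
    ...
```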
@@ -288,16 +297,22 @@ class SDK:
 
        # Get token URI from contract
        try:
-
-               self.identity_registry, "tokenURI", int(token_id)
+           agent_uri = self.web3_client.call_contract(
+               self.identity_registry, "tokenURI", int(token_id)  # tokenURI is ERC-721 standard, but represents agentURI
            )
        except Exception as e:
            raise ValueError(f"Failed to load agent {agentId}: {e}")
 
-       # Load registration file
-       registration_file = self._load_registration_file(
+       # Load registration file (or fall back to a minimal file if agent URI is missing)
+       registration_file = self._load_registration_file(agent_uri)
        registration_file.agentId = agentId
-       registration_file.agentURI =
+       registration_file.agentURI = agent_uri if agent_uri else None
+
+       if not agent_uri or not str(agent_uri).strip():
+           logger.warning(
+               f"Agent {agentId} has no agentURI set on-chain yet. "
+               "Returning a partial agent; update info and call registerIPFS() to publish and set URI."
+           )
 
        # Store registry address for proper JSON generation
        registry_address = self._registries.get("IDENTITY")
@@ -311,7 +326,13 @@ class SDK:
        return Agent(sdk=self, registration_file=registration_file)
 
    def _load_registration_file(self, uri: str) -> RegistrationFile:
-       """Load registration file from URI.
+       """Load registration file from URI.
+
+       If uri is empty/None/whitespace, returns an empty RegistrationFile to allow resume flows.
+       """
+       if not uri or not str(uri).strip():
+           return RegistrationFile()
+
        if uri.startswith("ipfs://"):
            if not self.ipfs_client:
                raise ValueError("IPFS client not configured")
@@ -342,21 +363,20 @@
        # For now, we'll leave it empty
        registration_file.operators = []
 
-       # Hydrate
+       # Hydrate agentWallet from on-chain (now uses getAgentWallet() instead of metadata)
        agent_id = token_id
        try:
-           #
-
-               self.identity_registry, "
+           # Get agentWallet using the new dedicated function
+           wallet_address = self.web3_client.call_contract(
+               self.identity_registry, "getAgentWallet", agent_id
            )
-           if
-           wallet_address = "0x" + wallet_bytes.hex()
+           if wallet_address and wallet_address != "0x0000000000000000000000000000000000000000":
                registration_file.walletAddress = wallet_address
                # If wallet is read from on-chain, use current chain ID
                # (the chain ID from the registration file might be outdated)
                registration_file.walletChainId = self.chainId
        except Exception as e:
-           # No on-chain wallet, will fall back to registration file
+           # No on-chain wallet set, will fall back to registration file
            pass
 
        try:
@@ -438,7 +458,7 @@
    def searchAgents(
        self,
        params: Union[SearchParams, Dict[str, Any], None] = None,
-       sort: List[str] = None,
+       sort: Union[str, List[str], None] = None,
        page_size: int = 50,
        cursor: Optional[str] = None,
        **kwargs  # Accept search criteria as kwargs for better DX
@@ -466,7 +486,9 @@
 
        if sort is None:
            sort = ["updatedAt:desc"]
-
+       elif isinstance(sort, str):
+           sort = [sort]
+
        return self.indexer.search_agents(params, sort, page_size, cursor)
 
    # Feedback methods
@@ -588,8 +610,26 @@
        page_size: int = 50,
        cursor: Optional[str] = None,
        sort: Optional[List[str]] = None,
+       chains: Optional[Union[List[ChainId], Literal["all"]]] = None,
    ) -> Dict[str, Any]:
        """Search agents filtered by reputation criteria."""
+       # Handle multi-chain search
+       if chains:
+           # Expand "all" if needed
+           if chains == "all":
+               chains = self.indexer._get_all_configured_chains()
+
+           # If multiple chains or single chain different from default
+           if isinstance(chains, list) and len(chains) > 0:
+               if len(chains) > 1 or (len(chains) == 1 and chains[0] != self.chainId):
+                   return asyncio.run(
+                       self._search_agents_by_reputation_across_chains(
+                           agents, tags, reviewers, capabilities, skills, tasks, names,
+                           minAverageScore, includeRevoked, page_size, cursor, sort, chains
+                       )
+                   )
+
+       # Single chain search (existing behavior)
        if not self.subgraph_client:
            raise ValueError("Subgraph client required for searchAgentsByReputation")
 
@@ -664,6 +704,185 @@
        except Exception as e:
            raise ValueError(f"Failed to search agents by reputation: {e}")
 
+   async def _search_agents_by_reputation_across_chains(
+       self,
+       agents: Optional[List[AgentId]],
+       tags: Optional[List[str]],
+       reviewers: Optional[List[Address]],
+       capabilities: Optional[List[str]],
+       skills: Optional[List[str]],
+       tasks: Optional[List[str]],
+       names: Optional[List[str]],
+       minAverageScore: Optional[int],
+       includeRevoked: bool,
+       page_size: int,
+       cursor: Optional[str],
+       sort: Optional[List[str]],
+       chains: List[ChainId],
+   ) -> Dict[str, Any]:
+       """
+       Search agents by reputation across multiple chains in parallel.
+
+       Similar to indexer._search_agents_across_chains() but for reputation-based search.
+       """
+       import time
+       start_time = time.time()
+
+       if sort is None:
+           sort = ["createdAt:desc"]
+
+       order_by = "createdAt"
+       order_direction = "desc"
+       if sort and len(sort) > 0:
+           sort_field = sort[0].split(":")
+           order_by = sort_field[0] if len(sort_field) >= 1 else order_by
+           order_direction = sort_field[1] if len(sort_field) >= 2 else order_direction
+
+       skip = 0
+       if cursor:
+           try:
+               skip = int(cursor)
+           except ValueError:
+               skip = 0
+
+       # Define async function for querying a single chain
+       async def query_single_chain(chain_id: int) -> Dict[str, Any]:
+           """Query one chain and return its results with metadata."""
+           try:
+               # Get subgraph client for this chain
+               subgraph_client = self.indexer._get_subgraph_client_for_chain(chain_id)
+
+               if subgraph_client is None:
+                   logger.warning(f"No subgraph client available for chain {chain_id}")
+                   return {
+                       "chainId": chain_id,
+                       "status": "unavailable",
+                       "agents": [],
+                       "error": f"No subgraph configured for chain {chain_id}"
+                   }
+
+               # Execute reputation search query
+               try:
+                   agents_data = subgraph_client.search_agents_by_reputation(
+                       agents=agents,
+                       tags=tags,
+                       reviewers=reviewers,
+                       capabilities=capabilities,
+                       skills=skills,
+                       tasks=tasks,
+                       names=names,
+                       minAverageScore=minAverageScore,
+                       includeRevoked=includeRevoked,
+                       first=page_size * 3,  # Fetch extra to allow for filtering/sorting
+                       skip=0,  # We'll handle pagination after aggregation
+                       order_by=order_by,
+                       order_direction=order_direction
+                   )
+
+                   logger.info(f"Chain {chain_id}: fetched {len(agents_data)} agents by reputation")
+               except Exception as e:
+                   logger.error(f"Error in search_agents_by_reputation for chain {chain_id}: {e}", exc_info=True)
+                   agents_data = []
+
+               return {
+                   "chainId": chain_id,
+                   "status": "success",
+                   "agents": agents_data,
+                   "count": len(agents_data),
+               }
+
+           except Exception as e:
+               logger.error(f"Error querying chain {chain_id} for reputation search: {e}", exc_info=True)
+               return {
+                   "chainId": chain_id,
+                   "status": "error",
+                   "agents": [],
+                   "error": str(e),
+                   "count": 0
+               }
+
+       # Execute queries in parallel
+       chain_tasks = [query_single_chain(chain_id) for chain_id in chains]
+       chain_results = await asyncio.gather(*chain_tasks)
+
+       # Aggregate results from all chains
+       all_agents = []
+       successful_chains = []
+       failed_chains = []
+
+       for result in chain_results:
+           chain_id = result["chainId"]
+           if result["status"] == "success":
+               successful_chains.append(chain_id)
+               agents_count = len(result.get("agents", []))
+               logger.debug(f"Chain {chain_id}: aggregating {agents_count} agents")
+               all_agents.extend(result["agents"])
+           else:
+               failed_chains.append(chain_id)
+               logger.warning(f"Chain {chain_id}: status={result.get('status')}, error={result.get('error', 'N/A')}")
+
+       logger.debug(f"Total agents aggregated: {len(all_agents)} from {len(successful_chains)} chains")
+
+       # Transform to AgentSummary objects
+       from .models import AgentSummary
+       results = []
+       for agent_data in all_agents:
+           reg_file = agent_data.get('registrationFile') or {}
+           if not isinstance(reg_file, dict):
+               reg_file = {}
+
+           agent_summary = AgentSummary(
+               chainId=int(agent_data.get('chainId', 0)),
+               agentId=agent_data.get('id'),
+               name=reg_file.get('name', f"Agent {agent_data.get('id')}"),
+               image=reg_file.get('image'),
+               description=reg_file.get('description', ''),
+               owners=[agent_data.get('owner', '')],
+               operators=agent_data.get('operators', []),
+               mcp=reg_file.get('mcpEndpoint') is not None,
+               a2a=reg_file.get('a2aEndpoint') is not None,
+               ens=reg_file.get('ens'),
+               did=reg_file.get('did'),
+               walletAddress=reg_file.get('agentWallet'),
+               supportedTrusts=reg_file.get('supportedTrusts', []),
+               a2aSkills=reg_file.get('a2aSkills', []),
+               mcpTools=reg_file.get('mcpTools', []),
+               mcpPrompts=reg_file.get('mcpPrompts', []),
+               mcpResources=reg_file.get('mcpResources', []),
+               active=reg_file.get('active', True),
+               x402support=reg_file.get('x402support', False),
+               extras={'averageScore': agent_data.get('averageScore')}
+           )
+           results.append(agent_summary)
+
+       # Sort by averageScore (descending) if available, otherwise by createdAt
+       results.sort(
+           key=lambda x: (
+               x.extras.get('averageScore') if x.extras.get('averageScore') is not None else 0,
+               x.chainId,
+               x.agentId
+           ),
+           reverse=True
+       )
+
+       # Apply pagination
+       paginated_results = results[skip:skip + page_size]
+       next_cursor = str(skip + len(paginated_results)) if len(paginated_results) == page_size and skip + len(paginated_results) < len(results) else None
+
+       elapsed_ms = int((time.time() - start_time) * 1000)
+
+       return {
+           "items": paginated_results,
+           "nextCursor": next_cursor,
+           "meta": {
+               "chains": chains,
+               "successfulChains": successful_chains,
+               "failedChains": failed_chains,
+               "totalResults": len(results),
+               "timing": {"totalMs": elapsed_ms}
+           }
+       }
+
    # Feedback methods - delegate to feedback_manager
    def signFeedbackAuth(
        self,
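With the new `chains` parameter, `searchAgentsByReputation()` fans out one subgraph query per chain, aggregates and re-sorts the results by average score, and paginates them with a numeric offset cursor, as implemented in `_search_agents_by_reputation_across_chains()` above. A hedged usage sketch (`sdk` is an already-configured SDK instance; the result shape below applies to the multi-chain path, which is taken when more than the default chain is queried):

```python
page = sdk.searchAgentsByReputation(
    skills=["summarization"],
    minAverageScore=80,
    chains="all",          # expanded to every configured chain via the indexer
    page_size=20,
)

for summary in page["items"]:          # AgentSummary objects
    print(summary.chainId, summary.agentId, summary.extras.get("averageScore"))

meta = page["meta"]
print("queried:", meta["chains"], "ok:", meta["successfulChains"], "failed:", meta["failedChains"])

# Continue with the returned offset cursor, if any.
if page["nextCursor"]:
    page = sdk.searchAgentsByReputation(
        skills=["summarization"], minAverageScore=80, chains="all",
        page_size=20, cursor=page["nextCursor"],
    )
```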
@@ -701,11 +920,10 @@ class SDK:
        agentId: "AgentId",
        feedbackFile: Dict[str, Any],
        idem: Optional["IdemKey"] = None,
-       feedbackAuth: Optional[bytes] = None,
    ) -> "Feedback":
        """Give feedback (maps 8004 endpoint)."""
        return self.feedback_manager.giveFeedback(
-           agentId, feedbackFile, idem
+           agentId, feedbackFile, idem
        )
 
    def getFeedback(
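The final hunk removes the unused `feedbackAuth` parameter from `giveFeedback()`, which now forwards only the agent id, the feedback file, and an optional idempotency key to the feedback manager. A sketch of a call under the new signature; the feedback file contents here are illustrative, since its schema is defined elsewhere in the SDK:

```python
feedback = sdk.giveFeedback(          # sdk: already-configured SDK instance
    agentId=agent_id,
    feedbackFile={"score": 92, "tags": ["helpful"]},  # placeholder payload, schema not shown in this diff
)
print(feedback.isRevoked, feedback.endpoint)  # Feedback fields listed in models.py above
```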