agent0-sdk 1.5.0b1__py3-none-any.whl → 1.5.1b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent0_sdk/__init__.py +1 -1
- agent0_sdk/core/feedback_manager.py +75 -78
- agent0_sdk/core/indexer.py +81 -727
- agent0_sdk/core/models.py +6 -8
- agent0_sdk/core/sdk.py +7 -16
- agent0_sdk/core/semantic_search_client.py +10 -6
- {agent0_sdk-1.5.0b1.dist-info → agent0_sdk-1.5.1b1.dist-info}/METADATA +151 -7
- {agent0_sdk-1.5.0b1.dist-info → agent0_sdk-1.5.1b1.dist-info}/RECORD +11 -11
- {agent0_sdk-1.5.0b1.dist-info → agent0_sdk-1.5.1b1.dist-info}/WHEEL +0 -0
- {agent0_sdk-1.5.0b1.dist-info → agent0_sdk-1.5.1b1.dist-info}/licenses/LICENSE +0 -0
- {agent0_sdk-1.5.0b1.dist-info → agent0_sdk-1.5.1b1.dist-info}/top_level.txt +0 -0
agent0_sdk/core/indexer.py
CHANGED
@@ -475,21 +475,12 @@ class AgentIndexer:
         self,
         filters: SearchFilters,
         options: SearchOptions,
-    ) ->
+    ) -> List[AgentSummary]:
         """Unified search entry point (replaces all legacy search variants)."""
-        start_ms = int(time.time() * 1000)
-
         if filters.keyword and str(filters.keyword).strip():
-
+            return self._search_unified_with_keyword(filters, options)
         else:
-
-
-            meta = out.get("meta") or {}
-            timing = meta.get("timing") or {}
-            timing["totalMs"] = int(time.time() * 1000) - start_ms
-            meta["timing"] = timing
-            out["meta"] = meta
-            return out
+            return self._search_unified_no_keyword(filters, options)
 
     # -------------------------------------------------------------------------
     # Unified search (v2)
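The unified entry point now returns the result list directly; the `{"items", "nextCursor", "meta"}` envelope and its `timing.totalMs` bookkeeping are gone. A minimal caller-migration sketch, assuming an `AgentIndexer`-backed SDK object; the method name `search_agents` is illustrative and not confirmed by this diff:

```python
# Hypothetical caller update; `indexer`, `filters`, and `options` are assumed
# to be constructed elsewhere via the SDK.

# 1.5.0b1 shape: an envelope with items, a cursor, and timing metadata.
# result = indexer.search_agents(filters, options)
# agents = result["items"]
# next_cursor = result["nextCursor"]

# 1.5.1b1 shape: a plain List[AgentSummary], already filtered and sorted.
agents = indexer.search_agents(filters, options)
names = [a.name for a in agents]
```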
@@ -514,38 +505,7 @@ class AgentIndexer:
             return self._get_all_configured_chains()
         return [self.web3_client.chain_id]
 
-    def _parse_cursor_offset(self, cursor: Optional[str]) -> int:
-        if not cursor:
-            return 0
-        try:
-            n = int(cursor)
-            return n if n >= 0 else 0
-        except Exception:
-            return 0
-
-    def _parse_per_chain_cursor(self, chains: List[int], cursor: Optional[str]) -> Dict[int, int]:
-        out: Dict[int, int] = {c: 0 for c in chains}
-        if not cursor:
-            return out
-        try:
-            data = json.loads(cursor)
-            if isinstance(data, dict):
-                for c in chains:
-                    v = data.get(str(c))
-                    if isinstance(v, int) and v >= 0:
-                        out[c] = v
-                return out
-        except Exception:
-            pass
-        if len(chains) == 1:
-            try:
-                out[chains[0]] = max(0, int(cursor))
-            except Exception:
-                pass
-        return out
-
-    def _encode_per_chain_cursor(self, skips: Dict[int, int]) -> str:
-        return json.dumps({str(k): int(v) for k, v in sorted(skips.items(), key=lambda kv: kv[0])})
+    # Pagination removed: cursor helpers deleted.
 
     def _to_unix_seconds(self, dt: Any) -> int:
         if isinstance(dt, int):
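For reference, the deleted helpers defined the two cursor shapes 1.5.0b1 accepted; neither is produced or parsed in 1.5.1b1. A sketch reproducing the old per-chain encoding (the chain IDs are examples):

```python
import json

# Keyword search used a plain integer offset cursor, e.g. "100".
# Non-keyword search used a JSON object mapping chain id -> per-chain skip,
# with keys sorted by chain id, as _encode_per_chain_cursor did:
skips = {84532: 30, 11155111: 50}
cursor = json.dumps({str(k): int(v) for k, v in sorted(skips.items())})
print(cursor)  # {"84532": 30, "11155111": 50}
```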
@@ -727,7 +687,6 @@ class AgentIndexer:
         value_hex = self._utf8_to_hex(str(value_str)) if value_str is not None else None
 
         first = 1000
-        max_rows = 5000
         out: Dict[int, List[str]] = {}
 
         for chain_id in chains:
@@ -736,7 +695,8 @@ class AgentIndexer:
                 out[chain_id] = []
                 continue
             ids: List[str] = []
-            for skip in range(0, max_rows, first):
+            skip = 0
+            while True:
                 where: Dict[str, Any] = {"key": key}
                 if value_hex is not None:
                     where["value"] = value_hex
@@ -748,6 +708,7 @@ class AgentIndexer:
                         ids.append(str(aid))
                 if len(rows) < first:
                     break
+                skip += first
             out[chain_id] = sorted(list(set(ids)))
         return out
 
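The pattern replacing the old bounded fetch (which stopped after `max_rows` rows) recurs throughout this release: page with a fixed `first`, and stop only when a page comes back short. A self-contained sketch of that loop; `fetch_page` is a stand-in for the subgraph query methods, not SDK API:

```python
from typing import Any, Callable, Dict, List

def fetch_all(fetch_page: Callable[[int, int], List[Dict[str, Any]]],
              first: int = 1000) -> List[Dict[str, Any]]:
    out: List[Dict[str, Any]] = []
    skip = 0
    while True:
        rows = fetch_page(first, skip)   # e.g. a subgraph query taking first/skip
        out.extend(rows)
        if len(rows) < first:            # a short page means the result set is exhausted
            break
        skip += first
    return out

# In-memory stand-in for a subgraph endpoint:
data = [{"id": i} for i in range(2500)]
assert len(fetch_all(lambda first, skip: data[skip:skip + first])) == 2500  # no 5000-row cap
```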
@@ -794,7 +755,6 @@ class AgentIndexer:
             raise ValueError("feedback.hasNoFeedback requires a pre-filtered candidate set (e.g. agentIds or keyword).")
 
         first = 1000
-        max_rows = 5000
 
         sums: Dict[str, float] = {}
         counts: Dict[str, int] = {}
@@ -832,7 +792,8 @@ class AgentIndexer:
 
             where: Dict[str, Any] = {"and": [base, *and_conditions]} if and_conditions else base
 
-            for skip in range(0, max_rows, first):
+            skip = 0
+            while True:
                 rows = sub.query_feedbacks_minimal(where=where, first=first, skip=skip, order_by="createdAt", order_direction="desc")
                 for r in rows:
                     agent = r.get("agent") or {}
@@ -853,6 +814,7 @@ class AgentIndexer:
                         matched_by_chain.setdefault(chain_id, set()).add(aid_s)
                 if len(rows) < first:
                     break
+                skip += first
 
         stats: Dict[str, Dict[str, float]] = {}
         for aid, cnt in counts.items():
@@ -901,21 +863,21 @@ class AgentIndexer:
 
         return allow, stats
 
-    def _search_unified_no_keyword(self, filters: SearchFilters, options: SearchOptions) ->
+    def _search_unified_no_keyword(self, filters: SearchFilters, options: SearchOptions) -> List[AgentSummary]:
         if not self.subgraph_client:
             raise ValueError("Subgraph client required for searchAgents")
 
         field, direction = self._parse_sort(options.sort, False)
         chains = self._resolve_chains(filters, False)
-        page_size = options.pageSize or 50
-        per_chain_skip = self._parse_per_chain_cursor(chains, options.cursor)
         ids_by_chain = self._normalize_agent_ids(filters, chains)
         metadata_ids_by_chain = self._prefilter_by_metadata(filters, chains)
+
         candidate_for_feedback: Dict[int, List[str]] = {}
         for c in chains:
             ids0 = self._intersect_ids((ids_by_chain or {}).get(c), (metadata_ids_by_chain or {}).get(c))
             if ids0:
                 candidate_for_feedback[c] = ids0
+
         feedback_ids_by_chain, feedback_stats_by_id = self._prefilter_by_feedback(
             filters, chains, candidate_for_feedback if candidate_for_feedback else None
         )
@@ -924,33 +886,6 @@ class AgentIndexer:
         if field == "feedbackCount":
             order_by = "totalFeedback"
 
-        # Fetch one page per chain (page_size + 1) and merge client-side.
-        chain_results: List[Dict[str, Any]] = []
-        successful: List[int] = []
-        failed: List[int] = []
-
-        for chain_id in chains:
-            client = self._get_subgraph_client_for_chain(chain_id)
-            if client is None:
-                failed.append(chain_id)
-                chain_results.append({"chainId": chain_id, "items": []})
-                continue
-            try:
-                ids0 = self._intersect_ids((ids_by_chain or {}).get(chain_id), (metadata_ids_by_chain or {}).get(chain_id))
-                ids = self._intersect_ids(ids0, (feedback_ids_by_chain or {}).get(chain_id))
-                if ids is not None and len(ids) == 0:
-                    successful.append(chain_id)
-                    chain_results.append({"chainId": chain_id, "items": []})
-                    continue
-                where = self._build_where_v2(filters, ids)
-                agents = client.get_agents_v2(where=where, first=page_size + 1, skip=per_chain_skip.get(chain_id, 0), order_by=order_by, order_direction=direction)
-                successful.append(chain_id)
-                chain_results.append({"chainId": chain_id, "items": agents})
-            except Exception:
-                failed.append(chain_id)
-                chain_results.append({"chainId": chain_id, "items": []})
-
-        # Convert to AgentSummary objects and k-way merge using the same sort field.
         def to_summary(agent_data: Dict[str, Any]) -> AgentSummary:
             reg_file = agent_data.get("registrationFile") or {}
             if not isinstance(reg_file, dict):
@@ -991,73 +926,46 @@ class AgentIndexer:
                 extras={},
             )
 
-
-
-
+        batch = 1000
+        out: List[AgentSummary] = []
+        for chain_id in chains:
+            client = self._get_subgraph_client_for_chain(chain_id)
+            if client is None:
+                continue
+            ids0 = self._intersect_ids((ids_by_chain or {}).get(chain_id), (metadata_ids_by_chain or {}).get(chain_id))
+            ids = self._intersect_ids(ids0, (feedback_ids_by_chain or {}).get(chain_id))
+            if ids is not None and len(ids) == 0:
+                continue
+            where = self._build_where_v2(filters, ids)
+
+            skip = 0
+            while True:
+                agents = client.get_agents_v2(where=where, first=batch, skip=skip, order_by=order_by, order_direction=direction)
+                for a in agents:
+                    out.append(to_summary(a))
+                if len(agents) < batch:
+                    break
+                skip += batch
 
-        def key(agent):
-            v = getattr(agent, field, None)
-            if v is None:
-                return 0
-            return v
+        reverse = direction == "desc"
 
-        def compare(a, b):
-            # return True if a should come before b
+        def sort_key(a: AgentSummary):
             if field == "name":
-
-
-
+                return (a.name or "").lower()
+            v = getattr(a, field, None)
+            if v is None and field == "totalFeedback":
+                v = getattr(a, "feedbackCount", None)
+            if v is None:
+                return 0.0
             try:
-                av = float(key(a))
-                bv = float(key(b))
+                return float(v)
             except Exception:
-                av = 0.0
-                bv = 0.0
-            return av < bv if direction == "asc" else av > bv
-
-        merged: List[AgentSummary] = []
-        while len(merged) < page_size:
-            best_chain: Optional[int] = None
-            best_item: Optional[AgentSummary] = None
-            for c in chains:
-                idx = indices[c]
-                arr = per_chain_lists.get(c, [])
-                if idx >= len(arr):
-                    continue
-                cand = arr[idx]
-                if best_item is None or compare(cand, best_item):
-                    best_item = cand
-                    best_chain = c
-            if best_item is None or best_chain is None:
-                break
-            merged.append(best_item)
-            indices[best_chain] += 1
-            consumed[best_chain] += 1
+                return 0.0
 
-        has_more = any(
-            (indices[c] < len(per_chain_lists.get(c, []))) or (len(per_chain_lists.get(c, [])) > page_size)
-            for c in chains
-        )
-        next_cursor = None
-        if has_more:
-            next_skips = {c: per_chain_skip.get(c, 0) + consumed.get(c, 0) for c in chains}
-            next_cursor = self._encode_per_chain_cursor(next_skips)
-
-        return {
-            "items": merged,
-            "nextCursor": next_cursor,
-            "meta": {
-                "chains": chains,
-                "successfulChains": successful,
-                "failedChains": failed,
-                "totalResults": 0,
-            },
-        }
+        return sorted(out, key=sort_key, reverse=reverse)
 
-    def _search_unified_with_keyword(self, filters: SearchFilters, options: SearchOptions) ->
+    def _search_unified_with_keyword(self, filters: SearchFilters, options: SearchOptions) -> List[AgentSummary]:
         field, direction = self._parse_sort(options.sort, True)
-        page_size = options.pageSize or 50
-        offset = self._parse_cursor_offset(options.cursor)
         chains = self._resolve_chains(filters, True)
 
         client = SemanticSearchClient()
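Since results are no longer paginated server-side, the final ordering comes entirely from the `sort_key` added above: `name` sorts case-insensitively, `totalFeedback` falls back to the legacy `feedbackCount` field, and anything missing or unparseable sorts as `0.0`. A standalone restatement with a minimal stand-in type (not the real `AgentSummary`):

```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class AgentLike:  # stand-in carrying only the fields the sort touches
    name: Optional[str] = None
    totalFeedback: Optional[float] = None
    feedbackCount: Optional[float] = None

field, direction = "totalFeedback", "desc"

def sort_key(a: AgentLike):
    if field == "name":
        return (a.name or "").lower()
    v = getattr(a, field, None)
    if v is None and field == "totalFeedback":
        v = getattr(a, "feedbackCount", None)  # legacy-field fallback
    if v is None:
        return 0.0
    try:
        return float(v)
    except Exception:
        return 0.0

agents = [AgentLike("a", None, 3), AgentLike("b", 10, None), AgentLike("c")]
ordered = sorted(agents, key=sort_key, reverse=(direction == "desc"))
print([a.name for a in ordered])  # ['b', 'a', 'c']
```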
@@ -1076,8 +984,6 @@ class AgentIndexer:
             score_by_id[r.agentId] = r.score
 
         fetched: List[AgentSummary] = []
-        successful: List[int] = []
-        failed: List[int] = []
 
         metadata_ids_by_chain = self._prefilter_by_metadata(filters, chains)
         feedback_ids_by_chain, feedback_stats_by_id = self._prefilter_by_feedback(filters, chains, ids_by_chain)
@@ -1088,11 +994,8 @@ class AgentIndexer:
             sub = self._get_subgraph_client_for_chain(chain_id)
             ids = ids_by_chain.get(chain_id, [])
             if sub is None:
-                if ids:
-                    failed.append(chain_id)
                 continue
             try:
-                successful.append(chain_id)
                 for i in range(0, len(ids), chunk_size):
                     chunk = ids[i : i + chunk_size]
                     ids2 = self._intersect_ids(chunk, (metadata_ids_by_chain or {}).get(chain_id))
@@ -1146,7 +1049,7 @@ class AgentIndexer:
                         )
                     )
             except Exception:
-                failed.append(chain_id)
+                continue
 
         # Default keyword sorting: semanticScore desc, unless overridden.
         sort_field = field if options.sort and len(options.sort) > 0 else "semanticScore"
@@ -1164,508 +1067,11 @@ class AgentIndexer:
                 return 0
 
         fetched.sort(key=sort_key, reverse=(sort_dir == "desc"))
-
-        next_cursor = str(offset + page_size) if len(fetched) > offset + page_size else None
-
-        return {
-            "items": page,
-            "nextCursor": next_cursor,
-            "meta": {
-                "chains": chains,
-                "successfulChains": successful,
-                "failedChains": failed,
-                "totalResults": len(fetched),
-            },
-        }
-
-    async def _search_agents_across_chains(
-        self,
-        params: SearchFilters,
-        sort: List[str],
-        page_size: int,
-        cursor: Optional[str] = None,
-        timeout: float = 30.0,
-    ) -> Dict[str, Any]:
-        """
-        Search agents across multiple chains in parallel.
-
-        This method is called when params.chains contains 2+ chain IDs.
-        It executes one subgraph query per chain, all in parallel using asyncio.
-
-        Args:
-            params: Search parameters
-            sort: Sort specification
-            page_size: Number of results per page
-            cursor: Pagination cursor
-            timeout: Maximum time in seconds for all chain queries (default: 30.0)
-
-        Returns:
-            {
-                "items": [agent_dict, ...],
-                "nextCursor": str or None,
-                "meta": {
-                    "chains": [chainId, ...],
-                    "successfulChains": [chainId, ...],
-                    "failedChains": [chainId, ...],
-                    "totalResults": int,
-                    "timing": {"totalMs": int}
-                }
-            }
-        """
-        import time
-        start_time = time.time()
-        # Step 1: Determine which chains to query
-        chains_to_query = params.chains if params.chains else self._get_all_configured_chains()
-
-        if not chains_to_query or len(chains_to_query) == 0:
-            logger.warning("No chains specified or configured for multi-chain query")
-            return {"items": [], "nextCursor": None, "meta": {"chains": [], "successfulChains": [], "failedChains": []}}
-
-        # Step 2: Parse pagination cursor (if any)
-        chain_cursors = self._parse_multi_chain_cursor(cursor)
-        global_offset = chain_cursors.get("_global_offset", 0)
-
-        # Step 3: Define async function for querying a single chain
-        async def query_single_chain(chain_id: int) -> Dict[str, Any]:
-            """Query one chain and return its results with metadata."""
-            try:
-                # Get subgraph client for this chain
-                subgraph_client = self._get_subgraph_client_for_chain(chain_id)
-
-                if subgraph_client is None:
-                    logger.warning(f"No subgraph client available for chain {chain_id}")
-                    return {
-                        "chainId": chain_id,
-                        "status": "unavailable",
-                        "agents": [],
-                        "error": f"No subgraph configured for chain {chain_id}"
-                    }
-
-                # Build WHERE clause for this chain's query
-                # (reuse existing logic from _search_agents_via_subgraph)
-                where_clause = {}
-                reg_file_where = {}
-
-                if params.name is not None:
-                    reg_file_where["name_contains"] = params.name
-                if params.active is not None:
-                    reg_file_where["active"] = params.active
-                if params.x402support is not None:
-                    reg_file_where["x402support"] = params.x402support
-                if params.hasMCP is not None:
-                    if params.hasMCP:
-                        reg_file_where["mcpEndpoint_not"] = None
-                    else:
-                        reg_file_where["mcpEndpoint"] = None
-                if params.hasA2A is not None:
-                    if params.hasA2A:
-                        reg_file_where["a2aEndpoint_not"] = None
-                    else:
-                        reg_file_where["a2aEndpoint"] = None
-                if params.ensContains is not None:
-                    reg_file_where["ens_contains_nocase"] = params.ensContains
-                if params.didContains is not None:
-                    reg_file_where["did_contains_nocase"] = params.didContains
-                if params.walletAddress is not None:
-                    reg_file_where["agentWallet"] = params.walletAddress
-
-                if reg_file_where:
-                    where_clause["registrationFile_"] = reg_file_where
-
-                # Owner filtering
-                if params.owners is not None and len(params.owners) > 0:
-                    normalized_owners = [owner.lower() for owner in params.owners]
-                    if len(normalized_owners) == 1:
-                        where_clause["owner"] = normalized_owners[0]
-                    else:
-                        where_clause["owner_in"] = normalized_owners
-
-                # Operator filtering
-                if params.operators is not None and len(params.operators) > 0:
-                    normalized_operators = [op.lower() for op in params.operators]
-                    where_clause["operators_contains"] = normalized_operators
-
-                # Get pagination offset for this chain (not used in multi-chain, fetch all)
-                skip = 0
-
-                # Execute subgraph query
-                agents = subgraph_client.get_agents(
-                    where=where_clause if where_clause else None,
-                    first=page_size * 3,  # Fetch extra to allow for filtering/sorting
-                    skip=skip,
-                    order_by=self._extract_order_by(sort),
-                    order_direction=self._extract_order_direction(sort)
-                )
-
-                logger.info(f"Chain {chain_id}: fetched {len(agents)} agents")
-
-                return {
-                    "chainId": chain_id,
-                    "status": "success",
-                    "agents": agents,
-                    "count": len(agents),
-                }
-
-            except Exception as e:
-                logger.error(f"Error querying chain {chain_id}: {e}", exc_info=True)
-                return {
-                    "chainId": chain_id,
-                    "status": "error",
-                    "agents": [],
-                    "error": str(e)
-                }
-
-        # Step 4: Execute all chain queries in parallel with timeout
-        logger.info(f"Querying {len(chains_to_query)} chains in parallel: {chains_to_query}")
-        tasks = [query_single_chain(chain_id) for chain_id in chains_to_query]
+        return fetched
 
-        try:
-            chain_results = await asyncio.wait_for(
-                asyncio.gather(*tasks),
-                timeout=timeout
-            )
-        except asyncio.TimeoutError:
-            logger.error(f"Multi-chain query timed out after {timeout}s")
-            # Collect results from completed tasks
-            chain_results = []
-            for task in tasks:
-                if task.done():
-                    try:
-                        chain_results.append(task.result())
-                    except Exception as e:
-                        logger.warning(f"Task failed: {e}")
-                else:
-                    # Task didn't complete - mark as timeout
-                    chain_results.append({
-                        "chainId": None,
-                        "status": "timeout",
-                        "agents": [],
-                        "error": f"Query timed out after {timeout}s"
-                    })
-
-        # Step 5: Extract successful results and track failures
-        all_agents = []
-        successful_chains = []
-        failed_chains = []
-
-        for result in chain_results:
-            chain_id = result["chainId"]
-
-            if result["status"] == "success":
-                successful_chains.append(chain_id)
-                all_agents.extend(result["agents"])
-            else:
-                failed_chains.append(chain_id)
-                logger.warning(
-                    f"Chain {chain_id} query failed: {result.get('error', 'Unknown error')}"
-                )
-
-        logger.info(f"Multi-chain query: {len(successful_chains)} successful, {len(failed_chains)} failed, {len(all_agents)} total agents")
-
-        # If ALL chains failed, raise error
-        if len(successful_chains) == 0:
-            raise ConnectionError(
-                f"All chains failed: {', '.join(str(c) for c in failed_chains)}"
-            )
-
-        # Step 6: Apply cross-chain filtering (for fields not supported by subgraph WHERE clause)
-        filtered_agents = self._apply_cross_chain_filters(all_agents, params)
-        logger.info(f"After cross-chain filters: {len(filtered_agents)} agents")
-
-        # Step 7: Deduplicate if requested
-        deduplicated_agents = self._deduplicate_agents_cross_chain(filtered_agents, params)
-        logger.info(f"After deduplication: {len(deduplicated_agents)} agents")
-
-        # Step 8: Sort across chains
-        sorted_agents = self._sort_agents_cross_chain(deduplicated_agents, sort)
-        logger.info(f"After sorting: {len(sorted_agents)} agents")
+    # Pagination removed: legacy cursor-based multi-chain agent search deleted.
 
-
-        start_idx = global_offset
-        paginated_agents = sorted_agents[start_idx:start_idx + page_size]
-
-        # Step 10: Convert to result format (keep as dicts, SDK will convert to AgentSummary)
-        results = []
-        for agent_data in paginated_agents:
-            reg_file = agent_data.get('registrationFile') or {}
-            if not isinstance(reg_file, dict):
-                reg_file = {}
-
-            result_agent = {
-                "agentId": agent_data.get('id'),
-                "chainId": agent_data.get('chainId'),
-                "name": reg_file.get('name', f"Agent {agent_data.get('agentId')}"),
-                "description": reg_file.get('description', ''),
-                "image": reg_file.get('image'),
-                "owner": agent_data.get('owner'),
-                "operators": agent_data.get('operators', []),
-                "mcp": reg_file.get('mcpEndpoint') is not None,
-                "a2a": reg_file.get('a2aEndpoint') is not None,
-                "ens": reg_file.get('ens'),
-                "did": reg_file.get('did'),
-                "walletAddress": reg_file.get('agentWallet'),
-                "supportedTrusts": reg_file.get('supportedTrusts', []),
-                "a2aSkills": reg_file.get('a2aSkills', []),
-                "mcpTools": reg_file.get('mcpTools', []),
-                "mcpPrompts": reg_file.get('mcpPrompts', []),
-                "mcpResources": reg_file.get('mcpResources', []),
-                "active": reg_file.get('active', True),
-                "x402support": reg_file.get('x402Support', reg_file.get('x402support', False)),
-                "totalFeedback": agent_data.get('totalFeedback', 0),
-                "lastActivity": agent_data.get('lastActivity'),
-                "updatedAt": agent_data.get('updatedAt'),
-                "extras": {}
-            }
-
-            # Add deployedOn if deduplication was used
-            if 'deployedOn' in agent_data:
-                result_agent['extras']['deployedOn'] = agent_data['deployedOn']
-
-            results.append(result_agent)
-
-        # Step 11: Calculate next cursor
-        next_cursor = None
-        if len(sorted_agents) > start_idx + page_size:
-            # More results available
-            next_cursor = self._create_multi_chain_cursor(
-                global_offset=start_idx + page_size
-            )
-
-        # Step 12: Build response with metadata
-        query_time = time.time() - start_time
-
-        return {
-            "items": results,
-            "nextCursor": next_cursor,
-            "meta": {
-                "chains": chains_to_query,
-                "successfulChains": successful_chains,
-                "failedChains": failed_chains,
-                "totalResults": len(sorted_agents),
-                "pageResults": len(results),
-                "timing": {
-                    "totalMs": int(query_time * 1000),
-                    "averagePerChainMs": int(query_time * 1000 / len(chains_to_query)) if chains_to_query else 0,
-                }
-            }
-        }
-
-    def _search_agents_via_subgraph(
-        self,
-        params: SearchFilters,
-        sort: List[str],
-        page_size: int,
-        cursor: Optional[str] = None,
-    ) -> Dict[str, Any]:
-        """Search for agents using the subgraph."""
-        # Build subgraph query filters
-        where_clause = {}
-        reg_file_where = {}
-
-        if params.name is not None:
-            reg_file_where["name_contains"] = params.name
-        if params.active is not None:
-            reg_file_where["active"] = params.active
-        if params.x402support is not None:
-            reg_file_where["x402support"] = params.x402support
-        if params.hasMCP is not None:
-            if params.hasMCP:
-                reg_file_where["mcpEndpoint_not"] = None
-            else:
-                reg_file_where["mcpEndpoint"] = None
-        if params.hasA2A is not None:
-            if params.hasA2A:
-                reg_file_where["a2aEndpoint_not"] = None
-            else:
-                reg_file_where["a2aEndpoint"] = None
-        if params.ensContains is not None:
-            reg_file_where["ens_contains_nocase"] = params.ensContains
-        if params.didContains is not None:
-            reg_file_where["did_contains_nocase"] = params.didContains
-        if params.walletAddress is not None:
-            reg_file_where["agentWallet"] = params.walletAddress
-
-        if reg_file_where:
-            where_clause["registrationFile_"] = reg_file_where
-
-        # Owner filtering
-        if params.owners is not None and len(params.owners) > 0:
-            # Normalize addresses to lowercase for case-insensitive matching
-            normalized_owners = [owner.lower() for owner in params.owners]
-            if len(normalized_owners) == 1:
-                where_clause["owner"] = normalized_owners[0]
-            else:
-                where_clause["owner_in"] = normalized_owners
-
-        # Operator filtering
-        if params.operators is not None and len(params.operators) > 0:
-            # Normalize addresses to lowercase for case-insensitive matching
-            normalized_operators = [op.lower() for op in params.operators]
-            # For operators (array field), use contains to check if any operator matches
-            where_clause["operators_contains"] = normalized_operators
-
-        # Calculate pagination
-        skip = 0
-        if cursor:
-            try:
-                skip = int(cursor)
-            except ValueError:
-                skip = 0
-
-        # Determine sort
-        order_by = "createdAt"
-        order_direction = "desc"
-        if sort and len(sort) > 0:
-            sort_field = sort[0].split(":")
-            if len(sort_field) >= 1:
-                order_by = sort_field[0]
-            if len(sort_field) >= 2:
-                order_direction = sort_field[1]
-
-        try:
-            agents = self.subgraph_client.get_agents(
-                where=where_clause if where_clause else None,
-                first=page_size,
-                skip=skip,
-                order_by=order_by,
-                order_direction=order_direction
-            )
-
-            results = []
-            for agent in agents:
-                reg_file = agent.get('registrationFile') or {}
-                # Ensure reg_file is a dict
-                if not isinstance(reg_file, dict):
-                    reg_file = {}
-
-                agent_data = {
-                    "agentId": agent.get('id'),
-                    "chainId": agent.get('chainId'),
-                    "name": reg_file.get('name', f"Agent {agent.get('agentId')}"),
-                    "description": reg_file.get('description', ''),
-                    "image": reg_file.get('image'),
-                    "owner": agent.get('owner'),
-                    "operators": agent.get('operators', []),
-                    "mcp": reg_file.get('mcpEndpoint') is not None,
-                    "a2a": reg_file.get('a2aEndpoint') is not None,
-                    "ens": reg_file.get('ens'),
-                    "did": reg_file.get('did'),
-                    "walletAddress": reg_file.get('agentWallet'),
-                    "supportedTrusts": reg_file.get('supportedTrusts', []),
-                    "a2aSkills": reg_file.get('a2aSkills', []),
-                    "mcpTools": reg_file.get('mcpTools', []),
-                    "mcpPrompts": reg_file.get('mcpPrompts', []),
-                    "mcpResources": reg_file.get('mcpResources', []),
-                    "active": reg_file.get('active', True),
-                    "x402support": reg_file.get('x402Support', reg_file.get('x402support', False)),
-                    "totalFeedback": agent.get('totalFeedback', 0),
-                    "lastActivity": agent.get('lastActivity'),
-                    "updatedAt": agent.get('updatedAt'),
-                    "extras": {}
-                }
-
-                if params.chains is not None:
-                    if agent_data["chainId"] not in params.chains:
-                        continue
-                if params.supportedTrust is not None:
-                    if not any(trust in agent_data["supportedTrusts"] for trust in params.supportedTrust):
-                        continue
-
-                results.append(agent_data)
-
-            next_cursor = str(skip + len(results)) if len(results) == page_size else None
-            return {"items": results, "nextCursor": next_cursor}
-
-        except Exception as e:
-            logger.warning(f"Subgraph search failed: {e}")
-            return {"items": [], "nextCursor": None}
-
-    def _search_agents_via_blockchain(
-        self,
-        params: SearchFilters,
-        sort: List[str],
-        page_size: int,
-        cursor: Optional[str] = None,
-    ) -> Dict[str, Any]:
-        """Search for agents by querying the blockchain (fallback)."""
-        return {"items": [], "nextCursor": None}
-
-    def _apply_filters(self, agents: List[Dict[str, Any]], params: SearchFilters) -> List[Dict[str, Any]]:
-        """Apply search filters to agents."""
-        filtered = agents
-
-        if params.chains is not None:
-            filtered = [a for a in filtered if a.get("chainId") in params.chains]
-
-        if params.name is not None:
-            filtered = [a for a in filtered if params.name.lower() in a.get("name", "").lower()]
-
-        if params.description is not None:
-            # This would use semantic search with embeddings
-            filtered = [a for a in filtered if params.description.lower() in a.get("description", "").lower()]
-
-        if params.owners is not None:
-            filtered = [a for a in filtered if any(owner in params.owners for owner in a.get("owners", []))]
-
-        if params.operators is not None:
-            filtered = [a for a in filtered if any(op in params.operators for op in a.get("operators", []))]
-
-        if getattr(params, "hasMCP", None) is not None:
-            has = params.hasMCP
-            filtered = [a for a in filtered if bool(a.get("mcp")) == bool(has)]
-
-        if getattr(params, "hasA2A", None) is not None:
-            has = params.hasA2A
-            filtered = [a for a in filtered if bool(a.get("a2a")) == bool(has)]
-
-        if getattr(params, "ensContains", None) is not None:
-            needle = (params.ensContains or "").lower()
-            filtered = [a for a in filtered if needle in (a.get("ens") or "").lower()]
-
-        if getattr(params, "didContains", None) is not None:
-            needle = (params.didContains or "").lower()
-            filtered = [a for a in filtered if needle in (a.get("did") or "").lower()]
-
-        if params.walletAddress is not None:
-            filtered = [a for a in filtered if a.get("walletAddress") == params.walletAddress]
-
-        if params.supportedTrust is not None:
-            filtered = [a for a in filtered if any(trust in params.supportedTrust for trust in a.get("supportedTrusts", []))]
-
-        if params.a2aSkills is not None:
-            filtered = [a for a in filtered if any(skill in params.a2aSkills for skill in a.get("a2aSkills", []))]
-
-        if params.mcpTools is not None:
-            filtered = [a for a in filtered if any(tool in params.mcpTools for tool in a.get("mcpTools", []))]
-
-        if params.mcpPrompts is not None:
-            filtered = [a for a in filtered if any(prompt in params.mcpPrompts for prompt in a.get("mcpPrompts", []))]
-
-        if params.mcpResources is not None:
-            filtered = [a for a in filtered if any(resource in params.mcpResources for resource in a.get("mcpResources", []))]
-
-        if params.active is not None:
-            filtered = [a for a in filtered if a.get("active") == params.active]
-
-        if params.x402support is not None:
-            filtered = [a for a in filtered if a.get("x402support") == params.x402support]
-
-        return filtered
-
-    def _apply_sorting(self, agents: List[AgentSummary], sort: List[str]) -> List[AgentSummary]:
-        """Apply sorting to agents."""
-        def sort_key(agent):
-            key_values = []
-            for sort_field in sort:
-                field, direction = sort_field.split(":", 1)
-                if hasattr(agent, field):
-                    value = getattr(agent, field)
-                    if direction == "desc":
-                        value = -value if isinstance(value, (int, float)) else value
-                    key_values.append(value)
-            return key_values
-
-        return sorted(agents, key=sort_key)
+    # Pagination removed: legacy cursor-based agent search helpers deleted.
 
     def get_feedback(
         self,
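With both the response envelope and the parallel multi-chain search gone, paging is now a caller-side slice over the full result list. Sketch only; `indexer`, `filters`, and `options` are assumed from context, and the method name `search_agents` is illustrative:

```python
results = indexer.search_agents(filters, options)  # complete, pre-sorted list

page_size = 50
page_1 = results[:page_size]
page_2 = results[page_size:2 * page_size]
has_more = len(results) > 2 * page_size  # replaces the old nextCursor check
```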
@@ -1806,8 +1212,6 @@ class AgentIndexer:
         minValue: Optional[float] = None,
         maxValue: Optional[float] = None,
         include_revoked: bool = False,
-        first: int = 100,
-        skip: int = 0,
         agents: Optional[List[AgentId]] = None,
     ) -> List[Feedback]:
         """Search feedback via subgraph.
@@ -1876,8 +1280,6 @@ class AgentIndexer:
             minValue=minValue,
             maxValue=maxValue,
             include_revoked=include_revoked,
-            first=first,
-            skip=skip,
             subgraph_client=subgraph_client,
         )
 
@@ -1898,8 +1300,6 @@ class AgentIndexer:
         minValue: Optional[float],
         maxValue: Optional[float],
         include_revoked: bool,
-        first: int,
-        skip: int,
         subgraph_client: Optional[Any] = None,
     ) -> List[Feedback]:
         """Search feedback using subgraph."""
@@ -1928,34 +1328,39 @@ class AgentIndexer:
             includeRevoked=include_revoked
         )
 
-        # Query subgraph
-        feedbacks_data = client.search_feedback(
-            params=params,
-            first=first,
-            skip=skip,
-            order_by="createdAt",
-            order_direction="desc"
-        )
-
-        # Map to Feedback objects
         feedbacks = []
-        for fb_data in feedbacks_data:
-            # Parse agentId from feedback ID
-            feedback_id = fb_data['id']
-            parts = feedback_id.split(':')
-            if len(parts) >= 2:
-                agent_id_str = f"{parts[0]}:{parts[1]}"
-                client_addr = parts[2] if len(parts) > 2 else ""
-                feedback_idx = int(parts[3]) if len(parts) > 3 else 1
-            else:
-                agent_id_str = feedback_id
-                client_addr = ""
-                feedback_idx = 1
-
-            feedback = self._map_subgraph_feedback_to_model(
-                fb_data, agent_id_str, client_addr, feedback_idx
+        batch = 1000
+        skip = 0
+        while True:
+            feedbacks_data = client.search_feedback(
+                params=params,
+                first=batch,
+                skip=skip,
+                order_by="createdAt",
+                order_direction="desc",
             )
-            feedbacks.append(feedback)
+
+            for fb_data in feedbacks_data:
+                # Parse agentId from feedback ID
+                feedback_id = fb_data['id']
+                parts = feedback_id.split(':')
+                if len(parts) >= 2:
+                    agent_id_str = f"{parts[0]}:{parts[1]}"
+                    client_addr = parts[2] if len(parts) > 2 else ""
+                    feedback_idx = int(parts[3]) if len(parts) > 3 else 1
+                else:
+                    agent_id_str = feedback_id
+                    client_addr = ""
+                    feedback_idx = 1
+
+                feedback = self._map_subgraph_feedback_to_model(
+                    fb_data, agent_id_str, client_addr, feedback_idx
+                )
+                feedbacks.append(feedback)
+
+            if len(feedbacks_data) < batch:
+                break
+            skip += batch
 
         return feedbacks
 
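The parsing added above splits subgraph feedback IDs on `:`; the first two segments form the agent id, with optional client-address and index segments after it. A standalone restatement (the sample ID is illustrative):

```python
def parse_feedback_id(feedback_id: str):
    parts = feedback_id.split(':')
    if len(parts) >= 2:
        agent_id = f"{parts[0]}:{parts[1]}"            # first two segments: the agent id
        client_addr = parts[2] if len(parts) > 2 else ""
        feedback_idx = int(parts[3]) if len(parts) > 3 else 1
    else:
        agent_id, client_addr, feedback_idx = feedback_id, "", 1
    return agent_id, client_addr, feedback_idx

print(parse_feedback_id("84532:42:0xabc:3"))  # ('84532:42', '0xabc', 3)
```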
@@ -2010,15 +1415,12 @@ class AgentIndexer:
         since: Optional[Timestamp] = None,
         until: Optional[Timestamp] = None,
         sort: List[str] = None,
-        page_size: int = 100,
-        cursor: Optional[str] = None,
     ) -> Dict[str, Any]:
         """Get reputation summary for an agent."""
         # This would aggregate feedback data
         # For now, return empty result
         return {
             "groups": [],
-            "nextCursor": None
         }
 
     def get_reputation_map(
@@ -2420,55 +1822,7 @@ class AgentIndexer:
 
         return sorted(agents, key=get_sort_key, reverse=reverse)
 
-    def _parse_multi_chain_cursor(self, cursor: Optional[str]) -> Dict[str, int]:
-        """
-        Parse multi-chain cursor into per-chain offsets.
-
-        Cursor format (JSON):
-        {
-            "11155111": 50,        # Ethereum Sepolia offset
-            "84532": 30,           # Base Sepolia offset
-            "_global_offset": 100  # Total items returned so far
-        }
-
-        Returns:
-            Dict mapping chainId → offset (default 0)
-        """
-        if not cursor:
-            return {}
-
-        try:
-            cursor_data = json.loads(cursor)
-
-            # Validate format
-            if not isinstance(cursor_data, dict):
-                logger.warning(f"Invalid cursor format: {cursor}, using empty")
-                return {}
-
-            return cursor_data
-
-        except json.JSONDecodeError as e:
-            logger.warning(f"Failed to parse cursor: {e}, using empty")
-            return {}
-
-    def _create_multi_chain_cursor(
-        self,
-        global_offset: int,
-    ) -> str:
-        """
-        Create multi-chain cursor for next page.
-
-        Args:
-            global_offset: Total items returned so far
-
-        Returns:
-            JSON string cursor
-        """
-        cursor_data = {
-            "_global_offset": global_offset
-        }
-
-        return json.dumps(cursor_data)
+    # Pagination removed: multi-chain cursor helpers deleted.
 
     def _extract_order_by(self, sort: List[str]) -> str:
         """Extract order_by field from sort specification."""