agent0-sdk 1.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent0_sdk/__init__.py +57 -0
- agent0_sdk/core/agent.py +1187 -0
- agent0_sdk/core/contracts.py +547 -0
- agent0_sdk/core/endpoint_crawler.py +330 -0
- agent0_sdk/core/feedback_manager.py +1052 -0
- agent0_sdk/core/indexer.py +1837 -0
- agent0_sdk/core/ipfs_client.py +357 -0
- agent0_sdk/core/models.py +303 -0
- agent0_sdk/core/oasf_validator.py +98 -0
- agent0_sdk/core/sdk.py +1005 -0
- agent0_sdk/core/subgraph_client.py +853 -0
- agent0_sdk/core/transaction_handle.py +71 -0
- agent0_sdk/core/value_encoding.py +91 -0
- agent0_sdk/core/web3_client.py +399 -0
- agent0_sdk/taxonomies/all_domains.json +1565 -0
- agent0_sdk/taxonomies/all_skills.json +1030 -0
- agent0_sdk-1.4.0.dist-info/METADATA +403 -0
- agent0_sdk-1.4.0.dist-info/RECORD +21 -0
- agent0_sdk-1.4.0.dist-info/WHEEL +5 -0
- agent0_sdk-1.4.0.dist-info/licenses/LICENSE +22 -0
- agent0_sdk-1.4.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,1052 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Feedback management system for Agent0 SDK.
|
|
3
|
+
"""
|
|
4
|
+
|
|
5
|
+
from __future__ import annotations
|
|
6
|
+
|
|
7
|
+
import json
|
|
8
|
+
import logging
|
|
9
|
+
import time
|
|
10
|
+
from typing import Any, Dict, List, Optional, Tuple, Union
|
|
11
|
+
from datetime import datetime, timezone
|
|
12
|
+
|
|
13
|
+
from .models import (
|
|
14
|
+
AgentId, Address, URI, Timestamp, IdemKey,
|
|
15
|
+
Feedback, TrustModel, SearchFeedbackParams
|
|
16
|
+
)
|
|
17
|
+
from .web3_client import Web3Client
|
|
18
|
+
from .ipfs_client import IPFSClient
|
|
19
|
+
from .value_encoding import encode_feedback_value, decode_feedback_value
|
|
20
|
+
from .transaction_handle import TransactionHandle
|
|
21
|
+
|
|
22
|
+
logger = logging.getLogger(__name__)
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class FeedbackManager:
|
|
26
|
+
"""Manages feedback operations for the Agent0 SDK."""
|
|
27
|
+
|
|
28
|
+
def __init__(
|
|
29
|
+
self,
|
|
30
|
+
web3_client: Web3Client,
|
|
31
|
+
ipfs_client: Optional[IPFSClient] = None,
|
|
32
|
+
reputation_registry: Any = None,
|
|
33
|
+
identity_registry: Any = None,
|
|
34
|
+
subgraph_client: Optional[Any] = None,
|
|
35
|
+
indexer: Optional[Any] = None,
|
|
36
|
+
):
|
|
37
|
+
"""Initialize feedback manager."""
|
|
38
|
+
self.web3_client = web3_client
|
|
39
|
+
self.ipfs_client = ipfs_client
|
|
40
|
+
self.reputation_registry = reputation_registry
|
|
41
|
+
self.identity_registry = identity_registry
|
|
42
|
+
self.subgraph_client = subgraph_client
|
|
43
|
+
self.indexer = indexer
|
|
44
|
+
|
|
45
|
+
def prepareFeedbackFile(self, input: Dict[str, Any]) -> Dict[str, Any]:
    """Prepare an off-chain feedback file payload (no on-chain fields).

    This intentionally does NOT attempt to represent on-chain fields like:
    value/tag1/tag2/endpoint (on-chain value), or registry-derived fields.

    Args:
        input: Raw feedback-file fields from the caller. (The parameter is
            named ``input`` for backward compatibility even though it
            shadows the builtin.)

    Returns:
        A shallow copy of ``input`` with ``None`` values removed and the
        optional ``endpoint``/``domain`` fields coerced to ``str``.

    Raises:
        ValueError: If ``input`` is ``None``.
        TypeError: If ``input`` is not a dict.
    """
    if input is None:
        raise ValueError("prepareFeedbackFile input cannot be None")
    if not isinstance(input, dict):
        raise TypeError(f"prepareFeedbackFile input must be a dict, got {type(input)}")

    # Shallow copy and strip None values.
    out: Dict[str, Any] = {k: v for k, v in input.items() if v is not None}

    # Minimal normalization for known optional fields. None values were
    # already stripped above, so only a string-type check is needed here
    # (the original repeated a dead `is not None` test for each key).
    for key in ("endpoint", "domain"):
        if key in out and not isinstance(out[key], str):
            out[key] = str(out[key])

    return out
|
|
68
|
+
|
|
69
|
+
def giveFeedback(
    self,
    agentId: AgentId,
    value: Union[int, float, str],
    tag1: Optional[str] = None,
    tag2: Optional[str] = None,
    endpoint: Optional[str] = None,
    feedbackFile: Optional[Dict[str, Any]] = None,
) -> TransactionHandle[Feedback]:
    """Give feedback (maps 8004 endpoint).

    Submits a ``giveFeedback`` transaction to the reputation registry on
    the agent's chain, optionally storing an off-chain feedback file on
    IPFS first (URI + keccak256 hash are anchored on-chain).

    Args:
        agentId: ``eip155:chainId:tokenId``, ``chainId:tokenId``, or a bare
            token id (then the connected chain is assumed).
        value: Feedback value; encoded via ``encode_feedback_value`` into
            (raw int, decimals).
        tag1: Optional first on-chain tag (empty string when omitted).
        tag2: Optional second on-chain tag (empty string when omitted).
        endpoint: Optional endpoint string; takes precedence over any
            ``endpoint`` key inside ``feedbackFile``.
        feedbackFile: Optional off-chain payload; requires an IPFS client.

    Returns:
        A TransactionHandle that builds the local ``Feedback`` object once
        the transaction is confirmed.

    Raises:
        ValueError: On chain mismatch, malformed agentId, index lookup
            failure, IPFS failure, or transaction submission failure.
        TypeError: If ``feedbackFile`` is provided but not a dict.
    """
    # Parse agentId into (chainId, tokenId)
    agent_chain_id: Optional[int] = None
    tokenId: int
    if isinstance(agentId, str) and agentId.startswith("eip155:"):
        parts = agentId.split(":")
        if len(parts) != 3:
            raise ValueError(f"Invalid AgentId (expected eip155:chainId:tokenId): {agentId}")
        agent_chain_id = int(parts[1])
        tokenId = int(parts[2])
    elif isinstance(agentId, str) and ":" in agentId:
        parts = agentId.split(":")
        if len(parts) != 2:
            raise ValueError(f"Invalid AgentId (expected chainId:tokenId): {agentId}")
        agent_chain_id = int(parts[0])
        tokenId = int(parts[1])
    else:
        # Bare token id: assume the chain the web3 client is connected to.
        tokenId = int(agentId)
        agent_chain_id = int(self.web3_client.chain_id)

    # Ensure we are submitting the tx on the agent's chain
    if int(self.web3_client.chain_id) != int(agent_chain_id):
        raise ValueError(
            f"Chain mismatch for giveFeedback: agentId={agentId} targets chainId={agent_chain_id}, "
            f"but web3 client is connected to chainId={self.web3_client.chain_id}. "
            f"Initialize the SDK/Web3Client for chainId={agent_chain_id}."
        )

    # Get client address (the one giving feedback)
    # Keep in checksum format for blockchain calls (web3.py requirement)
    clientAddress = self.web3_client.account.address

    # Get current feedback index for this client-agent pair.
    # NOTE(review): this read happens before the tx is submitted, so two
    # concurrent giveFeedback calls from the same client could compute the
    # same index; the on-chain index is authoritative.
    try:
        lastIndex = self.web3_client.call_contract(
            self.reputation_registry,
            "getLastIndex",
            tokenId,
            clientAddress
        )
        feedbackIndex = lastIndex + 1
    except Exception as e:
        raise ValueError(f"Failed to get feedback index: {e}")

    # Encode the user-facing value into raw integer + decimals for on-chain storage.
    value_raw, value_decimals, _normalized = encode_feedback_value(value)

    # On-chain tags are plain strings; empty string means "no tag".
    tag1 = tag1 or ""
    tag2 = tag2 or ""

    feedback_file: Optional[Dict[str, Any]] = feedbackFile
    if feedback_file is not None and not isinstance(feedback_file, dict):
        raise TypeError(f"feedbackFile must be a dict when provided, got {type(feedback_file)}")

    # Endpoint precedence: explicit arg > file endpoint > empty string
    if endpoint:
        endpoint_onchain = endpoint
    elif feedback_file and isinstance(feedback_file.get("endpoint"), str) and feedback_file.get("endpoint"):
        endpoint_onchain = feedback_file.get("endpoint")
    else:
        endpoint_onchain = ""

    # If uploading a file and we have an explicit endpoint, inject it for consistency
    if feedback_file is not None and endpoint and isinstance(endpoint, str):
        # Copy before mutating so the caller's dict is left untouched.
        feedback_file = dict(feedback_file)
        feedback_file["endpoint"] = endpoint

    # Handle off-chain file storage
    feedbackUri = ""
    feedbackHash = b"\x00" * 32  # Default empty hash

    if feedback_file is not None:
        if not self.ipfs_client:
            raise ValueError("feedbackFile was provided, but no IPFS client is configured")

        # Store an ERC-8004 compliant feedback file on IPFS (explicit opt-in)
        try:
            logger.debug("Storing feedback file on IPFS")
            # createdAt MUST be present in the off-chain file; use provided value if valid, else now (UTC).
            created_at = feedback_file.get("createdAt")
            if not isinstance(created_at, str) or not created_at:
                created_at = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")

            # Best-effort lookup of the identity registry address; "0x0"
            # is used as a sentinel when it cannot be determined.
            identity_registry_address = "0x0"
            try:
                if self.identity_registry is not None:
                    identity_registry_address = str(getattr(self.identity_registry, "address", "0x0"))
            except Exception:
                identity_registry_address = "0x0"

            # Remove any user-provided copies of the envelope keys; SDK-owned values must win
            rich = dict(feedback_file)
            for k in [
                "agentRegistry",
                "agentId",
                "clientAddress",
                "createdAt",
                "value",
                "valueDecimals",
                "tag1",
                "tag2",
                "endpoint",
            ]:
                rich.pop(k, None)

            file_for_storage: Dict[str, Any] = {
                # MUST fields (spec)
                "agentRegistry": f"eip155:{agent_chain_id}:{identity_registry_address}",
                "agentId": tokenId,
                "clientAddress": f"eip155:{agent_chain_id}:{clientAddress}",
                "createdAt": created_at,
                # On-chain fields (store raw+decimals for precision)
                "value": int(value_raw),
                "valueDecimals": int(value_decimals),

                # OPTIONAL fields that mirror on-chain
                **({"tag1": tag1} if tag1 else {}),
                **({"tag2": tag2} if tag2 else {}),
                **({"endpoint": endpoint_onchain} if endpoint_onchain else {}),

                # Rich/off-chain fields
                **rich,
            }

            cid = self.ipfs_client.addFeedbackFile(file_for_storage)
            feedbackUri = f"ipfs://{cid}"
            # Hash the canonical (sorted-keys) JSON serialization.
            # NOTE(review): assumes the IPFS client serializes the same way;
            # if it doesn't, the anchored hash won't match the stored bytes
            # — verify against IPFSClient.addFeedbackFile.
            feedbackHash = self.web3_client.keccak256(
                json.dumps(file_for_storage, sort_keys=True).encode()
            )
            logger.debug(f"Feedback file stored on IPFS: {cid}")
        except Exception as e:
            raise ValueError(f"Failed to store feedback on IPFS: {e}")

    # Submit to blockchain with new signature: giveFeedback(agentId, value, valueDecimals, tag1, tag2, endpoint, feedbackURI, feedbackHash)
    try:
        txHash = self.web3_client.transact_contract(
            self.reputation_registry,
            "giveFeedback",
            tokenId,
            value_raw,
            value_decimals,
            tag1,
            tag2,
            endpoint_onchain,
            feedbackUri,
            feedbackHash
        )
    except Exception as e:
        raise ValueError(f"Failed to submit feedback to blockchain: {e}")

    # Create a tx handle; build the Feedback object on confirmation.
    feedbackId = Feedback.create_id(agentId, clientAddress, feedbackIndex)
    ff: Dict[str, Any] = feedback_file or {}

    return TransactionHandle(
        web3_client=self.web3_client,
        tx_hash=txHash,
        compute_result=lambda _receipt: Feedback(
            id=feedbackId,
            agentId=agentId,
            reviewer=clientAddress,
            value=decode_feedback_value(value_raw, value_decimals),
            tags=[tag1, tag2] if tag1 or tag2 else [],
            text=ff.get("text"),
            context=ff.get("context"),
            proofOfPayment=ff.get("proofOfPayment"),
            fileURI=feedbackUri if feedbackUri else None,
            endpoint=endpoint_onchain if endpoint_onchain else None,
            createdAt=int(time.time()),
            isRevoked=False,
            capability=ff.get("capability"),
            name=ff.get("name"),
            skill=ff.get("skill"),
            task=ff.get("task"),
        ),
    )
|
|
253
|
+
|
|
254
|
+
def getFeedback(
|
|
255
|
+
self,
|
|
256
|
+
agentId: AgentId,
|
|
257
|
+
clientAddress: Address,
|
|
258
|
+
feedbackIndex: int,
|
|
259
|
+
) -> Feedback:
|
|
260
|
+
"""Get single feedback with responses from subgraph or blockchain."""
|
|
261
|
+
# Prefer subgraph/indexer for richer data, but fall back to chain when subgraph is behind
|
|
262
|
+
if self.indexer and self.subgraph_client:
|
|
263
|
+
try:
|
|
264
|
+
return self.indexer.get_feedback(agentId, clientAddress, feedbackIndex)
|
|
265
|
+
except Exception as e:
|
|
266
|
+
logger.debug(f"Indexer/subgraph get_feedback failed, falling back to blockchain: {e}")
|
|
267
|
+
return self._get_feedback_from_blockchain(agentId, clientAddress, feedbackIndex)
|
|
268
|
+
|
|
269
|
+
if self.subgraph_client:
|
|
270
|
+
try:
|
|
271
|
+
return self._get_feedback_from_subgraph(agentId, clientAddress, feedbackIndex)
|
|
272
|
+
except Exception as e:
|
|
273
|
+
logger.debug(f"Subgraph get feedback failed, falling back to blockchain: {e}")
|
|
274
|
+
return self._get_feedback_from_blockchain(agentId, clientAddress, feedbackIndex)
|
|
275
|
+
|
|
276
|
+
return self._get_feedback_from_blockchain(agentId, clientAddress, feedbackIndex)
|
|
277
|
+
|
|
278
|
+
def _get_feedback_from_subgraph(
    self,
    agentId: AgentId,
    clientAddress: Address,
    feedbackIndex: int,
) -> Feedback:
    """Get feedback from subgraph.

    Builds the subgraph's composite feedback id
    (``chainId:tokenId:clientAddress:feedbackIndex``), fetches the entity,
    and maps it — together with its optional off-chain ``feedbackFile`` —
    into a ``Feedback`` object.

    Raises:
        ValueError: If the entity is missing or the subgraph query fails.
    """
    # Normalize addresses to lowercase for consistent storage
    normalized_client_address = self.web3_client.normalize_address(clientAddress)

    # Build feedback ID in format: chainId:agentId:clientAddress:feedbackIndex
    # If agentId already contains chainId (format: chainId:tokenId), use it as is
    # Otherwise, prepend chainId from web3_client
    if ":" in agentId:
        # agentId already has chainId, so use it directly
        feedback_id = f"{agentId}:{normalized_client_address}:{feedbackIndex}"
    else:
        # No chainId in agentId, prepend it
        chain_id = str(self.web3_client.chain_id)
        feedback_id = f"{chain_id}:{agentId}:{normalized_client_address}:{feedbackIndex}"

    try:
        feedback_data = self.subgraph_client.get_feedback_by_id(feedback_id)

        if feedback_data is None:
            # NOTE: raised inside the try, so the except below re-wraps it
            # with the "Failed to get feedback from subgraph:" prefix.
            raise ValueError(f"Feedback {feedback_id} not found in subgraph")

        # Off-chain file may be absent or malformed; treat both as empty.
        feedback_file = feedback_data.get('feedbackFile') or {}
        if not isinstance(feedback_file, dict):
            feedback_file = {}

        # Map responses
        responses_data = feedback_data.get('responses', [])
        answers = []
        for resp in responses_data:
            answers.append({
                'responder': resp.get('responder'),
                'responseUri': resp.get('responseUri'),
                'responseHash': resp.get('responseHash'),
                'createdAt': resp.get('createdAt')
            })

        # Map tags: rely on whatever the subgraph returns (may be legacy bytes/hash-like values)
        tags: List[str] = []
        tag1 = feedback_data.get('tag1') or feedback_file.get('tag1')
        tag2 = feedback_data.get('tag2') or feedback_file.get('tag2')
        if isinstance(tag1, str) and tag1:
            tags.append(tag1)
        if isinstance(tag2, str) and tag2:
            tags.append(tag2)

        return Feedback(
            id=Feedback.create_id(agentId, clientAddress, feedbackIndex),  # create_id now normalizes
            agentId=agentId,
            reviewer=self.web3_client.normalize_address(clientAddress),  # Also normalize reviewer field
            value=float(feedback_data.get("value")) if feedback_data.get("value") is not None else None,
            tags=tags,
            text=feedback_file.get('text'),
            capability=feedback_file.get('capability'),
            context=feedback_file.get('context'),
            # Proof-of-payment is flattened in the file; only rebuild the
            # dict when the fromAddress marker field is present.
            proofOfPayment={
                'fromAddress': feedback_file.get('proofOfPaymentFromAddress'),
                'toAddress': feedback_file.get('proofOfPaymentToAddress'),
                'chainId': feedback_file.get('proofOfPaymentChainId'),
                'txHash': feedback_file.get('proofOfPaymentTxHash'),
            } if feedback_file.get('proofOfPaymentFromAddress') else None,
            fileURI=feedback_data.get('feedbackURI') or feedback_data.get('feedbackUri'),  # Handle both old and new field names
            # Prefer on-chain endpoint; fall back to off-chain file endpoint if missing
            endpoint=feedback_data.get('endpoint') or feedback_file.get('endpoint'),
            createdAt=feedback_data.get('createdAt', int(time.time())),
            answers=answers,
            isRevoked=feedback_data.get('isRevoked', False),
            name=feedback_file.get('name'),
            skill=feedback_file.get('skill'),
            task=feedback_file.get('task'),
        )

    except Exception as e:
        raise ValueError(f"Failed to get feedback from subgraph: {e}")
|
|
357
|
+
|
|
358
|
+
def _get_feedback_from_blockchain(
    self,
    agentId: AgentId,
    clientAddress: Address,
    feedbackIndex: int,
) -> Feedback:
    """Get feedback from blockchain (fallback).

    Calls ``readFeedback(agentId, clientAddress, feedbackIndex)`` on the
    reputation registry. Only value/tags/revocation live on-chain, so all
    off-chain fields (text, context, proofOfPayment, …) are returned as
    ``None`` and ``createdAt`` is set to the current time.

    Raises:
        ValueError: If the contract read fails.
    """
    # Parse agent ID: token id is the last ":"-separated component.
    if ":" in agentId:
        tokenId = int(agentId.split(":")[-1])
    else:
        tokenId = int(agentId)

    try:
        # Read from blockchain - new signature: readFeedback(agentId, clientAddress, feedbackIndex)
        result = self.web3_client.call_contract(
            self.reputation_registry,
            "readFeedback",
            tokenId,
            clientAddress,
            feedbackIndex
        )

        value_raw, value_decimals, tag1, tag2, is_revoked = result

        # Create feedback object (normalize address for consistency)
        normalized_address = self.web3_client.normalize_address(clientAddress)
        feedbackId = Feedback.create_id(agentId, normalized_address, feedbackIndex)

        # Tags are now strings, not bytes32
        tags = []
        if tag1:
            tags.append(tag1)
        if tag2:
            tags.append(tag2)

        return Feedback(
            id=feedbackId,
            agentId=agentId,
            reviewer=normalized_address,
            value=decode_feedback_value(int(value_raw), int(value_decimals)),
            tags=tags,
            text=None,  # Not stored on-chain
            capability=None,  # Not stored on-chain
            context=None,  # Not stored on-chain
            proofOfPayment=None,  # Not stored on-chain
            fileURI=None,  # Would need to be retrieved separately
            endpoint=None,  # Not stored on-chain in readFeedback
            createdAt=int(time.time()),  # Not stored on-chain
            isRevoked=is_revoked
        )

    except Exception as e:
        raise ValueError(f"Failed to get feedback: {e}")
|
|
412
|
+
|
|
413
|
+
def searchFeedback(
    self,
    agentId: Optional[AgentId] = None,
    clientAddresses: Optional[List[Address]] = None,
    tags: Optional[List[str]] = None,
    capabilities: Optional[List[str]] = None,
    skills: Optional[List[str]] = None,
    tasks: Optional[List[str]] = None,
    names: Optional[List[str]] = None,
    minValue: Optional[float] = None,
    maxValue: Optional[float] = None,
    include_revoked: bool = False,
    first: int = 100,
    skip: int = 0,
    agents: Optional[List[AgentId]] = None,
) -> List[Feedback]:
    """Search feedback.

    Backwards compatible:
    - `agentId` was previously required; it is now optional.

    New:
    - `agents` supports searching across multiple agents.
    - If neither `agentId` nor `agents` are provided, the query can still run via subgraph
      using other filters like `clientAddresses` (reviewers), tags, etc.

    NOTE(review): the blockchain fallback only honors clientAddresses and
    the first two tags; capabilities/skills/tasks/names/minValue/maxValue/
    first/skip are silently ignored on that path (they are subgraph-only).

    Raises:
        ValueError: When no subgraph is available and the search cannot be
            reduced to a single agent, or the contract read fails.
    """
    # Use indexer for subgraph queries (unified search interface)
    if self.indexer and self.subgraph_client:
        # Indexer handles subgraph queries for unified search architecture
        # This enables future semantic search capabilities
        return self.indexer.search_feedback(
            agentId,
            clientAddresses,
            tags,
            capabilities,
            skills,
            tasks,
            names,
            minValue,
            maxValue,
            include_revoked,
            first,
            skip,
            agents=agents,
        )

    # Fallback: direct subgraph access (if indexer not available)
    if self.subgraph_client:
        return self._search_feedback_subgraph(
            agentId,
            clientAddresses,
            tags,
            capabilities,
            skills,
            tasks,
            names,
            minValue,
            maxValue,
            include_revoked,
            first,
            skip,
            agents=agents,
        )

    # Fallback to blockchain (requires a specific agent)
    if not agentId and not agents:
        raise ValueError(
            "searchFeedback requires a subgraph when searching without agentId/agents."
        )
    # A single-item agents list can be treated as an agentId.
    if not agentId and agents and len(agents) == 1:
        agentId = agents[0]
    if not agentId:
        raise ValueError(
            "Blockchain fallback only supports searching a single agent; provide agentId or a single-item agents=[...]."
        )

    # Parse agent ID: token id is the last ":"-separated component.
    if ":" in agentId:
        tokenId = int(agentId.split(":")[-1])
    else:
        tokenId = int(agentId)

    try:
        # Prepare filter parameters - tags are now strings
        client_list = clientAddresses if clientAddresses else []
        tag1_filter = tags[0] if tags else ""
        tag2_filter = tags[1] if tags and len(tags) > 1 else ""

        # Read from blockchain - signature returns: (clients, feedbackIndexes, values, valueDecimals, tag1s, tag2s, revokedStatuses)
        result = self.web3_client.call_contract(
            self.reputation_registry,
            "readAllFeedback",
            tokenId,
            client_list,
            tag1_filter,
            tag2_filter,
            include_revoked
        )

        clients, feedback_indexes, values, value_decimals, tag1s, tag2s, revoked_statuses = result

        # Convert to Feedback objects
        feedbacks = []
        for i in range(len(clients)):
            # Defensive: parallel arrays may be shorter than `clients`;
            # fall back to a 1-based positional index.
            feedback_index = int(feedback_indexes[i]) if i < len(feedback_indexes) else (i + 1)
            feedbackId = Feedback.create_id(agentId, clients[i], feedback_index)

            # Tags are now strings
            tags_list = []
            if i < len(tag1s) and tag1s[i]:
                tags_list.append(tag1s[i])
            if i < len(tag2s) and tag2s[i]:
                tags_list.append(tag2s[i])

            feedback = Feedback(
                id=feedbackId,
                agentId=agentId,
                reviewer=clients[i],
                value=decode_feedback_value(int(values[i]), int(value_decimals[i])),
                tags=tags_list,
                # Off-chain-only fields are unavailable on this path.
                text=None,
                capability=None,
                endpoint=None,
                context=None,
                proofOfPayment=None,
                fileURI=None,
                createdAt=int(time.time()),
                isRevoked=revoked_statuses[i] if i < len(revoked_statuses) else False
            )
            feedbacks.append(feedback)

        return feedbacks

    except Exception as e:
        raise ValueError(f"Failed to search feedback: {e}")
|
|
548
|
+
|
|
549
|
+
def _search_feedback_subgraph(
    self,
    agentId: Optional[AgentId],
    clientAddresses: Optional[List[Address]],
    tags: Optional[List[str]],
    capabilities: Optional[List[str]],
    skills: Optional[List[str]],
    tasks: Optional[List[str]],
    names: Optional[List[str]],
    minValue: Optional[float],
    maxValue: Optional[float],
    include_revoked: bool,
    first: int,
    skip: int,
    agents: Optional[List[AgentId]] = None,
) -> List[Feedback]:
    """Search feedback using subgraph.

    Merges ``agentId`` into ``agents``, builds a ``SearchFeedbackParams``
    filter, queries the subgraph (newest first), and maps each raw entity
    plus its optional ``feedbackFile`` into a ``Feedback`` object.
    """
    # Merge the legacy single agentId into the multi-agent list.
    merged_agents: Optional[List[AgentId]] = None
    if agents:
        merged_agents = list(agents)
    if agentId:
        merged_agents = (merged_agents or []) + [agentId]

    # Create SearchFeedbackParams
    params = SearchFeedbackParams(
        agents=merged_agents,
        reviewers=clientAddresses,
        tags=tags,
        capabilities=capabilities,
        skills=skills,
        tasks=tasks,
        names=names,
        minValue=minValue,
        maxValue=maxValue,
        includeRevoked=include_revoked
    )

    # Query subgraph
    feedbacks_data = self.subgraph_client.search_feedback(
        params=params,
        first=first,
        skip=skip,
        order_by="createdAt",
        order_direction="desc"
    )

    # Map to Feedback objects
    feedbacks = []
    for fb_data in feedbacks_data:
        # Off-chain file may be absent or malformed; treat both as empty.
        feedback_file = fb_data.get('feedbackFile') or {}
        if not isinstance(feedback_file, dict):
            feedback_file = {}

        # Map responses
        responses_data = fb_data.get('responses', [])
        answers = []
        for resp in responses_data:
            answers.append({
                'responder': resp.get('responder'),
                'responseUri': resp.get('responseUri'),
                'responseHash': resp.get('responseHash'),
                'createdAt': resp.get('createdAt')
            })

        # Map tags: rely on whatever the subgraph returns (may be legacy bytes/hash-like values)
        tags_list: List[str] = []
        tag1 = fb_data.get('tag1') or feedback_file.get('tag1')
        tag2 = fb_data.get('tag2') or feedback_file.get('tag2')
        if isinstance(tag1, str) and tag1:
            tags_list.append(tag1)
        if isinstance(tag2, str) and tag2:
            tags_list.append(tag2)

        # Parse agentId from feedback ID
        # (expected shape: chainId:tokenId:clientAddress:feedbackIndex)
        feedback_id = fb_data['id']
        parts = feedback_id.split(':')
        if len(parts) >= 2:
            agent_id_str = f"{parts[0]}:{parts[1]}"
            client_addr = parts[2] if len(parts) > 2 else ""
            feedback_idx = int(parts[3]) if len(parts) > 3 else 1
        else:
            # Unexpected id shape: use it verbatim with placeholder fields.
            agent_id_str = feedback_id
            client_addr = ""
            feedback_idx = 1

        feedback = Feedback(
            id=Feedback.create_id(agent_id_str, client_addr, feedback_idx),
            agentId=agent_id_str,
            reviewer=client_addr,
            value=float(fb_data.get("value")) if fb_data.get("value") is not None else None,
            tags=tags_list,
            text=feedback_file.get('text'),
            capability=feedback_file.get('capability'),
            context=feedback_file.get('context'),
            # Proof-of-payment is flattened in the file; only rebuild the
            # dict when the fromAddress marker field is present.
            proofOfPayment={
                'fromAddress': feedback_file.get('proofOfPaymentFromAddress'),
                'toAddress': feedback_file.get('proofOfPaymentToAddress'),
                'chainId': feedback_file.get('proofOfPaymentChainId'),
                'txHash': feedback_file.get('proofOfPaymentTxHash'),
            } if feedback_file.get('proofOfPaymentFromAddress') else None,
            fileURI=fb_data.get('feedbackURI') or fb_data.get('feedbackUri'),  # Handle both old and new field names
            endpoint=fb_data.get('endpoint'),
            createdAt=fb_data.get('createdAt', int(time.time())),
            answers=answers,
            isRevoked=fb_data.get('isRevoked', False),
            name=feedback_file.get('name'),
            skill=feedback_file.get('skill'),
            task=feedback_file.get('task'),
        )
        feedbacks.append(feedback)

    return feedbacks
|
|
661
|
+
|
|
662
|
+
def revokeFeedback(
    self,
    agentId: AgentId,
    feedbackIndex: int,
) -> TransactionHandle[Feedback]:
    """Revoke one of the caller's own feedback entries on-chain.

    Submits ``revokeFeedback(tokenId, feedbackIndex)`` and returns a
    handle whose result re-reads the (now revoked) feedback entry.
    """
    # Token id is the last ":"-separated component (or the bare id).
    token_id = int(agentId.split(":")[-1]) if ":" in agentId else int(agentId)

    # The revoker is the connected account — only the original author
    # is relevant here, so its address is used for the later re-read.
    reviewer = self.web3_client.account.address

    try:
        tx_hash = self.web3_client.transact_contract(
            self.reputation_registry,
            "revokeFeedback",
            token_id,
            feedbackIndex,
        )
        return TransactionHandle(
            web3_client=self.web3_client,
            tx_hash=tx_hash,
            compute_result=lambda _receipt: self.getFeedback(agentId, reviewer, feedbackIndex),
        )
    except Exception as e:
        raise ValueError(f"Failed to revoke feedback: {e}")
|
|
690
|
+
|
|
691
|
+
def appendResponse(
    self,
    agentId: AgentId,
    clientAddress: Address,
    feedbackIndex: int,
    response: Dict[str, Any],
) -> TransactionHandle[Feedback]:
    """Append a response/follow-up to existing feedback.

    Args:
        agentId: Agent id as ``chainId:tokenId`` (or a bare token id).
        clientAddress: Address of the original feedback author.
        feedbackIndex: Index of the feedback entry being answered.
        response: Response payload; uploaded to IPFS when it carries
            ``text`` or ``attachments`` and an IPFS client is configured.

    Returns:
        TransactionHandle resolving to the refreshed ``Feedback``.

    Raises:
        ValueError: If the on-chain transaction fails.
    """
    # Parse agent ID: token id is the last ":"-separated component.
    if ":" in agentId:
        tokenId = int(agentId.split(":")[-1])
    else:
        tokenId = int(agentId)

    # Default to "no off-chain payload": empty URI and an all-zero hash.
    # (The original also computed an unused `responseText` local; removed.)
    responseUri = ""
    responseHash = b"\x00" * 32

    # Off-chain storage is best-effort: a failed IPFS upload downgrades
    # the response to on-chain-only rather than aborting.
    if self.ipfs_client and (response.get("text") or response.get("attachments")):
        try:
            cid = self.ipfs_client.add_json(response)
            responseUri = f"ipfs://{cid}"
            responseHash = self.web3_client.keccak256(json.dumps(response, sort_keys=True).encode())
        except Exception as e:
            logger.warning(f"Failed to store response on IPFS: {e}")

    try:
        txHash = self.web3_client.transact_contract(
            self.reputation_registry,
            "appendResponse",
            tokenId,
            clientAddress,
            feedbackIndex,
            responseUri,  # Note: contract uses responseURI but variable name kept for compatibility
            responseHash
        )
        return TransactionHandle(
            web3_client=self.web3_client,
            tx_hash=txHash,
            compute_result=lambda _receipt: self.getFeedback(agentId, clientAddress, feedbackIndex),
        )
    except Exception as e:
        # Chain the cause so the original traceback is preserved.
        raise ValueError(f"Failed to append response: {e}") from e
|
|
735
|
+
|
|
736
|
+
def getReputationSummary(
|
|
737
|
+
self,
|
|
738
|
+
agentId: AgentId,
|
|
739
|
+
clientAddresses: Optional[List[Address]] = None,
|
|
740
|
+
tag1: Optional[str] = None,
|
|
741
|
+
tag2: Optional[str] = None,
|
|
742
|
+
groupBy: Optional[List[str]] = None,
|
|
743
|
+
) -> Dict[str, Any]:
|
|
744
|
+
"""Get reputation summary for an agent with optional grouping."""
|
|
745
|
+
# Parse chainId from agentId
|
|
746
|
+
chain_id = None
|
|
747
|
+
if ":" in agentId:
|
|
748
|
+
try:
|
|
749
|
+
chain_id = int(agentId.split(":", 1)[0])
|
|
750
|
+
except ValueError:
|
|
751
|
+
chain_id = None
|
|
752
|
+
|
|
753
|
+
# Try subgraph first (if available and indexer supports it)
|
|
754
|
+
if self.indexer and self.subgraph_client:
|
|
755
|
+
# Get correct subgraph client for the chain
|
|
756
|
+
subgraph_client = None
|
|
757
|
+
full_agent_id = agentId
|
|
758
|
+
|
|
759
|
+
if chain_id is not None:
|
|
760
|
+
subgraph_client = self.indexer._get_subgraph_client_for_chain(chain_id)
|
|
761
|
+
else:
|
|
762
|
+
# No chainId in agentId, use SDK's default
|
|
763
|
+
# Construct full agentId format for subgraph query
|
|
764
|
+
default_chain_id = self.web3_client.chain_id
|
|
765
|
+
token_id = agentId.split(":")[-1] if ":" in agentId else agentId
|
|
766
|
+
full_agent_id = f"{default_chain_id}:{token_id}"
|
|
767
|
+
subgraph_client = self.subgraph_client
|
|
768
|
+
|
|
769
|
+
if subgraph_client:
|
|
770
|
+
# Use subgraph to calculate reputation
|
|
771
|
+
return self._get_reputation_summary_from_subgraph(
|
|
772
|
+
full_agent_id, clientAddresses, tag1, tag2, groupBy
|
|
773
|
+
)
|
|
774
|
+
|
|
775
|
+
# Fallback to blockchain (requires chain-specific web3 client)
|
|
776
|
+
# For now, only works if chain matches SDK's default
|
|
777
|
+
if chain_id is not None and chain_id != self.web3_client.chain_id:
|
|
778
|
+
raise ValueError(
|
|
779
|
+
f"Blockchain reputation summary not supported for chain {chain_id}. "
|
|
780
|
+
f"SDK is configured for chain {self.web3_client.chain_id}. "
|
|
781
|
+
f"Use subgraph-based summary instead."
|
|
782
|
+
)
|
|
783
|
+
|
|
784
|
+
# Parse agent ID for blockchain call
|
|
785
|
+
if ":" in agentId:
|
|
786
|
+
tokenId = int(agentId.split(":")[-1])
|
|
787
|
+
else:
|
|
788
|
+
tokenId = int(agentId)
|
|
789
|
+
|
|
790
|
+
try:
|
|
791
|
+
client_list = clientAddresses if clientAddresses else []
|
|
792
|
+
tag1_str = tag1 if tag1 else ""
|
|
793
|
+
tag2_str = tag2 if tag2 else ""
|
|
794
|
+
|
|
795
|
+
result = self.web3_client.call_contract(
|
|
796
|
+
self.reputation_registry,
|
|
797
|
+
"getSummary",
|
|
798
|
+
tokenId,
|
|
799
|
+
client_list,
|
|
800
|
+
tag1_str,
|
|
801
|
+
tag2_str
|
|
802
|
+
)
|
|
803
|
+
|
|
804
|
+
count, summary_value, summary_value_decimals = result
|
|
805
|
+
average_value = decode_feedback_value(int(summary_value), int(summary_value_decimals))
|
|
806
|
+
|
|
807
|
+
# If no grouping requested, return simple summary
|
|
808
|
+
if not groupBy:
|
|
809
|
+
return {
|
|
810
|
+
"agentId": agentId,
|
|
811
|
+
"count": count,
|
|
812
|
+
"averageValue": average_value,
|
|
813
|
+
"filters": {
|
|
814
|
+
"clientAddresses": clientAddresses,
|
|
815
|
+
"tag1": tag1,
|
|
816
|
+
"tag2": tag2
|
|
817
|
+
}
|
|
818
|
+
}
|
|
819
|
+
|
|
820
|
+
# Get detailed feedback data for grouping
|
|
821
|
+
all_feedback = self.read_all_feedback(
|
|
822
|
+
agentId=agentId,
|
|
823
|
+
clientAddresses=clientAddresses,
|
|
824
|
+
tags=[tag1, tag2] if tag1 or tag2 else None,
|
|
825
|
+
include_revoked=False
|
|
826
|
+
)
|
|
827
|
+
|
|
828
|
+
# Group feedback by requested dimensions
|
|
829
|
+
grouped_data = self._groupFeedback(all_feedback, groupBy)
|
|
830
|
+
|
|
831
|
+
return {
|
|
832
|
+
"agentId": agentId,
|
|
833
|
+
"totalCount": count,
|
|
834
|
+
"totalAverageValue": average_value,
|
|
835
|
+
"groupedData": grouped_data,
|
|
836
|
+
"filters": {
|
|
837
|
+
"clientAddresses": clientAddresses,
|
|
838
|
+
"tag1": tag1,
|
|
839
|
+
"tag2": tag2
|
|
840
|
+
},
|
|
841
|
+
"groupBy": groupBy
|
|
842
|
+
}
|
|
843
|
+
|
|
844
|
+
except Exception as e:
|
|
845
|
+
raise ValueError(f"Failed to get reputation summary: {e}")
|
|
846
|
+
|
|
847
|
+
def _get_reputation_summary_from_subgraph(
|
|
848
|
+
self,
|
|
849
|
+
agentId: AgentId,
|
|
850
|
+
clientAddresses: Optional[List[Address]] = None,
|
|
851
|
+
tag1: Optional[str] = None,
|
|
852
|
+
tag2: Optional[str] = None,
|
|
853
|
+
groupBy: Optional[List[str]] = None,
|
|
854
|
+
) -> Dict[str, Any]:
|
|
855
|
+
"""Get reputation summary from subgraph."""
|
|
856
|
+
# Build tags list
|
|
857
|
+
tags = []
|
|
858
|
+
if tag1:
|
|
859
|
+
tags.append(tag1)
|
|
860
|
+
if tag2:
|
|
861
|
+
tags.append(tag2)
|
|
862
|
+
|
|
863
|
+
# Get all feedback for the agent using indexer (which handles multi-chain)
|
|
864
|
+
# Use searchFeedback with a large limit to get all feedback
|
|
865
|
+
all_feedback = self.searchFeedback(
|
|
866
|
+
agentId=agentId,
|
|
867
|
+
clientAddresses=clientAddresses,
|
|
868
|
+
tags=tags if tags else None,
|
|
869
|
+
include_revoked=False,
|
|
870
|
+
first=1000, # Large limit to get all feedback
|
|
871
|
+
skip=0
|
|
872
|
+
)
|
|
873
|
+
|
|
874
|
+
# Calculate summary statistics
|
|
875
|
+
count = len(all_feedback)
|
|
876
|
+
values = [fb.value for fb in all_feedback if fb.value is not None]
|
|
877
|
+
average_value = sum(values) / len(values) if values else 0.0
|
|
878
|
+
|
|
879
|
+
# If no grouping requested, return simple summary
|
|
880
|
+
if not groupBy:
|
|
881
|
+
return {
|
|
882
|
+
"agentId": agentId,
|
|
883
|
+
"count": count,
|
|
884
|
+
"averageValue": average_value,
|
|
885
|
+
"filters": {
|
|
886
|
+
"clientAddresses": clientAddresses,
|
|
887
|
+
"tag1": tag1,
|
|
888
|
+
"tag2": tag2
|
|
889
|
+
}
|
|
890
|
+
}
|
|
891
|
+
|
|
892
|
+
# Group feedback by requested dimensions
|
|
893
|
+
grouped_data = self._groupFeedback(all_feedback, groupBy)
|
|
894
|
+
|
|
895
|
+
return {
|
|
896
|
+
"agentId": agentId,
|
|
897
|
+
"totalCount": count,
|
|
898
|
+
"totalAverageValue": average_value,
|
|
899
|
+
"groupedData": grouped_data,
|
|
900
|
+
"filters": {
|
|
901
|
+
"clientAddresses": clientAddresses,
|
|
902
|
+
"tag1": tag1,
|
|
903
|
+
"tag2": tag2
|
|
904
|
+
},
|
|
905
|
+
"groupBy": groupBy
|
|
906
|
+
}
|
|
907
|
+
|
|
908
|
+
def _groupFeedback(self, feedbackList: List[Feedback], groupBy: List[str]) -> Dict[str, Any]:
|
|
909
|
+
"""Group feedback by specified dimensions."""
|
|
910
|
+
grouped = {}
|
|
911
|
+
|
|
912
|
+
for feedback in feedbackList:
|
|
913
|
+
# Create group key based on requested dimensions
|
|
914
|
+
group_key = self._createGroupKey(feedback, groupBy)
|
|
915
|
+
|
|
916
|
+
if group_key not in grouped:
|
|
917
|
+
grouped[group_key] = {
|
|
918
|
+
"count": 0,
|
|
919
|
+
"totalValue": 0.0,
|
|
920
|
+
"averageValue": 0.0,
|
|
921
|
+
"values": [],
|
|
922
|
+
"feedback": []
|
|
923
|
+
}
|
|
924
|
+
|
|
925
|
+
# Add feedback to group
|
|
926
|
+
grouped[group_key]["count"] += 1
|
|
927
|
+
if feedback.value is not None:
|
|
928
|
+
grouped[group_key]["totalValue"] += float(feedback.value)
|
|
929
|
+
grouped[group_key]["values"].append(float(feedback.value))
|
|
930
|
+
grouped[group_key]["feedback"].append(feedback)
|
|
931
|
+
|
|
932
|
+
# Calculate averages for each group
|
|
933
|
+
for group_data in grouped.values():
|
|
934
|
+
if group_data["count"] > 0:
|
|
935
|
+
group_data["averageValue"] = group_data["totalValue"] / group_data["count"]
|
|
936
|
+
|
|
937
|
+
return grouped
|
|
938
|
+
|
|
939
|
+
def _createGroupKey(self, feedback: Feedback, groupBy: List[str]) -> str:
|
|
940
|
+
"""Create a group key for feedback based on grouping dimensions."""
|
|
941
|
+
key_parts = []
|
|
942
|
+
|
|
943
|
+
for dimension in groupBy:
|
|
944
|
+
if dimension == "tag":
|
|
945
|
+
# Group by tags
|
|
946
|
+
if feedback.tags:
|
|
947
|
+
key_parts.append(f"tags:{','.join(feedback.tags)}")
|
|
948
|
+
else:
|
|
949
|
+
key_parts.append("tags:none")
|
|
950
|
+
elif dimension == "capability":
|
|
951
|
+
# Group by MCP capability
|
|
952
|
+
if feedback.capability:
|
|
953
|
+
key_parts.append(f"capability:{feedback.capability}")
|
|
954
|
+
else:
|
|
955
|
+
key_parts.append("capability:none")
|
|
956
|
+
elif dimension == "skill":
|
|
957
|
+
# Group by A2A skill
|
|
958
|
+
if feedback.skill:
|
|
959
|
+
key_parts.append(f"skill:{feedback.skill}")
|
|
960
|
+
else:
|
|
961
|
+
key_parts.append("skill:none")
|
|
962
|
+
elif dimension == "task":
|
|
963
|
+
# Group by A2A task
|
|
964
|
+
if feedback.task:
|
|
965
|
+
key_parts.append(f"task:{feedback.task}")
|
|
966
|
+
else:
|
|
967
|
+
key_parts.append("task:none")
|
|
968
|
+
elif dimension == "endpoint":
|
|
969
|
+
# Group by endpoint (from context or capability)
|
|
970
|
+
endpoint = None
|
|
971
|
+
if feedback.context and "endpoint" in feedback.context:
|
|
972
|
+
endpoint = feedback.context["endpoint"]
|
|
973
|
+
elif feedback.capability:
|
|
974
|
+
endpoint = f"mcp:{feedback.capability}"
|
|
975
|
+
|
|
976
|
+
if endpoint:
|
|
977
|
+
key_parts.append(f"endpoint:{endpoint}")
|
|
978
|
+
else:
|
|
979
|
+
key_parts.append("endpoint:none")
|
|
980
|
+
elif dimension == "time":
|
|
981
|
+
# Group by time periods (daily, weekly, monthly)
|
|
982
|
+
from datetime import datetime
|
|
983
|
+
createdAt = datetime.fromtimestamp(feedback.createdAt)
|
|
984
|
+
key_parts.append(f"time:{createdAt.strftime('%Y-%m')}") # Monthly grouping
|
|
985
|
+
else:
|
|
986
|
+
# Unknown dimension, use as-is
|
|
987
|
+
key_parts.append(f"{dimension}:unknown")
|
|
988
|
+
|
|
989
|
+
return "|".join(key_parts)
|
|
990
|
+
|
|
991
|
+
def _normalizeTag(self, tag: str) -> str:
|
|
992
|
+
"""Normalize string tag (trim, validate length if needed).
|
|
993
|
+
|
|
994
|
+
Args:
|
|
995
|
+
tag: Tag string to normalize
|
|
996
|
+
|
|
997
|
+
Returns:
|
|
998
|
+
Normalized tag string
|
|
999
|
+
"""
|
|
1000
|
+
if not tag:
|
|
1001
|
+
return ""
|
|
1002
|
+
# Trim whitespace
|
|
1003
|
+
normalized = tag.strip()
|
|
1004
|
+
# Tags are now strings with no length limit, but we can validate if needed
|
|
1005
|
+
return normalized
|
|
1006
|
+
|
|
1007
|
+
def _hexBytes32ToTags(self, tag1: str, tag2: str) -> List[str]:
|
|
1008
|
+
"""Convert hex bytes32 tags back to strings, or return plain strings as-is.
|
|
1009
|
+
|
|
1010
|
+
DEPRECATED: This method is kept for backward compatibility with old data
|
|
1011
|
+
that may have bytes32 tags. New tags are strings and don't need conversion.
|
|
1012
|
+
|
|
1013
|
+
The subgraph now stores tags as human-readable strings (not hex),
|
|
1014
|
+
so this method handles both formats for backwards compatibility.
|
|
1015
|
+
"""
|
|
1016
|
+
tags = []
|
|
1017
|
+
|
|
1018
|
+
if tag1 and tag1 != "0x" + "00" * 32:
|
|
1019
|
+
# If it's already a plain string (from subgraph), use it directly
|
|
1020
|
+
if not tag1.startswith("0x"):
|
|
1021
|
+
if tag1:
|
|
1022
|
+
tags.append(tag1)
|
|
1023
|
+
else:
|
|
1024
|
+
# Try to convert from hex bytes32 (on-chain format)
|
|
1025
|
+
try:
|
|
1026
|
+
# Remove 0x prefix if present
|
|
1027
|
+
hex_bytes = bytes.fromhex(tag1[2:])
|
|
1028
|
+
tag1_str = hex_bytes.rstrip(b'\x00').decode('utf-8', errors='ignore')
|
|
1029
|
+
if tag1_str:
|
|
1030
|
+
tags.append(tag1_str)
|
|
1031
|
+
except Exception as e:
|
|
1032
|
+
pass # Ignore invalid hex strings
|
|
1033
|
+
|
|
1034
|
+
if tag2 and tag2 != "0x" + "00" * 32:
|
|
1035
|
+
# If it's already a plain string (from subgraph), use it directly
|
|
1036
|
+
if not tag2.startswith("0x"):
|
|
1037
|
+
if tag2:
|
|
1038
|
+
tags.append(tag2)
|
|
1039
|
+
else:
|
|
1040
|
+
# Try to convert from hex bytes32 (on-chain format)
|
|
1041
|
+
try:
|
|
1042
|
+
if tag2.startswith("0x"):
|
|
1043
|
+
hex_bytes = bytes.fromhex(tag2[2:])
|
|
1044
|
+
else:
|
|
1045
|
+
hex_bytes = bytes.fromhex(tag2)
|
|
1046
|
+
tag2_str = hex_bytes.rstrip(b'\x00').decode('utf-8', errors='ignore')
|
|
1047
|
+
if tag2_str:
|
|
1048
|
+
tags.append(tag2_str)
|
|
1049
|
+
except Exception as e:
|
|
1050
|
+
pass # Ignore invalid hex strings
|
|
1051
|
+
|
|
1052
|
+
return tags
|