agentmesh-platform 1.0.0a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentmesh/__init__.py +119 -0
- agentmesh/cli/__init__.py +10 -0
- agentmesh/cli/main.py +405 -0
- agentmesh/governance/__init__.py +26 -0
- agentmesh/governance/audit.py +381 -0
- agentmesh/governance/compliance.py +447 -0
- agentmesh/governance/policy.py +385 -0
- agentmesh/governance/shadow.py +266 -0
- agentmesh/identity/__init__.py +30 -0
- agentmesh/identity/agent_id.py +319 -0
- agentmesh/identity/credentials.py +323 -0
- agentmesh/identity/delegation.py +281 -0
- agentmesh/identity/risk.py +279 -0
- agentmesh/identity/spiffe.py +230 -0
- agentmesh/identity/sponsor.py +178 -0
- agentmesh/reward/__init__.py +19 -0
- agentmesh/reward/engine.py +454 -0
- agentmesh/reward/learning.py +287 -0
- agentmesh/reward/scoring.py +203 -0
- agentmesh/trust/__init__.py +19 -0
- agentmesh/trust/bridge.py +386 -0
- agentmesh/trust/capability.py +293 -0
- agentmesh/trust/handshake.py +334 -0
- agentmesh_platform-1.0.0a1.dist-info/METADATA +332 -0
- agentmesh_platform-1.0.0a1.dist-info/RECORD +28 -0
- agentmesh_platform-1.0.0a1.dist-info/WHEEL +4 -0
- agentmesh_platform-1.0.0a1.dist-info/entry_points.txt +2 -0
- agentmesh_platform-1.0.0a1.dist-info/licenses/LICENSE +190 -0
|
@@ -0,0 +1,381 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Audit Log
|
|
3
|
+
|
|
4
|
+
Tamper-evident audit log with Merkle-chain hashing.
|
|
5
|
+
Any log modification is detected; integrity verifiable offline.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
from datetime import datetime
|
|
9
|
+
from typing import Optional, Any
|
|
10
|
+
from pydantic import BaseModel, Field
|
|
11
|
+
import hashlib
|
|
12
|
+
import json
|
|
13
|
+
import uuid
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class AuditEntry(BaseModel):
    """
    Single audit log entry.

    Every entry is:
    - Timestamped
    - Signed
    - Chained to previous entry via hash

    The entry hash covers every audit-relevant field, so altering any of
    them after the fact breaks chain verification.
    """

    entry_id: str = Field(default_factory=lambda: f"audit_{uuid.uuid4().hex[:16]}")
    # NOTE(review): datetime.utcnow() is naive and deprecated in 3.12; kept
    # because query() compares against caller-supplied (presumably naive)
    # datetimes — migrate both together.
    timestamp: datetime = Field(default_factory=datetime.utcnow)

    # Event details
    event_type: str
    agent_did: str
    action: str

    # Context
    resource: Optional[str] = None
    target_did: Optional[str] = None

    # Data (sanitized - no secrets)
    data: dict = Field(default_factory=dict)

    # Outcome
    outcome: str = "success"  # success, failure, denied, error

    # Policy evaluation
    policy_decision: Optional[str] = None
    matched_rule: Optional[str] = None

    # Chaining
    previous_hash: str = Field(default="")
    entry_hash: str = Field(default="")

    # Metadata
    trace_id: Optional[str] = None
    session_id: Optional[str] = None

    def compute_hash(self) -> str:
        """Compute the SHA-256 hex digest of this entry's canonical JSON form.

        Fix: target_did, policy_decision, matched_rule, trace_id and
        session_id were previously excluded from the hash, so those fields
        could be modified without detection. They are now covered.

        Returns:
            64-character hex digest over the sorted-key JSON payload.
        """
        payload = {
            "entry_id": self.entry_id,
            "timestamp": self.timestamp.isoformat(),
            "event_type": self.event_type,
            "agent_did": self.agent_did,
            "action": self.action,
            "resource": self.resource,
            "target_did": self.target_did,
            "data": self.data,
            "outcome": self.outcome,
            "policy_decision": self.policy_decision,
            "matched_rule": self.matched_rule,
            "trace_id": self.trace_id,
            "session_id": self.session_id,
            "previous_hash": self.previous_hash,
        }
        # sort_keys makes the serialization canonical and hash-stable.
        canonical = json.dumps(payload, sort_keys=True)
        return hashlib.sha256(canonical.encode()).hexdigest()

    def verify_hash(self) -> bool:
        """Return True if the stored entry_hash matches a fresh recomputation."""
        return self.entry_hash == self.compute_hash()
|
+
class MerkleNode(BaseModel):
    """Node in a Merkle tree for audit verification.

    Leaves carry the hash of one audit entry (or a zero padding hash);
    interior nodes carry the hash of their two children's concatenated
    hashes.
    """

    # SHA-256 hex digest this node contributes to the tree.
    hash: str
    # Child hashes (not object references); None on leaf nodes.
    left_child: Optional[str] = None
    right_child: Optional[str] = None
    # True for bottom-level nodes, including zero-hash padding leaves.
    is_leaf: bool = False
    entry_id: Optional[str] = None  # Only for leaves
87
|
+
class MerkleAuditChain:
    """
    Merkle tree for efficient audit verification.

    Allows:
    - Efficient verification of single entries
    - Proof that an entry exists in the log
    - Detection of any tampering
    """

    def __init__(self):
        # Entries in insertion (chain) order.
        self._entries: list[AuditEntry] = []
        # Tree levels, leaves first; _tree[-1] holds the single root node.
        self._tree: list[list[MerkleNode]] = []
        self._root_hash: Optional[str] = None

    def add_entry(self, entry: AuditEntry) -> None:
        """Link *entry* to the chain tail, stamp its hash, rebuild the tree."""
        if self._entries:
            entry.previous_hash = self._entries[-1].entry_hash
        entry.entry_hash = entry.compute_hash()
        self._entries.append(entry)
        self._rebuild_tree()

    def _rebuild_tree(self) -> None:
        """Recompute every tree level from the current entry list."""
        if not self._entries:
            self._tree = []
            self._root_hash = None
            return

        # One leaf per entry, in chain order.
        level = [
            MerkleNode(hash=e.entry_hash, is_leaf=True, entry_id=e.entry_id)
            for e in self._entries
        ]

        # Pad with zero-hash leaves until the width is a power of two.
        while len(level) & (len(level) - 1):
            level.append(MerkleNode(hash="0" * 64, is_leaf=True))

        self._tree = [level]

        # Combine pairwise, bottom-up, until a single root remains.
        while len(level) > 1:
            parents = []
            for i in range(0, len(level), 2):
                lhs = level[i]
                rhs = level[i + 1] if i + 1 < len(level) else lhs
                digest = hashlib.sha256((lhs.hash + rhs.hash).encode()).hexdigest()
                parents.append(
                    MerkleNode(hash=digest, left_child=lhs.hash, right_child=rhs.hash)
                )
            self._tree.append(parents)
            level = parents

        self._root_hash = self._tree[-1][0].hash if self._tree else None

    def get_root_hash(self) -> Optional[str]:
        """Get the Merkle root hash (None while the log is empty)."""
        return self._root_hash

    def get_proof(self, entry_id: str) -> Optional[list[tuple[str, str]]]:
        """
        Get Merkle proof for an entry.

        Returns list of (sibling_hash, position) tuples — position is the
        sibling's side relative to the current hash — or None when the
        entry is not in the log.
        """
        idx = next(
            (i for i, e in enumerate(self._entries) if e.entry_id == entry_id),
            None,
        )
        if idx is None:
            return None

        path: list[tuple[str, str]] = []
        for level in self._tree[:-1]:  # the root level contributes no sibling
            sibling = idx ^ 1  # flip the low bit to find the pair partner
            if sibling < len(level):
                side = "right" if idx % 2 == 0 else "left"
                path.append((level[sibling].hash, side))
            idx //= 2

        return path

    def verify_proof(
        self,
        entry_hash: str,
        proof: list[tuple[str, str]],
        root_hash: str,
    ) -> bool:
        """Fold the proof path upward and compare the result to *root_hash*."""
        acc = entry_hash
        for sibling_hash, side in proof:
            pair = acc + sibling_hash if side == "right" else sibling_hash + acc
            acc = hashlib.sha256(pair.encode()).hexdigest()
        return acc == root_hash

    def verify_chain(self) -> tuple[bool, Optional[str]]:
        """
        Verify the entire chain integrity.

        Returns (is_valid, error_message).
        """
        expected_prev = ""
        for i, entry in enumerate(self._entries):
            # Each entry must hash to its stored digest...
            if not entry.verify_hash():
                return False, f"Entry {i} hash mismatch"
            # ...and point at its predecessor's digest.
            if entry.previous_hash != expected_prev:
                return False, f"Entry {i} chain broken"
            expected_prev = entry.entry_hash
        return True, None
|
230
|
+
class AuditLog:
    """
    Complete audit logging system.

    Features:
    - Tamper-evident Merkle chains
    - Offline verification
    - Efficient querying
    """

    def __init__(self):
        self._chain = MerkleAuditChain()
        self._by_agent: dict[str, list[str]] = {}  # agent_did -> [entry_ids]
        self._by_type: dict[str, list[str]] = {}  # event_type -> [entry_ids]

    def log(
        self,
        event_type: str,
        agent_did: str,
        action: str,
        resource: Optional[str] = None,
        data: Optional[dict] = None,
        outcome: str = "success",
        policy_decision: Optional[str] = None,
        trace_id: Optional[str] = None,
    ) -> AuditEntry:
        """
        Log an audit event.

        All agent actions should be logged through this method.

        Args:
            event_type: Event category (also used for the type index).
            agent_did: DID of the acting agent (also used for the agent index).
            action: What the agent did.
            resource: Optional resource acted upon.
            data: Sanitized event payload (must not contain secrets).
            outcome: success, failure, denied, or error.
            policy_decision: Outcome of policy evaluation, if any.
            trace_id: Optional trace-correlation id.

        Returns:
            The chained, hashed AuditEntry.
        """
        entry = AuditEntry(
            event_type=event_type,
            agent_did=agent_did,
            action=action,
            resource=resource,
            data=data or {},
            outcome=outcome,
            policy_decision=policy_decision,
            trace_id=trace_id,
        )

        self._chain.add_entry(entry)

        # Index for fast per-agent / per-type retrieval.
        self._by_agent.setdefault(agent_did, []).append(entry.entry_id)
        self._by_type.setdefault(event_type, []).append(entry.entry_id)

        return entry

    def get_entry(self, entry_id: str) -> Optional[AuditEntry]:
        """Get an entry by ID, or None if absent."""
        for entry in self._chain._entries:
            if entry.entry_id == entry_id:
                return entry
        return None

    def get_entries_for_agent(
        self,
        agent_did: str,
        limit: int = 100,
    ) -> list[AuditEntry]:
        """Get recent entries for an agent, in chain order."""
        # A set turns the per-entry membership test from O(limit) into O(1).
        wanted = set(self._by_agent.get(agent_did, [])[-limit:])
        return [e for e in self._chain._entries if e.entry_id in wanted]

    def get_entries_by_type(
        self,
        event_type: str,
        limit: int = 100,
    ) -> list[AuditEntry]:
        """Get recent entries of a type, in chain order."""
        wanted = set(self._by_type.get(event_type, [])[-limit:])
        return [e for e in self._chain._entries if e.entry_id in wanted]

    def query(
        self,
        agent_did: Optional[str] = None,
        event_type: Optional[str] = None,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
        outcome: Optional[str] = None,
        limit: int = 100,
    ) -> list[AuditEntry]:
        """Query audit entries with filters.

        Each supplied filter narrows the result; `limit` keeps the most
        recent matches (entries are stored in chain order).
        """
        results = self._chain._entries

        if agent_did:
            results = [e for e in results if e.agent_did == agent_did]

        if event_type:
            results = [e for e in results if e.event_type == event_type]

        if start_time:
            results = [e for e in results if e.timestamp >= start_time]

        if end_time:
            results = [e for e in results if e.timestamp <= end_time]

        if outcome:
            results = [e for e in results if e.outcome == outcome]

        return results[-limit:]

    def verify_integrity(self) -> tuple[bool, Optional[str]]:
        """Verify the entire audit log integrity; returns (ok, error)."""
        return self._chain.verify_chain()

    def get_proof(self, entry_id: str) -> Optional[dict]:
        """Get tamper-proof evidence for an entry.

        Fix: an empty proof list is valid for a single-entry log (the leaf
        is the root), so only a None proof — unknown entry — is treated as
        failure. Previously `if not proof` conflated the two.
        """
        entry = self.get_entry(entry_id)
        if not entry:
            return None

        proof = self._chain.get_proof(entry_id)
        if proof is None:
            return None

        return {
            "entry": entry.model_dump(),
            "merkle_proof": proof,
            "merkle_root": self._chain.get_root_hash(),
            "verified": self._chain.verify_proof(
                entry.entry_hash, proof, self._chain.get_root_hash()
            ),
        }

    def export(
        self,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
    ) -> dict:
        """Export audit log for external verification."""
        entries = self.query(start_time=start_time, end_time=end_time, limit=10000)

        return {
            # NOTE(review): utcnow() is naive and deprecated in 3.12 — kept to
            # match entry timestamps; migrate both together.
            "exported_at": datetime.utcnow().isoformat(),
            "merkle_root": self._chain.get_root_hash(),
            "entry_count": len(entries),
            "entries": [e.model_dump() for e in entries],
        }