agentmesh-platform 1.0.0a1__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentmesh/__init__.py +119 -0
- agentmesh/cli/__init__.py +10 -0
- agentmesh/cli/main.py +405 -0
- agentmesh/governance/__init__.py +26 -0
- agentmesh/governance/audit.py +381 -0
- agentmesh/governance/compliance.py +447 -0
- agentmesh/governance/policy.py +385 -0
- agentmesh/governance/shadow.py +266 -0
- agentmesh/identity/__init__.py +30 -0
- agentmesh/identity/agent_id.py +319 -0
- agentmesh/identity/credentials.py +323 -0
- agentmesh/identity/delegation.py +281 -0
- agentmesh/identity/risk.py +279 -0
- agentmesh/identity/spiffe.py +230 -0
- agentmesh/identity/sponsor.py +178 -0
- agentmesh/reward/__init__.py +19 -0
- agentmesh/reward/engine.py +454 -0
- agentmesh/reward/learning.py +287 -0
- agentmesh/reward/scoring.py +203 -0
- agentmesh/trust/__init__.py +19 -0
- agentmesh/trust/bridge.py +386 -0
- agentmesh/trust/capability.py +293 -0
- agentmesh/trust/handshake.py +334 -0
- agentmesh_platform-1.0.0a1.dist-info/METADATA +332 -0
- agentmesh_platform-1.0.0a1.dist-info/RECORD +28 -0
- agentmesh_platform-1.0.0a1.dist-info/WHEEL +4 -0
- agentmesh_platform-1.0.0a1.dist-info/entry_points.txt +2 -0
- agentmesh_platform-1.0.0a1.dist-info/licenses/LICENSE +190 -0
agentmesh/reward/engine.py
@@ -0,0 +1,454 @@
+"""
+Reward Engine
+
+The platform's ability to learn. Per-agent trust scores from
+multi-dimensional reward signals, updated every ≤30s.
+"""
+
+from datetime import datetime, timedelta
+from typing import Optional, Callable
+from pydantic import BaseModel, Field
+import asyncio
+
+from .scoring import TrustScore, RewardDimension, RewardSignal, DimensionType
+
+
+class RewardConfig(BaseModel):
+    """Configuration for the reward engine."""
+
+    # Update frequency
+    update_interval_seconds: int = Field(default=30, ge=1, le=300)
+
+    # Thresholds
+    revocation_threshold: int = Field(default=300, ge=0, le=1000)
+    warning_threshold: int = Field(default=500, ge=0, le=1000)
+
+    # Dimension weights (must sum to 1.0)
+    policy_compliance_weight: float = Field(default=0.25)
+    resource_efficiency_weight: float = Field(default=0.15)
+    output_quality_weight: float = Field(default=0.20)
+    security_posture_weight: float = Field(default=0.25)
+    collaboration_health_weight: float = Field(default=0.15)
+
+    def validate_weights(self) -> bool:
+        """Verify weights sum to 1.0."""
+        total = (
+            self.policy_compliance_weight +
+            self.resource_efficiency_weight +
+            self.output_quality_weight +
+            self.security_posture_weight +
+            self.collaboration_health_weight
+        )
+        return abs(total - 1.0) < 0.001
+
+
+class AgentRewardState(BaseModel):
+    """Current reward state for an agent."""
+
+    agent_did: str
+
+    # Current score
+    trust_score: TrustScore
+
+    # Dimension scores
+    dimensions: dict[str, RewardDimension] = Field(default_factory=dict)
+
+    # Recent signals
+    recent_signals: list[RewardSignal] = Field(default_factory=list)
+    max_signals: int = Field(default=1000)
+
+    # History
+    score_history: list[tuple[datetime, int]] = Field(default_factory=list)
+    max_history: int = Field(default=100)
+
+    # Status
+    last_updated: datetime = Field(default_factory=datetime.utcnow)
+    revoked: bool = False
+    revoked_at: Optional[datetime] = None
+    revocation_reason: Optional[str] = None
+
+    def add_signal(self, signal: RewardSignal) -> None:
+        """Add a reward signal."""
+        self.recent_signals.append(signal)
+
+        # Trim if needed
+        if len(self.recent_signals) > self.max_signals:
+            self.recent_signals = self.recent_signals[-self.max_signals:]
+
+    def record_score(self, score: int) -> None:
+        """Record score in history."""
+        self.score_history.append((datetime.utcnow(), score))
+
+        if len(self.score_history) > self.max_history:
+            self.score_history = self.score_history[-self.max_history:]
+
+
+class RewardEngine:
+    """
+    The Reward Agent - runtime learning, not static rules.
+
+    Scores every action against 5 dimensions:
+    1. Policy Compliance - Did the action violate any policy?
+    2. Resource Efficiency - Was compute/token usage proportionate?
+    3. Output Quality - Did downstream accept or reject output?
+    4. Security Posture - Did agent stay in trust boundary?
+    5. Collaboration Health - Did inter-agent handoffs complete?
+
+    Features:
+    - Per-agent trust scores updated every ≤30s
+    - Automatic credential revocation on breach
+    - Operator-tunable weights
+    - Fully explainable scores
+    """
+
+    def __init__(self, config: Optional[RewardConfig] = None):
+        self.config = config or RewardConfig()
+        self._agents: dict[str, AgentRewardState] = {}
+        self._revocation_callbacks: list[Callable] = []
+        self._running = False
+
+    def get_agent_score(self, agent_did: str) -> TrustScore:
+        """Get current trust score for an agent."""
+        state = self._get_or_create_state(agent_did)
+        return state.trust_score
+
+    def record_signal(
+        self,
+        agent_did: str,
+        dimension: DimensionType,
+        value: float,
+        source: str,
+        details: Optional[str] = None,
+    ) -> None:
+        """
+        Record a reward signal for an agent.
+
+        Args:
+            agent_did: The agent's DID
+            dimension: Which dimension this affects
+            value: Signal value (0.0 = bad, 1.0 = good)
+            source: Where the signal came from
+            details: Optional details
+        """
+        state = self._get_or_create_state(agent_did)
+
+        signal = RewardSignal(
+            dimension=dimension,
+            value=value,
+            source=source,
+            details=details,
+        )
+
+        state.add_signal(signal)
+
+        # Immediate recalculation for critical signals
+        if value < 0.3:
+            self._recalculate_score(agent_did)
+
+    def record_policy_compliance(
+        self,
+        agent_did: str,
+        compliant: bool,
+        policy_name: Optional[str] = None,
+    ) -> None:
+        """Record a policy compliance signal."""
+        self.record_signal(
+            agent_did=agent_did,
+            dimension=DimensionType.POLICY_COMPLIANCE,
+            value=1.0 if compliant else 0.0,
+            source="policy_engine",
+            details=f"Policy: {policy_name}" if policy_name else None,
+        )
+
+    def record_resource_usage(
+        self,
+        agent_did: str,
+        tokens_used: int,
+        tokens_budget: int,
+        compute_ms: int,
+        compute_budget_ms: int,
+    ) -> None:
+        """Record resource efficiency signal."""
+        # Calculate efficiency (1.0 = perfect, 0.0 = over budget)
+        token_efficiency = min(1.0, tokens_budget / max(1, tokens_used))
+        compute_efficiency = min(1.0, compute_budget_ms / max(1, compute_ms))
+
+        efficiency = (token_efficiency + compute_efficiency) / 2
+
+        self.record_signal(
+            agent_did=agent_did,
+            dimension=DimensionType.RESOURCE_EFFICIENCY,
+            value=efficiency,
+            source="resource_monitor",
+            details=f"tokens={tokens_used}/{tokens_budget}, compute={compute_ms}/{compute_budget_ms}ms",
+        )
+
+    def record_output_quality(
+        self,
+        agent_did: str,
+        accepted: bool,
+        consumer: str,
+        rejection_reason: Optional[str] = None,
+    ) -> None:
+        """Record output quality signal from downstream consumer."""
+        self.record_signal(
+            agent_did=agent_did,
+            dimension=DimensionType.OUTPUT_QUALITY,
+            value=1.0 if accepted else 0.0,
+            source=f"consumer:{consumer}",
+            details=rejection_reason,
+        )
+
+    def record_security_event(
+        self,
+        agent_did: str,
+        within_boundary: bool,
+        event_type: str,
+    ) -> None:
+        """Record security posture signal."""
+        self.record_signal(
+            agent_did=agent_did,
+            dimension=DimensionType.SECURITY_POSTURE,
+            value=1.0 if within_boundary else 0.0,
+            source="security_monitor",
+            details=event_type,
+        )
+
+    def record_collaboration(
+        self,
+        agent_did: str,
+        handoff_successful: bool,
+        peer_did: str,
+    ) -> None:
+        """Record collaboration health signal."""
+        self.record_signal(
+            agent_did=agent_did,
+            dimension=DimensionType.COLLABORATION_HEALTH,
+            value=1.0 if handoff_successful else 0.0,
+            source=f"collaboration:{peer_did}",
+        )
+
+    def _recalculate_score(self, agent_did: str) -> TrustScore:
+        """
+        Recalculate trust score from recent signals.
+
+        Score is calculated as weighted sum of dimension scores,
+        where each dimension is the average of recent signals.
+        """
+        state = self._get_or_create_state(agent_did)
+
+        # Calculate dimension scores
+        dimension_scores = {}
+        for dim_type in DimensionType:
+            signals = [s for s in state.recent_signals if s.dimension == dim_type]
+
+            if signals:
+                # Weighted by recency
+                total = 0
+                weight_sum = 0
+                for i, signal in enumerate(signals[-100:]):  # Last 100
+                    weight = 1.0 + (i / 100)  # More recent = higher weight
+                    total += signal.value * weight
+                    weight_sum += weight
+
+                score = (total / weight_sum) * 100 if weight_sum > 0 else 50
+            else:
+                score = 50  # Neutral default
+
+            dimension_scores[dim_type.value] = score
+
+            state.dimensions[dim_type.value] = RewardDimension(
+                name=dim_type.value,
+                score=score,
+                signal_count=len(signals),
+            )
+
+        # Calculate weighted total
+        weights = {
+            DimensionType.POLICY_COMPLIANCE.value: self.config.policy_compliance_weight,
+            DimensionType.RESOURCE_EFFICIENCY.value: self.config.resource_efficiency_weight,
+            DimensionType.OUTPUT_QUALITY.value: self.config.output_quality_weight,
+            DimensionType.SECURITY_POSTURE.value: self.config.security_posture_weight,
+            DimensionType.COLLABORATION_HEALTH.value: self.config.collaboration_health_weight,
+        }
+
+        total_score = sum(
+            dimension_scores.get(dim, 50) * weight
+            for dim, weight in weights.items()
+        )
+
+        # Scale to 0-1000
+        total_score = int(total_score * 10)
+        total_score = max(0, min(1000, total_score))
+
+        # Update state
+        state.trust_score = TrustScore(
+            agent_did=agent_did,
+            total_score=total_score,
+            dimensions=state.dimensions,
+        )
+        state.record_score(total_score)
+        state.last_updated = datetime.utcnow()
+
+        # Check for revocation
+        if total_score < self.config.revocation_threshold and not state.revoked:
+            self._trigger_revocation(agent_did, f"Trust score {total_score} below threshold")
+
+        return state.trust_score
+
+    def _trigger_revocation(self, agent_did: str, reason: str) -> None:
+        """Trigger automatic credential revocation."""
+        state = self._agents.get(agent_did)
+        if not state:
+            return
+
+        state.revoked = True
+        state.revoked_at = datetime.utcnow()
+        state.revocation_reason = reason
+
+        # Notify callbacks
+        for callback in self._revocation_callbacks:
+            try:
+                callback(agent_did, reason)
+            except Exception:
+                pass
+
+    def on_revocation(self, callback: Callable) -> None:
+        """Register callback for automatic revocations."""
+        self._revocation_callbacks.append(callback)
+
+    def get_score_explanation(self, agent_did: str) -> dict:
+        """
+        Get fully explainable breakdown of an agent's score.
+
+        Returns breakdown and contributing factors.
+        """
+        state = self._get_or_create_state(agent_did)
+
+        return {
+            "agent_did": agent_did,
+            "total_score": state.trust_score.total_score,
+            "dimensions": {
+                name: {
+                    "score": dim.score,
+                    "signal_count": dim.signal_count,
+                    "weight": getattr(self.config, f"{name}_weight", 0),
+                    "contribution": dim.score * getattr(self.config, f"{name}_weight", 0),
+                }
+                for name, dim in state.dimensions.items()
+            },
+            "recent_signals": [
+                {
+                    "dimension": s.dimension.value,
+                    "value": s.value,
+                    "source": s.source,
+                    "timestamp": s.timestamp.isoformat(),
+                }
+                for s in state.recent_signals[-10:]
+            ],
+            "trend": self._calculate_trend(state),
+            "revoked": state.revoked,
+            "revocation_reason": state.revocation_reason,
+        }
+
+    def _calculate_trend(self, state: AgentRewardState) -> str:
+        """Calculate score trend."""
+        if len(state.score_history) < 2:
+            return "stable"
+
+        recent = [s for t, s in state.score_history[-10:]]
+        if len(recent) < 2:
+            return "stable"
+
+        avg_recent = sum(recent[-5:]) / len(recent[-5:])
+        avg_older = sum(recent[:-5]) / len(recent[:-5]) if len(recent) > 5 else avg_recent
+
+        if avg_recent > avg_older + 50:
+            return "improving"
+        elif avg_recent < avg_older - 50:
+            return "degrading"
+        else:
+            return "stable"
+
+    def _get_or_create_state(self, agent_did: str) -> AgentRewardState:
+        """Get or create agent state."""
+        if agent_did not in self._agents:
+            self._agents[agent_did] = AgentRewardState(
+                agent_did=agent_did,
+                trust_score=TrustScore(agent_did=agent_did),
+            )
+        return self._agents[agent_did]
+
+    async def start_background_updates(self) -> None:
+        """Start background score updates."""
+        self._running = True
+        while self._running:
+            for agent_did in list(self._agents.keys()):
+                self._recalculate_score(agent_did)
+            await asyncio.sleep(self.config.update_interval_seconds)
+
+    def stop_background_updates(self) -> None:
+        """Stop background updates."""
+        self._running = False
+
+    def update_weights(
+        self,
+        policy_compliance: Optional[float] = None,
+        resource_efficiency: Optional[float] = None,
+        output_quality: Optional[float] = None,
+        security_posture: Optional[float] = None,
+        collaboration_health: Optional[float] = None,
+    ) -> bool:
+        """
+        Update dimension weights.
+
+        Weight changes effective within 60s.
+        """
+        if policy_compliance is not None:
+            self.config.policy_compliance_weight = policy_compliance
+        if resource_efficiency is not None:
+            self.config.resource_efficiency_weight = resource_efficiency
+        if output_quality is not None:
+            self.config.output_quality_weight = output_quality
+        if security_posture is not None:
+            self.config.security_posture_weight = security_posture
+        if collaboration_health is not None:
+            self.config.collaboration_health_weight = collaboration_health
+
+        return self.config.validate_weights()
+
+    def get_agents_at_risk(self) -> list[str]:
+        """Get agents with scores approaching revocation threshold."""
+        at_risk = []
+        for agent_did, state in self._agents.items():
+            if not state.revoked:
+                if state.trust_score.total_score < self.config.warning_threshold:
+                    at_risk.append(agent_did)
+        return at_risk
+
+    def get_health_report(self, days: int = 7) -> dict:
+        """Get longitudinal health report."""
+        cutoff = datetime.utcnow() - timedelta(days=days)
+
+        report = {
+            "period_days": days,
+            "total_agents": len(self._agents),
+            "revoked_agents": len([s for s in self._agents.values() if s.revoked]),
+            "at_risk_agents": len(self.get_agents_at_risk()),
+            "agents": {},
+        }
+
+        for agent_did, state in self._agents.items():
+            history = [(t, s) for t, s in state.score_history if t >= cutoff]
+            if history:
+                scores = [s for t, s in history]
+                report["agents"][agent_did] = {
+                    "current_score": state.trust_score.total_score,
+                    "min_score": min(scores),
+                    "max_score": max(scores),
+                    "avg_score": sum(scores) / len(scores),
+                    "trend": self._calculate_trend(state),
+                    "revoked": state.revoked,
+                }
+
+        return report
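
For orientation, the sketch below exercises the RewardEngine API defined in the file above. It is an illustrative example, not part of the published wheel: the agent DID, policy name, consumer name, and signal values are invented, and it relies only on the classes and method signatures visible in the diff.

    # Illustrative sketch only; not shipped in agentmesh-platform 1.0.0a1.
    # The DID, policy name, consumer, and signal values are invented.
    from agentmesh.reward.engine import RewardConfig, RewardEngine

    engine = RewardEngine(RewardConfig())  # default weights 0.25/0.15/0.20/0.25/0.15
    agent = "did:agent:example-worker"     # hypothetical agent DID

    # Fires when a trust score drops below revocation_threshold (default 300).
    engine.on_revocation(lambda did, reason: print(f"revoked {did}: {reason}"))

    # Feed signals across the five dimensions.
    engine.record_policy_compliance(agent, compliant=True, policy_name="data-egress")
    engine.record_resource_usage(
        agent, tokens_used=800, tokens_budget=1000, compute_ms=400, compute_budget_ms=500
    )
    engine.record_output_quality(agent, accepted=True, consumer="report-builder")
    engine.record_security_event(agent, within_boundary=True, event_type="tool_call")
    engine.record_collaboration(agent, handoff_successful=False, peer_did="did:agent:peer")

    # The failed handoff is a 0.0 signal (< 0.3), so the score was recalculated immediately.
    print(engine.get_agent_score(agent).total_score)      # 0-1000 scale
    print(engine.get_score_explanation(agent)["trend"])   # "improving" / "stable" / "degrading"

    # Operator-tunable weights; returns False if the new weights no longer sum to 1.0.
    engine.update_weights(policy_compliance=0.30, resource_efficiency=0.10)

With the default weights, a recalculation in which the recency-weighted dimension averages come out to 80, 60, 70, 90, and 50 yields 80*0.25 + 60*0.15 + 70*0.20 + 90*0.25 + 50*0.15 = 73.0, which _recalculate_score scales by 10 to a trust score of 730 on the 0-1000 range.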