proxilion 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94)
  1. proxilion/__init__.py +136 -0
  2. proxilion/audit/__init__.py +133 -0
  3. proxilion/audit/base_exporters.py +527 -0
  4. proxilion/audit/compliance/__init__.py +130 -0
  5. proxilion/audit/compliance/base.py +457 -0
  6. proxilion/audit/compliance/eu_ai_act.py +603 -0
  7. proxilion/audit/compliance/iso27001.py +544 -0
  8. proxilion/audit/compliance/soc2.py +491 -0
  9. proxilion/audit/events.py +493 -0
  10. proxilion/audit/explainability.py +1173 -0
  11. proxilion/audit/exporters/__init__.py +58 -0
  12. proxilion/audit/exporters/aws_s3.py +636 -0
  13. proxilion/audit/exporters/azure_storage.py +608 -0
  14. proxilion/audit/exporters/cloud_base.py +468 -0
  15. proxilion/audit/exporters/gcp_storage.py +570 -0
  16. proxilion/audit/exporters/multi_exporter.py +498 -0
  17. proxilion/audit/hash_chain.py +652 -0
  18. proxilion/audit/logger.py +543 -0
  19. proxilion/caching/__init__.py +49 -0
  20. proxilion/caching/tool_cache.py +633 -0
  21. proxilion/context/__init__.py +73 -0
  22. proxilion/context/context_window.py +556 -0
  23. proxilion/context/message_history.py +505 -0
  24. proxilion/context/session.py +735 -0
  25. proxilion/contrib/__init__.py +51 -0
  26. proxilion/contrib/anthropic.py +609 -0
  27. proxilion/contrib/google.py +1012 -0
  28. proxilion/contrib/langchain.py +641 -0
  29. proxilion/contrib/mcp.py +893 -0
  30. proxilion/contrib/openai.py +646 -0
  31. proxilion/core.py +3058 -0
  32. proxilion/decorators.py +966 -0
  33. proxilion/engines/__init__.py +287 -0
  34. proxilion/engines/base.py +266 -0
  35. proxilion/engines/casbin_engine.py +412 -0
  36. proxilion/engines/opa_engine.py +493 -0
  37. proxilion/engines/simple.py +437 -0
  38. proxilion/exceptions.py +887 -0
  39. proxilion/guards/__init__.py +54 -0
  40. proxilion/guards/input_guard.py +522 -0
  41. proxilion/guards/output_guard.py +634 -0
  42. proxilion/observability/__init__.py +198 -0
  43. proxilion/observability/cost_tracker.py +866 -0
  44. proxilion/observability/hooks.py +683 -0
  45. proxilion/observability/metrics.py +798 -0
  46. proxilion/observability/session_cost_tracker.py +1063 -0
  47. proxilion/policies/__init__.py +67 -0
  48. proxilion/policies/base.py +304 -0
  49. proxilion/policies/builtin.py +486 -0
  50. proxilion/policies/registry.py +376 -0
  51. proxilion/providers/__init__.py +201 -0
  52. proxilion/providers/adapter.py +468 -0
  53. proxilion/providers/anthropic_adapter.py +330 -0
  54. proxilion/providers/gemini_adapter.py +391 -0
  55. proxilion/providers/openai_adapter.py +294 -0
  56. proxilion/py.typed +0 -0
  57. proxilion/resilience/__init__.py +81 -0
  58. proxilion/resilience/degradation.py +615 -0
  59. proxilion/resilience/fallback.py +555 -0
  60. proxilion/resilience/retry.py +554 -0
  61. proxilion/scheduling/__init__.py +57 -0
  62. proxilion/scheduling/priority_queue.py +419 -0
  63. proxilion/scheduling/scheduler.py +459 -0
  64. proxilion/security/__init__.py +244 -0
  65. proxilion/security/agent_trust.py +968 -0
  66. proxilion/security/behavioral_drift.py +794 -0
  67. proxilion/security/cascade_protection.py +869 -0
  68. proxilion/security/circuit_breaker.py +428 -0
  69. proxilion/security/cost_limiter.py +690 -0
  70. proxilion/security/idor_protection.py +460 -0
  71. proxilion/security/intent_capsule.py +849 -0
  72. proxilion/security/intent_validator.py +495 -0
  73. proxilion/security/memory_integrity.py +767 -0
  74. proxilion/security/rate_limiter.py +509 -0
  75. proxilion/security/scope_enforcer.py +680 -0
  76. proxilion/security/sequence_validator.py +636 -0
  77. proxilion/security/trust_boundaries.py +784 -0
  78. proxilion/streaming/__init__.py +70 -0
  79. proxilion/streaming/detector.py +761 -0
  80. proxilion/streaming/transformer.py +674 -0
  81. proxilion/timeouts/__init__.py +55 -0
  82. proxilion/timeouts/decorators.py +477 -0
  83. proxilion/timeouts/manager.py +545 -0
  84. proxilion/tools/__init__.py +69 -0
  85. proxilion/tools/decorators.py +493 -0
  86. proxilion/tools/registry.py +732 -0
  87. proxilion/types.py +339 -0
  88. proxilion/validation/__init__.py +93 -0
  89. proxilion/validation/pydantic_schema.py +351 -0
  90. proxilion/validation/schema.py +651 -0
  91. proxilion-0.0.1.dist-info/METADATA +872 -0
  92. proxilion-0.0.1.dist-info/RECORD +94 -0
  93. proxilion-0.0.1.dist-info/WHEEL +4 -0
  94. proxilion-0.0.1.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,603 @@
1
+ """
2
+ EU AI Act compliance exporter.
3
+
4
+ Provides audit log export formats for EU AI Act compliance,
5
+ specifically targeting:
6
+ - Article 14: Human Oversight
7
+ - Article 15: Accuracy, Robustness and Cybersecurity
8
+ - Article 17: Quality Management System
9
+
10
+ Example:
11
+ >>> from proxilion.audit import InMemoryAuditLogger
12
+ >>> from proxilion.audit.compliance import EUAIActExporter
13
+ >>> from datetime import datetime, timedelta, timezone
14
+ >>>
15
+ >>> logger = InMemoryAuditLogger()
16
+ >>> # ... log events ...
17
+ >>>
18
+ >>> exporter = EUAIActExporter(
19
+ ... logger,
20
+ ... organization="Acme Corp",
21
+ ... system_name="Customer Service AI",
22
+ ... responsible_party="AI Governance Team",
23
+ ... )
24
+ >>>
25
+ >>> end = datetime.now(timezone.utc)
26
+ >>> start = end - timedelta(days=30)
27
+ >>>
28
+ >>> # Export human oversight evidence
29
+ >>> oversight = exporter.export_human_oversight_evidence(start, end)
30
+ >>>
31
+ >>> # Generate full compliance report
32
+ >>> report = exporter.generate_compliance_report(start, end)
33
+ """
34
+
35
+ from __future__ import annotations
36
+
37
+ from dataclasses import dataclass, field
38
+ from datetime import datetime, timezone # noqa: F401 (timezone used in docstring)
39
+ from typing import Any
40
+
41
+ from proxilion.audit.compliance.base import (
42
+ BaseComplianceExporter,
43
+ ComplianceEvidence,
44
+ ComplianceFramework,
45
+ ComplianceReport,
46
+ EventSource,
47
+ )
48
+ from proxilion.audit.events import AuditEventV2, EventType
49
+
50
+
51
@dataclass
class HumanOversightEvidence:
    """
    Evidence bundle for human-oversight capability under EU AI Act Article 14.

    Attributes:
        approval_requests: Events where a human approval was requested.
        override_events: Events where a human overrode an AI decision.
        intervention_points: Documented points where intervention occurred.
        denied_requests: Requests blocked by human oversight.
        total_decisions: Count of all decisions in the reporting period.
        human_involvement_rate: Fraction of decisions with human involvement.
    """
    approval_requests: list[dict[str, Any]] = field(default_factory=list)
    override_events: list[dict[str, Any]] = field(default_factory=list)
    intervention_points: list[dict[str, Any]] = field(default_factory=list)
    denied_requests: list[dict[str, Any]] = field(default_factory=list)
    total_decisions: int = 0
    human_involvement_rate: float = 0.0

    def to_dict(self) -> dict[str, Any]:
        """Serialize the evidence, including a per-category count summary."""
        counts = {
            "approval_request_count": len(self.approval_requests),
            "override_count": len(self.override_events),
            "intervention_point_count": len(self.intervention_points),
            "denied_count": len(self.denied_requests),
        }
        return {
            "approval_requests": self.approval_requests,
            "override_events": self.override_events,
            "intervention_points": self.intervention_points,
            "denied_requests": self.denied_requests,
            "total_decisions": self.total_decisions,
            "human_involvement_rate": self.human_involvement_rate,
            "summary": counts,
        }
87
+
88
+
89
@dataclass
class DecisionAuditTrailEntry:
    """
    One record in the chronological decision audit trail.

    Attributes:
        timestamp: When the decision was made.
        decision_id: Unique identifier for this decision.
        decision_type: Kind of decision (authorization, tool_call, etc.).
        inputs: Inputs that fed the decision.
        outputs: Results produced by the decision.
        user_context: Snapshot of the user context at decision time.
        ai_system_id: Identifier of the AI system that decided.
        rationale: Optional explanation for the decision.
    """
    timestamp: datetime
    decision_id: str
    decision_type: str
    inputs: dict[str, Any]
    outputs: dict[str, Any]
    user_context: dict[str, Any]
    ai_system_id: str
    rationale: str | None = None

    def to_dict(self) -> dict[str, Any]:
        """Serialize the entry; the timestamp is rendered in ISO-8601 form."""
        serialized: dict[str, Any] = {"timestamp": self.timestamp.isoformat()}
        serialized["decision_id"] = self.decision_id
        serialized["decision_type"] = self.decision_type
        serialized["inputs"] = self.inputs
        serialized["outputs"] = self.outputs
        serialized["user_context"] = self.user_context
        serialized["ai_system_id"] = self.ai_system_id
        serialized["rationale"] = self.rationale
        return serialized
125
+
126
+
127
@dataclass
class RiskAssessmentEntry:
    """
    One record in the Article 15 risk assessment log.

    Attributes:
        timestamp: When the risk was identified.
        event_id: Identifier of the related audit event.
        risk_type: Category of the identified risk.
        severity: Severity level (low, medium, high, critical).
        description: Human-readable description of the risk.
        mitigation_action: Action taken to mitigate, if any.
        resolved: Whether the risk has been resolved.
    """
    timestamp: datetime
    event_id: str
    risk_type: str
    severity: str
    description: str
    mitigation_action: str | None = None
    resolved: bool = False

    def to_dict(self) -> dict[str, Any]:
        """Serialize the entry; the timestamp is rendered in ISO-8601 form."""
        return {
            "timestamp": self.timestamp.isoformat(),
            "event_id": self.event_id,
            "risk_type": self.risk_type,
            "severity": self.severity,
            "description": self.description,
            "mitigation_action": self.mitigation_action,
            "resolved": self.resolved,
        }
160
+
161
+
162
class EUAIActExporter(BaseComplianceExporter):
    """
    Export audit logs in a format suitable for EU AI Act documentation.

    Produces the records that high-risk AI systems must maintain:

    - Article 14: evidence of human oversight capability
    - Article 14.4: chronological operation logs
    - Article 15: accuracy, robustness and cybersecurity records
    - Article 17: quality management system metrics

    Example:
        >>> exporter = EUAIActExporter(
        ...     logger,
        ...     organization="Acme Corp",
        ...     system_name="Risk Assessment AI",
        ...     responsible_party="compliance@acme.com",
        ... )
        >>> oversight = exporter.export_human_oversight_evidence(start, end)
        >>> trail = exporter.export_decision_audit_trail(start, end)
        >>> report = exporter.generate_compliance_report(start, end)
    """

    def __init__(
        self,
        event_source: EventSource | list[AuditEventV2],
        organization: str = "",
        system_name: str = "",
        responsible_party: str = "",
        ai_system_id: str = "",
        risk_classification: str = "high-risk",
    ) -> None:
        """
        Initialize the EU AI Act exporter.

        Args:
            event_source: Source of audit events.
            organization: Organization name.
            system_name: AI system name.
            responsible_party: Responsible party contact.
            ai_system_id: Unique identifier for the AI system.
            risk_classification: Risk classification (high-risk, limited-risk, etc.).
        """
        super().__init__(event_source, organization, system_name, responsible_party)
        # Fall back to the system name when no explicit ID was supplied.
        self._ai_system_id = ai_system_id or system_name
        self._risk_classification = risk_classification

    @property
    def framework(self) -> ComplianceFramework:
        """The compliance framework this exporter targets."""
        return ComplianceFramework.EU_AI_ACT

    @property
    def framework_version(self) -> str:
        """Version label of the EU AI Act framework."""
        return "2024"

    def export_human_oversight_evidence(
        self,
        start: datetime,
        end: datetime,
    ) -> HumanOversightEvidence:
        """
        Collect evidence of human oversight capability (Article 14).

        Article 14 requires that high-risk AI systems offer appropriate
        human-machine interfaces, interpretable outputs, and the ability
        for humans to intervene, interrupt, or decline to use the system.

        Args:
            start: Start of the reporting period.
            end: End of the reporting period.

        Returns:
            HumanOversightEvidence with events sorted into categories.
        """
        period_events = self.filter_by_date_range(start, end)
        evidence = HumanOversightEvidence(total_decisions=len(period_events))

        # Automated protective actions that count as intervention points.
        security_interventions = (
            EventType.RATE_LIMIT_EXCEEDED,
            EventType.CIRCUIT_BREAKER_OPEN,
            EventType.IDOR_VIOLATION,
        )

        for event in period_events:
            record = self.event_to_evidence_dict(event)
            meta = event.data.authorization_metadata

            # Tools flagged as requiring human approval.
            if meta.get("requires_approval"):
                evidence.approval_requests.append(record)

            # Cases where a human overrode the AI decision.
            if meta.get("human_override"):
                evidence.override_events.append(record)

            # A denied authorization is both a denial and an intervention.
            if event.data.event_type == EventType.AUTHORIZATION_DENIED:
                evidence.denied_requests.append(record)
                denial = dict(record)
                denial["intervention_type"] = "authorization_denied"
                denial["reason"] = event.data.authorization_reason
                evidence.intervention_points.append(denial)

            if event.data.event_type in security_interventions:
                intervention = dict(record)
                intervention["intervention_type"] = event.data.event_type.value
                evidence.intervention_points.append(intervention)

        # NOTE(review): an event matching more than one category is counted
        # once per category here, so this rate is an upper-bound estimate.
        involved = (
            len(evidence.approval_requests)
            + len(evidence.override_events)
            + len(evidence.denied_requests)
        )
        if evidence.total_decisions > 0:
            evidence.human_involvement_rate = involved / evidence.total_decisions

        return evidence

    def export_decision_audit_trail(
        self,
        start: datetime,
        end: datetime,
    ) -> list[DecisionAuditTrailEntry]:
        """
        Build the chronological decision audit trail (Article 14.4).

        Article 14.4 requires operators to keep logs of the high-risk
        AI system's operation.

        Args:
            start: Start of the reporting period.
            end: End of the reporting period.

        Returns:
            List of decision audit trail entries, one per event.
        """
        return [
            DecisionAuditTrailEntry(
                timestamp=ev.timestamp,
                decision_id=ev.event_id,
                decision_type=ev.data.event_type.value,
                inputs={
                    "tool_name": ev.data.tool_name,
                    "tool_arguments": ev.data.tool_arguments,
                },
                outputs={
                    "authorized": ev.data.authorization_allowed,
                    "execution_result": ev.data.execution_result,
                },
                user_context={
                    "user_id": ev.data.user_id,
                    "roles": ev.data.user_roles,
                    "session_id": ev.data.session_id,
                    "attributes": ev.data.user_attributes,
                },
                ai_system_id=self._ai_system_id,
                rationale=ev.data.authorization_reason,
            )
            for ev in self.filter_by_date_range(start, end)
        ]

    @staticmethod
    def _risk_entry(
        event: AuditEventV2,
        risk_type: str,
        severity: str,
        description: str,
        mitigation: str,
        resolved: bool,
    ) -> dict[str, Any]:
        """Build a serialized RiskAssessmentEntry for a single audit event."""
        return RiskAssessmentEntry(
            timestamp=event.timestamp,
            event_id=event.event_id,
            risk_type=risk_type,
            severity=severity,
            description=description,
            mitigation_action=mitigation,
            resolved=resolved,
        ).to_dict()

    def export_risk_assessment_log(
        self,
        start: datetime,
        end: datetime,
    ) -> dict[str, Any]:
        """
        Export risk-related events for Article 15 compliance.

        Article 15 requires appropriate levels of accuracy, robustness
        and cybersecurity.

        Args:
            start: Start of the reporting period.
            end: End of the reporting period.

        Returns:
            Dictionary of categorized risk events plus a summary.
        """
        risk_log: dict[str, Any] = {
            "high_risk_tool_calls": [],
            "denied_requests": [],
            "anomaly_detections": [],
            "security_events": [],
            "summary": {},
        }

        for event in self.filter_by_date_range(start, end):
            record = self.event_to_evidence_dict(event)
            data = event.data
            kind = data.event_type

            # Calls explicitly tagged high-risk by the authorization layer.
            if data.authorization_metadata.get("risk_level") == "high":
                risk_log["high_risk_tool_calls"].append(record)

            if kind == EventType.AUTHORIZATION_DENIED:
                risk_log["denied_requests"].append(self._risk_entry(
                    event,
                    "access_denied",
                    "low",
                    f"Authorization denied for {data.tool_name}",
                    "Request blocked by policy",
                    True,
                ))

            if kind == EventType.RATE_LIMIT_EXCEEDED:
                risk_log["security_events"].append(self._risk_entry(
                    event,
                    "rate_limit",
                    "medium",
                    f"Rate limit exceeded for user {data.user_id}",
                    "Request throttled",
                    True,
                ))

            if kind == EventType.CIRCUIT_BREAKER_OPEN:
                # Breaker stays open until it resets, so not resolved yet.
                risk_log["security_events"].append(self._risk_entry(
                    event,
                    "circuit_breaker",
                    "high",
                    f"Circuit breaker opened for {data.tool_name}",
                    "Tool temporarily disabled",
                    False,
                ))

            if kind == EventType.IDOR_VIOLATION:
                # IDOR attempts count both as security events and anomalies.
                entry = self._risk_entry(
                    event,
                    "idor_violation",
                    "critical",
                    f"IDOR violation by user {data.user_id}",
                    "Request blocked",
                    True,
                )
                risk_log["security_events"].append(entry)
                risk_log["anomaly_detections"].append(dict(entry))

            if kind == EventType.SCHEMA_VALIDATION_FAILURE:
                risk_log["anomaly_detections"].append(self._risk_entry(
                    event,
                    "validation_failure",
                    "medium",
                    f"Schema validation failed for {data.tool_name}",
                    "Request rejected",
                    True,
                ))

        risk_log["summary"] = {
            "total_high_risk_calls": len(risk_log["high_risk_tool_calls"]),
            "total_denied": len(risk_log["denied_requests"]),
            "total_anomalies": len(risk_log["anomaly_detections"]),
            "total_security_events": len(risk_log["security_events"]),
            "period_start": start.isoformat(),
            "period_end": end.isoformat(),
        }

        return risk_log

    def generate_report(
        self,
        start: datetime,
        end: datetime,
    ) -> ComplianceReport:
        """Generate a complete EU AI Act compliance report (framework hook)."""
        return self.generate_compliance_report(start, end)

    def generate_compliance_report(
        self,
        start: datetime,
        end: datetime,
    ) -> ComplianceReport:
        """
        Generate the full EU AI Act compliance report.

        Args:
            start: Start of the reporting period.
            end: End of the reporting period.

        Returns:
            Complete compliance report in markdown-ready format.
        """
        metadata = self.create_metadata(start, end)
        metadata.additional_info = {
            "ai_system_id": self._ai_system_id,
            "risk_classification": self._risk_classification,
        }

        evidence_items: list[ComplianceEvidence] = []
        advice: list[str] = []

        # --- Article 14: human oversight ---------------------------------
        oversight = self.export_human_oversight_evidence(start, end)
        evidence_items.append(ComplianceEvidence(
            control_id="Article 14",
            control_name="Human Oversight",
            evidence_type="operational_logs",
            description=(
                "Evidence of human oversight capabilities including "
                "approval requests, override events, and intervention points."
            ),
            events=oversight.approval_requests + oversight.override_events,
            summary={
                "total_decisions": oversight.total_decisions,
                "approval_requests": len(oversight.approval_requests),
                "override_events": len(oversight.override_events),
                "intervention_points": len(oversight.intervention_points),
                "human_involvement_rate": f"{oversight.human_involvement_rate:.1%}",
            },
            # Any human involvement or intervention capability satisfies 14.
            compliant=(
                oversight.human_involvement_rate > 0
                or len(oversight.intervention_points) > 0
            ),
        ))

        if oversight.human_involvement_rate < 0.1:
            advice.append(
                "Consider increasing human oversight by requiring approval "
                "for high-risk operations."
            )

        # --- Article 14.4: operation logs --------------------------------
        trail = self.export_decision_audit_trail(start, end)
        evidence_items.append(ComplianceEvidence(
            control_id="Article 14.4",
            control_name="Operation Logs",
            evidence_type="audit_trail",
            description=(
                "Chronological record of all AI system operations "
                "including inputs, outputs, and decision rationale."
            ),
            events=[entry.to_dict() for entry in trail[:100]],  # cap for readability
            summary={
                "total_entries": len(trail),
                "unique_users": len({entry.user_context["user_id"] for entry in trail}),
                "unique_tools": len({entry.inputs["tool_name"] for entry in trail}),
            },
            compliant=len(trail) > 0,
            notes=(
                f"Showing first 100 of {len(trail)} entries."
                if len(trail) > 100 else None
            ),
        ))

        # --- Article 15: accuracy, robustness, cybersecurity -------------
        risk_log = self.export_risk_assessment_log(start, end)
        evidence_items.append(ComplianceEvidence(
            control_id="Article 15",
            control_name="Accuracy, Robustness and Cybersecurity",
            evidence_type="risk_assessment",
            description=(
                "Risk assessment log including security events, "
                "anomaly detections, and mitigation actions."
            ),
            events=risk_log["security_events"] + risk_log["anomaly_detections"],
            summary=risk_log["summary"],
            compliant=True,  # Maintaining the log itself demonstrates compliance.
            notes=(
                "All security events were handled with appropriate mitigation actions."
                if risk_log["security_events"] else
                "No security events detected during this period."
            ),
        ))

        if risk_log["summary"]["total_security_events"] > 10:
            advice.append(
                "Review security event patterns and consider strengthening access controls."
            )

        # --- Article 17: quality management ------------------------------
        all_events = self.filter_by_date_range(start, end)
        stats = self.compute_summary_stats(all_events)
        evidence_items.append(ComplianceEvidence(
            control_id="Article 17",
            control_name="Quality Management System",
            evidence_type="system_metrics",
            description=(
                "Quality management metrics demonstrating systematic "
                "monitoring and control of the AI system."
            ),
            summary={
                "total_operations": stats["total_events"],
                "authorization_rate": f"{stats['grant_rate']:.1%}",
                "unique_users_served": stats["unique_users"],
                "tools_available": stats["unique_tools"],
                "period_coverage": f"{start.date()} to {end.date()}",
            },
            compliant=True,
        ))

        # --- Overall summary ---------------------------------------------
        report_summary = {
            "reporting_period": f"{start.date()} to {end.date()}",
            "ai_system_id": self._ai_system_id,
            "risk_classification": self._risk_classification,
            "total_operations": stats["total_events"],
            "human_oversight_rate": f"{oversight.human_involvement_rate:.1%}",
            "security_events": risk_log["summary"]["total_security_events"],
            "compliance_status": (
                "Compliant"
                if all(item.compliant for item in evidence_items)
                else "Review Required"
            ),
        }

        return ComplianceReport(
            metadata=metadata,
            evidence=evidence_items,
            summary=report_summary,
            recommendations=advice,
        )