proxilion 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (94) hide show
  1. proxilion/__init__.py +136 -0
  2. proxilion/audit/__init__.py +133 -0
  3. proxilion/audit/base_exporters.py +527 -0
  4. proxilion/audit/compliance/__init__.py +130 -0
  5. proxilion/audit/compliance/base.py +457 -0
  6. proxilion/audit/compliance/eu_ai_act.py +603 -0
  7. proxilion/audit/compliance/iso27001.py +544 -0
  8. proxilion/audit/compliance/soc2.py +491 -0
  9. proxilion/audit/events.py +493 -0
  10. proxilion/audit/explainability.py +1173 -0
  11. proxilion/audit/exporters/__init__.py +58 -0
  12. proxilion/audit/exporters/aws_s3.py +636 -0
  13. proxilion/audit/exporters/azure_storage.py +608 -0
  14. proxilion/audit/exporters/cloud_base.py +468 -0
  15. proxilion/audit/exporters/gcp_storage.py +570 -0
  16. proxilion/audit/exporters/multi_exporter.py +498 -0
  17. proxilion/audit/hash_chain.py +652 -0
  18. proxilion/audit/logger.py +543 -0
  19. proxilion/caching/__init__.py +49 -0
  20. proxilion/caching/tool_cache.py +633 -0
  21. proxilion/context/__init__.py +73 -0
  22. proxilion/context/context_window.py +556 -0
  23. proxilion/context/message_history.py +505 -0
  24. proxilion/context/session.py +735 -0
  25. proxilion/contrib/__init__.py +51 -0
  26. proxilion/contrib/anthropic.py +609 -0
  27. proxilion/contrib/google.py +1012 -0
  28. proxilion/contrib/langchain.py +641 -0
  29. proxilion/contrib/mcp.py +893 -0
  30. proxilion/contrib/openai.py +646 -0
  31. proxilion/core.py +3058 -0
  32. proxilion/decorators.py +966 -0
  33. proxilion/engines/__init__.py +287 -0
  34. proxilion/engines/base.py +266 -0
  35. proxilion/engines/casbin_engine.py +412 -0
  36. proxilion/engines/opa_engine.py +493 -0
  37. proxilion/engines/simple.py +437 -0
  38. proxilion/exceptions.py +887 -0
  39. proxilion/guards/__init__.py +54 -0
  40. proxilion/guards/input_guard.py +522 -0
  41. proxilion/guards/output_guard.py +634 -0
  42. proxilion/observability/__init__.py +198 -0
  43. proxilion/observability/cost_tracker.py +866 -0
  44. proxilion/observability/hooks.py +683 -0
  45. proxilion/observability/metrics.py +798 -0
  46. proxilion/observability/session_cost_tracker.py +1063 -0
  47. proxilion/policies/__init__.py +67 -0
  48. proxilion/policies/base.py +304 -0
  49. proxilion/policies/builtin.py +486 -0
  50. proxilion/policies/registry.py +376 -0
  51. proxilion/providers/__init__.py +201 -0
  52. proxilion/providers/adapter.py +468 -0
  53. proxilion/providers/anthropic_adapter.py +330 -0
  54. proxilion/providers/gemini_adapter.py +391 -0
  55. proxilion/providers/openai_adapter.py +294 -0
  56. proxilion/py.typed +0 -0
  57. proxilion/resilience/__init__.py +81 -0
  58. proxilion/resilience/degradation.py +615 -0
  59. proxilion/resilience/fallback.py +555 -0
  60. proxilion/resilience/retry.py +554 -0
  61. proxilion/scheduling/__init__.py +57 -0
  62. proxilion/scheduling/priority_queue.py +419 -0
  63. proxilion/scheduling/scheduler.py +459 -0
  64. proxilion/security/__init__.py +244 -0
  65. proxilion/security/agent_trust.py +968 -0
  66. proxilion/security/behavioral_drift.py +794 -0
  67. proxilion/security/cascade_protection.py +869 -0
  68. proxilion/security/circuit_breaker.py +428 -0
  69. proxilion/security/cost_limiter.py +690 -0
  70. proxilion/security/idor_protection.py +460 -0
  71. proxilion/security/intent_capsule.py +849 -0
  72. proxilion/security/intent_validator.py +495 -0
  73. proxilion/security/memory_integrity.py +767 -0
  74. proxilion/security/rate_limiter.py +509 -0
  75. proxilion/security/scope_enforcer.py +680 -0
  76. proxilion/security/sequence_validator.py +636 -0
  77. proxilion/security/trust_boundaries.py +784 -0
  78. proxilion/streaming/__init__.py +70 -0
  79. proxilion/streaming/detector.py +761 -0
  80. proxilion/streaming/transformer.py +674 -0
  81. proxilion/timeouts/__init__.py +55 -0
  82. proxilion/timeouts/decorators.py +477 -0
  83. proxilion/timeouts/manager.py +545 -0
  84. proxilion/tools/__init__.py +69 -0
  85. proxilion/tools/decorators.py +493 -0
  86. proxilion/tools/registry.py +732 -0
  87. proxilion/types.py +339 -0
  88. proxilion/validation/__init__.py +93 -0
  89. proxilion/validation/pydantic_schema.py +351 -0
  90. proxilion/validation/schema.py +651 -0
  91. proxilion-0.0.1.dist-info/METADATA +872 -0
  92. proxilion-0.0.1.dist-info/RECORD +94 -0
  93. proxilion-0.0.1.dist-info/WHEEL +4 -0
  94. proxilion-0.0.1.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,498 @@
1
+ """
2
+ Multi-cloud exporter for redundant audit log export.
3
+
4
+ Provides resilient export to multiple cloud destinations with
5
+ configurable failure handling and retry strategies.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import logging
11
+ import threading
12
+ import time
13
+ from dataclasses import dataclass, field
14
+ from datetime import datetime, timezone
15
+ from enum import Enum
16
+ from typing import Any
17
+
18
+ from proxilion.audit.events import AuditEventV2
19
+ from proxilion.audit.exporters.cloud_base import (
20
+ BaseCloudExporter,
21
+ CloudExporter,
22
+ CloudExporterConfig,
23
+ ExportBatch,
24
+ ExportResult,
25
+ )
26
+
27
+ logger = logging.getLogger(__name__)
28
+
29
+
30
class FailureStrategy(Enum):
    """How a multi-destination export decides overall success or failure."""

    # Stop exporting as soon as one destination fails.
    FAIL_FAST = "fail_fast"

    # Attempt every destination; failures are logged but never fatal.
    BEST_EFFORT = "best_effort"

    # Overall success when at least one destination succeeds.
    REQUIRE_ONE = "require_one"

    # Overall success only when every destination succeeds.
    REQUIRE_ALL = "require_all"

    # Overall success when more than half of the destinations succeed.
    REQUIRE_MAJORITY = "require_majority"
47
+
48
+
49
@dataclass
class MultiExportResult:
    """
    Aggregated result of multi-cloud export.

    Attributes:
        success: Whether the overall export succeeded per the strategy.
        results: Individual results from each exporter.
        total_events: Total events in the batch.
        successful_destinations: Number of successful destinations.
        failed_destinations: Number of failed destinations.
        duration_ms: Total export duration in milliseconds.
    """
    success: bool
    results: list[ExportResult] = field(default_factory=list)
    total_events: int = 0
    successful_destinations: int = 0
    failed_destinations: int = 0
    duration_ms: float = 0.0

    @property
    def all_succeeded(self) -> bool:
        """Check if all exporters succeeded."""
        return all(r.success for r in self.results)

    @property
    def any_succeeded(self) -> bool:
        """Check if any exporter succeeded."""
        return any(r.success for r in self.results)

    @property
    def majority_succeeded(self) -> bool:
        """Check if a strict majority of exporters succeeded.

        Fix: computed directly from ``results`` — consistent with
        ``all_succeeded``/``any_succeeded`` — instead of trusting the
        ``successful_destinations`` counter, which can disagree with
        ``results`` when the instance is constructed by hand.
        """
        if not self.results:
            return False
        successes = sum(1 for r in self.results if r.success)
        return successes > len(self.results) / 2

    def get_failed_results(self) -> list[ExportResult]:
        """Get list of failed export results."""
        return [r for r in self.results if not r.success]

    def get_successful_results(self) -> list[ExportResult]:
        """Get list of successful export results."""
        return [r for r in self.results if r.success]
93
+
94
+
95
class MultiCloudExporter:
    """
    Export audit logs to multiple cloud destinations.

    Provides redundant export with configurable failure handling,
    parallel execution, and comprehensive result tracking.

    Example:
        >>> from proxilion.audit.exporters import (
        ...     S3Exporter, GCSExporter, MultiCloudExporter, FailureStrategy
        ... )
        >>>
        >>> s3 = S3Exporter(s3_config)
        >>> gcs = GCSExporter(gcs_config)
        >>>
        >>> multi = MultiCloudExporter(
        ...     exporters=[s3, gcs],
        ...     strategy=FailureStrategy.REQUIRE_ONE,
        ...     parallel=True,
        ... )
        >>>
        >>> result = multi.export(events)
        >>> if result.success:
        ...     print(f"Exported to {result.successful_destinations} destinations")
    """

    def __init__(
        self,
        exporters: list[CloudExporter | BaseCloudExporter],
        strategy: FailureStrategy = FailureStrategy.BEST_EFFORT,
        parallel: bool = True,
        timeout: float = 300.0,
        retry_failed: bool = True,
        max_retries: int = 2,
        retry_delay: float = 5.0,
    ) -> None:
        """
        Initialize the multi-cloud exporter.

        Args:
            exporters: List of cloud exporters to use.
            strategy: Strategy for handling failures.
            parallel: Execute exports in parallel.
            timeout: Timeout for parallel exports in seconds.
            retry_failed: Retry failed exports.
            max_retries: Maximum number of retries per exporter.
            retry_delay: Base delay between retries in seconds; the actual
                sleep grows linearly with each retry attempt.
        """
        self.exporters = exporters
        self.strategy = strategy
        self.parallel = parallel
        self.timeout = timeout
        self.retry_failed = retry_failed
        self.max_retries = max_retries
        self.retry_delay = retry_delay

        # Events buffered by add_pending() until a batch fills or
        # flush_pending() is called; access is guarded by _lock.
        self._pending_events: list[AuditEventV2] = []
        # RLock, not Lock: flush_pending()/add_pending() hold the lock while
        # calling export(), which re-acquires it inside _prepare_batch().
        self._lock = threading.RLock()
        # Monotonic per-instance counter embedded in generated batch IDs.
        self._batch_counter = 0
154
+
155
+ @property
156
+ def exporter_count(self) -> int:
157
+ """Get the number of configured exporters."""
158
+ return len(self.exporters)
159
+
160
+ def export(self, events: list[AuditEventV2]) -> MultiExportResult:
161
+ """
162
+ Export events to all configured destinations.
163
+
164
+ Args:
165
+ events: List of audit events to export.
166
+
167
+ Returns:
168
+ MultiExportResult with aggregated results.
169
+ """
170
+ if not events:
171
+ return MultiExportResult(success=True, total_events=0)
172
+
173
+ # Prepare batch
174
+ batch = self._prepare_batch(events)
175
+
176
+ return self.export_batch(batch)
177
+
178
+ def export_batch(self, batch: ExportBatch) -> MultiExportResult:
179
+ """
180
+ Export a batch to all destinations.
181
+
182
+ Args:
183
+ batch: The batch to export.
184
+
185
+ Returns:
186
+ MultiExportResult with aggregated results.
187
+ """
188
+ start_time = time.time()
189
+
190
+ if self.parallel and len(self.exporters) > 1:
191
+ results = self._export_parallel(batch)
192
+ else:
193
+ results = self._export_sequential(batch)
194
+
195
+ # Retry failed exports if configured
196
+ if self.retry_failed:
197
+ results = self._retry_failed_exports(batch, results)
198
+
199
+ # Aggregate results
200
+ duration_ms = (time.time() - start_time) * 1000
201
+ successful = [r for r in results if r.success]
202
+ failed = [r for r in results if not r.success]
203
+
204
+ # Determine overall success based on strategy
205
+ success = self._evaluate_success(len(successful), len(failed))
206
+
207
+ return MultiExportResult(
208
+ success=success,
209
+ results=results,
210
+ total_events=batch.event_count,
211
+ successful_destinations=len(successful),
212
+ failed_destinations=len(failed),
213
+ duration_ms=duration_ms,
214
+ )
215
+
216
+ def _prepare_batch(self, events: list[AuditEventV2]) -> ExportBatch:
217
+ """Prepare events as an export batch."""
218
+
219
+ with self._lock:
220
+ self._batch_counter += 1
221
+ ts = datetime.now(timezone.utc).strftime('%Y%m%d%H%M%S')
222
+ batch_id = f"multi_{ts}_{self._batch_counter:06d}"
223
+
224
+ return ExportBatch(
225
+ batch_id=batch_id,
226
+ events=events,
227
+ metadata={
228
+ "exporter": "MultiCloudExporter",
229
+ "destinations": self.exporter_count,
230
+ "strategy": self.strategy.value,
231
+ },
232
+ )
233
+
234
+ def _export_sequential(self, batch: ExportBatch) -> list[ExportResult]:
235
+ """Export to destinations sequentially."""
236
+ results = []
237
+
238
+ for i, exporter in enumerate(self.exporters):
239
+ try:
240
+ result = exporter.export_batch(batch)
241
+ results.append(result)
242
+
243
+ # Check for fail-fast
244
+ if self.strategy == FailureStrategy.FAIL_FAST and not result.success:
245
+ logger.warning(
246
+ f"Export to destination {i} failed with fail-fast strategy. "
247
+ f"Skipping remaining {len(self.exporters) - i - 1} destinations."
248
+ )
249
+ break
250
+
251
+ except Exception as e:
252
+ logger.error(f"Export to destination {i} failed with exception: {e}")
253
+ results.append(ExportResult(
254
+ success=False,
255
+ batch_id=batch.batch_id,
256
+ error=str(e),
257
+ ))
258
+
259
+ if self.strategy == FailureStrategy.FAIL_FAST:
260
+ break
261
+
262
+ return results
263
+
264
    def _export_parallel(self, batch: ExportBatch) -> list[ExportResult]:
        """Export to destinations in parallel.

        Fans the batch out on a thread pool and collects results
        positionally, so results[i] always corresponds to self.exporters[i].
        Slots that never complete (fail-fast cancel, timeout) are filled
        with synthetic failure results.
        """
        import concurrent.futures

        # Positional slots keep result order aligned with self.exporters.
        results: list[ExportResult | None] = [None] * len(self.exporters)

        def export_to_destination(index: int, exporter: CloudExporter) -> tuple[int, ExportResult]:
            # Runs on a worker thread; never lets an exporter exception
            # escape -- converts it into a failed ExportResult instead.
            try:
                result = exporter.export_batch(batch)
                return index, result
            except Exception as e:
                logger.error(f"Export to destination {index} failed: {e}")
                return index, ExportResult(
                    success=False,
                    batch_id=batch.batch_id,
                    error=str(e),
                )

        with concurrent.futures.ThreadPoolExecutor(max_workers=len(self.exporters)) as executor:
            futures = {
                executor.submit(export_to_destination, i, exp): i
                for i, exp in enumerate(self.exporters)
            }

            try:
                for future in concurrent.futures.as_completed(futures, timeout=self.timeout):
                    index, result = future.result()
                    results[index] = result

                    # Check for fail-fast (exit early)
                    if self.strategy == FailureStrategy.FAIL_FAST and not result.success:
                        logger.warning(
                            "Export failed with fail-fast strategy. Cancelling remaining exports."
                        )
                        # Cancel remaining futures. NOTE(review): cancel()
                        # only stops futures not yet running; already-running
                        # exports still finish, but their results are
                        # discarded (their slots stay None and become
                        # "did not complete" failures below).
                        for f in futures:
                            f.cancel()
                        break

            except concurrent.futures.TimeoutError:
                logger.error(f"Parallel export timed out after {self.timeout}s")
                # Mark timed out exports as failed
                for i, r in enumerate(results):
                    if r is None:
                        results[i] = ExportResult(
                            success=False,
                            batch_id=batch.batch_id,
                            error="Export timed out",
                        )

        # Replace any remaining None values (cancelled / never-completed
        # slots) with synthetic failures so the caller always gets one
        # result per submitted exporter.
        return [
            r if r is not None else ExportResult(
                success=False,
                batch_id=batch.batch_id,
                error="Export did not complete",
            )
            for r in results
        ]
323
+
324
+ def _retry_failed_exports(
325
+ self,
326
+ batch: ExportBatch,
327
+ results: list[ExportResult],
328
+ ) -> list[ExportResult]:
329
+ """Retry failed exports."""
330
+ final_results = list(results)
331
+
332
+ for retry in range(self.max_retries):
333
+ # Find failed exports
334
+ failed_indices = [
335
+ i for i, r in enumerate(final_results)
336
+ if not r.success
337
+ ]
338
+
339
+ if not failed_indices:
340
+ break
341
+
342
+ logger.info(
343
+ f"Retrying {len(failed_indices)} failed exports "
344
+ f"(attempt {retry + 1}/{self.max_retries})"
345
+ )
346
+
347
+ time.sleep(self.retry_delay * (retry + 1)) # Increasing delay
348
+
349
+ for i in failed_indices:
350
+ try:
351
+ result = self.exporters[i].export_batch(batch)
352
+ if result.success:
353
+ final_results[i] = result
354
+ logger.info(f"Retry succeeded for destination {i}")
355
+ except Exception as e:
356
+ logger.warning(f"Retry failed for destination {i}: {e}")
357
+
358
+ return final_results
359
+
360
+ def _evaluate_success(self, successful: int, failed: int) -> bool:
361
+ """Evaluate overall success based on strategy."""
362
+ total = successful + failed
363
+
364
+ if self.strategy == FailureStrategy.FAIL_FAST:
365
+ return successful == total and failed == 0
366
+
367
+ elif self.strategy == FailureStrategy.BEST_EFFORT:
368
+ return True # Always succeed, just log failures
369
+
370
+ elif self.strategy == FailureStrategy.REQUIRE_ONE:
371
+ return successful >= 1
372
+
373
+ elif self.strategy == FailureStrategy.REQUIRE_ALL:
374
+ return failed == 0
375
+
376
+ elif self.strategy == FailureStrategy.REQUIRE_MAJORITY:
377
+ return successful > total / 2
378
+
379
+ return False
380
+
381
+ def health_check(self) -> dict[int, bool]:
382
+ """
383
+ Check health of all exporters.
384
+
385
+ Returns:
386
+ Dict mapping exporter index to health status.
387
+ """
388
+ results = {}
389
+
390
+ for i, exporter in enumerate(self.exporters):
391
+ try:
392
+ results[i] = exporter.health_check()
393
+ except Exception as e:
394
+ logger.warning(f"Health check failed for exporter {i}: {e}")
395
+ results[i] = False
396
+
397
+ return results
398
+
399
+ def configure(self, config: dict[str, Any]) -> None:
400
+ """
401
+ Update exporter configuration.
402
+
403
+ Args:
404
+ config: Configuration dictionary with optional keys:
405
+ - strategy: FailureStrategy value or string
406
+ - parallel: bool
407
+ - timeout: float
408
+ - retry_failed: bool
409
+ - max_retries: int
410
+ - retry_delay: float
411
+ """
412
+ if "strategy" in config:
413
+ strategy = config["strategy"]
414
+ if isinstance(strategy, str):
415
+ self.strategy = FailureStrategy(strategy)
416
+ else:
417
+ self.strategy = strategy
418
+
419
+ if "parallel" in config:
420
+ self.parallel = config["parallel"]
421
+
422
+ if "timeout" in config:
423
+ self.timeout = config["timeout"]
424
+
425
+ if "retry_failed" in config:
426
+ self.retry_failed = config["retry_failed"]
427
+
428
+ if "max_retries" in config:
429
+ self.max_retries = config["max_retries"]
430
+
431
+ if "retry_delay" in config:
432
+ self.retry_delay = config["retry_delay"]
433
+
434
+ def add_exporter(self, exporter: CloudExporter | BaseCloudExporter) -> None:
435
+ """
436
+ Add an exporter to the list.
437
+
438
+ Args:
439
+ exporter: Exporter to add.
440
+ """
441
+ self.exporters.append(exporter)
442
+
443
+ def remove_exporter(self, index: int) -> CloudExporter | BaseCloudExporter | None:
444
+ """
445
+ Remove an exporter by index.
446
+
447
+ Args:
448
+ index: Index of exporter to remove.
449
+
450
+ Returns:
451
+ The removed exporter, or None if index invalid.
452
+ """
453
+ if 0 <= index < len(self.exporters):
454
+ return self.exporters.pop(index)
455
+ return None
456
+
457
    def add_pending(self, event: AuditEventV2) -> MultiExportResult | None:
        """
        Add an event to the pending buffer.

        Exports when the buffer reaches the smallest batch_size of any
        exporter, so no destination's batch limit is exceeded.

        Args:
            event: Event to add.

        Returns:
            MultiExportResult if batch was exported, None otherwise.
        """
        with self._lock:
            self._pending_events.append(event)

            # Smallest configured batch size wins; 100 is the fallback when
            # no exporters are registered. NOTE(review): exporters lacking a
            # `config` attribute fall back to a freshly constructed
            # CloudExporterConfig("aws", "") each iteration -- assumes that
            # default is cheap to build and carries the intended default
            # batch_size; confirm against CloudExporterConfig's definition.
            min_batch_size = min(
                getattr(exp, "config", CloudExporterConfig("aws", "")).batch_size
                for exp in self.exporters
            ) if self.exporters else 100

            if len(self._pending_events) >= min_batch_size:
                # Swap the buffer out before exporting so new events go into
                # a fresh list. NOTE(review): export() runs while _lock is
                # still held, so other threads block on add_pending() for
                # the duration of the network export.
                events = self._pending_events
                self._pending_events = []
                return self.export(events)

        return None
484
+
485
+ def flush_pending(self) -> MultiExportResult | None:
486
+ """
487
+ Export any pending events.
488
+
489
+ Returns:
490
+ MultiExportResult if events were exported, None if buffer was empty.
491
+ """
492
+ with self._lock:
493
+ if not self._pending_events:
494
+ return None
495
+
496
+ events = self._pending_events
497
+ self._pending_events = []
498
+ return self.export(events)