proxilion 0.0.2__py3-none-any.whl → 0.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- proxilion/audit/__init__.py +15 -15
- proxilion/audit/compliance/base.py +4 -4
- proxilion/audit/compliance/eu_ai_act.py +14 -4
- proxilion/audit/compliance/iso27001.py +2 -2
- proxilion/audit/compliance/soc2.py +16 -3
- proxilion/audit/events.py +9 -5
- proxilion/audit/explainability.py +30 -19
- proxilion/audit/hash_chain.py +14 -0
- proxilion/caching/tool_cache.py +14 -8
- proxilion/context/context_window.py +27 -2
- proxilion/contrib/anthropic.py +2 -2
- proxilion/contrib/mcp.py +2 -1
- proxilion/contrib/openai.py +2 -2
- proxilion/core.py +26 -21
- proxilion/exceptions.py +84 -0
- proxilion/guards/output_guard.py +1 -1
- proxilion/observability/__init__.py +3 -1
- proxilion/observability/metrics.py +14 -6
- proxilion/observability/session_cost_tracker.py +6 -7
- proxilion/policies/builtin.py +2 -1
- proxilion/policies/registry.py +12 -6
- proxilion/security/__init__.py +51 -37
- proxilion/security/agent_trust.py +23 -8
- proxilion/security/behavioral_drift.py +14 -6
- proxilion/security/idor_protection.py +12 -4
- proxilion/security/intent_capsule.py +3 -2
- proxilion/security/intent_validator.py +89 -2
- proxilion/security/memory_integrity.py +14 -13
- proxilion/security/rate_limiter.py +112 -22
- proxilion/timeouts/manager.py +2 -0
- {proxilion-0.0.2.dist-info → proxilion-0.0.3.dist-info}/METADATA +6 -6
- {proxilion-0.0.2.dist-info → proxilion-0.0.3.dist-info}/RECORD +34 -34
- {proxilion-0.0.2.dist-info → proxilion-0.0.3.dist-info}/WHEEL +0 -0
- {proxilion-0.0.2.dist-info → proxilion-0.0.3.dist-info}/licenses/LICENSE +0 -0
proxilion/exceptions.py
CHANGED

@@ -885,3 +885,87 @@ class EmergencyHaltError(ProxilionError):
             "triggered_by": triggered_by,
         }
         super().__init__(message, details)
+
+
+class ApprovalRequiredError(ProxilionError):
+    """
+    Raised when a tool requires approval before execution.
+
+    Tools marked with requires_approval=True must have approval granted
+    before they can be executed. This exception blocks execution until
+    the approval workflow is completed.
+
+    Attributes:
+        tool_name: Name of the tool that requires approval.
+        user: The user who attempted to execute the tool.
+        reason: Why approval is required.
+
+    Example:
+        >>> raise ApprovalRequiredError(
+        ...     tool_name="delete_database",
+        ...     user="user_123",
+        ...     reason="Tool is marked as high-risk and requires manager approval"
+        ... )
+    """
+
+    def __init__(
+        self,
+        tool_name: str,
+        user: str,
+        reason: str | None = None,
+    ) -> None:
+        self.tool_name = tool_name
+        self.user = user
+        self.reason = reason or "Tool requires approval before execution"
+
+        message = f"Approval required: Tool '{tool_name}' requires approval. {self.reason}"
+
+        details = {
+            "tool_name": tool_name,
+            "user": user,
+            "reason": self.reason,
+        }
+        super().__init__(message, details)
+
+
+class ScopeLoaderError(ProxilionError):
+    """
+    Raised when a scope loader encounters a temporary failure.
+
+    This exception distinguishes between permanent configuration errors
+    (which should be logged and denied) and temporary failures (network
+    issues, database timeouts) that callers may want to retry.
+
+    Attributes:
+        resource_type: Type of resource being loaded.
+        user_id: User for whom scope was being loaded.
+        original_error: The underlying error that caused the failure.
+
+    Example:
+        >>> raise ScopeLoaderError(
+        ...     resource_type="document",
+        ...     user_id="user_123",
+        ...     original_error=TimeoutError("Database connection timed out")
+        ... )
+    """
+
+    def __init__(
+        self,
+        resource_type: str,
+        user_id: str,
+        original_error: Exception | None = None,
+    ) -> None:
+        self.resource_type = resource_type
+        self.user_id = user_id
+        self.original_error = original_error
+
+        message = f"Scope loader failed for {resource_type} (user: {user_id})"
+        if original_error:
+            message += f": {original_error}"
+
+        details = {
+            "resource_type": resource_type,
+            "user_id": user_id,
+            "original_error": str(original_error) if original_error else None,
+        }
+        super().__init__(message, details)

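The two classes added above carry their inputs as attributes (tool_name, user, reason; resource_type, user_id, original_error), so callers can branch on them without parsing the message string. A minimal sketch of how calling code might handle both; the execute() wrapper is hypothetical and not part of this diff:

import logging

from proxilion.exceptions import ApprovalRequiredError, ScopeLoaderError

log = logging.getLogger(__name__)

def run_tool(execute, tool_name: str, user: str):
    # execute() is a hypothetical callable wrapping the actual tool call.
    try:
        return execute(tool_name, user)
    except ApprovalRequiredError as exc:
        # Execution stays blocked until the approval workflow completes.
        log.info("approval needed for %s by %s: %s", exc.tool_name, exc.user, exc.reason)
        return None
    except ScopeLoaderError as exc:
        # Transient scope-loader failure (network, DB timeout): let a retry layer handle it.
        log.warning("retryable failure loading %s scope for %s: %s",
                    exc.resource_type, exc.user_id, exc.original_error)
        raise
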
proxilion/guards/output_guard.py
CHANGED

@@ -330,7 +330,7 @@ DEFAULT_LEAKAGE_PATTERNS: list[LeakagePattern] = [
     ),
     LeakagePattern(
         name="password_in_text",
-        pattern=r"(?i)(password|passwd|pwd)\s*[:=]\s*['\"]?[^\s'\"]{4,}['\"]?",
+        pattern=r"(?i)(password|passwd|pwd)\s*(?:is\s*)?[:=]\s*['\"]?[^\s'\"]{4,}['\"]?",
         category=LeakageCategory.CREDENTIAL,
         severity=0.9,
         description="Passwords in plaintext",

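The widened pattern keeps the colon/equals requirement but now tolerates an intervening "is", so phrasing like "password is: ..." is caught as well. A quick standalone check of the new expression, copied from the diff:

import re

PATTERN = r"(?i)(password|passwd|pwd)\s*(?:is\s*)?[:=]\s*['\"]?[^\s'\"]{4,}['\"]?"

print(bool(re.search(PATTERN, "password: hunter22")))          # True - matched before and after
print(bool(re.search(PATTERN, "the password is: hunter22")))   # True - only the new pattern matches
print(bool(re.search(PATTERN, "password policy updated")))     # False - no ':' or '=' present
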
proxilion/observability/__init__.py
CHANGED

@@ -120,10 +120,12 @@ from proxilion.observability.metrics import (
     AlertRule,
     EventType,
     MetricsCollector,
-    MetricType as SecurityMetricType,
     PrometheusExporter,
     SecurityEvent,
 )
+from proxilion.observability.metrics import (
+    MetricType as SecurityMetricType,
+)

 # Session-based cost tracking
 from proxilion.observability.session_cost_tracker import (

proxilion/observability/metrics.py
CHANGED

@@ -38,12 +38,13 @@ import logging
 import threading
 import time
 from collections import defaultdict, deque
+from collections.abc import Callable
 from dataclasses import dataclass, field
 from datetime import datetime, timezone
 from enum import Enum
-from typing import Any
-from urllib.request import Request, urlopen
+from typing import Any
 from urllib.error import URLError
+from urllib.request import Request, urlopen

 logger = logging.getLogger(__name__)


@@ -232,7 +233,10 @@ class MetricsCollector:
         user: str | None = None,
     ) -> None:
         """Record a guard block."""
-
+        if guard_type == "input":
+            event_type = EventType.INPUT_GUARD_BLOCK
+        else:
+            event_type = EventType.OUTPUT_GUARD_BLOCK

         self.record_event(SecurityEvent(
             event_type=event_type,

@@ -628,7 +632,10 @@ class AlertManager:
         alert = Alert(
             rule_name=rule.name,
             severity=rule.severity,
-            message=
+            message=(
+                f"{rule.event_type.value} rate ({rate_per_minute:.1f}/min) "
+                f"exceeds threshold ({rule.threshold}/min)"
+            ),
             value=rate_per_minute,
             threshold=rule.threshold,
             details={

@@ -728,7 +735,7 @@ class PrometheusExporter:
         lines: list[str] = []

         # Add header
-        lines.append(
+        lines.append("# Proxilion Security Metrics")
         lines.append(f"# Generated at {datetime.now(timezone.utc).isoformat()}")
         lines.append("")

@@ -773,7 +780,8 @@ class PrometheusExporter:
             for bucket_le, count in buckets:
                 lines.append(f'{name}_bucket{{le="{bucket_le}"}} {count}')

-
+            inf_count = self._collector._histogram_counts.get(hist_name, 0)
+            lines.append(f'{name}_bucket{{le="+Inf"}} {inf_count}')
             lines.append(f"{name}_sum {self._collector._histogram_sums.get(hist_name, 0):.6f}")
             lines.append(f"{name}_count {self._collector._histogram_counts.get(hist_name, 0)}")
             lines.append("")

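Prometheus' exposition format expects every histogram to end with an le="+Inf" bucket whose value equals the _count sample total; the two added lines close that gap in the exporter. Roughly the shape of output the change produces, with an invented metric name and counts:

# Hypothetical exposition for a histogram with buckets 0.1 and 0.5 and 12 samples.
# The +Inf bucket (added by this change) must equal the _count line.
expected_lines = [
    'proxilion_tool_latency_bucket{le="0.1"} 4',
    'proxilion_tool_latency_bucket{le="0.5"} 9',
    'proxilion_tool_latency_bucket{le="+Inf"} 12',
    'proxilion_tool_latency_sum 3.217000',
    'proxilion_tool_latency_count 12',
]
print("\n".join(expected_lines))
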
proxilion/observability/session_cost_tracker.py
CHANGED

@@ -50,24 +50,21 @@ Example:

 from __future__ import annotations

-import hashlib
 import json
 import logging
 import threading
 import uuid
 from collections import defaultdict
+from collections.abc import Callable
 from dataclasses import asdict, dataclass, field
 from datetime import datetime, timedelta, timezone
 from enum import Enum
-from typing import Any
+from typing import Any

 from proxilion.observability.cost_tracker import (
     BudgetPolicy,
-    CostSummary,
     CostTracker,
-    ModelPricing,
     UsageRecord,
-    DEFAULT_PRICING,
 )

 logger = logging.getLogger(__name__)

@@ -622,8 +619,10 @@ class SessionCostTracker:
         agent_profile.tool_calls += 1

         if tool_name:
-
-
+            prev_tool = agent_profile.by_tool.get(tool_name, 0.0)
+            agent_profile.by_tool[tool_name] = prev_tool + record.cost_usd
+            prev_model = agent_profile.by_model.get(model, 0.0)
+            agent_profile.by_model[model] = prev_model + record.cost_usd

         if agent_profile.first_activity is None:
             agent_profile.first_activity = record.timestamp

proxilion/policies/builtin.py
CHANGED

@@ -304,7 +304,8 @@ class AttributeBasedPolicy(Policy[Any]):
         Returns:
             True if authorized, False otherwise.
         """
-
+        # Copy to avoid mutating caller's dict
+        ctx = dict(context) if context else {}

         # Add user attributes to context for convenience
         ctx["user_id"] = self.user.user_id

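The added copy means the user attributes written into ctx a few lines later (e.g. ctx["user_id"]) no longer touch the mapping the caller passed in. A generic illustration of the difference; the names here are illustrative, not the library's API:

def add_user_attrs(context: dict, copy_first: bool) -> dict:
    # copy_first=True mirrors the idiom introduced in the diff.
    ctx = (dict(context) if context else {}) if copy_first else context
    ctx["user_id"] = "u_1"
    return ctx

caller = {"department": "finance"}
add_user_attrs(caller, copy_first=True)
print(caller)   # {'department': 'finance'} - unchanged

add_user_attrs(caller, copy_first=False)
print(caller)   # {'department': 'finance', 'user_id': 'u_1'} - mutated in place
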
proxilion/policies/registry.py
CHANGED

@@ -339,6 +339,7 @@ class PolicyRegistry:

 # Global registry instance for convenience
 _global_registry: PolicyRegistry | None = None
+_global_registry_lock = threading.Lock()


 def get_global_registry() -> PolicyRegistry:

@@ -359,9 +360,13 @@ def get_global_registry() -> PolicyRegistry:
         ...     pass
     """
     global _global_registry
-    if _global_registry is None:
-        _global_registry
-
+    if _global_registry is not None:
+        return _global_registry
+    with _global_registry_lock:
+        # Double-check after acquiring lock
+        if _global_registry is None:
+            _global_registry = PolicyRegistry()
+        return _global_registry


 def reset_global_registry() -> None:

@@ -371,6 +376,7 @@ def reset_global_registry() -> None:
     Clears the global registry instance. Primarily useful for testing.
     """
     global _global_registry
-
-    _global_registry
-
+    with _global_registry_lock:
+        if _global_registry is not None:
+            _global_registry.clear()
+        _global_registry = None

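get_global_registry() now follows the classic double-checked locking shape: a lock-free fast path once the singleton exists, and a second check under the lock so two threads racing through the slow path cannot both construct a registry. The same pattern in isolation, with a placeholder class standing in for PolicyRegistry:

import threading

class Registry:                       # placeholder for PolicyRegistry
    pass

_instance: Registry | None = None
_instance_lock = threading.Lock()

def get_instance() -> Registry:
    global _instance
    if _instance is not None:         # fast path: no locking after first initialization
        return _instance
    with _instance_lock:
        if _instance is None:         # re-check: another thread may have won the race
            _instance = Registry()
        return _instance
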
proxilion/security/__init__.py
CHANGED

@@ -76,6 +76,31 @@ Quick Start:
     ... ))
 """

+from proxilion.security.agent_trust import (
+    AgentCredential,
+    AgentTrustManager,
+    DelegationChain,
+)
+from proxilion.security.agent_trust import (
+    DelegationToken as AgentDelegationToken,
+)
+from proxilion.security.agent_trust import (
+    SignedMessage as AgentSignedMessage,
+)
+from proxilion.security.agent_trust import (
+    TrustLevel as AgentTrustLevel,
+)
+from proxilion.security.agent_trust import (
+    VerificationResult as AgentVerificationResult,
+)
+from proxilion.security.behavioral_drift import (
+    BaselineStats,
+    BehavioralMonitor,
+    DriftDetector,
+    DriftMetric,
+    DriftResult,
+    KillSwitch,
+)
 from proxilion.security.cascade_protection import (
     CascadeAwareCircuitBreakerRegistry,
     CascadeEvent,

@@ -95,6 +120,16 @@ from proxilion.security.idor_protection import (
     IDPattern,
     ResourceScope,
 )
+from proxilion.security.intent_capsule import (
+    HijackDetection,
+    IntentCapsule,
+    IntentCapsuleManager,
+    IntentCategory,
+    IntentGuard,
+)
+from proxilion.security.intent_capsule import (
+    IntentValidator as IntentHijackValidator,
+)
 from proxilion.security.intent_validator import (
     AnomalyThresholds,
     IntentValidator,

@@ -102,6 +137,22 @@ from proxilion.security.intent_validator import (
     ValidationResult,
     WorkflowState,
 )
+
+# New ASI Top 10 features
+from proxilion.security.memory_integrity import (
+    ContextWindowGuard,
+    IntegrityViolation,
+    IntegrityViolationType,
+    MemoryIntegrityGuard,
+    RAGDocument,
+    RAGScanResult,
+)
+from proxilion.security.memory_integrity import (
+    SignedMessage as MemorySignedMessage,
+)
+from proxilion.security.memory_integrity import (
+    VerificationResult as MemoryVerificationResult,
+)
 from proxilion.security.rate_limiter import (
     MultiDimensionalRateLimiter,
     RateLimitConfig,

@@ -128,43 +179,6 @@ from proxilion.security.trust_boundaries import (
     TrustLevel,
 )

-# New ASI Top 10 features
-from proxilion.security.memory_integrity import (
-    ContextWindowGuard,
-    IntegrityViolation,
-    IntegrityViolationType,
-    MemoryIntegrityGuard,
-    RAGDocument,
-    RAGScanResult,
-    SignedMessage as MemorySignedMessage,
-    VerificationResult as MemoryVerificationResult,
-)
-from proxilion.security.agent_trust import (
-    AgentCredential,
-    AgentTrustManager,
-    DelegationChain,
-    DelegationToken as AgentDelegationToken,
-    SignedMessage as AgentSignedMessage,
-    TrustLevel as AgentTrustLevel,
-    VerificationResult as AgentVerificationResult,
-)
-from proxilion.security.intent_capsule import (
-    HijackDetection,
-    IntentCapsule,
-    IntentCapsuleManager,
-    IntentCategory,
-    IntentGuard,
-    IntentValidator as IntentHijackValidator,
-)
-from proxilion.security.behavioral_drift import (
-    BaselineStats,
-    BehavioralMonitor,
-    DriftDetector,
-    DriftMetric,
-    DriftResult,
-    KillSwitch,
-)
-
 __all__ = [
     # Rate limiting
     "TokenBucketRateLimiter",

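Functionally this is the same set of re-exports, regrouped into one import statement per alias and moved ahead of the non-aliased imports. The aliases still matter downstream: several submodules define classes with the same name, and the package root keeps them distinct. A small hedged illustration of what that means for importers, assuming both names are re-exported as shown above:

# TrustLevel comes from trust_boundaries; AgentTrustLevel is agent_trust.TrustLevel.
from proxilion.security import AgentTrustLevel, TrustLevel

# Presumably two distinct classes, so this identity check is expected to print False.
print(TrustLevel is AgentTrustLevel)
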
proxilion/security/agent_trust.py
CHANGED

@@ -55,7 +55,6 @@ import hashlib
 import hmac
 import json
 import logging
-import secrets
 import threading
 import time
 import uuid

@@ -126,17 +125,28 @@ class AgentCredential:
         return datetime.now(timezone.utc) > self.expires_at

     def has_capability(self, capability: str) -> bool:
-        """Check if agent has a specific capability.
-
+        """Check if agent has a specific capability.
+
+        Capability matching rules:
+        - Exact match: "read" matches "read"
+        - Wildcard: "*" matches everything
+        - Explicit wildcard pattern: "read:*" matches "read:documents"
+
+        Note: An agent with "read" does NOT automatically get "read:documents".
+        Use explicit wildcards like "read:*" for hierarchical access.
+        """
+        # Full wildcard capability
         if "*" in self.capabilities:
             return True
         # Exact match
         if capability in self.capabilities:
             return True
-        #
+        # Explicit wildcard patterns (e.g., "read:*" matches "read:documents")
         for cap in self.capabilities:
-            if
-
+            if cap.endswith(":*"):
+                prefix = cap[:-1]  # "read:" from "read:*"
+                if capability.startswith(prefix):
+                    return True
         return False

     def can_delegate_to(self, other: AgentCredential) -> bool:

@@ -317,7 +327,11 @@ class DelegationChain:
             if i > 0:
                 prev_token = self._chain[i - 1]
                 if token.issuer_agent != prev_token.delegate_agent:
-
+                    msg = (
+                        f"Chain break at position {i}: "
+                        f"{prev_token.delegate_agent} != {token.issuer_agent}"
+                    )
+                    return False, msg

         return True, None

@@ -611,7 +625,8 @@ class AgentTrustManager:
         token_id = str(uuid.uuid4())

         # Sign the token
-
+        caps_str = str(sorted(capabilities))
+        token_data = f"{token_id}|{from_agent}|{to_agent}|{caps_str}|{now.isoformat()}"
         signature = hmac.new(
             issuer._secret.encode(),
             token_data.encode(),

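The rewritten docstring and the completed wildcard branch pin down the matching rules: exact match, the full "*" wildcard, and explicit "prefix:*" patterns, with no implicit hierarchy for bare capability names. The core check, restated as a standalone function for illustration:

def has_capability(capabilities: set[str], requested: str) -> bool:
    if "*" in capabilities:                      # full wildcard grants everything
        return True
    if requested in capabilities:                # exact match
        return True
    for cap in capabilities:
        if cap.endswith(":*"):                   # explicit wildcard, e.g. "read:*"
            prefix = cap[:-1]                    # "read:" from "read:*"
            if requested.startswith(prefix):
                return True
    return False

print(has_capability({"read"}, "read:documents"))    # False - no implicit hierarchy
print(has_capability({"read:*"}, "read:documents"))  # True  - explicit wildcard required
print(has_capability({"*"}, "delete_database"))      # True  - full wildcard
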
proxilion/security/behavioral_drift.py
CHANGED

@@ -39,17 +39,17 @@ Example:
 from __future__ import annotations

 import logging
-import math
 import statistics
 import threading
 import time
 from collections import deque
+from collections.abc import Callable
 from dataclasses import dataclass, field
 from datetime import datetime, timezone
 from enum import Enum
-from typing import Any
+from typing import Any

-from proxilion.exceptions import
+from proxilion.exceptions import EmergencyHaltError

 logger = logging.getLogger(__name__)

@@ -386,8 +386,14 @@ class BehavioralMonitor:
                 min_value=min(samples),
                 max_value=max(samples),
                 sample_count=len(samples),
-                percentile_95=
-
+                percentile_95=(
+                    sorted_samples[p95_idx] if p95_idx < len(sorted_samples)
+                    else max(samples)
+                ),
+                percentile_99=(
+                    sorted_samples[p99_idx] if p99_idx < len(sorted_samples)
+                    else max(samples)
+                ),
             )

             self._baseline_locked = True

@@ -647,7 +653,9 @@ class KillSwitch:
         return {
             "active": self._active,
             "reason": self._activation_reason,
-            "activation_time":
+            "activation_time": (
+                self._activation_time.isoformat() if self._activation_time else None
+            ),
         }

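The new percentile_95/percentile_99 expressions guard against an index that lands one past the end of the sorted sample list, which can happen for small sample counts depending on how the index is rounded, and fall back to the maximum in that case. A standalone sketch of the guard; the index derivation here is an assumption, not taken from the module:

samples = [12.0, 15.0, 14.0, 90.0]                  # e.g. four per-call latencies
sorted_samples = sorted(samples)

# Assumed derivation of the percentile indices; the real code may differ.
p95_idx = round(len(sorted_samples) * 0.95)          # 4 -> one past the end for this list
p99_idx = round(len(sorted_samples) * 0.99)          # 4 -> one past the end for this list

p95 = sorted_samples[p95_idx] if p95_idx < len(sorted_samples) else max(samples)
p99 = sorted_samples[p99_idx] if p99_idx < len(sorted_samples) else max(samples)
print(p95, p99)   # 90.0 90.0 - the guard falls back to max(samples)
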
proxilion/security/idor_protection.py
CHANGED

@@ -14,7 +14,7 @@ from collections.abc import Callable
 from dataclasses import dataclass, field
 from typing import Any

-from proxilion.exceptions import IDORViolationError
+from proxilion.exceptions import IDORViolationError, ScopeLoaderError

 logger = logging.getLogger(__name__)

@@ -228,9 +228,13 @@ class IDORProtector:
             try:
                 allowed_ids = loader(user_id)
                 return object_id in allowed_ids
-            except
-
+            except (KeyError, ValueError, AttributeError) as e:
+                # Permanent configuration error - deny access
+                logger.error(f"Scope loader configuration error: {e}")
                 return False
+            except Exception as e:
+                # Temporary failure - let caller handle retry
+                raise ScopeLoaderError(resource_type, user_id, e) from e

         if scope is None:
             # No scope defined - default deny

@@ -254,8 +258,12 @@ class IDORProtector:
                 dynamic_ids = scope.scope_loader(user_id)
                 if object_id in dynamic_ids:
                     return True
+            except (KeyError, ValueError, AttributeError) as e:
+                # Permanent configuration error - deny access
+                logger.error(f"Dynamic scope loader configuration error: {e}")
             except Exception as e:
-
+                # Temporary failure - let caller handle retry
+                raise ScopeLoaderError(resource_type, user_id, e) from e

         return False

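Both loader call sites now encode the policy documented on ScopeLoaderError: configuration mistakes (KeyError, ValueError, AttributeError) are logged and treated as a denial, while any other failure is re-raised as a retryable ScopeLoaderError. A hedged sketch of a caller-side retry wrapper built on that contract; the check_access callable and backoff values are illustrative:

import time

from proxilion.exceptions import ScopeLoaderError

def check_with_retry(check_access, user_id: str, object_id: str, attempts: int = 3) -> bool:
    # check_access() is a hypothetical callable that performs the IDOR check.
    for attempt in range(attempts):
        try:
            return check_access(user_id, object_id)
        except ScopeLoaderError:
            # Transient failure (network, DB timeout): back off, then try again.
            if attempt == attempts - 1:
                raise
            time.sleep(0.5 * (attempt + 1))
    return False
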
proxilion/security/intent_capsule.py
CHANGED

@@ -756,7 +756,6 @@ class IntentCapsuleManager:
         """Revoke a capsule."""
         with self._lock:
             if capsule_id in self._capsules:
-                capsule = self._capsules[capsule_id]
                 del self._capsules[capsule_id]

                 # Remove from user's list

@@ -770,11 +769,13 @@ class IntentCapsuleManager:
             return False

     def verify_capsule(self, capsule_id: str) -> bool:
-        """Verify a capsule's signature."""
+        """Verify a capsule's signature and that it hasn't expired."""
        with self._lock:
            capsule = self._capsules.get(capsule_id)
            if not capsule:
                return False
+           if capsule.is_expired():
+               return False
            return capsule.verify(self._secret_key)

    def create_guard(

proxilion/security/intent_validator.py
CHANGED

@@ -13,6 +13,7 @@ import time
 from collections import defaultdict
 from collections.abc import Callable
 from dataclasses import dataclass, field
+from datetime import datetime, timezone
 from enum import Enum
 from typing import Any

@@ -128,6 +129,11 @@ class IntentValidator:

         self._lock = threading.RLock()

+        # Memory cleanup tracking
+        self._last_cleanup = time.time()
+        self._cleanup_interval = 300.0  # 5 minutes
+        self._cleanup_call_count = 0
+
     def register_workflow(
         self,
         workflow_name: str,

@@ -247,6 +253,16 @@ class IntentValidator:
                 if entry[0] > cutoff
             ]

+            # Remove empty user history immediately
+            if not self._call_history[user_id]:
+                del self._call_history[user_id]
+
+            # Periodic cleanup of stale users
+            self._cleanup_call_count += 1
+            if self._cleanup_call_count >= 100:
+                self._cleanup_call_count = 0
+                self._cleanup_stale_users()
+
     def _validate_workflow(
         self,
         user_id: str,

@@ -339,8 +355,9 @@ class IntentValidator:
                 details={"unique_resources": len(unique_resources)},
             )

-        # Check for unusual hours
-
+        # Check for unusual hours using explicit UTC to ensure consistent behavior
+        now_dt = datetime.fromtimestamp(now, tz=timezone.utc)
+        hour = now_dt.hour
         if self.thresholds.suspicious_hour_start <= hour < self.thresholds.suspicious_hour_end:
             return ValidationOutcome(
                 result=ValidationResult.SUSPICIOUS,

@@ -493,3 +510,73 @@ class IntentValidator:
         """Get a user's current workflow state."""
         with self._lock:
             return self._user_states.get(user_id, {}).get(workflow_name)
+
+    def _cleanup_stale_users(self) -> None:
+        """Clean up stale user data to prevent memory growth."""
+        now = time.time()
+        cutoff = now - 3600  # 1 hour
+
+        # Clean up empty call histories
+        empty_history_users = [
+            uid for uid, history in self._call_history.items()
+            if not history
+        ]
+        for uid in empty_history_users:
+            del self._call_history[uid]
+
+        # Clean up stale call histories (no recent activity)
+        stale_users = []
+        for uid, history in list(self._call_history.items()):
+            if history and history[-1][0] < cutoff:
+                stale_users.append(uid)
+
+        for uid in stale_users:
+            del self._call_history[uid]
+            # Also clean up related state for stale users
+            self._user_states.pop(uid, None)
+            self._failure_counts.pop(uid, None)
+
+        if empty_history_users or stale_users:
+            logger.debug(
+                f"Cleaned up {len(empty_history_users)} empty + "
+                f"{len(stale_users)} stale user histories"
+            )
+
+    def cleanup(self, max_age_seconds: float = 3600.0) -> int:
+        """
+        Remove stale user data to prevent unbounded memory growth.
+
+        Args:
+            max_age_seconds: Maximum age for inactive users (default 1 hour).
+
+        Returns:
+            Number of users cleaned up.
+        """
+        with self._lock:
+            now = time.time()
+            cutoff = now - max_age_seconds
+            cleaned = 0
+
+            # Clean up call histories
+            for uid in list(self._call_history.keys()):
+                history = self._call_history[uid]
+                # Remove entries older than cutoff
+                history[:] = [e for e in history if e[0] > cutoff]
+                # Remove empty histories
+                if not history:
+                    del self._call_history[uid]
+                    self._user_states.pop(uid, None)
+                    self._failure_counts.pop(uid, None)
+                    cleaned += 1
+
+            # Clean up users with only state but no history
+            orphan_users = set(self._user_states.keys()) - set(self._call_history.keys())
+            for uid in orphan_users:
+                del self._user_states[uid]
+                self._failure_counts.pop(uid, None)
+                cleaned += 1
+
+            if cleaned:
+                logger.debug(f"Cleaned up {cleaned} stale users")
+
+            return cleaned

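Besides the automatic sweep every 100 recorded calls, the new public cleanup() method lets an application prune inactive users on its own schedule. A hedged sketch of calling it from a periodic maintenance task; the validator construction and the timer loop are illustrative, not part of the diff:

import threading

from proxilion.security.intent_validator import IntentValidator

validator = IntentValidator()   # assumes the default constructor is usable as-is

def prune_stale_state(interval_seconds: float = 300.0) -> None:
    removed = validator.cleanup(max_age_seconds=3600.0)
    if removed:
        print(f"pruned tracking state for {removed} inactive users")
    # Re-arm the timer so the sweep keeps running in the background.
    threading.Timer(interval_seconds, prune_stale_state, args=(interval_seconds,)).start()

prune_stale_state()
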