empathy-framework 4.7.1-py3-none-any.whl → 4.9.0-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registry.
- {empathy_framework-4.7.1.dist-info → empathy_framework-4.9.0.dist-info}/METADATA +65 -2
- {empathy_framework-4.7.1.dist-info → empathy_framework-4.9.0.dist-info}/RECORD +69 -59
- {empathy_framework-4.7.1.dist-info → empathy_framework-4.9.0.dist-info}/WHEEL +1 -1
- {empathy_framework-4.7.1.dist-info → empathy_framework-4.9.0.dist-info}/entry_points.txt +2 -1
- {empathy_framework-4.7.1.dist-info → empathy_framework-4.9.0.dist-info}/top_level.txt +0 -1
- empathy_os/__init__.py +2 -0
- empathy_os/cli/__init__.py +128 -238
- empathy_os/cli/__main__.py +5 -33
- empathy_os/cli/commands/__init__.py +1 -8
- empathy_os/cli/commands/help.py +331 -0
- empathy_os/cli/commands/info.py +140 -0
- empathy_os/cli/commands/inspect.py +437 -0
- empathy_os/cli/commands/metrics.py +92 -0
- empathy_os/cli/commands/orchestrate.py +184 -0
- empathy_os/cli/commands/patterns.py +207 -0
- empathy_os/cli/commands/provider.py +93 -81
- empathy_os/cli/commands/setup.py +96 -0
- empathy_os/cli/commands/status.py +235 -0
- empathy_os/cli/commands/sync.py +166 -0
- empathy_os/cli/commands/tier.py +121 -0
- empathy_os/cli/commands/workflow.py +574 -0
- empathy_os/cli/parsers/__init__.py +62 -0
- empathy_os/cli/parsers/help.py +41 -0
- empathy_os/cli/parsers/info.py +26 -0
- empathy_os/cli/parsers/inspect.py +66 -0
- empathy_os/cli/parsers/metrics.py +42 -0
- empathy_os/cli/parsers/orchestrate.py +61 -0
- empathy_os/cli/parsers/patterns.py +54 -0
- empathy_os/cli/parsers/provider.py +40 -0
- empathy_os/cli/parsers/setup.py +42 -0
- empathy_os/cli/parsers/status.py +47 -0
- empathy_os/cli/parsers/sync.py +31 -0
- empathy_os/cli/parsers/tier.py +33 -0
- empathy_os/cli/parsers/workflow.py +77 -0
- empathy_os/cli/utils/__init__.py +1 -0
- empathy_os/cli/utils/data.py +242 -0
- empathy_os/cli/utils/helpers.py +68 -0
- empathy_os/{cli.py → cli_legacy.py} +0 -26
- empathy_os/cli_minimal.py +662 -0
- empathy_os/cli_router.py +384 -0
- empathy_os/cli_unified.py +13 -2
- empathy_os/memory/short_term.py +146 -414
- empathy_os/memory/types.py +441 -0
- empathy_os/memory/unified.py +61 -48
- empathy_os/models/fallback.py +1 -1
- empathy_os/models/provider_config.py +59 -344
- empathy_os/models/registry.py +27 -176
- empathy_os/monitoring/alerts.py +14 -20
- empathy_os/monitoring/alerts_cli.py +24 -7
- empathy_os/project_index/__init__.py +2 -0
- empathy_os/project_index/index.py +210 -5
- empathy_os/project_index/scanner.py +48 -16
- empathy_os/project_index/scanner_parallel.py +291 -0
- empathy_os/workflow_commands.py +9 -9
- empathy_os/workflows/__init__.py +31 -2
- empathy_os/workflows/base.py +295 -317
- empathy_os/workflows/bug_predict.py +10 -2
- empathy_os/workflows/builder.py +273 -0
- empathy_os/workflows/caching.py +253 -0
- empathy_os/workflows/code_review_pipeline.py +1 -0
- empathy_os/workflows/history.py +512 -0
- empathy_os/workflows/perf_audit.py +129 -23
- empathy_os/workflows/routing.py +163 -0
- empathy_os/workflows/secure_release.py +1 -0
- empathy_os/workflows/security_audit.py +1 -0
- empathy_os/workflows/security_audit_phase3.py +352 -0
- empathy_os/workflows/telemetry_mixin.py +269 -0
- empathy_os/workflows/test_gen.py +7 -7
- empathy_os/dashboard/__init__.py +0 -15
- empathy_os/dashboard/server.py +0 -941
- empathy_os/vscode_bridge 2.py +0 -173
- empathy_os/workflows/progressive/README 2.md +0 -454
- empathy_os/workflows/progressive/__init__ 2.py +0 -92
- empathy_os/workflows/progressive/cli 2.py +0 -242
- empathy_os/workflows/progressive/core 2.py +0 -488
- empathy_os/workflows/progressive/orchestrator 2.py +0 -701
- empathy_os/workflows/progressive/reports 2.py +0 -528
- empathy_os/workflows/progressive/telemetry 2.py +0 -280
- empathy_os/workflows/progressive/test_gen 2.py +0 -514
- empathy_os/workflows/progressive/workflow 2.py +0 -628
- patterns/README.md +0 -119
- patterns/__init__.py +0 -95
- patterns/behavior.py +0 -298
- patterns/code_review_memory.json +0 -441
- patterns/core.py +0 -97
- patterns/debugging.json +0 -3763
- patterns/empathy.py +0 -268
- patterns/health_check_memory.json +0 -505
- patterns/input.py +0 -161
- patterns/memory_graph.json +0 -8
- patterns/refactoring_memory.json +0 -1113
- patterns/registry.py +0 -663
- patterns/security_memory.json +0 -8
- patterns/structural.py +0 -415
- patterns/validation.py +0 -194
- {empathy_framework-4.7.1.dist-info → empathy_framework-4.9.0.dist-info}/licenses/LICENSE +0 -0
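The RECORD changes above show the 4.7.1 monolithic CLI being split into per-command modules under `empathy_os/cli/commands/` with matching argument-parser modules under `empathy_os/cli/parsers/`. The diff does not show how the two halves are registered with each other, so the sketch below is a hypothetical illustration of that commands/parsers split using stdlib argparse; all names in it are illustrative, not empathy_os APIs.

```python
# Hypothetical sketch of a commands/parsers split like the one suggested by the
# file list above -- the package's actual wiring may differ.
import argparse


def add_status_parser(subparsers) -> None:
    """Parser module: declares arguments only (cf. cli/parsers/status.py)."""
    parser = subparsers.add_parser("status", help="Show framework status")
    parser.add_argument("--json", action="store_true", help="Machine-readable output")
    parser.set_defaults(handler=run_status)


def run_status(args: argparse.Namespace) -> int:
    """Command module: implements the behavior (cf. cli/commands/status.py)."""
    print("{}" if args.json else "status: ok")
    return 0


def main() -> int:
    root = argparse.ArgumentParser(prog="empathy")
    subparsers = root.add_subparsers(dest="command", required=True)
    add_status_parser(subparsers)
    args = root.parse_args()
    return args.handler(args)


if __name__ == "__main__":
    raise SystemExit(main())
```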
empathy_os/memory/short_term.py
CHANGED
@@ -26,9 +26,7 @@ import json
 import threading
 import time
 from collections.abc import Callable
-from dataclasses import dataclass, field
 from datetime import datetime
-from enum import Enum
 from typing import Any
 
 import structlog
@@ -37,6 +35,20 @@ from .security.pii_scrubber import PIIScrubber
 from .security.secrets_detector import SecretsDetector
 from .security.secrets_detector import Severity as SecretSeverity
 
+# Import types from dedicated module
+from .types import (
+    AccessTier,
+    AgentCredentials,
+    ConflictContext,
+    PaginatedResult,
+    RedisConfig,
+    RedisMetrics,
+    SecurityError,
+    StagedPattern,
+    TimeWindowQuery,
+    TTLStrategy,
+)
+
 logger = structlog.get_logger(__name__)
 
 try:
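This hunk moves the memory type definitions out of `short_term.py` and re-imports them from the new `empathy_os/memory/types.py` (the +441-line file in the summary above), so existing call sites that import these names from `short_term` should keep resolving. A minimal sketch, assuming empathy-framework 4.9.0 is installed:

```python
# Both import paths should resolve after the move: the new canonical module,
# and the old location, which re-imports the same names (as shown above).
from empathy_os.memory.types import AccessTier, TTLStrategy
from empathy_os.memory.short_term import AccessTier as LegacyAccessTier

assert AccessTier is LegacyAccessTier     # one class object, re-exported
print(TTLStrategy.WORKING_RESULTS.value)  # 3600 (1 hour), per the enum below
```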
@@ -51,408 +63,6 @@ except ImportError:
     RedisTimeoutError = Exception  # type: ignore
 
 
-class AccessTier(Enum):
-    """Role-based access tiers per EMPATHY_PHILOSOPHY.md
-
-    Tier 1 - Observer: Read-only access to validated patterns
-    Tier 2 - Contributor: Can stage patterns for validation
-    Tier 3 - Validator: Can promote staged patterns to active
-    Tier 4 - Steward: Full access including deprecation and audit
-    """
-
-    OBSERVER = 1
-    CONTRIBUTOR = 2
-    VALIDATOR = 3
-    STEWARD = 4
-
-
-class TTLStrategy(Enum):
-    """TTL strategies for different memory types
-
-    Per EMPATHY_PHILOSOPHY.md Section 9.3:
-    - Working results: 1 hour
-    - Staged patterns: 24 hours
-    - Coordination signals: 5 minutes
-    - Conflict context: Until resolution
-    """
-
-    WORKING_RESULTS = 3600  # 1 hour
-    STAGED_PATTERNS = 86400  # 24 hours
-    COORDINATION = 300  # 5 minutes
-    CONFLICT_CONTEXT = 604800  # 7 days (fallback for unresolved)
-    SESSION = 1800  # 30 minutes
-    STREAM_ENTRY = 86400 * 7  # 7 days for audit stream entries
-    TASK_QUEUE = 3600 * 4  # 4 hours for task queue items
-
-
-@dataclass
-class RedisConfig:
-    """Enhanced Redis configuration with SSL and retry support.
-
-    Supports:
-    - Standard connections (host:port)
-    - URL-based connections (redis://...)
-    - SSL/TLS for managed services (rediss://...)
-    - Sentinel for high availability
-    - Connection pooling
-    - Retry with exponential backoff
-    """
-
-    host: str = "localhost"
-    port: int = 6379
-    db: int = 0
-    password: str | None = None
-    use_mock: bool = False
-
-    # Security settings
-    pii_scrub_enabled: bool = True  # Scrub PII before storing (HIPAA/GDPR compliance)
-    secrets_detection_enabled: bool = True  # Block storage of detected secrets
-
-    # SSL/TLS settings
-    ssl: bool = False
-    ssl_cert_reqs: str | None = None  # "required", "optional", "none"
-    ssl_ca_certs: str | None = None
-    ssl_certfile: str | None = None
-    ssl_keyfile: str | None = None
-
-    # Connection pool settings
-    max_connections: int = 10
-    socket_timeout: float = 5.0
-    socket_connect_timeout: float = 5.0
-
-    # Retry settings
-    retry_on_timeout: bool = True
-    retry_max_attempts: int = 3
-    retry_base_delay: float = 0.1  # seconds
-    retry_max_delay: float = 2.0  # seconds
-
-    # Sentinel settings (for HA)
-    sentinel_hosts: list[tuple[str, int]] | None = None
-    sentinel_master_name: str | None = None
-
-    def to_redis_kwargs(self) -> dict:
-        """Convert to redis.Redis constructor kwargs."""
-        kwargs: dict[str, Any] = {
-            "host": self.host,
-            "port": self.port,
-            "db": self.db,
-            "password": self.password,
-            "decode_responses": True,
-            "socket_timeout": self.socket_timeout,
-            "socket_connect_timeout": self.socket_connect_timeout,
-            "retry_on_timeout": self.retry_on_timeout,
-        }
-
-        if self.ssl:
-            kwargs["ssl"] = True
-            if self.ssl_cert_reqs:
-                kwargs["ssl_cert_reqs"] = self.ssl_cert_reqs
-            if self.ssl_ca_certs:
-                kwargs["ssl_ca_certs"] = self.ssl_ca_certs
-            if self.ssl_certfile:
-                kwargs["ssl_certfile"] = self.ssl_certfile
-            if self.ssl_keyfile:
-                kwargs["ssl_keyfile"] = self.ssl_keyfile
-
-        return kwargs
-
-
-@dataclass
-class RedisMetrics:
-    """Metrics for Redis operations."""
-
-    operations_total: int = 0
-    operations_success: int = 0
-    operations_failed: int = 0
-    retries_total: int = 0
-    latency_sum_ms: float = 0.0
-    latency_max_ms: float = 0.0
-
-    # Per-operation metrics
-    stash_count: int = 0
-    retrieve_count: int = 0
-    publish_count: int = 0
-    stream_append_count: int = 0
-
-    # Security metrics
-    pii_scrubbed_total: int = 0  # Total PII instances scrubbed
-    pii_scrub_operations: int = 0  # Operations that had PII scrubbed
-    secrets_blocked_total: int = 0  # Total secrets blocked from storage
-
-    def record_operation(self, operation: str, latency_ms: float, success: bool = True) -> None:
-        """Record an operation metric."""
-        self.operations_total += 1
-        self.latency_sum_ms += latency_ms
-        self.latency_max_ms = max(self.latency_max_ms, latency_ms)
-
-        if success:
-            self.operations_success += 1
-        else:
-            self.operations_failed += 1
-
-        # Track by operation type
-        if operation == "stash":
-            self.stash_count += 1
-        elif operation == "retrieve":
-            self.retrieve_count += 1
-        elif operation == "publish":
-            self.publish_count += 1
-        elif operation == "stream_append":
-            self.stream_append_count += 1
-
-    @property
-    def latency_avg_ms(self) -> float:
-        """Average latency in milliseconds."""
-        if self.operations_total == 0:
-            return 0.0
-        return self.latency_sum_ms / self.operations_total
-
-    @property
-    def success_rate(self) -> float:
-        """Success rate as percentage."""
-        if self.operations_total == 0:
-            return 100.0
-        return (self.operations_success / self.operations_total) * 100
-
-    def to_dict(self) -> dict:
-        """Convert metrics to dictionary for reporting and serialization.
-
-        Returns:
-            Dictionary with keys: operations_total, operations_success,
-            operations_failed, retries_total, latency_avg_ms, latency_max_ms,
-            success_rate, by_operation, security.
-        """
-        return {
-            "operations_total": self.operations_total,
-            "operations_success": self.operations_success,
-            "operations_failed": self.operations_failed,
-            "retries_total": self.retries_total,
-            "latency_avg_ms": round(self.latency_avg_ms, 2),
-            "latency_max_ms": round(self.latency_max_ms, 2),
-            "success_rate": round(self.success_rate, 2),
-            "by_operation": {
-                "stash": self.stash_count,
-                "retrieve": self.retrieve_count,
-                "publish": self.publish_count,
-                "stream_append": self.stream_append_count,
-            },
-            "security": {
-                "pii_scrubbed_total": self.pii_scrubbed_total,
-                "pii_scrub_operations": self.pii_scrub_operations,
-                "secrets_blocked_total": self.secrets_blocked_total,
-            },
-        }
-
-
-@dataclass
-class PaginatedResult:
-    """Result of a paginated query."""
-
-    items: list[Any]
-    cursor: str
-    has_more: bool
-    total_scanned: int = 0
-
-
-@dataclass
-class TimeWindowQuery:
-    """Query parameters for time-window operations."""
-
-    start_time: datetime | None = None
-    end_time: datetime | None = None
-    limit: int = 100
-    offset: int = 0
-
-    @property
-    def start_score(self) -> float:
-        """Start timestamp as Redis score."""
-        if self.start_time is None:
-            return float("-inf")
-        return self.start_time.timestamp()
-
-    @property
-    def end_score(self) -> float:
-        """End timestamp as Redis score."""
-        if self.end_time is None:
-            return float("+inf")
-        return self.end_time.timestamp()
-
-
-@dataclass
-class AgentCredentials:
-    """Agent identity and access permissions"""
-
-    agent_id: str
-    tier: AccessTier
-    roles: list[str] = field(default_factory=list)
-    created_at: datetime = field(default_factory=datetime.now)
-
-    def can_read(self) -> bool:
-        """All tiers can read"""
-        return True
-
-    def can_stage(self) -> bool:
-        """Contributor+ can stage patterns"""
-        return self.tier.value >= AccessTier.CONTRIBUTOR.value
-
-    def can_validate(self) -> bool:
-        """Validator+ can promote patterns"""
-        return self.tier.value >= AccessTier.VALIDATOR.value
-
-    def can_administer(self) -> bool:
-        """Only Stewards have full admin access"""
-        return self.tier.value >= AccessTier.STEWARD.value
-
-
-@dataclass
-class StagedPattern:
-    """Pattern awaiting validation"""
-
-    pattern_id: str
-    agent_id: str
-    pattern_type: str
-    name: str
-    description: str
-    code: str | None = None
-    context: dict = field(default_factory=dict)
-    confidence: float = 0.5
-    staged_at: datetime = field(default_factory=datetime.now)
-    interests: list[str] = field(default_factory=list)  # For negotiation
-
-    def __post_init__(self):
-        """Validate fields after initialization"""
-        # Pattern 1: String ID validation
-        if not self.pattern_id or not self.pattern_id.strip():
-            raise ValueError("pattern_id cannot be empty")
-        if not self.agent_id or not self.agent_id.strip():
-            raise ValueError("agent_id cannot be empty")
-        if not self.pattern_type or not self.pattern_type.strip():
-            raise ValueError("pattern_type cannot be empty")
-
-        # Pattern 4: Range validation for confidence
-        if not 0.0 <= self.confidence <= 1.0:
-            raise ValueError(f"confidence must be between 0.0 and 1.0, got {self.confidence}")
-
-        # Pattern 5: Type validation
-        if not isinstance(self.context, dict):
-            raise TypeError(f"context must be dict, got {type(self.context).__name__}")
-        if not isinstance(self.interests, list):
-            raise TypeError(f"interests must be list, got {type(self.interests).__name__}")
-
-    def to_dict(self) -> dict:
-        """Convert staged pattern to dictionary for serialization.
-
-        Returns:
-            Dictionary with keys: pattern_id, agent_id, pattern_type, name,
-            description, code, context, confidence, staged_at, interests.
-        """
-        return {
-            "pattern_id": self.pattern_id,
-            "agent_id": self.agent_id,
-            "pattern_type": self.pattern_type,
-            "name": self.name,
-            "description": self.description,
-            "code": self.code,
-            "context": self.context,
-            "confidence": self.confidence,
-            "staged_at": self.staged_at.isoformat(),
-            "interests": self.interests,
-        }
-
-    @classmethod
-    def from_dict(cls, data: dict) -> "StagedPattern":
-        """Reconstruct StagedPattern from dictionary.
-
-        Args:
-            data: Dictionary with required keys: pattern_id, agent_id,
-                pattern_type, name, description, staged_at.
-
-        Returns:
-            Reconstructed StagedPattern instance.
-
-        Raises:
-            KeyError: If required keys are missing.
-            ValueError: If data format is invalid.
-        """
-        return cls(
-            pattern_id=data["pattern_id"],
-            agent_id=data["agent_id"],
-            pattern_type=data["pattern_type"],
-            name=data["name"],
-            description=data["description"],
-            code=data.get("code"),
-            context=data.get("context", {}),
-            confidence=data.get("confidence", 0.5),
-            staged_at=datetime.fromisoformat(data["staged_at"]),
-            interests=data.get("interests", []),
-        )
-
-
-@dataclass
-class ConflictContext:
-    """Context for principled negotiation
-
-    Per Getting to Yes framework:
-    - Positions: What each party says they want
-    - Interests: Why they want it (underlying needs)
-    - BATNA: Best Alternative to Negotiated Agreement
-    """
-
-    conflict_id: str
-    positions: dict[str, Any]  # agent_id -> stated position
-    interests: dict[str, list[str]]  # agent_id -> underlying interests
-    batna: str | None = None  # Fallback strategy
-    created_at: datetime = field(default_factory=datetime.now)
-    resolved: bool = False
-    resolution: str | None = None
-
-    def to_dict(self) -> dict:
-        """Convert conflict context to dictionary for serialization.
-
-        Returns:
-            Dictionary with keys: conflict_id, positions, interests,
-            batna, created_at, resolved, resolution.
-        """
-        return {
-            "conflict_id": self.conflict_id,
-            "positions": self.positions,
-            "interests": self.interests,
-            "batna": self.batna,
-            "created_at": self.created_at.isoformat(),
-            "resolved": self.resolved,
-            "resolution": self.resolution,
-        }
-
-    @classmethod
-    def from_dict(cls, data: dict) -> "ConflictContext":
-        """Reconstruct ConflictContext from dictionary.
-
-        Args:
-            data: Dictionary with required keys: conflict_id, positions,
-                interests, created_at.
-
-        Returns:
-            Reconstructed ConflictContext instance.
-
-        Raises:
-            KeyError: If required keys are missing.
-            ValueError: If data format is invalid.
-        """
-        return cls(
-            conflict_id=data["conflict_id"],
-            positions=data["positions"],
-            interests=data["interests"],
-            batna=data.get("batna"),
-            created_at=datetime.fromisoformat(data["created_at"]),
-            resolved=data.get("resolved", False),
-            resolution=data.get("resolution"),
-        )
-
-
-class SecurityError(Exception):
-    """Raised when a security policy is violated (e.g., secrets detected in data)."""
-
-
 class RedisShortTermMemory:
     """Redis-backed short-term memory for agent coordination
 
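The roughly 400 removed lines above are a move, not a deletion: per the summary, these enums, dataclasses, and `SecurityError` now live in `empathy_os/memory/types.py`. A quick round trip through the `to_dict()`/`from_dict()` contract visible in the removed code, assuming the 4.9.0 package is installed:

```python
from empathy_os.memory.types import StagedPattern

# Serialization round trip per the to_dict()/from_dict() pair shown above.
pattern = StagedPattern(
    pattern_id="p-001",
    agent_id="agent-7",
    pattern_type="refactoring",
    name="extract-method",
    description="Extract a repeated block into a helper",
    confidence=0.8,
)
restored = StagedPattern.from_dict(pattern.to_dict())
assert restored.pattern_id == pattern.pattern_id

# __post_init__ validation rejects out-of-range confidence values:
try:
    StagedPattern(pattern_id="p-002", agent_id="a", pattern_type="t",
                  name="n", description="d", confidence=1.5)
except ValueError as exc:
    print(exc)  # confidence must be between 0.0 and 1.0, got 1.5
```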
@@ -554,6 +164,14 @@ class RedisShortTermMemory:
         self._mock_streams: dict[str, list[tuple[str, dict]]] = {}
         self._mock_pubsub_handlers: dict[str, list[Callable[[dict], None]]] = {}
 
+        # Local LRU cache for two-tier caching (memory + Redis)
+        # Reduces network I/O from 37ms to <0.001ms for frequently accessed keys
+        self._local_cache_enabled = self._config.local_cache_enabled
+        self._local_cache_max_size = self._config.local_cache_size
+        self._local_cache: dict[str, tuple[str, float, float]] = {}  # key -> (value, timestamp, last_access)
+        self._local_cache_hits = 0
+        self._local_cache_misses = 0
+
         # Security: Initialize PII scrubber and secrets detector
         self._pii_scrubber: PIIScrubber | None = None
         self._secrets_detector: SecretsDetector | None = None
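The constructor reads `local_cache_enabled` and `local_cache_size` from its config object; this diff does not show those fields being added (`RedisConfig` moved to `types.py`, which is not expanded here). A minimal sketch, assuming those fields exist on the 4.9.0 `RedisConfig` and that the class takes the config as its first argument:

```python
from empathy_os.memory.short_term import RedisShortTermMemory
from empathy_os.memory.types import RedisConfig

# Assumption: the 4.9.0 RedisConfig grew local_cache_enabled / local_cache_size
# fields backing the self._config.* reads in __init__ above.
config = RedisConfig(
    use_mock=True,             # exercise the cache without a Redis server
    local_cache_enabled=True,
    local_cache_size=256,      # max entries before LRU eviction
)
memory = RedisShortTermMemory(config)  # constructor signature assumed
```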
@@ -652,43 +270,105 @@ class RedisShortTermMemory:
         raise last_error if last_error else ConnectionError("Redis operation failed")
 
     def _get(self, key: str) -> str | None:
-        """Get value from Redis or mock"""
+        """Get value from Redis or mock with two-tier caching (local + Redis)"""
+        # Check local cache first (0.001ms vs 37ms for Redis/mock)
+        # This works for BOTH mock and real Redis modes
+        if self._local_cache_enabled and key in self._local_cache:
+            value, timestamp, last_access = self._local_cache[key]
+            now = time.time()
+
+            # Update last access time for LRU
+            self._local_cache[key] = (value, timestamp, now)
+            self._local_cache_hits += 1
+
+            return value
+
+        # Cache miss - fetch from storage (mock or Redis)
+        self._local_cache_misses += 1
+
+        # Mock mode path
         if self.use_mock:
             if key in self._mock_storage:
                 value, expires = self._mock_storage[key]
                 if expires is None or datetime.now().timestamp() < expires:
-
+                    result = str(value) if value is not None else None
+                    # Add to local cache for next access
+                    if result and self._local_cache_enabled:
+                        self._add_to_local_cache(key, result)
+                    return result
                 del self._mock_storage[key]
             return None
+
+        # Real Redis path
         if self._client is None:
             return None
+
         result = self._client.get(key)
+
+        # Add to local cache if successful
+        if result and self._local_cache_enabled:
+            self._add_to_local_cache(key, str(result))
+
         return str(result) if result else None
 
     def _set(self, key: str, value: str, ttl: int | None = None) -> bool:
-        """Set value in Redis or mock"""
+        """Set value in Redis or mock with two-tier caching"""
+        # Mock mode path
         if self.use_mock:
             expires = datetime.now().timestamp() + ttl if ttl else None
             self._mock_storage[key] = (value, expires)
+
+            # Update local cache in mock mode too
+            if self._local_cache_enabled:
+                self._add_to_local_cache(key, value)
+
             return True
+
+        # Real Redis path
         if self._client is None:
             return False
+
+        # Set in Redis
         if ttl:
             self._client.setex(key, ttl, value)
-
-
-
+        else:
+            result = self._client.set(key, value)
+            if not result:
+                return False
+
+        # Update local cache if enabled
+        if self._local_cache_enabled:
+            self._add_to_local_cache(key, value)
+
+        return True
 
     def _delete(self, key: str) -> bool:
-        """Delete key from Redis or mock"""
+        """Delete key from Redis or mock and local cache"""
+        # Mock mode path
         if self.use_mock:
+            deleted = False
             if key in self._mock_storage:
                 del self._mock_storage[key]
-
-
+                deleted = True
+
+            # Remove from local cache if present
+            if self._local_cache_enabled and key in self._local_cache:
+                del self._local_cache[key]
+
+            return deleted
+
+        # Real Redis path
         if self._client is None:
             return False
-
+
+        # Delete from Redis
        result = bool(self._client.delete(key) > 0)
+
+        # Also remove from local cache if present
+        if self._local_cache_enabled and key in self._local_cache:
+            del self._local_cache[key]
+
+        return result
 
     def _keys(self, pattern: str) -> list[str]:
         """Get keys matching pattern"""
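The pattern in `_get`/`_set`/`_delete` above is a classic read-through L1 cache in front of a slower store: check the in-process dict first, fall back to the backing store on a miss, and populate the dict on the way out. The standalone sketch below reproduces that hit/miss flow in isolation; it illustrates the technique, not the library's code.

```python
import time

# A dict-backed L1 in front of a slower backing store, mirroring _get above.
backing_store = {"greeting": "hello"}         # stands in for Redis / the mock
l1: dict[str, tuple[str, float, float]] = {}  # key -> (value, created, last_access)
hits = misses = 0


def get(key: str) -> str | None:
    global hits, misses
    if key in l1:
        value, created, _ = l1[key]
        l1[key] = (value, created, time.time())  # refresh recency for LRU
        hits += 1
        return value
    misses += 1
    value = backing_store.get(key)
    if value is not None:
        now = time.time()
        l1[key] = (value, now, now)              # populate L1 on a miss
    return value


get("greeting")      # miss: fetched from the backing store, then cached
get("greeting")      # hit: served from L1 without touching the store
print(hits, misses)  # 1 1
```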
@@ -703,6 +383,58 @@ class RedisShortTermMemory:
         # Convert bytes to strings - needed for API return type
         return [k.decode() if isinstance(k, bytes) else str(k) for k in keys]
 
+    # === Local LRU Cache Methods ===
+
+    def _add_to_local_cache(self, key: str, value: str) -> None:
+        """Add entry to local cache with LRU eviction.
+
+        Args:
+            key: Cache key
+            value: Value to cache
+        """
+        now = time.time()
+
+        # Evict oldest entry if cache is full
+        if len(self._local_cache) >= self._local_cache_max_size:
+            # Find key with oldest last_access time
+            oldest_key = min(self._local_cache, key=lambda k: self._local_cache[k][2])
+            del self._local_cache[oldest_key]
+
+        # Add new entry: (value, timestamp, last_access)
+        self._local_cache[key] = (value, now, now)
+
+    def clear_local_cache(self) -> int:
+        """Clear all entries from local cache.
+
+        Returns:
+            Number of entries cleared
+        """
+        count = len(self._local_cache)
+        self._local_cache.clear()
+        self._local_cache_hits = 0
+        self._local_cache_misses = 0
+        logger.info("local_cache_cleared", entries_cleared=count)
+        return count
+
+    def get_local_cache_stats(self) -> dict:
+        """Get local cache performance statistics.
+
+        Returns:
+            Dict with cache stats (hits, misses, hit_rate, size)
+        """
+        total = self._local_cache_hits + self._local_cache_misses
+        hit_rate = (self._local_cache_hits / total * 100) if total > 0 else 0.0
+
+        return {
+            "enabled": self._local_cache_enabled,
+            "size": len(self._local_cache),
+            "max_size": self._local_cache_max_size,
+            "hits": self._local_cache_hits,
+            "misses": self._local_cache_misses,
+            "hit_rate": hit_rate,
+            "total_requests": total,
+        }
+
     # === Security Methods ===
 
     def _sanitize_data(self, data: Any) -> tuple[Any, int]: