attune-ai 2.1.5__py3-none-any.whl → 2.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/cli/__init__.py +3 -59
- attune/cli/commands/batch.py +4 -12
- attune/cli/commands/cache.py +7 -15
- attune/cli/commands/provider.py +17 -0
- attune/cli/commands/routing.py +3 -1
- attune/cli/commands/setup.py +122 -0
- attune/cli/commands/tier.py +1 -3
- attune/cli/commands/workflow.py +31 -0
- attune/cli/parsers/cache.py +1 -0
- attune/cli/parsers/help.py +1 -3
- attune/cli/parsers/provider.py +7 -0
- attune/cli/parsers/routing.py +1 -3
- attune/cli/parsers/setup.py +7 -0
- attune/cli/parsers/status.py +1 -3
- attune/cli/parsers/tier.py +1 -3
- attune/cli_minimal.py +9 -3
- attune/cli_router.py +9 -7
- attune/cli_unified.py +3 -0
- attune/dashboard/app.py +3 -1
- attune/dashboard/simple_server.py +3 -1
- attune/dashboard/standalone_server.py +7 -3
- attune/mcp/server.py +54 -102
- attune/memory/long_term.py +0 -2
- attune/memory/short_term/__init__.py +84 -0
- attune/memory/short_term/base.py +467 -0
- attune/memory/short_term/batch.py +219 -0
- attune/memory/short_term/caching.py +227 -0
- attune/memory/short_term/conflicts.py +265 -0
- attune/memory/short_term/cross_session.py +122 -0
- attune/memory/short_term/facade.py +655 -0
- attune/memory/short_term/pagination.py +215 -0
- attune/memory/short_term/patterns.py +271 -0
- attune/memory/short_term/pubsub.py +286 -0
- attune/memory/short_term/queues.py +244 -0
- attune/memory/short_term/security.py +300 -0
- attune/memory/short_term/sessions.py +250 -0
- attune/memory/short_term/streams.py +249 -0
- attune/memory/short_term/timelines.py +234 -0
- attune/memory/short_term/transactions.py +186 -0
- attune/memory/short_term/working.py +252 -0
- attune/meta_workflows/cli_commands/__init__.py +3 -0
- attune/meta_workflows/cli_commands/agent_commands.py +0 -4
- attune/meta_workflows/cli_commands/analytics_commands.py +0 -6
- attune/meta_workflows/cli_commands/config_commands.py +0 -5
- attune/meta_workflows/cli_commands/memory_commands.py +0 -5
- attune/meta_workflows/cli_commands/template_commands.py +0 -5
- attune/meta_workflows/cli_commands/workflow_commands.py +0 -6
- attune/models/adaptive_routing.py +4 -8
- attune/models/auth_cli.py +3 -9
- attune/models/auth_strategy.py +2 -4
- attune/models/telemetry/analytics.py +0 -2
- attune/models/telemetry/backend.py +0 -3
- attune/models/telemetry/storage.py +0 -2
- attune/orchestration/_strategies/__init__.py +156 -0
- attune/orchestration/_strategies/base.py +231 -0
- attune/orchestration/_strategies/conditional_strategies.py +373 -0
- attune/orchestration/_strategies/conditions.py +369 -0
- attune/orchestration/_strategies/core_strategies.py +491 -0
- attune/orchestration/_strategies/data_classes.py +64 -0
- attune/orchestration/_strategies/nesting.py +233 -0
- attune/orchestration/execution_strategies.py +58 -1567
- attune/orchestration/meta_orchestrator.py +1 -3
- attune/project_index/scanner.py +1 -3
- attune/project_index/scanner_parallel.py +7 -5
- attune/socratic_router.py +1 -3
- attune/telemetry/agent_coordination.py +9 -3
- attune/telemetry/agent_tracking.py +16 -3
- attune/telemetry/approval_gates.py +22 -5
- attune/telemetry/cli.py +1 -3
- attune/telemetry/commands/dashboard_commands.py +24 -8
- attune/telemetry/event_streaming.py +8 -2
- attune/telemetry/feedback_loop.py +10 -2
- attune/tools.py +1 -0
- attune/workflow_commands.py +1 -3
- attune/workflows/__init__.py +53 -10
- attune/workflows/autonomous_test_gen.py +158 -102
- attune/workflows/base.py +48 -672
- attune/workflows/batch_processing.py +1 -3
- attune/workflows/compat.py +156 -0
- attune/workflows/cost_mixin.py +141 -0
- attune/workflows/data_classes.py +92 -0
- attune/workflows/document_gen/workflow.py +11 -14
- attune/workflows/history.py +62 -37
- attune/workflows/llm_base.py +1 -3
- attune/workflows/migration.py +422 -0
- attune/workflows/output.py +2 -7
- attune/workflows/parsing_mixin.py +427 -0
- attune/workflows/perf_audit.py +3 -1
- attune/workflows/progress.py +9 -11
- attune/workflows/release_prep.py +5 -1
- attune/workflows/routing.py +0 -2
- attune/workflows/secure_release.py +2 -1
- attune/workflows/security_audit.py +19 -14
- attune/workflows/security_audit_phase3.py +28 -22
- attune/workflows/seo_optimization.py +27 -27
- attune/workflows/test_gen/test_templates.py +1 -4
- attune/workflows/test_gen/workflow.py +0 -2
- attune/workflows/test_gen_behavioral.py +6 -19
- attune/workflows/test_gen_parallel.py +6 -4
- {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/METADATA +4 -3
- {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/RECORD +116 -91
- {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/entry_points.txt +0 -2
- attune_healthcare/monitors/monitoring/__init__.py +9 -9
- attune_llm/agent_factory/__init__.py +6 -6
- attune_llm/commands/__init__.py +10 -10
- attune_llm/commands/models.py +3 -3
- attune_llm/config/__init__.py +8 -8
- attune_llm/learning/__init__.py +3 -3
- attune_llm/learning/extractor.py +5 -3
- attune_llm/learning/storage.py +5 -3
- attune_llm/security/__init__.py +17 -17
- attune_llm/utils/tokens.py +3 -1
- attune/cli_legacy.py +0 -3978
- attune/memory/short_term.py +0 -2192
- attune/workflows/manage_docs.py +0 -87
- attune/workflows/test5.py +0 -125
- {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/WHEEL +0 -0
- {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/licenses/LICENSE +0 -0
- {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -0
- {attune_ai-2.1.5.dist-info → attune_ai-2.2.0.dist-info}/top_level.txt +0 -0
attune/memory/short_term.py
DELETED
|
@@ -1,2192 +0,0 @@
|
|
|
1
|
-
"""Redis Short-Term Memory for Empathy Framework
|
|
2
|
-
|
|
3
|
-
Per EMPATHY_PHILOSOPHY.md v1.1.0:
|
|
4
|
-
- Implements fast, TTL-based working memory for agent coordination
|
|
5
|
-
- Role-based access tiers for data integrity
|
|
6
|
-
- Pattern staging before validation
|
|
7
|
-
- Principled negotiation support
|
|
8
|
-
|
|
9
|
-
Enhanced Features (v2.0):
|
|
10
|
-
- Pub/Sub for real-time agent notifications
|
|
11
|
-
- Batch operations for high-throughput workflows
|
|
12
|
-
- SCAN-based pagination for large datasets
|
|
13
|
-
- Redis Streams for audit trails
|
|
14
|
-
- Connection retry with exponential backoff
|
|
15
|
-
- SSL/TLS support for managed Redis services
|
|
16
|
-
- Time-window queries with sorted sets
|
|
17
|
-
- Task queues with Lists
|
|
18
|
-
- Atomic transactions with MULTI/EXEC
|
|
19
|
-
- Comprehensive metrics tracking
|
|
20
|
-
|
|
21
|
-
Copyright 2025 Smart AI Memory, LLC
|
|
22
|
-
Licensed under Fair Source 0.9
|
|
23
|
-
"""
|
|
24
|
-
|
|
25
|
-
import json
|
|
26
|
-
import os
|
|
27
|
-
import threading
|
|
28
|
-
import time
|
|
29
|
-
from collections.abc import Callable
|
|
30
|
-
from datetime import datetime
|
|
31
|
-
from typing import Any
|
|
32
|
-
|
|
33
|
-
import structlog
|
|
34
|
-
|
|
35
|
-
from .security.pii_scrubber import PIIScrubber
|
|
36
|
-
from .security.secrets_detector import SecretsDetector
|
|
37
|
-
from .security.secrets_detector import Severity as SecretSeverity
|
|
38
|
-
|
|
39
|
-
# Import types from dedicated module
|
|
40
|
-
from .types import (
|
|
41
|
-
AccessTier,
|
|
42
|
-
AgentCredentials,
|
|
43
|
-
ConflictContext,
|
|
44
|
-
PaginatedResult,
|
|
45
|
-
RedisConfig,
|
|
46
|
-
RedisMetrics,
|
|
47
|
-
SecurityError,
|
|
48
|
-
StagedPattern,
|
|
49
|
-
TimeWindowQuery,
|
|
50
|
-
TTLStrategy,
|
|
51
|
-
)
|
|
52
|
-
|
|
53
|
-
logger = structlog.get_logger(__name__)
|
|
54
|
-
|
|
55
|
-
try:
|
|
56
|
-
import redis
|
|
57
|
-
from redis.exceptions import ConnectionError as RedisConnectionError
|
|
58
|
-
from redis.exceptions import TimeoutError as RedisTimeoutError
|
|
59
|
-
|
|
60
|
-
REDIS_AVAILABLE = True
|
|
61
|
-
except ImportError:
|
|
62
|
-
REDIS_AVAILABLE = False
|
|
63
|
-
RedisConnectionError = Exception # type: ignore
|
|
64
|
-
RedisTimeoutError = Exception # type: ignore
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
class RedisShortTermMemory:
|
|
68
|
-
"""Redis-backed short-term memory for agent coordination
|
|
69
|
-
|
|
70
|
-
Features:
|
|
71
|
-
- Fast read/write with automatic TTL expiration
|
|
72
|
-
- Role-based access control
|
|
73
|
-
- Pattern staging workflow
|
|
74
|
-
- Conflict negotiation context
|
|
75
|
-
- Agent working memory
|
|
76
|
-
|
|
77
|
-
Enhanced Features (v2.0):
|
|
78
|
-
- Pub/Sub for real-time agent notifications
|
|
79
|
-
- Batch operations (stash_batch, retrieve_batch)
|
|
80
|
-
- SCAN-based pagination for large datasets
|
|
81
|
-
- Redis Streams for audit trails
|
|
82
|
-
- Time-window queries with sorted sets
|
|
83
|
-
- Task queues with Lists (LPUSH/RPOP)
|
|
84
|
-
- Atomic transactions with MULTI/EXEC
|
|
85
|
-
- Connection retry with exponential backoff
|
|
86
|
-
- Metrics tracking for observability
|
|
87
|
-
|
|
88
|
-
Example:
|
|
89
|
-
>>> memory = RedisShortTermMemory()
|
|
90
|
-
>>> creds = AgentCredentials("agent_1", AccessTier.CONTRIBUTOR)
|
|
91
|
-
>>> memory.stash("analysis_results", {"issues": 3}, creds)
|
|
92
|
-
>>> data = memory.retrieve("analysis_results", creds)
|
|
93
|
-
|
|
94
|
-
# Pub/Sub example
|
|
95
|
-
>>> memory.subscribe("agent_signals", lambda msg: print(msg))
|
|
96
|
-
>>> memory.publish("agent_signals", {"event": "task_complete"}, creds)
|
|
97
|
-
|
|
98
|
-
# Batch operations
|
|
99
|
-
>>> items = [("key1", {"data": 1}), ("key2", {"data": 2})]
|
|
100
|
-
>>> memory.stash_batch(items, creds)
|
|
101
|
-
|
|
102
|
-
# Pagination
|
|
103
|
-
>>> result = memory.list_staged_patterns_paginated(creds, cursor="0", count=10)
|
|
104
|
-
|
|
105
|
-
"""
|
|
106
|
-
|
|
107
|
-
# Key prefixes for namespacing
|
|
108
|
-
PREFIX_WORKING = "empathy:working:"
|
|
109
|
-
PREFIX_STAGED = "empathy:staged:"
|
|
110
|
-
PREFIX_CONFLICT = "empathy:conflict:"
|
|
111
|
-
# PREFIX_COORDINATION removed in v5.0 - use attune.telemetry.CoordinationSignals
|
|
112
|
-
PREFIX_SESSION = "empathy:session:"
|
|
113
|
-
PREFIX_PUBSUB = "empathy:pubsub:"
|
|
114
|
-
PREFIX_STREAM = "empathy:stream:"
|
|
115
|
-
PREFIX_TIMELINE = "empathy:timeline:"
|
|
116
|
-
PREFIX_QUEUE = "empathy:queue:"
|
|
117
|
-
|
|
118
|
-
def __init__(
|
|
119
|
-
self,
|
|
120
|
-
host: str = "localhost",
|
|
121
|
-
port: int = 6379,
|
|
122
|
-
db: int = 0,
|
|
123
|
-
password: str | None = None,
|
|
124
|
-
use_mock: bool = False,
|
|
125
|
-
config: RedisConfig | None = None,
|
|
126
|
-
):
|
|
127
|
-
"""Initialize Redis connection
|
|
128
|
-
|
|
129
|
-
Args:
|
|
130
|
-
host: Redis host
|
|
131
|
-
port: Redis port
|
|
132
|
-
db: Redis database number
|
|
133
|
-
password: Redis password (optional)
|
|
134
|
-
use_mock: Use in-memory mock for testing
|
|
135
|
-
config: Full RedisConfig for advanced settings (overrides other args)
|
|
136
|
-
|
|
137
|
-
"""
|
|
138
|
-
# Use config if provided, otherwise build from individual args
|
|
139
|
-
if config is not None:
|
|
140
|
-
self._config = config
|
|
141
|
-
else:
|
|
142
|
-
# Check environment variable for Redis enablement (default: disabled)
|
|
143
|
-
redis_enabled = os.getenv("REDIS_ENABLED", "false").lower() in ("true", "1", "yes")
|
|
144
|
-
|
|
145
|
-
# Use environment variables for configuration if available
|
|
146
|
-
env_host = os.getenv("REDIS_HOST", host)
|
|
147
|
-
env_port = int(os.getenv("REDIS_PORT", str(port)))
|
|
148
|
-
env_db = int(os.getenv("REDIS_DB", str(db)))
|
|
149
|
-
env_password = os.getenv("REDIS_PASSWORD", password)
|
|
150
|
-
|
|
151
|
-
# If Redis is not enabled via env var, force mock mode
|
|
152
|
-
if not redis_enabled and not use_mock:
|
|
153
|
-
use_mock = True
|
|
154
|
-
logger.info("redis_disabled_via_env", message="Redis not enabled in environment, using mock mode")
|
|
155
|
-
|
|
156
|
-
self._config = RedisConfig(
|
|
157
|
-
host=env_host,
|
|
158
|
-
port=env_port,
|
|
159
|
-
db=env_db,
|
|
160
|
-
password=env_password if env_password else None,
|
|
161
|
-
use_mock=use_mock,
|
|
162
|
-
)
|
|
163
|
-
|
|
164
|
-
self.use_mock = self._config.use_mock or not REDIS_AVAILABLE
|
|
165
|
-
|
|
166
|
-
# Initialize metrics
|
|
167
|
-
self._metrics = RedisMetrics()
|
|
168
|
-
|
|
169
|
-
# Pub/Sub state
|
|
170
|
-
self._pubsub: Any | None = None
|
|
171
|
-
self._pubsub_thread: threading.Thread | None = None
|
|
172
|
-
self._subscriptions: dict[str, list[Callable[[dict], None]]] = {}
|
|
173
|
-
self._pubsub_running = False
|
|
174
|
-
|
|
175
|
-
# Mock storage for testing
|
|
176
|
-
self._mock_storage: dict[str, tuple[Any, float | None]] = {}
|
|
177
|
-
self._mock_lists: dict[str, list[str]] = {}
|
|
178
|
-
self._mock_sorted_sets: dict[str, list[tuple[float, str]]] = {}
|
|
179
|
-
self._mock_streams: dict[str, list[tuple[str, dict]]] = {}
|
|
180
|
-
self._mock_pubsub_handlers: dict[str, list[Callable[[dict], None]]] = {}
|
|
181
|
-
|
|
182
|
-
# Local LRU cache for two-tier caching (memory + Redis)
|
|
183
|
-
# Reduces network I/O from 37ms to <0.001ms for frequently accessed keys
|
|
184
|
-
self._local_cache_enabled = self._config.local_cache_enabled
|
|
185
|
-
self._local_cache_max_size = self._config.local_cache_size
|
|
186
|
-
self._local_cache: dict[str, tuple[str, float, float]] = {} # key -> (value, timestamp, last_access)
|
|
187
|
-
self._local_cache_hits = 0
|
|
188
|
-
self._local_cache_misses = 0
|
|
189
|
-
|
|
190
|
-
# Security: Initialize PII scrubber and secrets detector
|
|
191
|
-
self._pii_scrubber: PIIScrubber | None = None
|
|
192
|
-
self._secrets_detector: SecretsDetector | None = None
|
|
193
|
-
|
|
194
|
-
if self._config.pii_scrub_enabled:
|
|
195
|
-
self._pii_scrubber = PIIScrubber(enable_name_detection=False)
|
|
196
|
-
logger.debug(
|
|
197
|
-
"pii_scrubber_enabled", message="PII scrubbing active for short-term memory"
|
|
198
|
-
)
|
|
199
|
-
|
|
200
|
-
if self._config.secrets_detection_enabled:
|
|
201
|
-
self._secrets_detector = SecretsDetector()
|
|
202
|
-
logger.debug(
|
|
203
|
-
"secrets_detector_enabled", message="Secrets detection active for short-term memory"
|
|
204
|
-
)
|
|
205
|
-
|
|
206
|
-
if self.use_mock:
|
|
207
|
-
self._client = None
|
|
208
|
-
else:
|
|
209
|
-
self._client = self._create_client_with_retry()
|
|
210
|
-
|
|
211
|
-
@property
|
|
212
|
-
def client(self) -> Any:
|
|
213
|
-
"""Get the Redis client instance.
|
|
214
|
-
|
|
215
|
-
Returns:
|
|
216
|
-
Redis client instance or None if using mock mode
|
|
217
|
-
|
|
218
|
-
Example:
|
|
219
|
-
>>> memory = RedisShortTermMemory()
|
|
220
|
-
>>> if memory.client:
|
|
221
|
-
... print("Redis connected")
|
|
222
|
-
"""
|
|
223
|
-
return self._client
|
|
224
|
-
|
|
225
|
-
@property
|
|
226
|
-
def metrics(self) -> "RedisMetrics":
|
|
227
|
-
"""Get Redis metrics instance.
|
|
228
|
-
|
|
229
|
-
Returns:
|
|
230
|
-
RedisMetrics instance with connection and operation statistics
|
|
231
|
-
|
|
232
|
-
Example:
|
|
233
|
-
>>> memory = RedisShortTermMemory()
|
|
234
|
-
>>> print(f"Retries: {memory.metrics.retries_total}")
|
|
235
|
-
"""
|
|
236
|
-
return self._metrics
|
|
237
|
-
|
|
238
|
-
def _create_client_with_retry(self) -> Any:
|
|
239
|
-
"""Create Redis client with retry logic."""
|
|
240
|
-
max_attempts = self._config.retry_max_attempts
|
|
241
|
-
base_delay = self._config.retry_base_delay
|
|
242
|
-
max_delay = self._config.retry_max_delay
|
|
243
|
-
|
|
244
|
-
last_error: Exception | None = None
|
|
245
|
-
|
|
246
|
-
for attempt in range(max_attempts):
|
|
247
|
-
try:
|
|
248
|
-
client = redis.Redis(**self._config.to_redis_kwargs())
|
|
249
|
-
# Test connection
|
|
250
|
-
client.ping()
|
|
251
|
-
logger.info(
|
|
252
|
-
"redis_connected",
|
|
253
|
-
host=self._config.host,
|
|
254
|
-
port=self._config.port,
|
|
255
|
-
attempt=attempt + 1,
|
|
256
|
-
)
|
|
257
|
-
return client
|
|
258
|
-
except (RedisConnectionError, RedisTimeoutError) as e:
|
|
259
|
-
last_error = e
|
|
260
|
-
self._metrics.retries_total += 1
|
|
261
|
-
|
|
262
|
-
if attempt < max_attempts - 1:
|
|
263
|
-
delay = min(base_delay * (2**attempt), max_delay)
|
|
264
|
-
logger.warning(
|
|
265
|
-
"redis_connection_retry",
|
|
266
|
-
attempt=attempt + 1,
|
|
267
|
-
max_attempts=max_attempts,
|
|
268
|
-
delay=delay,
|
|
269
|
-
error=str(e),
|
|
270
|
-
)
|
|
271
|
-
time.sleep(delay)
|
|
272
|
-
|
|
273
|
-
# All retries failed
|
|
274
|
-
logger.error(
|
|
275
|
-
"redis_connection_failed",
|
|
276
|
-
max_attempts=max_attempts,
|
|
277
|
-
error=str(last_error),
|
|
278
|
-
)
|
|
279
|
-
raise last_error if last_error else ConnectionError("Failed to connect to Redis")
|
|
280
|
-
|
|
281
|
-
def _execute_with_retry(self, operation: Callable[[], Any], op_name: str = "operation") -> Any:
|
|
282
|
-
"""Execute a Redis operation with retry logic."""
|
|
283
|
-
start_time = time.perf_counter()
|
|
284
|
-
max_attempts = self._config.retry_max_attempts
|
|
285
|
-
base_delay = self._config.retry_base_delay
|
|
286
|
-
max_delay = self._config.retry_max_delay
|
|
287
|
-
|
|
288
|
-
last_error: Exception | None = None
|
|
289
|
-
|
|
290
|
-
for attempt in range(max_attempts):
|
|
291
|
-
try:
|
|
292
|
-
result = operation()
|
|
293
|
-
latency_ms = (time.perf_counter() - start_time) * 1000
|
|
294
|
-
self._metrics.record_operation(op_name, latency_ms, success=True)
|
|
295
|
-
return result
|
|
296
|
-
except (RedisConnectionError, RedisTimeoutError) as e:
|
|
297
|
-
last_error = e
|
|
298
|
-
self._metrics.retries_total += 1
|
|
299
|
-
|
|
300
|
-
if attempt < max_attempts - 1:
|
|
301
|
-
delay = min(base_delay * (2**attempt), max_delay)
|
|
302
|
-
logger.warning(
|
|
303
|
-
"redis_operation_retry",
|
|
304
|
-
operation=op_name,
|
|
305
|
-
attempt=attempt + 1,
|
|
306
|
-
delay=delay,
|
|
307
|
-
)
|
|
308
|
-
time.sleep(delay)
|
|
309
|
-
|
|
310
|
-
latency_ms = (time.perf_counter() - start_time) * 1000
|
|
311
|
-
self._metrics.record_operation(op_name, latency_ms, success=False)
|
|
312
|
-
raise last_error if last_error else ConnectionError("Redis operation failed")
|
|
313
|
-
|
|
314
|
-
def _get(self, key: str) -> str | None:
|
|
315
|
-
"""Get value from Redis or mock with two-tier caching (local + Redis)"""
|
|
316
|
-
# Check local cache first (0.001ms vs 37ms for Redis/mock)
|
|
317
|
-
# This works for BOTH mock and real Redis modes
|
|
318
|
-
if self._local_cache_enabled and key in self._local_cache:
|
|
319
|
-
value, timestamp, last_access = self._local_cache[key]
|
|
320
|
-
now = time.time()
|
|
321
|
-
|
|
322
|
-
# Update last access time for LRU
|
|
323
|
-
self._local_cache[key] = (value, timestamp, now)
|
|
324
|
-
self._local_cache_hits += 1
|
|
325
|
-
|
|
326
|
-
return value
|
|
327
|
-
|
|
328
|
-
# Cache miss - fetch from storage (mock or Redis)
|
|
329
|
-
self._local_cache_misses += 1
|
|
330
|
-
|
|
331
|
-
# Mock mode path
|
|
332
|
-
if self.use_mock:
|
|
333
|
-
if key in self._mock_storage:
|
|
334
|
-
value, expires = self._mock_storage[key]
|
|
335
|
-
if expires is None or datetime.now().timestamp() < expires:
|
|
336
|
-
result = str(value) if value is not None else None
|
|
337
|
-
# Add to local cache for next access
|
|
338
|
-
if result and self._local_cache_enabled:
|
|
339
|
-
self._add_to_local_cache(key, result)
|
|
340
|
-
return result
|
|
341
|
-
del self._mock_storage[key]
|
|
342
|
-
return None
|
|
343
|
-
|
|
344
|
-
# Real Redis path
|
|
345
|
-
if self._client is None:
|
|
346
|
-
return None
|
|
347
|
-
|
|
348
|
-
result = self._client.get(key)
|
|
349
|
-
|
|
350
|
-
# Add to local cache if successful
|
|
351
|
-
if result and self._local_cache_enabled:
|
|
352
|
-
self._add_to_local_cache(key, str(result))
|
|
353
|
-
|
|
354
|
-
return str(result) if result else None
|
|
355
|
-
|
|
356
|
-
def _set(self, key: str, value: str, ttl: int | None = None) -> bool:
|
|
357
|
-
"""Set value in Redis or mock with two-tier caching"""
|
|
358
|
-
# Mock mode path
|
|
359
|
-
if self.use_mock:
|
|
360
|
-
expires = datetime.now().timestamp() + ttl if ttl else None
|
|
361
|
-
self._mock_storage[key] = (value, expires)
|
|
362
|
-
|
|
363
|
-
# Update local cache in mock mode too
|
|
364
|
-
if self._local_cache_enabled:
|
|
365
|
-
self._add_to_local_cache(key, value)
|
|
366
|
-
|
|
367
|
-
return True
|
|
368
|
-
|
|
369
|
-
# Real Redis path
|
|
370
|
-
if self._client is None:
|
|
371
|
-
return False
|
|
372
|
-
|
|
373
|
-
# Set in Redis
|
|
374
|
-
if ttl:
|
|
375
|
-
self._client.setex(key, ttl, value)
|
|
376
|
-
else:
|
|
377
|
-
result = self._client.set(key, value)
|
|
378
|
-
if not result:
|
|
379
|
-
return False
|
|
380
|
-
|
|
381
|
-
# Update local cache if enabled
|
|
382
|
-
if self._local_cache_enabled:
|
|
383
|
-
self._add_to_local_cache(key, value)
|
|
384
|
-
|
|
385
|
-
return True
|
|
386
|
-
|
|
387
|
-
def _delete(self, key: str) -> bool:
|
|
388
|
-
"""Delete key from Redis or mock and local cache"""
|
|
389
|
-
# Mock mode path
|
|
390
|
-
if self.use_mock:
|
|
391
|
-
deleted = False
|
|
392
|
-
if key in self._mock_storage:
|
|
393
|
-
del self._mock_storage[key]
|
|
394
|
-
deleted = True
|
|
395
|
-
|
|
396
|
-
# Remove from local cache if present
|
|
397
|
-
if self._local_cache_enabled and key in self._local_cache:
|
|
398
|
-
del self._local_cache[key]
|
|
399
|
-
|
|
400
|
-
return deleted
|
|
401
|
-
|
|
402
|
-
# Real Redis path
|
|
403
|
-
if self._client is None:
|
|
404
|
-
return False
|
|
405
|
-
|
|
406
|
-
# Delete from Redis
|
|
407
|
-
result = bool(self._client.delete(key) > 0)
|
|
408
|
-
|
|
409
|
-
# Also remove from local cache if present
|
|
410
|
-
if self._local_cache_enabled and key in self._local_cache:
|
|
411
|
-
del self._local_cache[key]
|
|
412
|
-
|
|
413
|
-
return result
|
|
414
|
-
|
|
415
|
-
def _keys(self, pattern: str) -> list[str]:
|
|
416
|
-
"""Get keys matching pattern"""
|
|
417
|
-
if self.use_mock:
|
|
418
|
-
import fnmatch
|
|
419
|
-
|
|
420
|
-
# Use list comp for small result sets (typical <1000 keys)
|
|
421
|
-
return [k for k in self._mock_storage.keys() if fnmatch.fnmatch(k, pattern)]
|
|
422
|
-
if self._client is None:
|
|
423
|
-
return []
|
|
424
|
-
keys = self._client.keys(pattern)
|
|
425
|
-
# Convert bytes to strings - needed for API return type
|
|
426
|
-
return [k.decode() if isinstance(k, bytes) else str(k) for k in keys]
|
|
427
|
-
|
|
428
|
-
# === Local LRU Cache Methods ===
|
|
429
|
-
|
|
430
|
-
def _add_to_local_cache(self, key: str, value: str) -> None:
|
|
431
|
-
"""Add entry to local cache with LRU eviction.
|
|
432
|
-
|
|
433
|
-
Args:
|
|
434
|
-
key: Cache key
|
|
435
|
-
value: Value to cache
|
|
436
|
-
"""
|
|
437
|
-
now = time.time()
|
|
438
|
-
|
|
439
|
-
# Evict oldest entry if cache is full
|
|
440
|
-
if len(self._local_cache) >= self._local_cache_max_size:
|
|
441
|
-
# Find key with oldest last_access time
|
|
442
|
-
oldest_key = min(self._local_cache, key=lambda k: self._local_cache[k][2])
|
|
443
|
-
del self._local_cache[oldest_key]
|
|
444
|
-
|
|
445
|
-
# Add new entry: (value, timestamp, last_access)
|
|
446
|
-
self._local_cache[key] = (value, now, now)
|
|
447
|
-
|
|
448
|
-
def clear_local_cache(self) -> int:
|
|
449
|
-
"""Clear all entries from local cache.
|
|
450
|
-
|
|
451
|
-
Returns:
|
|
452
|
-
Number of entries cleared
|
|
453
|
-
"""
|
|
454
|
-
count = len(self._local_cache)
|
|
455
|
-
self._local_cache.clear()
|
|
456
|
-
self._local_cache_hits = 0
|
|
457
|
-
self._local_cache_misses = 0
|
|
458
|
-
logger.info("local_cache_cleared", entries_cleared=count)
|
|
459
|
-
return count
|
|
460
|
-
|
|
461
|
-
def get_local_cache_stats(self) -> dict:
|
|
462
|
-
"""Get local cache performance statistics.
|
|
463
|
-
|
|
464
|
-
Returns:
|
|
465
|
-
Dict with cache stats (hits, misses, hit_rate, size)
|
|
466
|
-
"""
|
|
467
|
-
total = self._local_cache_hits + self._local_cache_misses
|
|
468
|
-
hit_rate = (self._local_cache_hits / total * 100) if total > 0 else 0.0
|
|
469
|
-
|
|
470
|
-
return {
|
|
471
|
-
"enabled": self._local_cache_enabled,
|
|
472
|
-
"size": len(self._local_cache),
|
|
473
|
-
"max_size": self._local_cache_max_size,
|
|
474
|
-
"hits": self._local_cache_hits,
|
|
475
|
-
"misses": self._local_cache_misses,
|
|
476
|
-
"hit_rate": hit_rate,
|
|
477
|
-
"total_requests": total,
|
|
478
|
-
}
|
|
479
|
-
|
|
480
|
-
# === Security Methods ===
|
|
481
|
-
|
|
482
|
-
def _sanitize_data(self, data: Any) -> tuple[Any, int]:
|
|
483
|
-
"""Sanitize data by scrubbing PII and checking for secrets.
|
|
484
|
-
|
|
485
|
-
Args:
|
|
486
|
-
data: Data to sanitize (dict, list, or str)
|
|
487
|
-
|
|
488
|
-
Returns:
|
|
489
|
-
Tuple of (sanitized_data, pii_count)
|
|
490
|
-
|
|
491
|
-
Raises:
|
|
492
|
-
SecurityError: If secrets are detected and blocking is enabled
|
|
493
|
-
|
|
494
|
-
"""
|
|
495
|
-
pii_count = 0
|
|
496
|
-
|
|
497
|
-
if data is None:
|
|
498
|
-
return data, 0
|
|
499
|
-
|
|
500
|
-
# Convert data to string for scanning
|
|
501
|
-
if isinstance(data, dict):
|
|
502
|
-
data_str = json.dumps(data)
|
|
503
|
-
elif isinstance(data, list):
|
|
504
|
-
data_str = json.dumps(data)
|
|
505
|
-
elif isinstance(data, str):
|
|
506
|
-
data_str = data
|
|
507
|
-
else:
|
|
508
|
-
# For other types, convert to string
|
|
509
|
-
data_str = str(data)
|
|
510
|
-
|
|
511
|
-
# Check for secrets first (before modifying data)
|
|
512
|
-
if self._secrets_detector is not None:
|
|
513
|
-
detections = self._secrets_detector.detect(data_str)
|
|
514
|
-
# Block critical and high severity secrets
|
|
515
|
-
critical_secrets = [
|
|
516
|
-
d
|
|
517
|
-
for d in detections
|
|
518
|
-
if d.severity in (SecretSeverity.CRITICAL, SecretSeverity.HIGH)
|
|
519
|
-
]
|
|
520
|
-
if critical_secrets:
|
|
521
|
-
self._metrics.secrets_blocked_total += len(critical_secrets)
|
|
522
|
-
secret_types = [d.secret_type.value for d in critical_secrets]
|
|
523
|
-
logger.warning(
|
|
524
|
-
"secrets_detected_blocked",
|
|
525
|
-
secret_types=secret_types,
|
|
526
|
-
count=len(critical_secrets),
|
|
527
|
-
)
|
|
528
|
-
raise SecurityError(
|
|
529
|
-
f"Cannot store data containing secrets: {secret_types}. "
|
|
530
|
-
"Remove sensitive credentials before storing."
|
|
531
|
-
)
|
|
532
|
-
|
|
533
|
-
# Scrub PII
|
|
534
|
-
if self._pii_scrubber is not None:
|
|
535
|
-
sanitized_str, pii_detections = self._pii_scrubber.scrub(data_str)
|
|
536
|
-
pii_count = len(pii_detections)
|
|
537
|
-
|
|
538
|
-
if pii_count > 0:
|
|
539
|
-
self._metrics.pii_scrubbed_total += pii_count
|
|
540
|
-
self._metrics.pii_scrub_operations += 1
|
|
541
|
-
logger.debug(
|
|
542
|
-
"pii_scrubbed",
|
|
543
|
-
pii_count=pii_count,
|
|
544
|
-
pii_types=[d.pii_type for d in pii_detections],
|
|
545
|
-
)
|
|
546
|
-
|
|
547
|
-
# Convert back to original type
|
|
548
|
-
if isinstance(data, dict):
|
|
549
|
-
try:
|
|
550
|
-
return json.loads(sanitized_str), pii_count
|
|
551
|
-
except json.JSONDecodeError:
|
|
552
|
-
# If PII scrubbing broke JSON structure, return original
|
|
553
|
-
# This can happen if regex matches part of JSON syntax
|
|
554
|
-
logger.warning("pii_scrubbing_broke_json_returning_original")
|
|
555
|
-
return data, 0
|
|
556
|
-
elif isinstance(data, list):
|
|
557
|
-
try:
|
|
558
|
-
return json.loads(sanitized_str), pii_count
|
|
559
|
-
except json.JSONDecodeError:
|
|
560
|
-
logger.warning("pii_scrubbing_broke_json_returning_original")
|
|
561
|
-
return data, 0
|
|
562
|
-
else:
|
|
563
|
-
return sanitized_str, pii_count
|
|
564
|
-
|
|
565
|
-
return data, pii_count
|
|
566
|
-
|
|
567
|
-
# === Working Memory (Stash/Retrieve) ===
|
|
568
|
-
|
|
569
|
-
def stash(
|
|
570
|
-
self,
|
|
571
|
-
key: str,
|
|
572
|
-
data: Any,
|
|
573
|
-
credentials: AgentCredentials,
|
|
574
|
-
ttl: TTLStrategy = TTLStrategy.WORKING_RESULTS,
|
|
575
|
-
skip_sanitization: bool = False,
|
|
576
|
-
) -> bool:
|
|
577
|
-
"""Stash data in short-term memory
|
|
578
|
-
|
|
579
|
-
Args:
|
|
580
|
-
key: Unique key for the data
|
|
581
|
-
data: Data to store (will be JSON serialized)
|
|
582
|
-
credentials: Agent credentials
|
|
583
|
-
ttl: Time-to-live strategy
|
|
584
|
-
skip_sanitization: Skip PII scrubbing and secrets detection (use with caution)
|
|
585
|
-
|
|
586
|
-
Returns:
|
|
587
|
-
True if successful
|
|
588
|
-
|
|
589
|
-
Raises:
|
|
590
|
-
ValueError: If key is empty or invalid
|
|
591
|
-
PermissionError: If credentials lack write access
|
|
592
|
-
SecurityError: If secrets are detected in data (when secrets_detection_enabled)
|
|
593
|
-
|
|
594
|
-
Note:
|
|
595
|
-
PII (emails, SSNs, phone numbers, etc.) is automatically scrubbed
|
|
596
|
-
before storage unless skip_sanitization=True or pii_scrub_enabled=False.
|
|
597
|
-
Secrets (API keys, passwords, etc.) will block storage by default.
|
|
598
|
-
|
|
599
|
-
Example:
|
|
600
|
-
>>> memory.stash("analysis_v1", {"findings": [...]}, creds)
|
|
601
|
-
|
|
602
|
-
"""
|
|
603
|
-
# Pattern 1: String ID validation
|
|
604
|
-
if not key or not key.strip():
|
|
605
|
-
raise ValueError(f"key cannot be empty. Got: {key!r}")
|
|
606
|
-
|
|
607
|
-
if not credentials.can_stage():
|
|
608
|
-
raise PermissionError(
|
|
609
|
-
f"Agent {credentials.agent_id} (Tier {credentials.tier.name}) "
|
|
610
|
-
"cannot write to memory. Requires CONTRIBUTOR or higher.",
|
|
611
|
-
)
|
|
612
|
-
|
|
613
|
-
# Sanitize data (PII scrubbing + secrets detection)
|
|
614
|
-
if not skip_sanitization:
|
|
615
|
-
data, pii_count = self._sanitize_data(data)
|
|
616
|
-
if pii_count > 0:
|
|
617
|
-
logger.info(
|
|
618
|
-
"stash_pii_scrubbed",
|
|
619
|
-
key=key,
|
|
620
|
-
agent_id=credentials.agent_id,
|
|
621
|
-
pii_count=pii_count,
|
|
622
|
-
)
|
|
623
|
-
|
|
624
|
-
full_key = f"{self.PREFIX_WORKING}{credentials.agent_id}:{key}"
|
|
625
|
-
payload = {
|
|
626
|
-
"data": data,
|
|
627
|
-
"agent_id": credentials.agent_id,
|
|
628
|
-
"stashed_at": datetime.now().isoformat(),
|
|
629
|
-
}
|
|
630
|
-
return self._set(full_key, json.dumps(payload), ttl.value)
|
|
631
|
-
|
|
632
|
-
def retrieve(
|
|
633
|
-
self,
|
|
634
|
-
key: str,
|
|
635
|
-
credentials: AgentCredentials,
|
|
636
|
-
agent_id: str | None = None,
|
|
637
|
-
) -> Any | None:
|
|
638
|
-
"""Retrieve data from short-term memory
|
|
639
|
-
|
|
640
|
-
Args:
|
|
641
|
-
key: Key to retrieve
|
|
642
|
-
credentials: Agent credentials
|
|
643
|
-
agent_id: Owner agent ID (defaults to credentials agent)
|
|
644
|
-
|
|
645
|
-
Returns:
|
|
646
|
-
Retrieved data or None if not found
|
|
647
|
-
|
|
648
|
-
Raises:
|
|
649
|
-
ValueError: If key is empty or invalid
|
|
650
|
-
|
|
651
|
-
Example:
|
|
652
|
-
>>> data = memory.retrieve("analysis_v1", creds)
|
|
653
|
-
|
|
654
|
-
"""
|
|
655
|
-
# Pattern 1: String ID validation
|
|
656
|
-
if not key or not key.strip():
|
|
657
|
-
raise ValueError(f"key cannot be empty. Got: {key!r}")
|
|
658
|
-
|
|
659
|
-
owner = agent_id or credentials.agent_id
|
|
660
|
-
full_key = f"{self.PREFIX_WORKING}{owner}:{key}"
|
|
661
|
-
raw = self._get(full_key)
|
|
662
|
-
|
|
663
|
-
if raw is None:
|
|
664
|
-
return None
|
|
665
|
-
|
|
666
|
-
payload = json.loads(raw)
|
|
667
|
-
return payload.get("data")
|
|
668
|
-
|
|
669
|
-
def clear_working_memory(self, credentials: AgentCredentials) -> int:
|
|
670
|
-
"""Clear all working memory for an agent
|
|
671
|
-
|
|
672
|
-
Args:
|
|
673
|
-
credentials: Agent credentials (must own the memory or be Steward)
|
|
674
|
-
|
|
675
|
-
Returns:
|
|
676
|
-
Number of keys deleted
|
|
677
|
-
|
|
678
|
-
"""
|
|
679
|
-
pattern = f"{self.PREFIX_WORKING}{credentials.agent_id}:*"
|
|
680
|
-
keys = self._keys(pattern)
|
|
681
|
-
count = 0
|
|
682
|
-
for key in keys:
|
|
683
|
-
if self._delete(key):
|
|
684
|
-
count += 1
|
|
685
|
-
return count
|
|
686
|
-
|
|
687
|
-
# === Pattern Staging ===
|
|
688
|
-
|
|
689
|
-
def stage_pattern(
    self,
    pattern: StagedPattern,
    credentials: AgentCredentials,
) -> bool:
    """Stage a pattern for validation.

    Per EMPATHY_PHILOSOPHY.md: Patterns must be staged before
    being promoted to the active library.

    Args:
        pattern: Pattern to stage
        credentials: Must be CONTRIBUTOR or higher

    Returns:
        True if staged successfully

    Raises:
        TypeError: If pattern is not StagedPattern
        PermissionError: If credentials lack staging access

    """
    # Pattern 5: Type validation
    if not isinstance(pattern, StagedPattern):
        raise TypeError(f"pattern must be StagedPattern, got {type(pattern).__name__}")

    if not credentials.can_stage():
        raise PermissionError(
            f"Agent {credentials.agent_id} cannot stage patterns. "
            "Requires CONTRIBUTOR tier or higher.",
        )

    staged_key = f"{self.PREFIX_STAGED}{pattern.pattern_id}"
    serialized = json.dumps(pattern.to_dict())
    return self._set(staged_key, serialized, TTLStrategy.STAGED_PATTERNS.value)
|
|
727
|
-
|
|
728
|
-
def get_staged_pattern(
    self,
    pattern_id: str,
    credentials: AgentCredentials,
) -> StagedPattern | None:
    """Retrieve a staged pattern.

    Args:
        pattern_id: Pattern ID
        credentials: Any tier can read

    Returns:
        StagedPattern or None

    Raises:
        ValueError: If pattern_id is empty

    """
    # Pattern 1: String ID validation
    if not pattern_id or not pattern_id.strip():
        raise ValueError(f"pattern_id cannot be empty. Got: {pattern_id!r}")

    raw = self._get(f"{self.PREFIX_STAGED}{pattern_id}")
    if raw is None:
        return None
    return StagedPattern.from_dict(json.loads(raw))
|
|
757
|
-
|
|
758
|
-
def list_staged_patterns(
    self,
    credentials: AgentCredentials,
) -> list[StagedPattern]:
    """List all staged patterns awaiting validation.

    Args:
        credentials: Any tier can read

    Returns:
        List of staged patterns

    """
    # Fetch each staged key; entries that expired between KEYS and GET
    # come back falsy and are filtered out.
    raw_values = (self._get(k) for k in self._keys(f"{self.PREFIX_STAGED}*"))
    return [StagedPattern.from_dict(json.loads(raw)) for raw in raw_values if raw]
|
|
781
|
-
|
|
782
|
-
def promote_pattern(
    self,
    pattern_id: str,
    credentials: AgentCredentials,
) -> StagedPattern | None:
    """Promote staged pattern (remove from staging for library add).

    Args:
        pattern_id: Pattern to promote
        credentials: Must be VALIDATOR or higher

    Returns:
        The promoted pattern (for adding to PatternLibrary), or None
        if no such staged pattern exists

    Raises:
        PermissionError: If credentials lack VALIDATOR access

    """
    if not credentials.can_validate():
        raise PermissionError(
            f"Agent {credentials.agent_id} cannot promote patterns. "
            "Requires VALIDATOR tier or higher.",
        )

    # get_staged_pattern also validates pattern_id for us.
    staged = self.get_staged_pattern(pattern_id, credentials)
    if staged is None:
        return None

    # Remove from staging; the caller adds it to the library.
    self._delete(f"{self.PREFIX_STAGED}{pattern_id}")
    return staged
|
|
808
|
-
|
|
809
|
-
def reject_pattern(
    self,
    pattern_id: str,
    credentials: AgentCredentials,
    reason: str = "",
) -> bool:
    """Reject a staged pattern.

    Args:
        pattern_id: Pattern to reject
        credentials: Must be VALIDATOR or higher
        reason: Rejection reason (for audit)

    Returns:
        True if rejected

    Raises:
        ValueError: If pattern_id is empty
        PermissionError: If credentials lack VALIDATOR access

    """
    if not credentials.can_validate():
        raise PermissionError(
            f"Agent {credentials.agent_id} cannot reject patterns. "
            "Requires VALIDATOR tier or higher.",
        )

    # Pattern 1: String ID validation (consistent with get_staged_pattern)
    if not pattern_id or not pattern_id.strip():
        raise ValueError(f"pattern_id cannot be empty. Got: {pattern_id!r}")

    key = f"{self.PREFIX_STAGED}{pattern_id}"
    deleted = self._delete(key)
    if deleted:
        # FIX: `reason` was documented "for audit" but never recorded anywhere.
        # Emit an audit log entry so rejections are traceable.
        logger.info(
            "pattern_rejected",
            pattern_id=pattern_id,
            agent_id=credentials.agent_id,
            reason=reason,
        )
    return deleted
|
|
834
|
-
|
|
835
|
-
# === Conflict Negotiation ===
|
|
836
|
-
|
|
837
|
-
def create_conflict_context(
    self,
    conflict_id: str,
    positions: dict[str, Any],
    interests: dict[str, list[str]],
    credentials: AgentCredentials,
    batna: str | None = None,
) -> ConflictContext:
    """Create context for principled negotiation.

    Per Getting to Yes framework:
    - Separate positions from interests
    - Define BATNA before negotiating

    Args:
        conflict_id: Unique conflict identifier
        positions: agent_id -> their stated position
        interests: agent_id -> underlying interests
        credentials: Must be CONTRIBUTOR or higher
        batna: Best Alternative to Negotiated Agreement

    Returns:
        ConflictContext for resolution

    Raises:
        ValueError: If conflict_id is empty
        TypeError: If positions or interests are not dicts
        PermissionError: If credentials lack permission

    """
    # Pattern 1: String ID validation
    if not conflict_id or not conflict_id.strip():
        raise ValueError(f"conflict_id cannot be empty. Got: {conflict_id!r}")

    # Pattern 5: Type validation
    if not isinstance(positions, dict):
        raise TypeError(f"positions must be dict, got {type(positions).__name__}")
    if not isinstance(interests, dict):
        raise TypeError(f"interests must be dict, got {type(interests).__name__}")

    if not credentials.can_stage():
        raise PermissionError(
            f"Agent {credentials.agent_id} cannot create conflict context. "
            "Requires CONTRIBUTOR tier or higher.",
        )

    negotiation = ConflictContext(
        conflict_id=conflict_id,
        positions=positions,
        interests=interests,
        batna=batna,
    )

    # Persist under the conflict namespace with the conflict-context TTL.
    self._set(
        f"{self.PREFIX_CONFLICT}{conflict_id}",
        json.dumps(negotiation.to_dict()),
        TTLStrategy.CONFLICT_CONTEXT.value,
    )
    return negotiation
|
|
898
|
-
|
|
899
|
-
def get_conflict_context(
    self,
    conflict_id: str,
    credentials: AgentCredentials,
) -> ConflictContext | None:
    """Retrieve conflict context.

    Args:
        conflict_id: Conflict identifier
        credentials: Any tier can read

    Returns:
        ConflictContext or None

    Raises:
        ValueError: If conflict_id is empty

    """
    # Pattern 1: String ID validation
    if not conflict_id or not conflict_id.strip():
        raise ValueError(f"conflict_id cannot be empty. Got: {conflict_id!r}")

    raw = self._get(f"{self.PREFIX_CONFLICT}{conflict_id}")
    if raw is None:
        return None
    return ConflictContext.from_dict(json.loads(raw))
|
|
928
|
-
|
|
929
|
-
def resolve_conflict(
    self,
    conflict_id: str,
    resolution: str,
    credentials: AgentCredentials,
) -> bool:
    """Mark conflict as resolved.

    Args:
        conflict_id: Conflict to resolve
        resolution: How it was resolved
        credentials: Must be VALIDATOR or higher

    Returns:
        True if resolved (False when the conflict does not exist)

    Raises:
        PermissionError: If credentials lack VALIDATOR access

    """
    if not credentials.can_validate():
        raise PermissionError(
            f"Agent {credentials.agent_id} cannot resolve conflicts. "
            "Requires VALIDATOR tier or higher.",
        )

    # Delegates conflict_id validation to get_conflict_context.
    context = self.get_conflict_context(conflict_id, credentials)
    if context is None:
        return False

    context.resolved = True
    context.resolution = resolution

    # Keep resolved conflicts longer for audit
    self._set(
        f"{self.PREFIX_CONFLICT}{conflict_id}",
        json.dumps(context.to_dict()),
        TTLStrategy.CONFLICT_CONTEXT.value,
    )
    return True
|
|
963
|
-
|
|
964
|
-
# === Coordination Signals ===
|
|
965
|
-
# REMOVED in v5.0 - Use attune.telemetry.CoordinationSignals instead
|
|
966
|
-
# - send_signal() → CoordinationSignals.signal()
|
|
967
|
-
# - receive_signals() → CoordinationSignals.get_pending_signals()
|
|
968
|
-
|
|
969
|
-
# === Session Management ===
|
|
970
|
-
|
|
971
|
-
def create_session(
    self,
    session_id: str,
    credentials: AgentCredentials,
    metadata: dict | None = None,
) -> bool:
    """Create a collaboration session.

    Args:
        session_id: Unique session identifier
        credentials: Session creator
        metadata: Optional session metadata

    Returns:
        True if created

    Raises:
        ValueError: If session_id is empty
        TypeError: If metadata is not dict

    """
    # Pattern 1: String ID validation
    if not session_id or not session_id.strip():
        raise ValueError(f"session_id cannot be empty. Got: {session_id!r}")

    # Pattern 5: Type validation
    if metadata is not None and not isinstance(metadata, dict):
        raise TypeError(f"metadata must be dict, got {type(metadata).__name__}")

    # Creator is automatically the first participant.
    record = {
        "session_id": session_id,
        "created_by": credentials.agent_id,
        "created_at": datetime.now().isoformat(),
        "participants": [credentials.agent_id],
        "metadata": metadata or {},
    }
    return self._set(
        f"{self.PREFIX_SESSION}{session_id}",
        json.dumps(record),
        TTLStrategy.SESSION.value,
    )
|
|
1009
|
-
|
|
1010
|
-
def join_session(
    self,
    session_id: str,
    credentials: AgentCredentials,
) -> bool:
    """Join an existing session.

    Args:
        session_id: Session to join
        credentials: Joining agent

    Returns:
        True if joined (False when the session does not exist)

    Raises:
        ValueError: If session_id is empty

    """
    # Pattern 1: String ID validation
    if not session_id or not session_id.strip():
        raise ValueError(f"session_id cannot be empty. Got: {session_id!r}")

    key = f"{self.PREFIX_SESSION}{session_id}"
    raw = self._get(key)
    if raw is None:
        return False

    record = json.loads(raw)
    joiner = credentials.agent_id
    # Idempotent join: do not duplicate an existing participant.
    if joiner not in record["participants"]:
        record["participants"].append(joiner)

    # Re-writing also refreshes the session TTL.
    return self._set(key, json.dumps(record), TTLStrategy.SESSION.value)
|
|
1043
|
-
|
|
1044
|
-
def get_session(
    self,
    session_id: str,
    credentials: AgentCredentials,
) -> dict | None:
    """Get session information.

    Args:
        session_id: Session identifier
        credentials: Any participant can read

    Returns:
        Session data or None

    Raises:
        ValueError: If session_id is empty

    """
    # Pattern 1: String ID validation — previously missing here, though
    # create_session and join_session both enforce it.
    if not session_id or not session_id.strip():
        raise ValueError(f"session_id cannot be empty. Got: {session_id!r}")

    key = f"{self.PREFIX_SESSION}{session_id}"
    raw = self._get(key)

    if raw is None:
        return None

    result: dict = json.loads(raw)
    return result
|
|
1067
|
-
|
|
1068
|
-
# === Health Check ===
|
|
1069
|
-
|
|
1070
|
-
def ping(self) -> bool:
    """Check Redis connection health.

    Returns:
        True if connected and responsive (the mock backend is always healthy)

    """
    if self.use_mock:
        return True
    if self._client is None:
        return False
    try:
        return bool(self._client.ping())
    except Exception:
        # Any transport/connection failure means "not healthy".
        return False
|
|
1085
|
-
|
|
1086
|
-
def get_stats(self) -> dict:
    """Get memory statistics.

    Returns:
        Dict with memory stats; shape depends on backend mode
        ("mock", "redis", or "disconnected")

    """
    if self.use_mock:
        def count_with_prefix(prefix: str) -> int:
            # Generator expression keeps counting memory-efficient.
            return sum(1 for k in self._mock_storage if k.startswith(prefix))

        return {
            "mode": "mock",
            "total_keys": len(self._mock_storage),
            "working_keys": count_with_prefix(self.PREFIX_WORKING),
            "staged_keys": count_with_prefix(self.PREFIX_STAGED),
            "conflict_keys": count_with_prefix(self.PREFIX_CONFLICT),
        }

    if self._client is None:
        return {"mode": "disconnected", "error": "No Redis client"}

    info = self._client.info("memory")
    return {
        "mode": "redis",
        "used_memory": info.get("used_memory_human"),
        "peak_memory": info.get("used_memory_peak_human"),
        "total_keys": self._client.dbsize(),
        "working_keys": len(self._keys(f"{self.PREFIX_WORKING}*")),
        "staged_keys": len(self._keys(f"{self.PREFIX_STAGED}*")),
        "conflict_keys": len(self._keys(f"{self.PREFIX_CONFLICT}*")),
    }
|
|
1121
|
-
|
|
1122
|
-
def get_metrics(self) -> dict:
    """Get operation metrics for observability.

    Returns:
        Dict with operation counts, latencies, and success rates

    """
    # Metrics accumulate in self._metrics; expose a plain-dict snapshot.
    return self._metrics.to_dict()
|
|
1130
|
-
|
|
1131
|
-
def reset_metrics(self) -> None:
    """Reset all metrics to zero by swapping in a fresh collector."""
    self._metrics = RedisMetrics()
|
|
1134
|
-
|
|
1135
|
-
# =========================================================================
|
|
1136
|
-
# BATCH OPERATIONS
|
|
1137
|
-
# =========================================================================
|
|
1138
|
-
|
|
1139
|
-
def stash_batch(
    self,
    items: list[tuple[str, Any]],
    credentials: AgentCredentials,
    ttl: TTLStrategy = TTLStrategy.WORKING_RESULTS,
) -> int:
    """Stash multiple items in a single operation.

    Uses Redis pipeline for efficiency (reduces network round-trips);
    the mock backend writes directly into local storage.

    Args:
        items: List of (key, data) tuples
        credentials: Agent credentials
        ttl: Time-to-live strategy (applied to all items)

    Returns:
        Number of items successfully stashed

    Raises:
        TypeError: If items is not a list
        PermissionError: If credentials lack write access

    Example:
        >>> items = [("key1", {"a": 1}), ("key2", {"b": 2})]
        >>> count = memory.stash_batch(items, creds)

    """
    # Pattern 5: Type validation
    if not isinstance(items, list):
        raise TypeError(f"items must be list, got {type(items).__name__}")

    if not credentials.can_stage():
        raise PermissionError(
            f"Agent {credentials.agent_id} cannot write to memory. "
            "Requires CONTRIBUTOR tier or higher.",
        )

    if not items:
        return 0

    start_time = time.perf_counter()

    def _serialize(data: Any) -> str:
        # Single payload builder shared by both backends (was duplicated
        # verbatim in the mock and Redis branches).
        return json.dumps(
            {
                "data": data,
                "agent_id": credentials.agent_id,
                "stashed_at": datetime.now().isoformat(),
            }
        )

    if self.use_mock:
        count = 0
        for key, data in items:
            full_key = f"{self.PREFIX_WORKING}{credentials.agent_id}:{key}"
            expires = datetime.now().timestamp() + ttl.value
            self._mock_storage[full_key] = (_serialize(data), expires)
            count += 1
        latency_ms = (time.perf_counter() - start_time) * 1000
        self._metrics.record_operation("stash_batch", latency_ms)
        return count

    if self._client is None:
        return 0

    # Pipeline all SETEX calls into one round-trip.
    pipe = self._client.pipeline()
    for key, data in items:
        full_key = f"{self.PREFIX_WORKING}{credentials.agent_id}:{key}"
        pipe.setex(full_key, ttl.value, _serialize(data))

    results = pipe.execute()
    count = sum(1 for r in results if r)
    latency_ms = (time.perf_counter() - start_time) * 1000
    self._metrics.record_operation("stash_batch", latency_ms)

    logger.info("batch_stash_complete", count=count, total=len(items))
    return count
|
|
1217
|
-
|
|
1218
|
-
def retrieve_batch(
    self,
    keys: list[str],
    credentials: AgentCredentials,
    agent_id: str | None = None,
) -> dict[str, Any]:
    """Retrieve multiple items in a single operation.

    Args:
        keys: List of keys to retrieve
        credentials: Agent credentials
        agent_id: Owner agent ID (defaults to credentials agent)

    Returns:
        Dict mapping key to data (missing keys omitted)

    Example:
        >>> data = memory.retrieve_batch(["key1", "key2"], creds)
        >>> print(data["key1"])

    """
    if not keys:
        return {}

    start_time = time.perf_counter()
    owner = agent_id or credentials.agent_id
    results: dict[str, Any] = {}

    def _load_payload(raw: Any) -> dict:
        # BUG FIX: the previous code used json.loads(str(value)). When the
        # Redis client returns bytes (decode_responses=False), str(b'{...}')
        # produces "b'{...}'", which is invalid JSON. Decode bytes explicitly.
        text = raw.decode() if isinstance(raw, (bytes, bytearray)) else raw
        return json.loads(text)

    if self.use_mock:
        for key in keys:
            full_key = f"{self.PREFIX_WORKING}{owner}:{key}"
            if full_key in self._mock_storage:
                value, expires = self._mock_storage[full_key]
                if expires is None or datetime.now().timestamp() < expires:
                    results[key] = _load_payload(value).get("data")
        latency_ms = (time.perf_counter() - start_time) * 1000
        self._metrics.record_operation("retrieve_batch", latency_ms)
        return results

    if self._client is None:
        return {}

    # MGET fetches all values in a single round-trip.
    full_keys = [f"{self.PREFIX_WORKING}{owner}:{key}" for key in keys]
    values = self._client.mget(full_keys)

    for key, value in zip(keys, values, strict=False):
        if value:
            results[key] = _load_payload(value).get("data")

    latency_ms = (time.perf_counter() - start_time) * 1000
    self._metrics.record_operation("retrieve_batch", latency_ms)
    return results
|
|
1272
|
-
|
|
1273
|
-
# =========================================================================
|
|
1274
|
-
# SCAN-BASED PAGINATION
|
|
1275
|
-
# =========================================================================
|
|
1276
|
-
|
|
1277
|
-
def list_staged_patterns_paginated(
    self,
    credentials: AgentCredentials,
    cursor: str = "0",
    count: int = 100,
) -> PaginatedResult:
    """List staged patterns with pagination using SCAN.

    More efficient than list_staged_patterns() for large datasets.

    Args:
        credentials: Agent credentials
        cursor: Pagination cursor (start with "0")
        count: Maximum items per page

    Returns:
        PaginatedResult with items, cursor, and has_more flag

    Example:
        >>> result = memory.list_staged_patterns_paginated(creds, "0", 10)
        >>> for pattern in result.items:
        ...     print(pattern.name)
        >>> if result.has_more:
        ...     next_result = memory.list_staged_patterns_paginated(creds, result.cursor, 10)

    """
    start_time = time.perf_counter()
    pattern = f"{self.PREFIX_STAGED}*"

    if self.use_mock:
        import fnmatch

        # Mock pagination: cursor is a plain list index.
        matching = [k for k in self._mock_storage.keys() if fnmatch.fnmatch(k, pattern)]
        begin = int(cursor)
        end = begin + count
        page = matching[begin:end]

        items = []
        for mock_key in page:
            stored, expires = self._mock_storage[mock_key]
            # Skip lazily-expired entries.
            if expires is None or datetime.now().timestamp() < expires:
                items.append(StagedPattern.from_dict(json.loads(str(stored))))

        more = end < len(matching)
        next_cursor = str(end) if more else "0"

        latency_ms = (time.perf_counter() - start_time) * 1000
        self._metrics.record_operation("list_paginated", latency_ms)

        return PaginatedResult(
            items=items,
            cursor=next_cursor,
            has_more=more,
            total_scanned=len(page),
        )

    if self._client is None:
        return PaginatedResult(items=[], cursor="0", has_more=False)

    # Use SCAN for efficient iteration; cursor 0 means the scan is complete.
    next_cursor, scanned_keys = self._client.scan(cursor=int(cursor), match=pattern, count=count)

    items = []
    for scanned_key in scanned_keys:
        raw = self._client.get(scanned_key)
        if raw:
            items.append(StagedPattern.from_dict(json.loads(raw)))

    latency_ms = (time.perf_counter() - start_time) * 1000
    self._metrics.record_operation("list_paginated", latency_ms)

    return PaginatedResult(
        items=items,
        cursor=str(next_cursor),
        has_more=next_cursor != 0,
        total_scanned=len(scanned_keys),
    )
|
|
1356
|
-
|
|
1357
|
-
def scan_keys(
    self,
    pattern: str,
    cursor: str = "0",
    count: int = 100,
) -> PaginatedResult:
    """Scan keys matching a pattern with pagination.

    Args:
        pattern: Key pattern (e.g., "empathy:working:*")
        cursor: Pagination cursor
        count: Items per page

    Returns:
        PaginatedResult with key strings

    """
    if self.use_mock:
        import fnmatch

        all_keys = [k for k in self._mock_storage.keys() if fnmatch.fnmatch(k, pattern)]
        start_idx = int(cursor)
        end_idx = start_idx + count
        page_keys = all_keys[start_idx:end_idx]
        new_cursor = str(end_idx) if end_idx < len(all_keys) else "0"
        has_more = end_idx < len(all_keys)
        return PaginatedResult(items=page_keys, cursor=new_cursor, has_more=has_more)

    if self._client is None:
        return PaginatedResult(items=[], cursor="0", has_more=False)

    new_cursor, keys = self._client.scan(cursor=int(cursor), match=pattern, count=count)
    return PaginatedResult(
        # BUG FIX: str(k) on a bytes key yields "b'key'" garbage when the
        # Redis client returns bytes; decode explicitly instead.
        items=[k.decode() if isinstance(k, bytes) else str(k) for k in keys],
        cursor=str(new_cursor),
        has_more=new_cursor != 0,
    )
|
|
1394
|
-
|
|
1395
|
-
# =========================================================================
|
|
1396
|
-
# PUB/SUB FOR REAL-TIME NOTIFICATIONS
|
|
1397
|
-
# =========================================================================
|
|
1398
|
-
|
|
1399
|
-
def publish(
    self,
    channel: str,
    message: dict,
    credentials: AgentCredentials,
) -> int:
    """Publish a message to a channel for real-time notifications.

    Args:
        channel: Channel name (will be prefixed)
        message: Message payload (dict)
        credentials: Agent credentials (must be CONTRIBUTOR+)

    Returns:
        Number of subscribers that received the message

    Raises:
        PermissionError: If credentials lack publish access

    Example:
        >>> memory.publish("agent_signals", {"event": "task_complete", "task_id": "123"}, creds)

    """
    if not credentials.can_stage():
        raise PermissionError(
            f"Agent {credentials.agent_id} cannot publish. Requires CONTRIBUTOR tier or higher.",
        )

    start_time = time.perf_counter()
    full_channel = f"{self.PREFIX_PUBSUB}{channel}"

    # Envelope carries provenance alongside the caller's message.
    payload = {
        "channel": channel,
        "from_agent": credentials.agent_id,
        "timestamp": datetime.now().isoformat(),
        "data": message,
    }

    if self.use_mock:
        # Mock backend: dispatch synchronously to registered handlers;
        # a failing handler must not break the others.
        handlers = self._mock_pubsub_handlers.get(full_channel, [])
        for handler in handlers:
            try:
                handler(payload)
            except Exception as e:
                logger.warning("pubsub_handler_error", channel=channel, error=str(e))
        self._metrics.record_operation("publish", (time.perf_counter() - start_time) * 1000)
        return len(handlers)

    if self._client is None:
        return 0

    receiver_count = self._client.publish(full_channel, json.dumps(payload))
    self._metrics.record_operation("publish", (time.perf_counter() - start_time) * 1000)

    logger.debug("pubsub_published", channel=channel, subscribers=receiver_count)
    return int(receiver_count)
|
|
1454
|
-
|
|
1455
|
-
def subscribe(
    self,
    channel: str,
    handler: Callable[[dict], None],
    credentials: AgentCredentials | None = None,
) -> bool:
    """Subscribe to a channel for real-time notifications.

    Args:
        channel: Channel name to subscribe to
        handler: Callback function receiving message dict
        credentials: Optional credentials (any tier can subscribe)

    Returns:
        True if subscribed successfully

    Example:
        >>> def on_message(msg):
        ...     print(f"Received: {msg['data']}")
        >>> memory.subscribe("agent_signals", on_message)

    """
    full_channel = f"{self.PREFIX_PUBSUB}{channel}"

    if self.use_mock:
        self._mock_pubsub_handlers.setdefault(full_channel, []).append(handler)
        logger.info("pubsub_subscribed_mock", channel=channel)
        return True

    if self._client is None:
        return False

    # Register the handler so _pubsub_message_handler can dispatch to it.
    self._subscriptions.setdefault(full_channel, []).append(handler)

    # Lazily create the pubsub connection on first subscription.
    if self._pubsub is None:
        self._pubsub = self._client.pubsub()

    self._pubsub.subscribe(**{full_channel: self._pubsub_message_handler})

    # Start the background listener thread exactly once.
    if not self._pubsub_running:
        self._pubsub_running = True
        self._pubsub_thread = threading.Thread(
            target=self._pubsub_listener,
            daemon=True,
            name="redis-pubsub-listener",
        )
        self._pubsub_thread.start()

    logger.info("pubsub_subscribed", channel=channel)
    return True
|
|
1513
|
-
|
|
1514
|
-
def _pubsub_message_handler(self, message: dict) -> None:
    """Internal handler for pubsub messages.

    Decodes the raw redis-py message, parses its JSON body, and fans the
    payload out to every handler registered for the channel.
    """
    # Ignore subscribe/unsubscribe control messages.
    if message["type"] != "message":
        return

    channel = message["channel"]
    if isinstance(channel, bytes):
        channel = channel.decode()

    try:
        payload = json.loads(message["data"])
    except json.JSONDecodeError:
        # Non-JSON payloads are passed through untouched under "raw".
        payload = {"raw": message["data"]}

    # A failing handler must not prevent later handlers from running.
    for handler in self._subscriptions.get(channel, []):
        try:
            handler(payload)
        except Exception as e:
            logger.warning("pubsub_handler_error", channel=channel, error=str(e))
|
|
1534
|
-
|
|
1535
|
-
def _pubsub_listener(self) -> None:
    """Background thread for listening to pubsub messages.

    Polls get_message in a loop until close_pubsub() clears the running
    flag; transient errors are logged and retried after a short pause.
    """
    while self._pubsub_running and self._pubsub:
        try:
            # 1s timeout keeps the loop responsive to shutdown.
            self._pubsub.get_message(ignore_subscribe_messages=True, timeout=1.0)
        except Exception as e:
            logger.warning("pubsub_listener_error", error=str(e))
            time.sleep(1)
|
|
1543
|
-
|
|
1544
|
-
def unsubscribe(self, channel: str) -> bool:
    """Unsubscribe from a channel.

    Args:
        channel: Channel name to unsubscribe from

    Returns:
        True if unsubscribed successfully

    """
    full_channel = f"{self.PREFIX_PUBSUB}{channel}"

    if self.use_mock:
        # Drop all mock handlers for the channel; absent channel is fine.
        self._mock_pubsub_handlers.pop(full_channel, None)
        return True

    if self._pubsub is None:
        # Never subscribed to anything — nothing to undo.
        return False

    self._pubsub.unsubscribe(full_channel)
    self._subscriptions.pop(full_channel, None)
    return True
|
|
1566
|
-
|
|
1567
|
-
def close_pubsub(self) -> None:
    """Tear down pubsub: stop the listener thread, close the connection,
    and forget all channel handlers."""
    # Flag is checked by the listener loop; setting it False makes it exit.
    self._pubsub_running = False
    connection = self._pubsub
    if connection:
        connection.close()
        self._pubsub = None
    self._subscriptions.clear()
|
1574
|
-
|
|
1575
|
-
# =========================================================================
|
|
1576
|
-
# REDIS STREAMS FOR AUDIT TRAILS
|
|
1577
|
-
# =========================================================================
|
|
1578
|
-
|
|
1579
|
-
def stream_append(
|
|
1580
|
-
self,
|
|
1581
|
-
stream_name: str,
|
|
1582
|
-
data: dict,
|
|
1583
|
-
credentials: AgentCredentials,
|
|
1584
|
-
max_len: int = 10000,
|
|
1585
|
-
) -> str | None:
|
|
1586
|
-
"""Append an entry to a Redis Stream for audit trails.
|
|
1587
|
-
|
|
1588
|
-
Streams provide:
|
|
1589
|
-
- Ordered, persistent event log
|
|
1590
|
-
- Consumer groups for distributed processing
|
|
1591
|
-
- Time-based retention
|
|
1592
|
-
|
|
1593
|
-
Args:
|
|
1594
|
-
stream_name: Name of the stream
|
|
1595
|
-
data: Event data to append
|
|
1596
|
-
credentials: Agent credentials (must be CONTRIBUTOR+)
|
|
1597
|
-
max_len: Maximum stream length (older entries trimmed)
|
|
1598
|
-
|
|
1599
|
-
Returns:
|
|
1600
|
-
Entry ID if successful, None otherwise
|
|
1601
|
-
|
|
1602
|
-
Example:
|
|
1603
|
-
>>> entry_id = memory.stream_append("audit", {"action": "pattern_promoted", "pattern_id": "xyz"}, creds)
|
|
1604
|
-
|
|
1605
|
-
"""
|
|
1606
|
-
if not credentials.can_stage():
|
|
1607
|
-
raise PermissionError(
|
|
1608
|
-
f"Agent {credentials.agent_id} cannot write to stream. "
|
|
1609
|
-
"Requires CONTRIBUTOR tier or higher.",
|
|
1610
|
-
)
|
|
1611
|
-
|
|
1612
|
-
start_time = time.perf_counter()
|
|
1613
|
-
full_stream = f"{self.PREFIX_STREAM}{stream_name}"
|
|
1614
|
-
|
|
1615
|
-
entry = {
|
|
1616
|
-
"agent_id": credentials.agent_id,
|
|
1617
|
-
"timestamp": datetime.now().isoformat(),
|
|
1618
|
-
**{
|
|
1619
|
-
str(k): json.dumps(v) if isinstance(v, dict | list) else str(v)
|
|
1620
|
-
for k, v in data.items()
|
|
1621
|
-
},
|
|
1622
|
-
}
|
|
1623
|
-
|
|
1624
|
-
if self.use_mock:
|
|
1625
|
-
if full_stream not in self._mock_streams:
|
|
1626
|
-
self._mock_streams[full_stream] = []
|
|
1627
|
-
entry_id = f"{int(datetime.now().timestamp() * 1000)}-0"
|
|
1628
|
-
self._mock_streams[full_stream].append((entry_id, entry))
|
|
1629
|
-
# Trim to max_len
|
|
1630
|
-
if len(self._mock_streams[full_stream]) > max_len:
|
|
1631
|
-
self._mock_streams[full_stream] = self._mock_streams[full_stream][-max_len:]
|
|
1632
|
-
latency_ms = (time.perf_counter() - start_time) * 1000
|
|
1633
|
-
self._metrics.record_operation("stream_append", latency_ms)
|
|
1634
|
-
return entry_id
|
|
1635
|
-
|
|
1636
|
-
if self._client is None:
|
|
1637
|
-
return None
|
|
1638
|
-
|
|
1639
|
-
entry_id = self._client.xadd(full_stream, entry, maxlen=max_len)
|
|
1640
|
-
latency_ms = (time.perf_counter() - start_time) * 1000
|
|
1641
|
-
self._metrics.record_operation("stream_append", latency_ms)
|
|
1642
|
-
|
|
1643
|
-
return str(entry_id) if entry_id else None
|
|
1644
|
-
|
|
1645
|
-
def stream_read(
    self,
    stream_name: str,
    credentials: AgentCredentials,
    start_id: str = "0",
    count: int = 100,
) -> list[tuple[str, dict]]:
    """Read up to *count* entries from a Redis Stream.

    Args:
        stream_name: Name of the stream
        credentials: Agent credentials
        start_id: Start reading from this ID ("0" = beginning)
        count: Maximum entries to read

    Returns:
        List of (entry_id, data) tuples

    Example:
        >>> entries = memory.stream_read("audit", creds, count=50)
        >>> for entry_id, data in entries:
        ...     print(f"{entry_id}: {data}")

    """
    stream_key = f"{self.PREFIX_STREAM}{stream_name}"

    if self.use_mock:
        recorded = self._mock_streams.get(stream_key)
        if recorded is None:
            return []
        # String comparison of "<ms>-<seq>" IDs approximates Redis ordering.
        matching = [item for item in recorded if item[0] > start_id]
        return matching[:count]

    if self._client is None:
        return []

    rows = self._client.xrange(stream_key, min=start_id, count=count)
    return [(str(eid), {str(field): val for field, val in payload.items()}) for eid, payload in rows]
|
1684
|
-
|
|
1685
|
-
def stream_read_new(
    self,
    stream_name: str,
    credentials: AgentCredentials,
    block_ms: int = 0,
    count: int = 100,
) -> list[tuple[str, dict]]:
    """Read only entries added after this call (the "$" cursor), optionally blocking.

    Args:
        stream_name: Name of the stream
        credentials: Agent credentials
        block_ms: Milliseconds to block waiting (0 = no block)
        count: Maximum entries to read

    Returns:
        List of (entry_id, data) tuples

    """
    stream_key = f"{self.PREFIX_STREAM}{stream_name}"

    # Blocking reads are not emulated in mock mode.
    if self.use_mock:
        return []

    if self._client is None:
        return []

    raw = self._client.xread({stream_key: "$"}, block=block_ms, count=count)
    if not raw:
        return []

    # raw is shaped [(stream_name, [(entry_id, data), ...]), ...] — flatten it.
    return [
        (str(eid), {str(field): val for field, val in payload.items()})
        for _name, batch in raw
        for eid, payload in batch
    ]
|
1722
|
-
|
|
1723
|
-
# =========================================================================
|
|
1724
|
-
# TIME-WINDOW QUERIES (SORTED SETS)
|
|
1725
|
-
# =========================================================================
|
|
1726
|
-
|
|
1727
|
-
def timeline_add(
    self,
    timeline_name: str,
    event_id: str,
    data: dict,
    credentials: AgentCredentials,
    timestamp: datetime | None = None,
) -> bool:
    """Record an event on a timeline (a sorted set keyed by timestamp).

    Args:
        timeline_name: Name of the timeline
        event_id: Unique event identifier
        data: Event data
        credentials: Agent credentials
        timestamp: Event timestamp (defaults to now)

    Returns:
        True if added successfully

    Raises:
        PermissionError: If the agent is below CONTRIBUTOR tier.

    """
    if not credentials.can_stage():
        raise PermissionError(
            f"Agent {credentials.agent_id} cannot write to timeline. "
            "Requires CONTRIBUTOR tier or higher.",
        )

    timeline_key = f"{self.PREFIX_TIMELINE}{timeline_name}"
    when = timestamp or datetime.now()
    # Epoch seconds serve as the sorted-set score.
    score = when.timestamp()

    serialized = json.dumps(
        {
            "event_id": event_id,
            "timestamp": when.isoformat(),
            "agent_id": credentials.agent_id,
            "data": data,
        },
    )

    if self.use_mock:
        events = self._mock_sorted_sets.setdefault(timeline_key, [])
        events.append((score, serialized))
        # Keep the mock list ordered by score, like a real sorted set.
        events.sort(key=lambda pair: pair[0])
        return True

    if self._client is None:
        return False

    self._client.zadd(timeline_key, {serialized: score})
    return True
|
1779
|
-
|
|
1780
|
-
def timeline_query(
    self,
    timeline_name: str,
    credentials: AgentCredentials,
    query: TimeWindowQuery | None = None,
) -> list[dict]:
    """Fetch events whose scores fall inside the query's time window.

    Args:
        timeline_name: Name of the timeline
        credentials: Agent credentials
        query: Time window query parameters (defaults to TimeWindowQuery())

    Returns:
        List of events in the time window

    Example:
        >>> from datetime import datetime, timedelta
        >>> query = TimeWindowQuery(
        ...     start_time=datetime.now() - timedelta(hours=1),
        ...     end_time=datetime.now(),
        ...     limit=50
        ... )
        >>> events = memory.timeline_query("agent_events", creds, query)

    """
    timeline_key = f"{self.PREFIX_TIMELINE}{timeline_name}"
    window = query or TimeWindowQuery()

    if self.use_mock:
        events = self._mock_sorted_sets.get(timeline_key)
        if events is None:
            return []
        in_window = [
            json.loads(raw)
            for score, raw in events
            if window.start_score <= score <= window.end_score
        ]
        # Pagination is applied after the score filter.
        return in_window[window.offset : window.offset + window.limit]

    if self._client is None:
        return []

    raw_rows = self._client.zrangebyscore(
        timeline_key,
        min=window.start_score,
        max=window.end_score,
        start=window.offset,
        num=window.limit,
    )

    return [json.loads(row) for row in raw_rows]
|
1832
|
-
|
|
1833
|
-
def timeline_count(
    self,
    timeline_name: str,
    credentials: AgentCredentials,
    query: TimeWindowQuery | None = None,
) -> int:
    """Count events in a timeline within a time window.

    Args:
        timeline_name: Name of the timeline
        credentials: Agent credentials
        query: Time window query parameters (defaults to an unbounded window)

    Returns:
        Number of events in the time window

    """
    full_timeline = f"{self.PREFIX_TIMELINE}{timeline_name}"
    q = query or TimeWindowQuery()

    if self.use_mock:
        if full_timeline not in self._mock_sorted_sets:
            return 0
        entries = self._mock_sorted_sets[full_timeline]
        # Generator avoids materializing a throwaway list just to count.
        return sum(1 for score, _ in entries if q.start_score <= score <= q.end_score)

    if self._client is None:
        return 0

    return int(self._client.zcount(full_timeline, q.start_score, q.end_score))
|
1863
|
-
|
|
1864
|
-
# =========================================================================
|
|
1865
|
-
# TASK QUEUES (LISTS)
|
|
1866
|
-
# =========================================================================
|
|
1867
|
-
|
|
1868
|
-
def queue_push(
    self,
    queue_name: str,
    task: dict,
    credentials: AgentCredentials,
    priority: bool = False,
) -> int:
    """Enqueue a task, optionally at the front of the queue.

    Args:
        queue_name: Name of the queue
        task: Task data
        credentials: Agent credentials (must be CONTRIBUTOR+)
        priority: If True, push to front (high priority)

    Returns:
        New queue length

    Raises:
        PermissionError: If the agent is below CONTRIBUTOR tier.

    Example:
        >>> task = {"type": "analyze", "file": "main.py"}
        >>> memory.queue_push("agent_tasks", task, creds)

    """
    if not credentials.can_stage():
        raise PermissionError(
            f"Agent {credentials.agent_id} cannot push to queue. "
            "Requires CONTRIBUTOR tier or higher.",
        )

    queue_key = f"{self.PREFIX_QUEUE}{queue_name}"
    # Wrap the task with provenance metadata before serializing.
    envelope = json.dumps(
        {
            "task": task,
            "queued_by": credentials.agent_id,
            "queued_at": datetime.now().isoformat(),
        },
    )

    if self.use_mock:
        pending = self._mock_lists.setdefault(queue_key, [])
        if priority:
            pending.insert(0, envelope)
        else:
            pending.append(envelope)
        return len(pending)

    if self._client is None:
        return 0

    # LPUSH puts high-priority items at the head; RPUSH appends at the tail.
    push = self._client.lpush if priority else self._client.rpush
    return int(push(queue_key, envelope))
|
1921
|
-
|
|
1922
|
-
def queue_pop(
    self,
    queue_name: str,
    credentials: AgentCredentials,
    timeout: int = 0,
) -> dict | None:
    """Remove and return the task at the head of a queue.

    In mock mode the timeout is ignored (no blocking). With Redis, a
    positive timeout uses a blocking BLPOP; otherwise a non-blocking LPOP.

    Args:
        queue_name: Name of the queue
        credentials: Agent credentials
        timeout: Seconds to block waiting (0 = no block)

    Returns:
        Task data or None if queue empty

    Example:
        >>> task = memory.queue_pop("agent_tasks", creds, timeout=5)
        >>> if task:
        ...     process(task["task"])

    """
    queue_key = f"{self.PREFIX_QUEUE}{queue_name}"

    if self.use_mock:
        pending = self._mock_lists.get(queue_key)
        if not pending:
            return None
        head: dict = json.loads(pending.pop(0))
        return head

    if self._client is None:
        return None

    if timeout > 0:
        popped = self._client.blpop(queue_key, timeout=timeout)
        # BLPOP yields (key, value), or None on timeout.
        return json.loads(popped[1]) if popped else None

    popped = self._client.lpop(queue_key)
    return json.loads(popped) if popped else None
|
1968
|
-
|
|
1969
|
-
def queue_length(self, queue_name: str) -> int:
    """Return how many tasks are currently waiting in a queue.

    Args:
        queue_name: Name of the queue

    Returns:
        Number of items in the queue (0 when Redis is unavailable)

    """
    queue_key = f"{self.PREFIX_QUEUE}{queue_name}"

    if self.use_mock:
        return len(self._mock_lists.get(queue_key, []))

    client = self._client
    if client is None:
        return 0

    return int(client.llen(queue_key))
|
1988
|
-
|
|
1989
|
-
def queue_peek(
    self,
    queue_name: str,
    credentials: AgentCredentials,
    count: int = 1,
) -> list[dict]:
    """Look at the first *count* tasks of a queue without removing them.

    Args:
        queue_name: Name of the queue
        credentials: Agent credentials
        count: Number of items to peek

    Returns:
        List of task data

    """
    queue_key = f"{self.PREFIX_QUEUE}{queue_name}"

    if self.use_mock:
        head = self._mock_lists.get(queue_key, [])[:count]
    elif self._client is None:
        return []
    else:
        head = self._client.lrange(queue_key, 0, count - 1)

    return [json.loads(raw) for raw in head]
|
2017
|
-
|
|
2018
|
-
# =========================================================================
|
|
2019
|
-
# ATOMIC TRANSACTIONS
|
|
2020
|
-
# =========================================================================
|
|
2021
|
-
|
|
2022
|
-
def atomic_promote_pattern(
    self,
    pattern_id: str,
    credentials: AgentCredentials,
    min_confidence: float = 0.0,
) -> tuple[bool, StagedPattern | None, str]:
    """Atomically promote a pattern with validation.

    Uses Redis transaction (MULTI/EXEC) to ensure:
    - Pattern exists and meets confidence threshold
    - Pattern is removed from staging atomically
    - No race conditions with concurrent operations

    Args:
        pattern_id: Pattern to promote
        credentials: Must be VALIDATOR or higher
        min_confidence: Minimum confidence threshold

    Returns:
        Tuple of (success, pattern, message)

    Raises:
        ValueError: If pattern_id is empty or min_confidence out of range

    Example:
        >>> success, pattern, msg = memory.atomic_promote_pattern("pat_123", creds, min_confidence=0.7)
        >>> if success:
        ...     library.add(pattern)

    """
    # Pattern 1: String ID validation
    if not pattern_id or not pattern_id.strip():
        raise ValueError(f"pattern_id cannot be empty. Got: {pattern_id!r}")

    # Pattern 4: Range validation
    if not 0.0 <= min_confidence <= 1.0:
        raise ValueError(f"min_confidence must be between 0.0 and 1.0, got {min_confidence}")

    # Insufficient tier is reported via the result tuple, not an exception.
    if not credentials.can_validate():
        return False, None, "Requires VALIDATOR tier or higher"

    key = f"{self.PREFIX_STAGED}{pattern_id}"

    if self.use_mock:
        # Mock storage holds (value, expires) pairs; expiry is epoch seconds or falsy.
        if key not in self._mock_storage:
            return False, None, "Pattern not found"
        value, expires = self._mock_storage[key]
        if expires and datetime.now().timestamp() >= expires:
            return False, None, "Pattern expired"
        pattern = StagedPattern.from_dict(json.loads(str(value)))
        if pattern.confidence < min_confidence:
            return (
                False,
                None,
                f"Confidence {pattern.confidence} below threshold {min_confidence}",
            )
        # Promotion removes the staged entry; the caller receives the pattern.
        del self._mock_storage[key]
        # Also invalidate local cache
        if key in self._local_cache:
            del self._local_cache[key]
        return True, pattern, "Pattern promoted successfully"

    if self._client is None:
        return False, None, "Redis not connected"

    # Use WATCH for optimistic locking
    # NOTE(review): WATCH is issued on the client connection, but the delete
    # below runs on a freshly created pipeline object. The canonical redis-py
    # pattern issues WATCH/MULTI/EXEC on the same pipeline — confirm that a
    # concurrent modification here actually raises WatchError as intended.
    try:
        self._client.watch(key)
        raw = self._client.get(key)

        if raw is None:
            self._client.unwatch()
            return False, None, "Pattern not found"

        pattern = StagedPattern.from_dict(json.loads(raw))

        if pattern.confidence < min_confidence:
            self._client.unwatch()
            return (
                False,
                None,
                f"Confidence {pattern.confidence} below threshold {min_confidence}",
            )

        # Execute atomic delete
        pipe = self._client.pipeline(True)
        pipe.delete(key)
        pipe.execute()

        # Also invalidate local cache
        if key in self._local_cache:
            del self._local_cache[key]

        return True, pattern, "Pattern promoted successfully"

    except redis.WatchError:
        return False, None, "Pattern was modified by another process"
    finally:
        # Best-effort UNWATCH so the connection never leaks a stale watch;
        # harmless if the watch was already cleared by EXEC/UNWATCH above.
        try:
            self._client.unwatch()
        except Exception:
            pass
|
2124
|
-
|
|
2125
|
-
# =========================================================================
|
|
2126
|
-
# CROSS-SESSION COMMUNICATION
|
|
2127
|
-
# =========================================================================
|
|
2128
|
-
|
|
2129
|
-
def enable_cross_session(
    self,
    access_tier: AccessTier = AccessTier.CONTRIBUTOR,
    auto_announce: bool = True,
):
    """Enable cross-session communication for this memory instance.

    This allows agents in different Claude Code sessions to communicate
    and coordinate via Redis.

    Args:
        access_tier: Access tier for this session
        auto_announce: Whether to announce presence automatically

    Returns:
        CrossSessionCoordinator instance

    Raises:
        ValueError: If in mock mode (Redis required for cross-session)

    Example:
        >>> memory = RedisShortTermMemory()
        >>> coordinator = memory.enable_cross_session(AccessTier.CONTRIBUTOR)
        >>> print(f"Session ID: {coordinator.agent_id}")
        >>> sessions = coordinator.get_active_sessions()

    """
    # Cross-session coordination is backed by Redis pubsub; mock mode cannot do it.
    if self.use_mock:
        raise ValueError(
            "Cross-session communication requires Redis. "
            "Set REDIS_HOST/REDIS_PORT or disable mock mode."
        )

    # Imported lazily to avoid a circular import at module load time.
    from .cross_session import CrossSessionCoordinator, SessionType

    return CrossSessionCoordinator(
        memory=self,
        session_type=SessionType.CLAUDE,
        access_tier=access_tier,
        auto_announce=auto_announce,
    )
|
2172
|
-
|
|
2173
|
-
def cross_session_available(self) -> bool:
    """Check if cross-session communication is available.

    Returns:
        True if Redis is connected (not mock mode)

    """
    if self.use_mock:
        return False
    return self._client is not None
|
2181
|
-
|
|
2182
|
-
# =========================================================================
|
|
2183
|
-
# CLEANUP AND LIFECYCLE
|
|
2184
|
-
# =========================================================================
|
|
2185
|
-
|
|
2186
|
-
def close(self) -> None:
    """Release every resource held by this instance.

    Shuts down pubsub first, then closes the Redis client connection.
    Safe to call even when no client was ever connected.
    """
    self.close_pubsub()
    client = self._client
    if client:
        client.close()
        self._client = None
    logger.info("redis_connection_closed")