attune-ai 2.1.4__py3-none-any.whl → 2.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- attune/cli/__init__.py +3 -55
- attune/cli/commands/batch.py +4 -12
- attune/cli/commands/cache.py +7 -15
- attune/cli/commands/provider.py +17 -0
- attune/cli/commands/routing.py +3 -1
- attune/cli/commands/setup.py +122 -0
- attune/cli/commands/tier.py +1 -3
- attune/cli/commands/workflow.py +31 -0
- attune/cli/parsers/cache.py +1 -0
- attune/cli/parsers/help.py +1 -3
- attune/cli/parsers/provider.py +7 -0
- attune/cli/parsers/routing.py +1 -3
- attune/cli/parsers/setup.py +7 -0
- attune/cli/parsers/status.py +1 -3
- attune/cli/parsers/tier.py +1 -3
- attune/cli_minimal.py +34 -28
- attune/cli_router.py +9 -7
- attune/cli_unified.py +3 -0
- attune/core.py +190 -0
- attune/dashboard/app.py +4 -2
- attune/dashboard/simple_server.py +3 -1
- attune/dashboard/standalone_server.py +7 -3
- attune/mcp/server.py +54 -102
- attune/memory/long_term.py +0 -2
- attune/memory/short_term/__init__.py +84 -0
- attune/memory/short_term/base.py +467 -0
- attune/memory/short_term/batch.py +219 -0
- attune/memory/short_term/caching.py +227 -0
- attune/memory/short_term/conflicts.py +265 -0
- attune/memory/short_term/cross_session.py +122 -0
- attune/memory/short_term/facade.py +655 -0
- attune/memory/short_term/pagination.py +215 -0
- attune/memory/short_term/patterns.py +271 -0
- attune/memory/short_term/pubsub.py +286 -0
- attune/memory/short_term/queues.py +244 -0
- attune/memory/short_term/security.py +300 -0
- attune/memory/short_term/sessions.py +250 -0
- attune/memory/short_term/streams.py +249 -0
- attune/memory/short_term/timelines.py +234 -0
- attune/memory/short_term/transactions.py +186 -0
- attune/memory/short_term/working.py +252 -0
- attune/meta_workflows/cli_commands/__init__.py +3 -0
- attune/meta_workflows/cli_commands/agent_commands.py +0 -4
- attune/meta_workflows/cli_commands/analytics_commands.py +0 -6
- attune/meta_workflows/cli_commands/config_commands.py +0 -5
- attune/meta_workflows/cli_commands/memory_commands.py +0 -5
- attune/meta_workflows/cli_commands/template_commands.py +0 -5
- attune/meta_workflows/cli_commands/workflow_commands.py +0 -6
- attune/meta_workflows/workflow.py +1 -1
- attune/models/adaptive_routing.py +4 -8
- attune/models/auth_cli.py +3 -9
- attune/models/auth_strategy.py +2 -4
- attune/models/provider_config.py +20 -1
- attune/models/telemetry/analytics.py +0 -2
- attune/models/telemetry/backend.py +0 -3
- attune/models/telemetry/storage.py +0 -2
- attune/orchestration/_strategies/__init__.py +156 -0
- attune/orchestration/_strategies/base.py +231 -0
- attune/orchestration/_strategies/conditional_strategies.py +373 -0
- attune/orchestration/_strategies/conditions.py +369 -0
- attune/orchestration/_strategies/core_strategies.py +491 -0
- attune/orchestration/_strategies/data_classes.py +64 -0
- attune/orchestration/_strategies/nesting.py +233 -0
- attune/orchestration/execution_strategies.py +58 -1567
- attune/orchestration/meta_orchestrator.py +1 -3
- attune/project_index/scanner.py +1 -3
- attune/project_index/scanner_parallel.py +7 -5
- attune/socratic_router.py +1 -3
- attune/telemetry/agent_coordination.py +9 -3
- attune/telemetry/agent_tracking.py +16 -3
- attune/telemetry/approval_gates.py +22 -5
- attune/telemetry/cli.py +3 -3
- attune/telemetry/commands/dashboard_commands.py +24 -8
- attune/telemetry/event_streaming.py +8 -2
- attune/telemetry/feedback_loop.py +10 -2
- attune/tools.py +1 -0
- attune/workflow_commands.py +1 -3
- attune/workflows/__init__.py +53 -10
- attune/workflows/autonomous_test_gen.py +160 -104
- attune/workflows/base.py +48 -664
- attune/workflows/batch_processing.py +2 -4
- attune/workflows/compat.py +156 -0
- attune/workflows/cost_mixin.py +141 -0
- attune/workflows/data_classes.py +92 -0
- attune/workflows/document_gen/workflow.py +11 -14
- attune/workflows/history.py +62 -37
- attune/workflows/llm_base.py +2 -4
- attune/workflows/migration.py +422 -0
- attune/workflows/output.py +3 -9
- attune/workflows/parsing_mixin.py +427 -0
- attune/workflows/perf_audit.py +3 -1
- attune/workflows/progress.py +10 -13
- attune/workflows/release_prep.py +5 -1
- attune/workflows/routing.py +0 -2
- attune/workflows/secure_release.py +2 -1
- attune/workflows/security_audit.py +19 -14
- attune/workflows/security_audit_phase3.py +28 -22
- attune/workflows/seo_optimization.py +29 -29
- attune/workflows/test_gen/test_templates.py +1 -4
- attune/workflows/test_gen/workflow.py +0 -2
- attune/workflows/test_gen_behavioral.py +7 -20
- attune/workflows/test_gen_parallel.py +6 -4
- {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/METADATA +4 -3
- {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/RECORD +119 -94
- {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/entry_points.txt +0 -2
- attune_healthcare/monitors/monitoring/__init__.py +9 -9
- attune_llm/agent_factory/__init__.py +6 -6
- attune_llm/commands/__init__.py +10 -10
- attune_llm/commands/models.py +3 -3
- attune_llm/config/__init__.py +8 -8
- attune_llm/learning/__init__.py +3 -3
- attune_llm/learning/extractor.py +5 -3
- attune_llm/learning/storage.py +5 -3
- attune_llm/security/__init__.py +17 -17
- attune_llm/utils/tokens.py +3 -1
- attune/cli_legacy.py +0 -3957
- attune/memory/short_term.py +0 -2192
- attune/workflows/manage_docs.py +0 -87
- attune/workflows/test5.py +0 -125
- {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/WHEEL +0 -0
- {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/licenses/LICENSE +0 -0
- {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -0
- {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,467 @@
|
|
|
1
|
+
"""Core CRUD operations and connection management for short-term memory.
|
|
2
|
+
|
|
3
|
+
This module provides the foundational Redis operations:
|
|
4
|
+
- Connection management with retry logic
|
|
5
|
+
- Basic get/set/delete/keys operations
|
|
6
|
+
- Health check (ping) and statistics
|
|
7
|
+
- Cleanup and lifecycle management
|
|
8
|
+
|
|
9
|
+
The BaseOperations class is designed to be composed into the main
|
|
10
|
+
RedisShortTermMemory facade, providing backward compatibility while
|
|
11
|
+
enabling modular testing and maintenance.
|
|
12
|
+
|
|
13
|
+
Target Methods (extracted from original RedisShortTermMemory):
|
|
14
|
+
- __init__ (initialization logic)
|
|
15
|
+
- client property
|
|
16
|
+
- _create_client_with_retry
|
|
17
|
+
- _execute_with_retry
|
|
18
|
+
- _get
|
|
19
|
+
- _set
|
|
20
|
+
- _delete
|
|
21
|
+
- _keys
|
|
22
|
+
- ping
|
|
23
|
+
- get_stats
|
|
24
|
+
- close
|
|
25
|
+
|
|
26
|
+
Dependencies:
|
|
27
|
+
- RedisConfig for configuration
|
|
28
|
+
- RedisMetrics for operation tracking
|
|
29
|
+
- structlog for logging
|
|
30
|
+
|
|
31
|
+
Copyright 2025 Smart-AI-Memory
|
|
32
|
+
Licensed under Fair Source License 0.9
|
|
33
|
+
"""
|
|
34
|
+
|
|
35
|
+
from __future__ import annotations
|
|
36
|
+
|
|
37
|
+
import os
|
|
38
|
+
import time
|
|
39
|
+
from collections.abc import Callable
|
|
40
|
+
from datetime import datetime
|
|
41
|
+
from typing import TYPE_CHECKING, Any
|
|
42
|
+
|
|
43
|
+
import structlog
|
|
44
|
+
|
|
45
|
+
from attune.memory.types import RedisConfig, RedisMetrics
|
|
46
|
+
|
|
47
|
+
if TYPE_CHECKING:
|
|
48
|
+
pass
|
|
49
|
+
|
|
50
|
+
logger = structlog.get_logger(__name__)
|
|
51
|
+
|
|
52
|
+
# Redis availability check
|
|
53
|
+
try:
|
|
54
|
+
import redis
|
|
55
|
+
from redis.exceptions import ConnectionError as RedisConnectionError
|
|
56
|
+
from redis.exceptions import TimeoutError as RedisTimeoutError
|
|
57
|
+
|
|
58
|
+
REDIS_AVAILABLE = True
|
|
59
|
+
except ImportError:
|
|
60
|
+
REDIS_AVAILABLE = False
|
|
61
|
+
redis = None # type: ignore
|
|
62
|
+
RedisConnectionError = Exception # type: ignore
|
|
63
|
+
RedisTimeoutError = Exception # type: ignore
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
class BaseOperations:
    """Core CRUD operations and connection management.

    Provides the foundational Redis operations that other short-term
    memory modules build upon:

    - Connection creation with exponential backoff retry
    - Basic get/set/delete/keys operations
    - Health checks and statistics
    - Resource cleanup

    Example:
        >>> from attune.memory.short_term.base import BaseOperations
        >>> from attune.memory.types import RedisConfig
        >>> config = RedisConfig(use_mock=True)
        >>> base = BaseOperations(config=config)
        >>> base._set("key", "value")
        True
        >>> base._get("key")
        'value'

    Attributes:
        use_mock: Whether using mock storage instead of Redis
        _config: Redis configuration
        _metrics: Operation metrics tracker
        _client: Redis client instance (None if mock)
        _mock_storage: In-memory storage for mock mode
    """

    # Key prefixes for namespacing (shared across all operations)
    PREFIX_WORKING = "empathy:working:"
    PREFIX_STAGED = "empathy:staged:"
    PREFIX_CONFLICT = "empathy:conflict:"
    PREFIX_SESSION = "empathy:session:"
    PREFIX_PUBSUB = "empathy:pubsub:"
    PREFIX_STREAM = "empathy:stream:"
    PREFIX_TIMELINE = "empathy:timeline:"
    PREFIX_QUEUE = "empathy:queue:"

    def __init__(
        self,
        host: str = "localhost",
        port: int = 6379,
        db: int = 0,
        password: str | None = None,
        use_mock: bool = False,
        config: RedisConfig | None = None,
    ) -> None:
        """Initialize Redis connection and core components.

        Args:
            host: Redis host
            port: Redis port
            db: Redis database number
            password: Redis password (optional)
            use_mock: Use in-memory mock for testing
            config: Full RedisConfig for advanced settings (overrides other args)
        """
        # Use config if provided, otherwise build one from args + environment
        if config is not None:
            self._config = config
        else:
            # Redis must be opted into via REDIS_ENABLED (default: disabled)
            redis_enabled = os.getenv("REDIS_ENABLED", "false").lower() in (
                "true",
                "1",
                "yes",
            )

            # Environment variables override constructor defaults
            env_host = os.getenv("REDIS_HOST", host)
            env_port = int(os.getenv("REDIS_PORT", str(port)))
            env_db = int(os.getenv("REDIS_DB", str(db)))
            env_password = os.getenv("REDIS_PASSWORD", password)

            # If Redis is not enabled via env var, force mock mode
            if not redis_enabled and not use_mock:
                use_mock = True
                logger.info(
                    "redis_disabled_via_env",
                    message="Redis not enabled in environment, using mock mode",
                )

            self._config = RedisConfig(
                host=env_host,
                port=env_port,
                db=env_db,
                password=env_password if env_password else None,
                use_mock=use_mock,
            )

        # Fall back to mock mode when the redis package is not installed
        self.use_mock = self._config.use_mock or not REDIS_AVAILABLE

        # Initialize metrics
        self._metrics = RedisMetrics()

        # Mock storage for testing: value -> (payload, expiry_epoch_or_None)
        self._mock_storage: dict[str, tuple[Any, float | None]] = {}
        self._mock_lists: dict[str, list[str]] = {}
        self._mock_sorted_sets: dict[str, list[tuple[float, str]]] = {}
        self._mock_streams: dict[str, list[tuple[str, dict]]] = {}

        # Create client (None in mock mode)
        self._client = None if self.use_mock else self._create_client_with_retry()

    @property
    def client(self) -> Any:
        """Get the Redis client instance.

        Returns:
            Redis client instance or None if using mock mode

        Example:
            >>> memory = BaseOperations(use_mock=True)
            >>> memory.client is None
            True
        """
        return self._client

    @property
    def metrics(self) -> RedisMetrics:
        """Get Redis metrics instance.

        Returns:
            RedisMetrics instance with connection and operation statistics

        Example:
            >>> base = BaseOperations(use_mock=True)
            >>> base.metrics.retries_total
            0
        """
        return self._metrics

    def _create_client_with_retry(self) -> Any:
        """Create Redis client with exponential backoff retry.

        Returns:
            Connected Redis client

        Raises:
            ConnectionError: If all retry attempts fail
        """
        max_attempts = self._config.retry_max_attempts
        base_delay = self._config.retry_base_delay
        max_delay = self._config.retry_max_delay

        last_error: Exception | None = None

        for attempt in range(max_attempts):
            try:
                client = redis.Redis(**self._config.to_redis_kwargs())
                # Verify the connection before handing the client out
                client.ping()
                logger.info(
                    "redis_connected",
                    host=self._config.host,
                    port=self._config.port,
                    attempt=attempt + 1,
                )
                return client
            except (RedisConnectionError, RedisTimeoutError) as e:
                last_error = e
                self._metrics.retries_total += 1

                if attempt < max_attempts - 1:
                    # Exponential backoff, capped at max_delay
                    delay = min(base_delay * (2**attempt), max_delay)
                    logger.warning(
                        "redis_connection_retry",
                        attempt=attempt + 1,
                        max_attempts=max_attempts,
                        delay=delay,
                        error=str(e),
                    )
                    time.sleep(delay)

        # All retries failed
        logger.error(
            "redis_connection_failed",
            max_attempts=max_attempts,
            error=str(last_error),
        )
        raise last_error if last_error else ConnectionError("Failed to connect to Redis")

    def _execute_with_retry(
        self, operation: Callable[[], Any], op_name: str = "operation"
    ) -> Any:
        """Execute a Redis operation with retry logic.

        Args:
            operation: Callable that performs the Redis operation
            op_name: Name of operation for logging/metrics

        Returns:
            Result of the operation

        Raises:
            ConnectionError: If all retry attempts fail
        """
        start_time = time.perf_counter()
        max_attempts = self._config.retry_max_attempts
        base_delay = self._config.retry_base_delay
        max_delay = self._config.retry_max_delay

        last_error: Exception | None = None

        for attempt in range(max_attempts):
            try:
                result = operation()
                latency_ms = (time.perf_counter() - start_time) * 1000
                self._metrics.record_operation(op_name, latency_ms, success=True)
                return result
            except (RedisConnectionError, RedisTimeoutError) as e:
                last_error = e
                self._metrics.retries_total += 1

                if attempt < max_attempts - 1:
                    delay = min(base_delay * (2**attempt), max_delay)
                    logger.warning(
                        "redis_operation_retry",
                        operation=op_name,
                        attempt=attempt + 1,
                        delay=delay,
                    )
                    time.sleep(delay)

        # Record the failure latency before propagating the last error
        latency_ms = (time.perf_counter() - start_time) * 1000
        self._metrics.record_operation(op_name, latency_ms, success=False)
        raise last_error if last_error else ConnectionError("Redis operation failed")

    def _get(self, key: str) -> str | None:
        """Get value from Redis or mock storage.

        Args:
            key: Key to retrieve

        Returns:
            Value as string, or None if not found
        """
        # Mock mode path
        if self.use_mock:
            if key in self._mock_storage:
                value, expires = self._mock_storage[key]
                if expires is None or datetime.now().timestamp() < expires:
                    return str(value) if value is not None else None
                # Lazily evict expired entries
                del self._mock_storage[key]
            return None

        # Real Redis path
        if self._client is None:
            return None

        result = self._client.get(key)
        if result is None:
            return None
        # redis-py returns bytes unless decode_responses=True; decode so
        # callers always get str. Empty string is a valid stored value and
        # must not be collapsed to None.
        return result.decode() if isinstance(result, bytes) else str(result)

    def _set(self, key: str, value: str, ttl: int | None = None) -> bool:
        """Set value in Redis or mock storage.

        Args:
            key: Key to set
            value: Value to store
            ttl: Time-to-live in seconds (optional)

        Returns:
            True if successful
        """
        # Mock mode path
        if self.use_mock:
            expires = datetime.now().timestamp() + ttl if ttl else None
            self._mock_storage[key] = (value, expires)
            return True

        # Real Redis path
        if self._client is None:
            return False

        # Check the server reply on both paths (the original ignored the
        # setex() result while checking set())
        if ttl:
            result = self._client.setex(key, ttl, value)
        else:
            result = self._client.set(key, value)
        return bool(result)

    def _delete(self, key: str) -> bool:
        """Delete key from Redis or mock storage.

        Args:
            key: Key to delete

        Returns:
            True if key was deleted
        """
        # Mock mode path
        if self.use_mock:
            if key in self._mock_storage:
                del self._mock_storage[key]
                return True
            return False

        # Real Redis path
        if self._client is None:
            return False

        return bool(self._client.delete(key) > 0)

    def _keys(self, pattern: str) -> list[str]:
        """Get keys matching pattern.

        Args:
            pattern: Glob-style pattern to match

        Returns:
            List of matching keys
        """
        if self.use_mock:
            import fnmatch

            # List comp is fine for typical small mock key sets (<1000 keys)
            return [k for k in self._mock_storage if fnmatch.fnmatch(k, pattern)]

        if self._client is None:
            return []

        keys = self._client.keys(pattern)
        # Convert bytes to strings - needed for API return type
        return [k.decode() if isinstance(k, bytes) else str(k) for k in keys]

    def ping(self) -> bool:
        """Check Redis connection health.

        Returns:
            True if connected and responsive
        """
        if self.use_mock:
            return True
        if self._client is None:
            return False
        try:
            return bool(self._client.ping())
        except Exception:  # noqa: BLE001
            # INTENTIONAL: Health check should not raise, just return False
            return False

    def get_stats(self) -> dict:
        """Get memory statistics.

        Returns:
            Dict with memory stats including mode, key counts by prefix
        """
        if self.use_mock:
            # Generator expressions for memory-efficient counting
            return {
                "mode": "mock",
                "total_keys": len(self._mock_storage),
                "working_keys": sum(
                    1 for k in self._mock_storage if k.startswith(self.PREFIX_WORKING)
                ),
                "staged_keys": sum(
                    1 for k in self._mock_storage if k.startswith(self.PREFIX_STAGED)
                ),
                "conflict_keys": sum(
                    1 for k in self._mock_storage if k.startswith(self.PREFIX_CONFLICT)
                ),
            }

        if self._client is None:
            return {"mode": "disconnected", "error": "No Redis client"}

        info = self._client.info("memory")
        return {
            "mode": "redis",
            "used_memory": info.get("used_memory_human"),
            "peak_memory": info.get("used_memory_peak_human"),
            "total_keys": self._client.dbsize(),
            "working_keys": len(self._keys(f"{self.PREFIX_WORKING}*")),
            "staged_keys": len(self._keys(f"{self.PREFIX_STAGED}*")),
            "conflict_keys": len(self._keys(f"{self.PREFIX_CONFLICT}*")),
        }

    def get_metrics(self) -> dict:
        """Get operation metrics for observability.

        Returns:
            Dict with operation counts, latencies, and success rates
        """
        return self._metrics.to_dict()

    def reset_metrics(self) -> None:
        """Reset all metrics to zero."""
        self._metrics = RedisMetrics()

    def close(self) -> None:
        """Close Redis connection and cleanup resources."""
        if self._client:
            self._client.close()
            self._client = None
            logger.info("redis_connection_closed")
|
@@ -0,0 +1,219 @@
|
|
|
1
|
+
"""Batch operations for efficient bulk processing.
|
|
2
|
+
|
|
3
|
+
This module provides efficient batch operations using Redis pipelines:
|
|
4
|
+
- Batch stash: Store multiple items in single round-trip
|
|
5
|
+
- Batch retrieve: Get multiple items in single round-trip
|
|
6
|
+
|
|
7
|
+
Benefits:
|
|
8
|
+
- Reduces network round-trips
|
|
9
|
+
- Atomic execution (all or nothing)
|
|
10
|
+
- Better throughput for bulk operations
|
|
11
|
+
|
|
12
|
+
Classes:
|
|
13
|
+
BatchOperations: Bulk stash/retrieve with Redis pipelines
|
|
14
|
+
|
|
15
|
+
Example:
|
|
16
|
+
>>> from attune.memory.short_term.batch import BatchOperations
|
|
17
|
+
>>> from attune.memory.types import AgentCredentials, AccessTier
|
|
18
|
+
>>> batch_ops = BatchOperations(base_ops)
|
|
19
|
+
>>> creds = AgentCredentials("agent_1", AccessTier.CONTRIBUTOR)
|
|
20
|
+
>>> items = [("key1", {"a": 1}), ("key2", {"b": 2})]
|
|
21
|
+
>>> count = batch_ops.stash_batch(items, creds)
|
|
22
|
+
>>> data = batch_ops.retrieve_batch(["key1", "key2"], creds)
|
|
23
|
+
|
|
24
|
+
Copyright 2025 Smart-AI-Memory
|
|
25
|
+
Licensed under Fair Source License 0.9
|
|
26
|
+
"""
|
|
27
|
+
|
|
28
|
+
from __future__ import annotations
|
|
29
|
+
|
|
30
|
+
import json
|
|
31
|
+
import time
|
|
32
|
+
from datetime import datetime
|
|
33
|
+
from typing import TYPE_CHECKING, Any
|
|
34
|
+
|
|
35
|
+
import structlog
|
|
36
|
+
|
|
37
|
+
from attune.memory.types import (
|
|
38
|
+
AgentCredentials,
|
|
39
|
+
TTLStrategy,
|
|
40
|
+
)
|
|
41
|
+
|
|
42
|
+
if TYPE_CHECKING:
|
|
43
|
+
from attune.memory.short_term.base import BaseOperations
|
|
44
|
+
|
|
45
|
+
logger = structlog.get_logger(__name__)
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
class BatchOperations:
    """Batch operations using Redis pipelines.

    Provides efficient bulk stash/retrieve operations that reduce
    network round-trips by batching multiple operations into a
    single Redis pipeline execution.

    The class is designed to be composed with BaseOperations
    for dependency injection and access to Redis client.

    Attributes:
        PREFIX_WORKING: Key prefix for working memory namespace

    Example:
        >>> batch_ops = BatchOperations(base_ops)
        >>> creds = AgentCredentials("agent_1", AccessTier.CONTRIBUTOR)
        >>> items = [("analysis", {"score": 95}), ("summary", {"text": "..."})]
        >>> count = batch_ops.stash_batch(items, creds)
        2
        >>> batch_ops.retrieve_batch(["analysis", "summary"], creds)
        {'analysis': {'score': 95}, 'summary': {'text': '...'}}
    """

    PREFIX_WORKING = "empathy:working:"

    def __init__(self, base: BaseOperations) -> None:
        """Initialize batch operations.

        Args:
            base: BaseOperations instance for storage access
        """
        self._base = base

    def _full_key(self, owner: str, key: str) -> str:
        """Build the namespaced working-memory key for an agent's item."""
        return f"{self.PREFIX_WORKING}{owner}:{key}"

    def _serialize(self, data: Any, agent_id: str) -> str:
        """Wrap data in the standard stash envelope and JSON-encode it."""
        return json.dumps(
            {
                "data": data,
                "agent_id": agent_id,
                "stashed_at": datetime.now().isoformat(),
            }
        )

    def stash_batch(
        self,
        items: list[tuple[str, Any]],
        credentials: AgentCredentials,
        ttl: TTLStrategy = TTLStrategy.WORKING_RESULTS,
    ) -> int:
        """Stash multiple items in a single operation.

        Uses Redis pipeline for efficiency (reduces network round-trips).

        Args:
            items: List of (key, data) tuples
            credentials: Agent credentials
            ttl: Time-to-live strategy (applied to all items)

        Returns:
            Number of items successfully stashed

        Raises:
            TypeError: If items is not a list
            PermissionError: If credentials lack write access

        Example:
            >>> items = [("key1", {"a": 1}), ("key2", {"b": 2})]
            >>> count = batch_ops.stash_batch(items, creds)
            2
        """
        # Pattern 5: Type validation
        if not isinstance(items, list):
            raise TypeError(f"items must be list, got {type(items).__name__}")

        if not credentials.can_stage():
            raise PermissionError(
                f"Agent {credentials.agent_id} cannot write to memory. "
                "Requires CONTRIBUTOR tier or higher.",
            )

        if not items:
            return 0

        start_time = time.perf_counter()

        # Handle mock storage mode
        if self._base.use_mock:
            count = 0
            for key, data in items:
                full_key = self._full_key(credentials.agent_id, key)
                expires = datetime.now().timestamp() + ttl.value
                self._base._mock_storage[full_key] = (
                    self._serialize(data, credentials.agent_id),
                    expires,
                )
                count += 1
            latency_ms = (time.perf_counter() - start_time) * 1000
            self._base._metrics.record_operation("stash_batch", latency_ms)
            return count

        # Handle real Redis client
        if self._base._client is None:
            return 0

        # Queue all writes into one pipeline: single round-trip, one TTL each
        pipe = self._base._client.pipeline()
        for key, data in items:
            full_key = self._full_key(credentials.agent_id, key)
            pipe.setex(full_key, ttl.value, self._serialize(data, credentials.agent_id))

        results = pipe.execute()
        count = sum(1 for r in results if r)
        latency_ms = (time.perf_counter() - start_time) * 1000
        self._base._metrics.record_operation("stash_batch", latency_ms)

        logger.info("batch_stash_complete", count=count, total=len(items))
        return count

    def retrieve_batch(
        self,
        keys: list[str],
        credentials: AgentCredentials,
        agent_id: str | None = None,
    ) -> dict[str, Any]:
        """Retrieve multiple items in a single operation.

        Uses Redis MGET for efficiency (single round-trip for all keys).

        Args:
            keys: List of keys to retrieve
            credentials: Agent credentials
            agent_id: Owner agent ID (defaults to credentials agent)

        Returns:
            Dict mapping key to data (missing keys omitted)

        Example:
            >>> data = batch_ops.retrieve_batch(["key1", "key2"], creds)
            >>> print(data["key1"])
            {'a': 1}
        """
        if not keys:
            return {}

        start_time = time.perf_counter()
        owner = agent_id or credentials.agent_id
        results: dict[str, Any] = {}

        # Handle mock storage mode
        if self._base.use_mock:
            for key in keys:
                full_key = self._full_key(owner, key)
                if full_key in self._base._mock_storage:
                    value, expires = self._base._mock_storage[full_key]
                    if expires is None or datetime.now().timestamp() < expires:
                        payload = json.loads(str(value))
                        results[key] = payload.get("data")
            latency_ms = (time.perf_counter() - start_time) * 1000
            self._base._metrics.record_operation("retrieve_batch", latency_ms)
            return results

        # Handle real Redis client
        if self._base._client is None:
            return {}

        full_keys = [self._full_key(owner, key) for key in keys]
        values = self._base._client.mget(full_keys)

        for key, raw in zip(keys, values, strict=False):
            if not raw:
                continue
            # redis-py returns bytes unless decode_responses=True; the
            # original str(raw) produced "b'...'" which is invalid JSON.
            text = raw.decode() if isinstance(raw, bytes) else str(raw)
            payload = json.loads(text)
            results[key] = payload.get("data")

        latency_ms = (time.perf_counter() - start_time) * 1000
        self._base._metrics.record_operation("retrieve_batch", latency_ms)
        return results
|