attune-ai 2.1.5__py3-none-any.whl → 2.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (125)
  1. attune/cli/__init__.py +3 -59
  2. attune/cli/commands/batch.py +4 -12
  3. attune/cli/commands/cache.py +8 -16
  4. attune/cli/commands/provider.py +17 -0
  5. attune/cli/commands/routing.py +3 -1
  6. attune/cli/commands/setup.py +122 -0
  7. attune/cli/commands/tier.py +1 -3
  8. attune/cli/commands/workflow.py +31 -0
  9. attune/cli/parsers/cache.py +1 -0
  10. attune/cli/parsers/help.py +1 -3
  11. attune/cli/parsers/provider.py +7 -0
  12. attune/cli/parsers/routing.py +1 -3
  13. attune/cli/parsers/setup.py +7 -0
  14. attune/cli/parsers/status.py +1 -3
  15. attune/cli/parsers/tier.py +1 -3
  16. attune/cli_minimal.py +9 -3
  17. attune/cli_router.py +9 -7
  18. attune/cli_unified.py +3 -0
  19. attune/dashboard/app.py +3 -1
  20. attune/dashboard/simple_server.py +3 -1
  21. attune/dashboard/standalone_server.py +7 -3
  22. attune/mcp/server.py +54 -102
  23. attune/memory/long_term.py +0 -2
  24. attune/memory/short_term/__init__.py +84 -0
  25. attune/memory/short_term/base.py +465 -0
  26. attune/memory/short_term/batch.py +219 -0
  27. attune/memory/short_term/caching.py +227 -0
  28. attune/memory/short_term/conflicts.py +265 -0
  29. attune/memory/short_term/cross_session.py +122 -0
  30. attune/memory/short_term/facade.py +653 -0
  31. attune/memory/short_term/pagination.py +207 -0
  32. attune/memory/short_term/patterns.py +271 -0
  33. attune/memory/short_term/pubsub.py +286 -0
  34. attune/memory/short_term/queues.py +244 -0
  35. attune/memory/short_term/security.py +300 -0
  36. attune/memory/short_term/sessions.py +250 -0
  37. attune/memory/short_term/streams.py +242 -0
  38. attune/memory/short_term/timelines.py +234 -0
  39. attune/memory/short_term/transactions.py +184 -0
  40. attune/memory/short_term/working.py +252 -0
  41. attune/meta_workflows/cli_commands/__init__.py +3 -0
  42. attune/meta_workflows/cli_commands/agent_commands.py +0 -4
  43. attune/meta_workflows/cli_commands/analytics_commands.py +0 -6
  44. attune/meta_workflows/cli_commands/config_commands.py +0 -5
  45. attune/meta_workflows/cli_commands/memory_commands.py +0 -5
  46. attune/meta_workflows/cli_commands/template_commands.py +0 -5
  47. attune/meta_workflows/cli_commands/workflow_commands.py +0 -6
  48. attune/meta_workflows/plan_generator.py +2 -4
  49. attune/models/adaptive_routing.py +4 -8
  50. attune/models/auth_cli.py +3 -9
  51. attune/models/auth_strategy.py +2 -4
  52. attune/models/telemetry/analytics.py +0 -2
  53. attune/models/telemetry/backend.py +0 -3
  54. attune/models/telemetry/storage.py +0 -2
  55. attune/monitoring/alerts.py +6 -10
  56. attune/orchestration/_strategies/__init__.py +156 -0
  57. attune/orchestration/_strategies/base.py +227 -0
  58. attune/orchestration/_strategies/conditional_strategies.py +365 -0
  59. attune/orchestration/_strategies/conditions.py +369 -0
  60. attune/orchestration/_strategies/core_strategies.py +479 -0
  61. attune/orchestration/_strategies/data_classes.py +64 -0
  62. attune/orchestration/_strategies/nesting.py +233 -0
  63. attune/orchestration/execution_strategies.py +58 -1567
  64. attune/orchestration/meta_orchestrator.py +1 -3
  65. attune/project_index/scanner.py +1 -3
  66. attune/project_index/scanner_parallel.py +7 -5
  67. attune/socratic/storage.py +2 -4
  68. attune/socratic_router.py +1 -3
  69. attune/telemetry/agent_coordination.py +9 -3
  70. attune/telemetry/agent_tracking.py +16 -3
  71. attune/telemetry/approval_gates.py +22 -5
  72. attune/telemetry/cli.py +1 -3
  73. attune/telemetry/commands/dashboard_commands.py +24 -8
  74. attune/telemetry/event_streaming.py +8 -2
  75. attune/telemetry/feedback_loop.py +10 -2
  76. attune/tools.py +2 -1
  77. attune/workflow_commands.py +1 -3
  78. attune/workflow_patterns/structural.py +4 -8
  79. attune/workflows/__init__.py +54 -10
  80. attune/workflows/autonomous_test_gen.py +158 -102
  81. attune/workflows/base.py +48 -672
  82. attune/workflows/batch_processing.py +1 -3
  83. attune/workflows/compat.py +156 -0
  84. attune/workflows/cost_mixin.py +141 -0
  85. attune/workflows/data_classes.py +92 -0
  86. attune/workflows/document_gen/workflow.py +11 -14
  87. attune/workflows/history.py +16 -9
  88. attune/workflows/llm_base.py +1 -3
  89. attune/workflows/migration.py +432 -0
  90. attune/workflows/output.py +2 -7
  91. attune/workflows/parsing_mixin.py +427 -0
  92. attune/workflows/perf_audit.py +3 -1
  93. attune/workflows/progress.py +9 -11
  94. attune/workflows/release_prep.py +5 -1
  95. attune/workflows/routing.py +0 -2
  96. attune/workflows/secure_release.py +4 -1
  97. attune/workflows/security_audit.py +20 -14
  98. attune/workflows/security_audit_phase3.py +28 -22
  99. attune/workflows/seo_optimization.py +27 -27
  100. attune/workflows/test_gen/test_templates.py +1 -4
  101. attune/workflows/test_gen/workflow.py +0 -2
  102. attune/workflows/test_gen_behavioral.py +6 -19
  103. attune/workflows/test_gen_parallel.py +8 -6
  104. {attune_ai-2.1.5.dist-info → attune_ai-2.2.1.dist-info}/METADATA +4 -3
  105. {attune_ai-2.1.5.dist-info → attune_ai-2.2.1.dist-info}/RECORD +121 -96
  106. {attune_ai-2.1.5.dist-info → attune_ai-2.2.1.dist-info}/entry_points.txt +0 -2
  107. attune_healthcare/monitors/monitoring/__init__.py +9 -9
  108. attune_llm/agent_factory/__init__.py +6 -6
  109. attune_llm/agent_factory/adapters/haystack_adapter.py +1 -4
  110. attune_llm/commands/__init__.py +10 -10
  111. attune_llm/commands/models.py +3 -3
  112. attune_llm/config/__init__.py +8 -8
  113. attune_llm/learning/__init__.py +3 -3
  114. attune_llm/learning/extractor.py +5 -3
  115. attune_llm/learning/storage.py +5 -3
  116. attune_llm/security/__init__.py +17 -17
  117. attune_llm/utils/tokens.py +3 -1
  118. attune/cli_legacy.py +0 -3978
  119. attune/memory/short_term.py +0 -2192
  120. attune/workflows/manage_docs.py +0 -87
  121. attune/workflows/test5.py +0 -125
  122. {attune_ai-2.1.5.dist-info → attune_ai-2.2.1.dist-info}/WHEEL +0 -0
  123. {attune_ai-2.1.5.dist-info → attune_ai-2.2.1.dist-info}/licenses/LICENSE +0 -0
  124. {attune_ai-2.1.5.dist-info → attune_ai-2.2.1.dist-info}/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -0
  125. {attune_ai-2.1.5.dist-info → attune_ai-2.2.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,465 @@
1
+ """Core CRUD operations and connection management for short-term memory.
2
+
3
+ This module provides the foundational Redis operations:
4
+ - Connection management with retry logic
5
+ - Basic get/set/delete/keys operations
6
+ - Health check (ping) and statistics
7
+ - Cleanup and lifecycle management
8
+
9
+ The BaseOperations class is designed to be composed into the main
10
+ RedisShortTermMemory facade, providing backward compatibility while
11
+ enabling modular testing and maintenance.
12
+
13
+ Target Methods (extracted from original RedisShortTermMemory):
14
+ - __init__ (initialization logic)
15
+ - client property
16
+ - _create_client_with_retry
17
+ - _execute_with_retry
18
+ - _get
19
+ - _set
20
+ - _delete
21
+ - _keys
22
+ - ping
23
+ - get_stats
24
+ - close
25
+
26
+ Dependencies:
27
+ - RedisConfig for configuration
28
+ - RedisMetrics for operation tracking
29
+ - structlog for logging
30
+
31
+ Copyright 2025 Smart-AI-Memory
32
+ Licensed under Fair Source License 0.9
33
+ """
34
+
35
+ from __future__ import annotations
36
+
37
+ import os
38
+ import time
39
+ from collections.abc import Callable
40
+ from datetime import datetime
41
+ from typing import TYPE_CHECKING, Any
42
+
43
+ import structlog
44
+
45
+ from attune.memory.types import RedisConfig, RedisMetrics
46
+
47
+ if TYPE_CHECKING:
48
+ pass
49
+
50
+ logger = structlog.get_logger(__name__)
51
+
52
+ # Redis availability check
53
+ try:
54
+ import redis
55
+ from redis.exceptions import ConnectionError as RedisConnectionError
56
+ from redis.exceptions import TimeoutError as RedisTimeoutError
57
+
58
+ REDIS_AVAILABLE = True
59
+ except ImportError:
60
+ REDIS_AVAILABLE = False
61
+ redis = None # type: ignore
62
+ RedisConnectionError = Exception # type: ignore
63
+ RedisTimeoutError = Exception # type: ignore
64
+
65
+
66
class BaseOperations:
    """Core CRUD operations and connection management.

    Provides the foundational Redis operations that other short-term-memory
    modules build upon:

    - Connection creation with exponential backoff retry
    - Basic get/set/delete/keys operations
    - Health checks and statistics
    - Resource cleanup

    Example:
        >>> from attune.memory.types import RedisConfig
        >>> base = BaseOperations(config=RedisConfig(use_mock=True))
        >>> base._set("key", "value")
        True
        >>> base._get("key")
        'value'

    Attributes:
        use_mock: Whether using mock storage instead of Redis
        _config: Redis configuration
        _metrics: Operation metrics tracker
        _client: Redis client instance (None if mock)
        _mock_storage: In-memory storage for mock mode
    """

    # Key prefixes for namespacing (shared across all operations)
    PREFIX_WORKING = "empathy:working:"
    PREFIX_STAGED = "empathy:staged:"
    PREFIX_CONFLICT = "empathy:conflict:"
    PREFIX_SESSION = "empathy:session:"
    PREFIX_PUBSUB = "empathy:pubsub:"
    PREFIX_STREAM = "empathy:stream:"
    PREFIX_TIMELINE = "empathy:timeline:"
    PREFIX_QUEUE = "empathy:queue:"

    def __init__(
        self,
        host: str = "localhost",
        port: int = 6379,
        db: int = 0,
        password: str | None = None,
        use_mock: bool = False,
        config: RedisConfig | None = None,
    ) -> None:
        """Initialize Redis connection and core components.

        Args:
            host: Redis host
            port: Redis port
            db: Redis database number
            password: Redis password (optional)
            use_mock: Use in-memory mock for testing
            config: Full RedisConfig for advanced settings (overrides other args)
        """
        # Use config if provided, otherwise build one from individual args.
        if config is not None:
            self._config = config
        else:
            # Redis is opt-in: unless REDIS_ENABLED is truthy, run in mock mode.
            redis_enabled = os.getenv("REDIS_ENABLED", "false").lower() in (
                "true",
                "1",
                "yes",
            )

            # Environment variables override the constructor defaults.
            env_host = os.getenv("REDIS_HOST", host)
            env_port = int(os.getenv("REDIS_PORT", str(port)))
            env_db = int(os.getenv("REDIS_DB", str(db)))
            env_password = os.getenv("REDIS_PASSWORD", password)

            # If Redis is not enabled via env var, force mock mode.
            if not redis_enabled and not use_mock:
                use_mock = True
                logger.info(
                    "redis_disabled_via_env",
                    message="Redis not enabled in environment, using mock mode",
                )

            self._config = RedisConfig(
                host=env_host,
                port=env_port,
                db=env_db,
                password=env_password if env_password else None,
                use_mock=use_mock,
            )

        # Also fall back to mock mode when the redis package is not installed.
        self.use_mock = self._config.use_mock or not REDIS_AVAILABLE

        # Initialize metrics
        self._metrics = RedisMetrics()

        # In-memory structures used only in mock mode.
        # _mock_storage maps key -> (value, expiry-timestamp-or-None).
        self._mock_storage: dict[str, tuple[Any, float | None]] = {}
        self._mock_lists: dict[str, list[str]] = {}
        self._mock_sorted_sets: dict[str, list[tuple[float, str]]] = {}
        self._mock_streams: dict[str, list[tuple[str, dict]]] = {}

        # Create client (None in mock mode)
        if self.use_mock:
            self._client = None
        else:
            self._client = self._create_client_with_retry()

    @property
    def client(self) -> Any:
        """Get the Redis client instance.

        Returns:
            Redis client instance or None if using mock mode

        Example:
            >>> memory = BaseOperations(use_mock=True)
            >>> memory.client is None
            True
        """
        return self._client

    @property
    def metrics(self) -> RedisMetrics:
        """Get Redis metrics instance.

        Returns:
            RedisMetrics instance with connection and operation statistics

        Example:
            >>> base = BaseOperations(use_mock=True)
            >>> base.metrics.retries_total
            0
        """
        return self._metrics

    def _create_client_with_retry(self) -> Any:
        """Create Redis client with exponential backoff retry.

        Returns:
            Connected Redis client

        Raises:
            ConnectionError: If all retry attempts fail
        """
        max_attempts = self._config.retry_max_attempts
        base_delay = self._config.retry_base_delay
        max_delay = self._config.retry_max_delay

        last_error: Exception | None = None

        for attempt in range(max_attempts):
            try:
                client = redis.Redis(**self._config.to_redis_kwargs())
                # Test connection before declaring success
                client.ping()
                logger.info(
                    "redis_connected",
                    host=self._config.host,
                    port=self._config.port,
                    attempt=attempt + 1,
                )
                return client
            except (RedisConnectionError, RedisTimeoutError) as e:
                last_error = e
                self._metrics.retries_total += 1

                if attempt < max_attempts - 1:
                    # Exponential backoff, capped at max_delay
                    delay = min(base_delay * (2**attempt), max_delay)
                    logger.warning(
                        "redis_connection_retry",
                        attempt=attempt + 1,
                        max_attempts=max_attempts,
                        delay=delay,
                        error=str(e),
                    )
                    time.sleep(delay)

        # All retries failed
        logger.error(
            "redis_connection_failed",
            max_attempts=max_attempts,
            error=str(last_error),
        )
        raise last_error if last_error else ConnectionError("Failed to connect to Redis")

    def _execute_with_retry(self, operation: Callable[[], Any], op_name: str = "operation") -> Any:
        """Execute a Redis operation with retry logic.

        Args:
            operation: Callable that performs the Redis operation
            op_name: Name of operation for logging/metrics

        Returns:
            Result of the operation

        Raises:
            ConnectionError: If all retry attempts fail
        """
        start_time = time.perf_counter()
        max_attempts = self._config.retry_max_attempts
        base_delay = self._config.retry_base_delay
        max_delay = self._config.retry_max_delay

        last_error: Exception | None = None

        for attempt in range(max_attempts):
            try:
                result = operation()
                latency_ms = (time.perf_counter() - start_time) * 1000
                self._metrics.record_operation(op_name, latency_ms, success=True)
                return result
            except (RedisConnectionError, RedisTimeoutError) as e:
                last_error = e
                self._metrics.retries_total += 1

                if attempt < max_attempts - 1:
                    delay = min(base_delay * (2**attempt), max_delay)
                    logger.warning(
                        "redis_operation_retry",
                        operation=op_name,
                        attempt=attempt + 1,
                        delay=delay,
                    )
                    time.sleep(delay)

        # Record the failed operation's total latency before re-raising
        latency_ms = (time.perf_counter() - start_time) * 1000
        self._metrics.record_operation(op_name, latency_ms, success=False)
        raise last_error if last_error else ConnectionError("Redis operation failed")

    def _get(self, key: str) -> str | None:
        """Get value from Redis or mock storage.

        Args:
            key: Key to retrieve

        Returns:
            Value as string, or None if not found
        """
        # Mock mode path: honor per-key expiry, lazily evicting stale entries
        if self.use_mock:
            if key in self._mock_storage:
                value, expires = self._mock_storage[key]
                if expires is None or datetime.now().timestamp() < expires:
                    return str(value) if value is not None else None
                del self._mock_storage[key]
            return None

        # Real Redis path
        if self._client is None:
            return None

        result = self._client.get(key)
        if result is None:
            return None
        # Decode bytes replies for consistency with _keys(); redis-py returns
        # bytes unless decode_responses is enabled. An explicit None check also
        # preserves stored empty strings (matching the mock path above).
        return result.decode() if isinstance(result, bytes) else str(result)

    def _set(self, key: str, value: str, ttl: int | None = None) -> bool:
        """Set value in Redis or mock storage.

        Args:
            key: Key to set
            value: Value to store
            ttl: Time-to-live in seconds (optional)

        Returns:
            True if successful
        """
        # Mock mode path: store expiry as an absolute timestamp
        if self.use_mock:
            expires = datetime.now().timestamp() + ttl if ttl else None
            self._mock_storage[key] = (value, expires)
            return True

        # Real Redis path
        if self._client is None:
            return False

        # SETEX for TTL'd keys, plain SET otherwise
        if ttl:
            self._client.setex(key, ttl, value)
        else:
            result = self._client.set(key, value)
            if not result:
                return False

        return True

    def _delete(self, key: str) -> bool:
        """Delete key from Redis or mock storage.

        Args:
            key: Key to delete

        Returns:
            True if key was deleted
        """
        # Mock mode path
        if self.use_mock:
            if key in self._mock_storage:
                del self._mock_storage[key]
                return True
            return False

        # Real Redis path
        if self._client is None:
            return False

        # DELETE returns the number of keys removed
        return bool(self._client.delete(key) > 0)

    def _keys(self, pattern: str) -> list[str]:
        """Get keys matching pattern.

        Args:
            pattern: Glob-style pattern to match

        Returns:
            List of matching keys
        """
        if self.use_mock:
            import fnmatch

            # Use list comp for small result sets (typical <1000 keys)
            return [k for k in self._mock_storage.keys() if fnmatch.fnmatch(k, pattern)]

        if self._client is None:
            return []

        keys = self._client.keys(pattern)
        # Convert bytes to strings - needed for API return type
        return [k.decode() if isinstance(k, bytes) else str(k) for k in keys]

    def ping(self) -> bool:
        """Check Redis connection health.

        Returns:
            True if connected and responsive
        """
        if self.use_mock:
            return True
        if self._client is None:
            return False
        try:
            return bool(self._client.ping())
        except Exception:  # noqa: BLE001
            # INTENTIONAL: Health check should not raise, just return False
            return False

    def get_stats(self) -> dict:
        """Get memory statistics.

        Returns:
            Dict with memory stats including mode, key counts by prefix
        """
        if self.use_mock:
            # Use generator expressions for memory-efficient counting
            return {
                "mode": "mock",
                "total_keys": len(self._mock_storage),
                "working_keys": sum(
                    1 for k in self._mock_storage if k.startswith(self.PREFIX_WORKING)
                ),
                "staged_keys": sum(
                    1 for k in self._mock_storage if k.startswith(self.PREFIX_STAGED)
                ),
                "conflict_keys": sum(
                    1 for k in self._mock_storage if k.startswith(self.PREFIX_CONFLICT)
                ),
            }

        if self._client is None:
            return {"mode": "disconnected", "error": "No Redis client"}

        info = self._client.info("memory")
        return {
            "mode": "redis",
            "used_memory": info.get("used_memory_human"),
            "peak_memory": info.get("used_memory_peak_human"),
            "total_keys": self._client.dbsize(),
            "working_keys": len(self._keys(f"{self.PREFIX_WORKING}*")),
            "staged_keys": len(self._keys(f"{self.PREFIX_STAGED}*")),
            "conflict_keys": len(self._keys(f"{self.PREFIX_CONFLICT}*")),
        }

    def get_metrics(self) -> dict:
        """Get operation metrics for observability.

        Returns:
            Dict with operation counts, latencies, and success rates
        """
        return self._metrics.to_dict()

    def reset_metrics(self) -> None:
        """Reset all metrics to zero."""
        self._metrics = RedisMetrics()

    def close(self) -> None:
        """Close Redis connection and cleanup resources."""
        if self._client:
            self._client.close()
            self._client = None
            logger.info("redis_connection_closed")
@@ -0,0 +1,219 @@
1
+ """Batch operations for efficient bulk processing.
2
+
3
+ This module provides efficient batch operations using Redis pipelines:
4
+ - Batch stash: Store multiple items in single round-trip
5
+ - Batch retrieve: Get multiple items in single round-trip
6
+
7
+ Benefits:
8
+ - Reduces network round-trips
9
+ - Atomic execution (all or nothing)
10
+ - Better throughput for bulk operations
11
+
12
+ Classes:
13
+ BatchOperations: Bulk stash/retrieve with Redis pipelines
14
+
15
+ Example:
16
+ >>> from attune.memory.short_term.batch import BatchOperations
17
+ >>> from attune.memory.types import AgentCredentials, AccessTier
18
+ >>> batch_ops = BatchOperations(base_ops)
19
+ >>> creds = AgentCredentials("agent_1", AccessTier.CONTRIBUTOR)
20
+ >>> items = [("key1", {"a": 1}), ("key2", {"b": 2})]
21
+ >>> count = batch_ops.stash_batch(items, creds)
22
+ >>> data = batch_ops.retrieve_batch(["key1", "key2"], creds)
23
+
24
+ Copyright 2025 Smart-AI-Memory
25
+ Licensed under Fair Source License 0.9
26
+ """
27
+
28
+ from __future__ import annotations
29
+
30
+ import json
31
+ import time
32
+ from datetime import datetime
33
+ from typing import TYPE_CHECKING, Any
34
+
35
+ import structlog
36
+
37
+ from attune.memory.types import (
38
+ AgentCredentials,
39
+ TTLStrategy,
40
+ )
41
+
42
+ if TYPE_CHECKING:
43
+ from attune.memory.short_term.base import BaseOperations
44
+
45
+ logger = structlog.get_logger(__name__)
46
+
47
+
48
class BatchOperations:
    """Batch operations using Redis pipelines.

    Provides efficient bulk stash/retrieve operations that reduce
    network round-trips by batching multiple operations into a
    single Redis pipeline execution.

    The class is designed to be composed with BaseOperations
    for dependency injection and access to the Redis client.

    Attributes:
        PREFIX_WORKING: Key prefix for working memory namespace

    Example:
        >>> batch_ops = BatchOperations(base_ops)
        >>> creds = AgentCredentials("agent_1", AccessTier.CONTRIBUTOR)
        >>> items = [("analysis", {"score": 95}), ("summary", {"text": "..."})]
        >>> count = batch_ops.stash_batch(items, creds)
        2
        >>> batch_ops.retrieve_batch(["analysis", "summary"], creds)
        {'analysis': {'score': 95}, 'summary': {'text': '...'}}
    """

    PREFIX_WORKING = "empathy:working:"

    def __init__(self, base: BaseOperations) -> None:
        """Initialize batch operations.

        Args:
            base: BaseOperations instance for storage access
        """
        self._base = base

    def stash_batch(
        self,
        items: list[tuple[str, Any]],
        credentials: AgentCredentials,
        ttl: TTLStrategy = TTLStrategy.WORKING_RESULTS,
    ) -> int:
        """Stash multiple items in a single operation.

        Uses Redis pipeline for efficiency (reduces network round-trips).

        Args:
            items: List of (key, data) tuples
            credentials: Agent credentials
            ttl: Time-to-live strategy (applied to all items)

        Returns:
            Number of items successfully stashed

        Raises:
            TypeError: If items is not a list
            PermissionError: If credentials lack write access

        Example:
            >>> items = [("key1", {"a": 1}), ("key2", {"b": 2})]
            >>> count = batch_ops.stash_batch(items, creds)
            2
        """
        # Pattern 5: Type validation
        if not isinstance(items, list):
            raise TypeError(f"items must be list, got {type(items).__name__}")

        if not credentials.can_stage():
            raise PermissionError(
                f"Agent {credentials.agent_id} cannot write to memory. "
                "Requires CONTRIBUTOR tier or higher.",
            )

        if not items:
            return 0

        start_time = time.perf_counter()

        # Handle mock storage mode: write directly into the base's dict
        if self._base.use_mock:
            count = 0
            for key, data in items:
                full_key = f"{self.PREFIX_WORKING}{credentials.agent_id}:{key}"
                payload = {
                    "data": data,
                    "agent_id": credentials.agent_id,
                    "stashed_at": datetime.now().isoformat(),
                }
                expires = datetime.now().timestamp() + ttl.value
                self._base._mock_storage[full_key] = (json.dumps(payload), expires)
                count += 1
            latency_ms = (time.perf_counter() - start_time) * 1000
            self._base._metrics.record_operation("stash_batch", latency_ms)
            return count

        # Handle real Redis client
        if self._base._client is None:
            return 0

        # One SETEX per item, executed atomically in a single round-trip
        pipe = self._base._client.pipeline()
        for key, data in items:
            full_key = f"{self.PREFIX_WORKING}{credentials.agent_id}:{key}"
            payload = {
                "data": data,
                "agent_id": credentials.agent_id,
                "stashed_at": datetime.now().isoformat(),
            }
            pipe.setex(full_key, ttl.value, json.dumps(payload))

        results = pipe.execute()
        count = sum(1 for r in results if r)
        latency_ms = (time.perf_counter() - start_time) * 1000
        self._base._metrics.record_operation("stash_batch", latency_ms)

        logger.info("batch_stash_complete", count=count, total=len(items))
        return count

    def retrieve_batch(
        self,
        keys: list[str],
        credentials: AgentCredentials,
        agent_id: str | None = None,
    ) -> dict[str, Any]:
        """Retrieve multiple items in a single operation.

        Uses Redis MGET for efficiency (single round-trip for all keys).

        Args:
            keys: List of keys to retrieve
            credentials: Agent credentials
            agent_id: Owner agent ID (defaults to credentials agent)

        Returns:
            Dict mapping key to data (missing keys omitted)

        Example:
            >>> data = batch_ops.retrieve_batch(["key1", "key2"], creds)
            >>> print(data["key1"])
            {'a': 1}
        """
        if not keys:
            return {}

        start_time = time.perf_counter()
        owner = agent_id or credentials.agent_id
        results: dict[str, Any] = {}

        # Handle mock storage mode: read directly, honoring expiry
        if self._base.use_mock:
            for key in keys:
                full_key = f"{self.PREFIX_WORKING}{owner}:{key}"
                if full_key in self._base._mock_storage:
                    value, expires = self._base._mock_storage[full_key]
                    if expires is None or datetime.now().timestamp() < expires:
                        payload = json.loads(str(value))
                        results[key] = payload.get("data")
            latency_ms = (time.perf_counter() - start_time) * 1000
            self._base._metrics.record_operation("retrieve_batch", latency_ms)
            return results

        # Handle real Redis client
        if self._base._client is None:
            return {}

        full_keys = [f"{self.PREFIX_WORKING}{owner}:{key}" for key in keys]
        values = self._base._client.mget(full_keys)

        for key, value in zip(keys, values, strict=False):
            if value:
                # redis-py returns bytes unless decode_responses is enabled;
                # decode before JSON parsing (str(bytes) would yield "b'...'"
                # and break json.loads).
                raw = value.decode() if isinstance(value, bytes) else value
                payload = json.loads(raw)
                results[key] = payload.get("data")

        latency_ms = (time.perf_counter() - start_time) * 1000
        self._base._metrics.record_operation("retrieve_batch", latency_ms)
        return results