attune-ai 2.1.5__py3-none-any.whl → 2.2.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (125)
  1. attune/cli/__init__.py +3 -59
  2. attune/cli/commands/batch.py +4 -12
  3. attune/cli/commands/cache.py +8 -16
  4. attune/cli/commands/provider.py +17 -0
  5. attune/cli/commands/routing.py +3 -1
  6. attune/cli/commands/setup.py +122 -0
  7. attune/cli/commands/tier.py +1 -3
  8. attune/cli/commands/workflow.py +31 -0
  9. attune/cli/parsers/cache.py +1 -0
  10. attune/cli/parsers/help.py +1 -3
  11. attune/cli/parsers/provider.py +7 -0
  12. attune/cli/parsers/routing.py +1 -3
  13. attune/cli/parsers/setup.py +7 -0
  14. attune/cli/parsers/status.py +1 -3
  15. attune/cli/parsers/tier.py +1 -3
  16. attune/cli_minimal.py +9 -3
  17. attune/cli_router.py +9 -7
  18. attune/cli_unified.py +3 -0
  19. attune/dashboard/app.py +3 -1
  20. attune/dashboard/simple_server.py +3 -1
  21. attune/dashboard/standalone_server.py +7 -3
  22. attune/mcp/server.py +54 -102
  23. attune/memory/long_term.py +0 -2
  24. attune/memory/short_term/__init__.py +84 -0
  25. attune/memory/short_term/base.py +465 -0
  26. attune/memory/short_term/batch.py +219 -0
  27. attune/memory/short_term/caching.py +227 -0
  28. attune/memory/short_term/conflicts.py +265 -0
  29. attune/memory/short_term/cross_session.py +122 -0
  30. attune/memory/short_term/facade.py +653 -0
  31. attune/memory/short_term/pagination.py +207 -0
  32. attune/memory/short_term/patterns.py +271 -0
  33. attune/memory/short_term/pubsub.py +286 -0
  34. attune/memory/short_term/queues.py +244 -0
  35. attune/memory/short_term/security.py +300 -0
  36. attune/memory/short_term/sessions.py +250 -0
  37. attune/memory/short_term/streams.py +242 -0
  38. attune/memory/short_term/timelines.py +234 -0
  39. attune/memory/short_term/transactions.py +184 -0
  40. attune/memory/short_term/working.py +252 -0
  41. attune/meta_workflows/cli_commands/__init__.py +3 -0
  42. attune/meta_workflows/cli_commands/agent_commands.py +0 -4
  43. attune/meta_workflows/cli_commands/analytics_commands.py +0 -6
  44. attune/meta_workflows/cli_commands/config_commands.py +0 -5
  45. attune/meta_workflows/cli_commands/memory_commands.py +0 -5
  46. attune/meta_workflows/cli_commands/template_commands.py +0 -5
  47. attune/meta_workflows/cli_commands/workflow_commands.py +0 -6
  48. attune/meta_workflows/plan_generator.py +2 -4
  49. attune/models/adaptive_routing.py +4 -8
  50. attune/models/auth_cli.py +3 -9
  51. attune/models/auth_strategy.py +2 -4
  52. attune/models/telemetry/analytics.py +0 -2
  53. attune/models/telemetry/backend.py +0 -3
  54. attune/models/telemetry/storage.py +0 -2
  55. attune/monitoring/alerts.py +6 -10
  56. attune/orchestration/_strategies/__init__.py +156 -0
  57. attune/orchestration/_strategies/base.py +227 -0
  58. attune/orchestration/_strategies/conditional_strategies.py +365 -0
  59. attune/orchestration/_strategies/conditions.py +369 -0
  60. attune/orchestration/_strategies/core_strategies.py +479 -0
  61. attune/orchestration/_strategies/data_classes.py +64 -0
  62. attune/orchestration/_strategies/nesting.py +233 -0
  63. attune/orchestration/execution_strategies.py +58 -1567
  64. attune/orchestration/meta_orchestrator.py +1 -3
  65. attune/project_index/scanner.py +1 -3
  66. attune/project_index/scanner_parallel.py +7 -5
  67. attune/socratic/storage.py +2 -4
  68. attune/socratic_router.py +1 -3
  69. attune/telemetry/agent_coordination.py +9 -3
  70. attune/telemetry/agent_tracking.py +16 -3
  71. attune/telemetry/approval_gates.py +22 -5
  72. attune/telemetry/cli.py +1 -3
  73. attune/telemetry/commands/dashboard_commands.py +24 -8
  74. attune/telemetry/event_streaming.py +8 -2
  75. attune/telemetry/feedback_loop.py +10 -2
  76. attune/tools.py +2 -1
  77. attune/workflow_commands.py +1 -3
  78. attune/workflow_patterns/structural.py +4 -8
  79. attune/workflows/__init__.py +54 -10
  80. attune/workflows/autonomous_test_gen.py +158 -102
  81. attune/workflows/base.py +48 -672
  82. attune/workflows/batch_processing.py +1 -3
  83. attune/workflows/compat.py +156 -0
  84. attune/workflows/cost_mixin.py +141 -0
  85. attune/workflows/data_classes.py +92 -0
  86. attune/workflows/document_gen/workflow.py +11 -14
  87. attune/workflows/history.py +16 -9
  88. attune/workflows/llm_base.py +1 -3
  89. attune/workflows/migration.py +432 -0
  90. attune/workflows/output.py +2 -7
  91. attune/workflows/parsing_mixin.py +427 -0
  92. attune/workflows/perf_audit.py +3 -1
  93. attune/workflows/progress.py +9 -11
  94. attune/workflows/release_prep.py +5 -1
  95. attune/workflows/routing.py +0 -2
  96. attune/workflows/secure_release.py +4 -1
  97. attune/workflows/security_audit.py +20 -14
  98. attune/workflows/security_audit_phase3.py +28 -22
  99. attune/workflows/seo_optimization.py +27 -27
  100. attune/workflows/test_gen/test_templates.py +1 -4
  101. attune/workflows/test_gen/workflow.py +0 -2
  102. attune/workflows/test_gen_behavioral.py +6 -19
  103. attune/workflows/test_gen_parallel.py +8 -6
  104. {attune_ai-2.1.5.dist-info → attune_ai-2.2.1.dist-info}/METADATA +4 -3
  105. {attune_ai-2.1.5.dist-info → attune_ai-2.2.1.dist-info}/RECORD +121 -96
  106. {attune_ai-2.1.5.dist-info → attune_ai-2.2.1.dist-info}/entry_points.txt +0 -2
  107. attune_healthcare/monitors/monitoring/__init__.py +9 -9
  108. attune_llm/agent_factory/__init__.py +6 -6
  109. attune_llm/agent_factory/adapters/haystack_adapter.py +1 -4
  110. attune_llm/commands/__init__.py +10 -10
  111. attune_llm/commands/models.py +3 -3
  112. attune_llm/config/__init__.py +8 -8
  113. attune_llm/learning/__init__.py +3 -3
  114. attune_llm/learning/extractor.py +5 -3
  115. attune_llm/learning/storage.py +5 -3
  116. attune_llm/security/__init__.py +17 -17
  117. attune_llm/utils/tokens.py +3 -1
  118. attune/cli_legacy.py +0 -3978
  119. attune/memory/short_term.py +0 -2192
  120. attune/workflows/manage_docs.py +0 -87
  121. attune/workflows/test5.py +0 -125
  122. {attune_ai-2.1.5.dist-info → attune_ai-2.2.1.dist-info}/WHEEL +0 -0
  123. {attune_ai-2.1.5.dist-info → attune_ai-2.2.1.dist-info}/licenses/LICENSE +0 -0
  124. {attune_ai-2.1.5.dist-info → attune_ai-2.2.1.dist-info}/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -0
  125. {attune_ai-2.1.5.dist-info → attune_ai-2.2.1.dist-info}/top_level.txt +0 -0
attune/memory/short_term/facade.py (new file)
@@ -0,0 +1,653 @@
+ """RedisShortTermMemory facade - composes all specialized modules.
+
+ This facade provides the same public API as the original RedisShortTermMemory
+ class while delegating to specialized modules for implementation.
+
+ The facade pattern allows:
+ 1. Backward compatibility: All existing imports continue to work
+ 2. Incremental refactoring: Methods can be extracted one at a time
+ 3. Clear separation of concerns: Each module handles one responsibility
+
+ Architecture:
+     RedisShortTermMemory (facade)
+     ├── BaseOperations (base.py)
+     ├── CacheManager (caching.py)
+     ├── DataSanitizer (security.py)
+     ├── WorkingMemory (working.py)
+     ├── PatternStaging (patterns.py)
+     ├── ConflictNegotiation (conflicts.py)
+     ├── SessionManager (sessions.py)
+     ├── BatchOperations (batch.py)
+     ├── Pagination (pagination.py)
+     ├── PubSubManager (pubsub.py)
+     ├── StreamManager (streams.py)
+     ├── TimelineManager (timelines.py)
+     ├── QueueManager (queues.py)
+     ├── TransactionManager (transactions.py)
+     └── CrossSessionManager (cross_session.py)
+
+ Usage:
+     # During transition, use the facade:
+     from attune.memory.short_term import RedisShortTermMemory
+
+     # This import works the same as before the refactoring
+     memory = RedisShortTermMemory(config=config)
+     memory.stash("key", "value")
+     memory.retrieve("key")
+
+ Copyright 2025 Smart-AI-Memory
+ Licensed under Fair Source License 0.9
+ """
+
+ from __future__ import annotations
+
+ from collections.abc import Callable
+ from datetime import datetime
+ from typing import TYPE_CHECKING, Any
+
+ import structlog
+
+ from attune.memory.short_term.base import (
+     REDIS_AVAILABLE,  # noqa: F401 - re-exported
+     BaseOperations,
+ )
+ from attune.memory.short_term.batch import BatchOperations
+ from attune.memory.short_term.caching import CacheManager
+ from attune.memory.short_term.conflicts import ConflictNegotiation
+ from attune.memory.short_term.cross_session import CrossSessionManager
+ from attune.memory.short_term.pagination import Pagination
+ from attune.memory.short_term.patterns import PatternStaging
+ from attune.memory.short_term.pubsub import PubSubManager
+ from attune.memory.short_term.queues import QueueManager
+ from attune.memory.short_term.security import DataSanitizer
+ from attune.memory.short_term.sessions import SessionManager
+ from attune.memory.short_term.streams import StreamManager
+ from attune.memory.short_term.timelines import TimelineManager
+ from attune.memory.short_term.transactions import TransactionManager
+ from attune.memory.short_term.working import WorkingMemory
+
+ # Module-level logger for backward compatibility
+ logger = structlog.get_logger(__name__)
+
+ if TYPE_CHECKING:
+     from attune.memory.types import (
+         AgentCredentials,
+         CollaborationSession,
+         ConflictContext,
+         PaginatedResult,
+         RedisConfig,
+         StagedPattern,
+         TimeWindowQuery,
+         TTLStrategy,
+     )
+
+
+ class RedisShortTermMemory:
+     """Facade composing all short-term memory operations.
+
+     This class maintains the same public API as the original
+     RedisShortTermMemory while delegating to specialized modules.
+     Each module handles a single responsibility:
+
+     - BaseOperations: Connection management, basic CRUD
+     - CacheManager: Local LRU cache layer
+     - DataSanitizer: PII scrubbing, secrets detection
+     - WorkingMemory: Stash/retrieve agent data
+     - PatternStaging: Stage/promote/reject patterns
+     - ConflictNegotiation: Conflict resolution workflow
+     - SessionManager: Collaboration sessions
+     - BatchOperations: Bulk stash/retrieve
+     - Pagination: SCAN-based key iteration
+     - PubSubManager: Real-time messaging
+     - StreamManager: Ordered event logs
+     - TimelineManager: Time-window queries
+     - QueueManager: Task queues
+     - TransactionManager: Atomic operations
+     - CrossSessionManager: Cross-session coordination
+
+     Example:
+         >>> from attune.memory.short_term import RedisShortTermMemory
+         >>> from attune.memory.types import RedisConfig
+         >>> config = RedisConfig(use_mock=True)
+         >>> memory = RedisShortTermMemory(config=config)
+         >>> memory.stash("key", {"data": 123}, credentials)
+         True
+         >>> memory.retrieve("key", credentials)
+         {'data': 123}
+     """
+
+     # Key prefixes for backward compatibility
+     # These match BaseOperations prefixes for tests that reference them directly
+     PREFIX_WORKING = "empathy:working:"
+     PREFIX_STAGED = "empathy:staged:"
+     PREFIX_CONFLICT = "empathy:conflict:"
+     PREFIX_SESSION = "empathy:session:"
+     PREFIX_PUBSUB = "empathy:pubsub:"
+     PREFIX_STREAM = "empathy:stream:"
+     PREFIX_TIMELINE = "empathy:timeline:"
+     PREFIX_QUEUE = "empathy:queue:"
+
+     def __init__(
+         self,
+         host: str = "localhost",
+         port: int = 6379,
+         db: int = 0,
+         password: str | None = None,
+         use_mock: bool = False,
+         config: RedisConfig | None = None,
+         enable_local_cache: bool = True,
+         local_cache_max_size: int = 1000,
+     ) -> None:
+         """Initialize Redis short-term memory facade.
+
+         Args:
+             host: Redis host
+             port: Redis port
+             db: Redis database number
+             password: Redis password (optional)
+             use_mock: Use in-memory mock for testing
+             config: Full RedisConfig for advanced settings (overrides other args)
+             enable_local_cache: Enable local LRU cache layer
+             local_cache_max_size: Maximum entries in local cache
+         """
+         # Initialize base operations (handles connection, basic CRUD)
+         self._base = BaseOperations(
+             host=host,
+             port=port,
+             db=db,
+             password=password,
+             use_mock=use_mock,
+             config=config,
+         )
+
+         # Initialize local cache layer
+         self._cache = CacheManager(
+             enabled=enable_local_cache,
+             max_size=local_cache_max_size,
+         )
+
+         # Initialize security sanitizer
+         self._security = DataSanitizer(self._base)
+
+         # Initialize working memory (stash/retrieve)
+         self._working = WorkingMemory(self._base, self._security)
+
+         # Initialize pattern staging
+         self._patterns = PatternStaging(self._base)
+
+         # Initialize conflict negotiation
+         self._conflicts = ConflictNegotiation(self._base)
+
+         # Initialize session management
+         self._sessions = SessionManager(self._base)
+
+         # Initialize batch operations
+         self._batch = BatchOperations(self._base)
+
+         # Initialize pagination
+         self._pagination = Pagination(self._base)
+
+         # Initialize pub/sub
+         self._pubsub = PubSubManager(self._base)
+
+         # Initialize streams
+         self._streams = StreamManager(self._base)
+
+         # Initialize timelines
+         self._timelines = TimelineManager(self._base)
+
+         # Initialize queues
+         self._queues = QueueManager(self._base)
+
+         # Initialize transactions (needs cache for invalidation)
+         self._transactions = TransactionManager(self._base, self._cache)
+
+         # Initialize cross-session coordination
+         self._cross_session = CrossSessionManager(self._base)
+
+     # =========================================================================
+     # Properties - delegate to base operations
+     # =========================================================================
+
+     @property
+     def use_mock(self) -> bool:
+         """Whether using mock storage instead of Redis."""
+         return self._base.use_mock
+
+     @property
+     def client(self) -> Any:
+         """Get the Redis client instance."""
+         return self._base.client
+
+     @property
+     def _subscriptions(self) -> dict:
+         """Expose pubsub subscriptions for backward compatibility."""
+         return self._pubsub._subscriptions
+
+     # =========================================================================
+     # Working Memory Operations - delegate to WorkingMemory
+     # =========================================================================
+
+     def stash(
+         self,
+         key: str,
+         data: Any,
+         credentials: AgentCredentials,
+         ttl: TTLStrategy | None = None,
+         skip_sanitization: bool = False,
+     ) -> bool:
+         """Stash data in short-term memory."""
+         from attune.memory.types import TTLStrategy as TTL
+
+         effective_ttl = ttl if ttl is not None else TTL.WORKING_RESULTS
+         return self._working.stash(key, data, credentials, effective_ttl, skip_sanitization)
+
+     def retrieve(
+         self,
+         key: str,
+         credentials: AgentCredentials,
+         agent_id: str | None = None,
+     ) -> Any | None:
+         """Retrieve data from short-term memory."""
+         return self._working.retrieve(key, credentials, agent_id)
+
+     def clear_working_memory(self, credentials: AgentCredentials) -> int:
+         """Clear all working memory for an agent."""
+         return self._working.clear(credentials)
+
+     # =========================================================================
+     # Pattern Staging Operations - delegate to PatternStaging
+     # =========================================================================
+
+     def stage_pattern(
+         self,
+         pattern: StagedPattern,
+         credentials: AgentCredentials,
+     ) -> bool:
+         """Stage a pattern for validation."""
+         return self._patterns.stage_pattern(pattern, credentials)
+
+     def get_staged_pattern(
+         self,
+         pattern_id: str,
+         credentials: AgentCredentials,
+     ) -> StagedPattern | None:
+         """Retrieve a staged pattern."""
+         return self._patterns.get_staged_pattern(pattern_id, credentials)
+
+     def list_staged_patterns(
+         self,
+         credentials: AgentCredentials,
+     ) -> list[StagedPattern]:
+         """List all staged patterns awaiting validation."""
+         return self._patterns.list_staged_patterns(credentials)
+
+     def promote_pattern(
+         self,
+         pattern_id: str,
+         credentials: AgentCredentials,
+     ) -> StagedPattern | None:
+         """Promote staged pattern (remove from staging for library add)."""
+         return self._patterns.promote_pattern(pattern_id, credentials)
+
+     def reject_pattern(
+         self,
+         pattern_id: str,
+         credentials: AgentCredentials,
+         reason: str = "",
+     ) -> bool:
+         """Reject a staged pattern."""
+         return self._patterns.reject_pattern(pattern_id, credentials, reason)
+
+     # =========================================================================
+     # Conflict Negotiation Operations - delegate to ConflictNegotiation
+     # =========================================================================
+
+     def create_conflict_context(
+         self,
+         conflict_id: str,
+         agents: list[str],
+         credentials: AgentCredentials,
+         topic: str = "",
+     ) -> ConflictContext | None:
+         """Create a conflict negotiation context."""
+         return self._conflicts.create_conflict_context(conflict_id, agents, credentials, topic)
+
+     def get_conflict_context(
+         self,
+         conflict_id: str,
+         credentials: AgentCredentials,
+     ) -> ConflictContext | None:
+         """Retrieve a conflict context."""
+         return self._conflicts.get_conflict_context(conflict_id, credentials)
+
+     def resolve_conflict(
+         self,
+         conflict_id: str,
+         resolution: str,
+         credentials: AgentCredentials,
+     ) -> bool:
+         """Mark a conflict as resolved."""
+         return self._conflicts.resolve_conflict(conflict_id, resolution, credentials)
+
+     def list_active_conflicts(
+         self,
+         credentials: AgentCredentials,
+     ) -> list[ConflictContext]:
+         """List all active (unresolved) conflicts."""
+         return self._conflicts.list_active_conflicts(credentials)
+
+     # =========================================================================
+     # Session Management Operations - delegate to SessionManager
+     # =========================================================================
+
+     def create_session(
+         self,
+         session_id: str,
+         credentials: AgentCredentials,
+         metadata: dict | None = None,
+     ) -> CollaborationSession | None:
+         """Create a collaboration session."""
+         return self._sessions.create_session(session_id, credentials, metadata)
+
+     def join_session(
+         self,
+         session_id: str,
+         credentials: AgentCredentials,
+     ) -> bool:
+         """Join an existing session."""
+         return self._sessions.join_session(session_id, credentials)
+
+     def get_session(
+         self,
+         session_id: str,
+         credentials: AgentCredentials,
+     ) -> CollaborationSession | None:
+         """Get session details."""
+         return self._sessions.get_session(session_id, credentials)
+
+     def leave_session(
+         self,
+         session_id: str,
+         credentials: AgentCredentials,
+     ) -> bool:
+         """Leave a session."""
+         return self._sessions.leave_session(session_id, credentials)
+
+     def list_sessions(
+         self,
+         credentials: AgentCredentials,
+     ) -> list[CollaborationSession]:
+         """List all active sessions."""
+         return self._sessions.list_sessions(credentials)
+
+     # =========================================================================
+     # Base Operations - delegate to BaseOperations
+     # =========================================================================
+
+     def ping(self) -> bool:
+         """Check Redis connection health."""
+         return self._base.ping()
+
+     def get_stats(self) -> dict:
+         """Get memory statistics."""
+         stats = self._base.get_stats()
+         stats["local_cache"] = self._cache.get_stats()
+         return stats
+
+     def get_metrics(self) -> dict:
+         """Get operation metrics for observability."""
+         return self._base.get_metrics()
+
+     def reset_metrics(self) -> None:
+         """Reset all metrics to zero."""
+         self._base.reset_metrics()
+
+     def close(self) -> None:
+         """Close Redis connection and cleanup resources."""
+         self._pubsub.close()
+         self._base.close()
+
+     # =========================================================================
+     # Batch Operations - delegate to BatchOperations
+     # =========================================================================
+
+     def stash_batch(
+         self,
+         items: list[tuple[str, Any]],
+         credentials: AgentCredentials,
+         ttl: TTLStrategy | None = None,
+     ) -> int:
+         """Stash multiple items in a single operation."""
+         from attune.memory.types import TTLStrategy as TTL
+
+         effective_ttl = ttl if ttl is not None else TTL.WORKING_RESULTS
+         return self._batch.stash_batch(items, credentials, effective_ttl)
+
+     def retrieve_batch(
+         self,
+         keys: list[str],
+         credentials: AgentCredentials,
+         agent_id: str | None = None,
+     ) -> dict[str, Any]:
+         """Retrieve multiple items in a single operation."""
+         return self._batch.retrieve_batch(keys, credentials, agent_id)
+
+     # =========================================================================
+     # Pagination Operations - delegate to Pagination
+     # =========================================================================
+
+     def list_staged_patterns_paginated(
+         self,
+         credentials: AgentCredentials,
+         cursor: str = "0",
+         count: int = 100,
+     ) -> PaginatedResult:
+         """List staged patterns with pagination using SCAN."""
+         return self._pagination.list_staged_patterns_paginated(credentials, cursor, count)
+
+     def scan_keys(
+         self,
+         pattern: str,
+         cursor: str = "0",
+         count: int = 100,
+     ) -> PaginatedResult:
+         """Scan keys matching a pattern with pagination."""
+         return self._pagination.scan_keys(pattern, cursor, count)
+
+     # =========================================================================
+     # Pub/Sub Operations - delegate to PubSubManager
+     # =========================================================================
+
+     def publish(
+         self,
+         channel: str,
+         message: dict,
+         credentials: AgentCredentials,
+     ) -> int:
+         """Publish a message to a channel."""
+         return self._pubsub.publish(channel, message, credentials)
+
+     def subscribe(
+         self,
+         channel: str,
+         handler: Callable[[dict], None],
+         credentials: AgentCredentials | None = None,
+     ) -> bool:
+         """Subscribe to a channel for real-time notifications."""
+         return self._pubsub.subscribe(channel, handler, credentials)
+
+     def unsubscribe(self, channel: str) -> bool:
+         """Unsubscribe from a channel."""
+         return self._pubsub.unsubscribe(channel)
+
+     def close_pubsub(self) -> None:
+         """Close pubsub connection and stop listener thread."""
+         self._pubsub.close()
+
+     # =========================================================================
+     # Stream Operations - delegate to StreamManager
+     # =========================================================================
+
+     def stream_append(
+         self,
+         stream_name: str,
+         data: dict,
+         credentials: AgentCredentials,
+         max_len: int = 10000,
+     ) -> str | None:
+         """Append an entry to a Redis Stream."""
+         return self._streams.append(stream_name, data, credentials, max_len)
+
+     def stream_read(
+         self,
+         stream_name: str,
+         credentials: AgentCredentials,
+         start_id: str = "0",
+         count: int = 100,
+     ) -> list[tuple[str, dict]]:
+         """Read entries from a Redis Stream."""
+         return self._streams.read(stream_name, credentials, start_id, count)
+
+     def stream_read_new(
+         self,
+         stream_name: str,
+         credentials: AgentCredentials,
+         block_ms: int = 0,
+         count: int = 100,
+     ) -> list[tuple[str, dict]]:
+         """Read only new entries from a stream (blocking read)."""
+         return self._streams.read_new(stream_name, credentials, block_ms, count)
+
+     # =========================================================================
+     # Timeline Operations - delegate to TimelineManager
+     # =========================================================================
+
+     def timeline_add(
+         self,
+         timeline_name: str,
+         event_id: str,
+         data: dict,
+         credentials: AgentCredentials,
+         timestamp: datetime | None = None,
+     ) -> bool:
+         """Add an event to a timeline (sorted set by timestamp)."""
+         return self._timelines.add(timeline_name, event_id, data, credentials, timestamp)
+
+     def timeline_query(
+         self,
+         timeline_name: str,
+         credentials: AgentCredentials,
+         query: TimeWindowQuery | None = None,
+     ) -> list[dict]:
+         """Query events from a timeline within a time window."""
+         return self._timelines.query(timeline_name, credentials, query)
+
+     def timeline_count(
+         self,
+         timeline_name: str,
+         credentials: AgentCredentials,
+         query: TimeWindowQuery | None = None,
+     ) -> int:
+         """Count events in a timeline within a time window."""
+         return self._timelines.count(timeline_name, credentials, query)
+
+     # =========================================================================
+     # Queue Operations - delegate to QueueManager
+     # =========================================================================
+
+     def queue_push(
+         self,
+         queue_name: str,
+         task: dict,
+         credentials: AgentCredentials,
+         priority: bool = False,
+     ) -> int:
+         """Push a task to a queue."""
+         return self._queues.push(queue_name, task, credentials, priority)
+
+     def queue_pop(
+         self,
+         queue_name: str,
+         credentials: AgentCredentials,
+         timeout: int = 0,
+     ) -> dict | None:
+         """Pop a task from a queue."""
+         return self._queues.pop(queue_name, credentials, timeout)
+
+     def queue_length(self, queue_name: str) -> int:
+         """Get the length of a queue."""
+         return self._queues.length(queue_name)
+
+     def queue_peek(
+         self,
+         queue_name: str,
+         credentials: AgentCredentials,
+         count: int = 1,
+     ) -> list[dict]:
+         """Peek at tasks in a queue without removing them."""
+         return self._queues.peek(queue_name, credentials, count)
+
+     # =========================================================================
+     # Transaction Operations - delegate to TransactionManager
+     # =========================================================================
+
+     def atomic_promote_pattern(
+         self,
+         pattern_id: str,
+         credentials: AgentCredentials,
+         min_confidence: float = 0.0,
+     ) -> tuple[bool, StagedPattern | None, str]:
+         """Atomically promote a pattern with validation."""
+         return self._transactions.atomic_promote_pattern(pattern_id, credentials, min_confidence)
+
+     # =========================================================================
+     # Cross-Session Operations - delegate to CrossSessionManager
+     # =========================================================================
+
+     def enable_cross_session(
+         self,
+         session_id: str,
+         credentials: AgentCredentials,
+     ) -> bool:
+         """Enable cross-session data sharing."""
+         return self._cross_session.enable(session_id, credentials)
+
+     def cross_session_available(
+         self,
+         session_id: str,
+         credentials: AgentCredentials,
+     ) -> bool:
+         """Check if cross-session is available."""
+         return self._cross_session.available(session_id, credentials)
+
+     # =========================================================================
+     # Cache Operations - expose cache stats
+     # =========================================================================
+
+     def get_cache_stats(self) -> dict:
+         """Get local cache statistics."""
+         return self._cache.get_stats()
+
+     def clear_cache(self) -> int:
+         """Clear local cache."""
+         return self._cache.clear()
+
+     # =========================================================================
+     # Internal - for backward compatibility with tests
+     # =========================================================================
+
+     @property
+     def _mock_storage(self) -> dict:
+         """Access mock storage for testing."""
+         return self._base._mock_storage
+
+     @property
+     def _client(self) -> Any:
+         """Access Redis client for testing."""
+         return self._base._client
+
+     @property
+     def _metrics(self) -> Any:
+         """Access metrics for testing."""
+         return self._base._metrics
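
The facade above keeps the pre-refactor call signatures, so the visible change for callers is that attune.memory.short_term is now a package rather than the single removed attune/memory/short_term.py module listed above. A minimal usage sketch of the API shown in this hunk, assuming RedisConfig(use_mock=True) and an AgentCredentials value obtained elsewhere (its constructor is not part of this diff):

# Illustrative sketch only; exercises the facade API added in this diff.
from attune.memory.short_term import RedisShortTermMemory
from attune.memory.types import RedisConfig

config = RedisConfig(use_mock=True)           # in-process mock, no Redis server needed
memory = RedisShortTermMemory(config=config)

credentials = ...  # an attune.memory.types.AgentCredentials instance, constructed as your code already does

# Working-memory round trip (mirrors the class docstring example)
memory.stash("analysis:result", {"score": 0.92}, credentials)
result = memory.retrieve("analysis:result", credentials)

# Bulk and queue helpers delegate to BatchOperations and QueueManager
memory.stash_batch([("k1", {"a": 1}), ("k2", {"b": 2})], credentials)
memory.queue_push("tasks", {"todo": "review"}, credentials)

print(memory.get_stats())  # includes a "local_cache" section from CacheManager
memory.close()             # closes the pub/sub listener and the Redis connection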