attune-ai 2.1.4-py3-none-any.whl → 2.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. attune/cli/__init__.py +3 -55
  2. attune/cli/commands/batch.py +4 -12
  3. attune/cli/commands/cache.py +7 -15
  4. attune/cli/commands/provider.py +17 -0
  5. attune/cli/commands/routing.py +3 -1
  6. attune/cli/commands/setup.py +122 -0
  7. attune/cli/commands/tier.py +1 -3
  8. attune/cli/commands/workflow.py +31 -0
  9. attune/cli/parsers/cache.py +1 -0
  10. attune/cli/parsers/help.py +1 -3
  11. attune/cli/parsers/provider.py +7 -0
  12. attune/cli/parsers/routing.py +1 -3
  13. attune/cli/parsers/setup.py +7 -0
  14. attune/cli/parsers/status.py +1 -3
  15. attune/cli/parsers/tier.py +1 -3
  16. attune/cli_minimal.py +34 -28
  17. attune/cli_router.py +9 -7
  18. attune/cli_unified.py +3 -0
  19. attune/core.py +190 -0
  20. attune/dashboard/app.py +4 -2
  21. attune/dashboard/simple_server.py +3 -1
  22. attune/dashboard/standalone_server.py +7 -3
  23. attune/mcp/server.py +54 -102
  24. attune/memory/long_term.py +0 -2
  25. attune/memory/short_term/__init__.py +84 -0
  26. attune/memory/short_term/base.py +467 -0
  27. attune/memory/short_term/batch.py +219 -0
  28. attune/memory/short_term/caching.py +227 -0
  29. attune/memory/short_term/conflicts.py +265 -0
  30. attune/memory/short_term/cross_session.py +122 -0
  31. attune/memory/short_term/facade.py +655 -0
  32. attune/memory/short_term/pagination.py +215 -0
  33. attune/memory/short_term/patterns.py +271 -0
  34. attune/memory/short_term/pubsub.py +286 -0
  35. attune/memory/short_term/queues.py +244 -0
  36. attune/memory/short_term/security.py +300 -0
  37. attune/memory/short_term/sessions.py +250 -0
  38. attune/memory/short_term/streams.py +249 -0
  39. attune/memory/short_term/timelines.py +234 -0
  40. attune/memory/short_term/transactions.py +186 -0
  41. attune/memory/short_term/working.py +252 -0
  42. attune/meta_workflows/cli_commands/__init__.py +3 -0
  43. attune/meta_workflows/cli_commands/agent_commands.py +0 -4
  44. attune/meta_workflows/cli_commands/analytics_commands.py +0 -6
  45. attune/meta_workflows/cli_commands/config_commands.py +0 -5
  46. attune/meta_workflows/cli_commands/memory_commands.py +0 -5
  47. attune/meta_workflows/cli_commands/template_commands.py +0 -5
  48. attune/meta_workflows/cli_commands/workflow_commands.py +0 -6
  49. attune/meta_workflows/workflow.py +1 -1
  50. attune/models/adaptive_routing.py +4 -8
  51. attune/models/auth_cli.py +3 -9
  52. attune/models/auth_strategy.py +2 -4
  53. attune/models/provider_config.py +20 -1
  54. attune/models/telemetry/analytics.py +0 -2
  55. attune/models/telemetry/backend.py +0 -3
  56. attune/models/telemetry/storage.py +0 -2
  57. attune/orchestration/_strategies/__init__.py +156 -0
  58. attune/orchestration/_strategies/base.py +231 -0
  59. attune/orchestration/_strategies/conditional_strategies.py +373 -0
  60. attune/orchestration/_strategies/conditions.py +369 -0
  61. attune/orchestration/_strategies/core_strategies.py +491 -0
  62. attune/orchestration/_strategies/data_classes.py +64 -0
  63. attune/orchestration/_strategies/nesting.py +233 -0
  64. attune/orchestration/execution_strategies.py +58 -1567
  65. attune/orchestration/meta_orchestrator.py +1 -3
  66. attune/project_index/scanner.py +1 -3
  67. attune/project_index/scanner_parallel.py +7 -5
  68. attune/socratic_router.py +1 -3
  69. attune/telemetry/agent_coordination.py +9 -3
  70. attune/telemetry/agent_tracking.py +16 -3
  71. attune/telemetry/approval_gates.py +22 -5
  72. attune/telemetry/cli.py +3 -3
  73. attune/telemetry/commands/dashboard_commands.py +24 -8
  74. attune/telemetry/event_streaming.py +8 -2
  75. attune/telemetry/feedback_loop.py +10 -2
  76. attune/tools.py +1 -0
  77. attune/workflow_commands.py +1 -3
  78. attune/workflows/__init__.py +53 -10
  79. attune/workflows/autonomous_test_gen.py +160 -104
  80. attune/workflows/base.py +48 -664
  81. attune/workflows/batch_processing.py +2 -4
  82. attune/workflows/compat.py +156 -0
  83. attune/workflows/cost_mixin.py +141 -0
  84. attune/workflows/data_classes.py +92 -0
  85. attune/workflows/document_gen/workflow.py +11 -14
  86. attune/workflows/history.py +62 -37
  87. attune/workflows/llm_base.py +2 -4
  88. attune/workflows/migration.py +422 -0
  89. attune/workflows/output.py +3 -9
  90. attune/workflows/parsing_mixin.py +427 -0
  91. attune/workflows/perf_audit.py +3 -1
  92. attune/workflows/progress.py +10 -13
  93. attune/workflows/release_prep.py +5 -1
  94. attune/workflows/routing.py +0 -2
  95. attune/workflows/secure_release.py +2 -1
  96. attune/workflows/security_audit.py +19 -14
  97. attune/workflows/security_audit_phase3.py +28 -22
  98. attune/workflows/seo_optimization.py +29 -29
  99. attune/workflows/test_gen/test_templates.py +1 -4
  100. attune/workflows/test_gen/workflow.py +0 -2
  101. attune/workflows/test_gen_behavioral.py +7 -20
  102. attune/workflows/test_gen_parallel.py +6 -4
  103. {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/METADATA +4 -3
  104. {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/RECORD +119 -94
  105. {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/entry_points.txt +0 -2
  106. attune_healthcare/monitors/monitoring/__init__.py +9 -9
  107. attune_llm/agent_factory/__init__.py +6 -6
  108. attune_llm/commands/__init__.py +10 -10
  109. attune_llm/commands/models.py +3 -3
  110. attune_llm/config/__init__.py +8 -8
  111. attune_llm/learning/__init__.py +3 -3
  112. attune_llm/learning/extractor.py +5 -3
  113. attune_llm/learning/storage.py +5 -3
  114. attune_llm/security/__init__.py +17 -17
  115. attune_llm/utils/tokens.py +3 -1
  116. attune/cli_legacy.py +0 -3957
  117. attune/memory/short_term.py +0 -2192
  118. attune/workflows/manage_docs.py +0 -87
  119. attune/workflows/test5.py +0 -125
  120. {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/WHEEL +0 -0
  121. {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/licenses/LICENSE +0 -0
  122. {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -0
  123. {attune_ai-2.1.4.dist-info → attune_ai-2.2.0.dist-info}/top_level.txt +0 -0
attune/memory/short_term/facade.py
@@ -0,0 +1,655 @@
+ """RedisShortTermMemory facade - composes all specialized modules.
+
+ This facade provides the same public API as the original RedisShortTermMemory
+ class while delegating to specialized modules for implementation.
+
+ The facade pattern allows:
+ 1. Backward compatibility: All existing imports continue to work
+ 2. Incremental refactoring: Methods can be extracted one at a time
+ 3. Clear separation of concerns: Each module handles one responsibility
+
+ Architecture:
+     RedisShortTermMemory (facade)
+     ├── BaseOperations (base.py)
+     ├── CacheManager (caching.py)
+     ├── DataSanitizer (security.py)
+     ├── WorkingMemory (working.py)
+     ├── PatternStaging (patterns.py)
+     ├── ConflictNegotiation (conflicts.py)
+     ├── SessionManager (sessions.py)
+     ├── BatchOperations (batch.py)
+     ├── Pagination (pagination.py)
+     ├── PubSubManager (pubsub.py)
+     ├── StreamManager (streams.py)
+     ├── TimelineManager (timelines.py)
+     ├── QueueManager (queues.py)
+     ├── TransactionManager (transactions.py)
+     └── CrossSessionManager (cross_session.py)
+
+ Usage:
+     # During transition, use the facade:
+     from attune.memory.short_term import RedisShortTermMemory
+
+     # This import works the same as before the refactoring
+     memory = RedisShortTermMemory(config=config)
+     memory.stash("key", "value")
+     memory.retrieve("key")
+
+ Copyright 2025 Smart-AI-Memory
+ Licensed under Fair Source License 0.9
+ """
+
+ from __future__ import annotations
+
+ from collections.abc import Callable
+ from datetime import datetime
+ from typing import TYPE_CHECKING, Any
+
+ import structlog
+
+ from attune.memory.short_term.base import REDIS_AVAILABLE  # noqa: F401 - re-exported
+ from attune.memory.short_term.base import BaseOperations
+ from attune.memory.short_term.batch import BatchOperations
+ from attune.memory.short_term.caching import CacheManager
+ from attune.memory.short_term.conflicts import ConflictNegotiation
+ from attune.memory.short_term.cross_session import CrossSessionManager
+ from attune.memory.short_term.pagination import Pagination
+ from attune.memory.short_term.patterns import PatternStaging
+ from attune.memory.short_term.pubsub import PubSubManager
+ from attune.memory.short_term.queues import QueueManager
+ from attune.memory.short_term.security import DataSanitizer
+ from attune.memory.short_term.sessions import SessionManager
+ from attune.memory.short_term.streams import StreamManager
+ from attune.memory.short_term.timelines import TimelineManager
+ from attune.memory.short_term.transactions import TransactionManager
+ from attune.memory.short_term.working import WorkingMemory
+
+ # Module-level logger for backward compatibility
+ logger = structlog.get_logger(__name__)
+
+ if TYPE_CHECKING:
+     from attune.memory.types import (
+         AgentCredentials,
+         CollaborationSession,
+         ConflictContext,
+         PaginatedResult,
+         RedisConfig,
+         StagedPattern,
+         TimeWindowQuery,
+         TTLStrategy,
+     )
+
+
+ class RedisShortTermMemory:
+     """Facade composing all short-term memory operations.
+
+     This class maintains the same public API as the original
+     RedisShortTermMemory while delegating to specialized modules.
+     Each module handles a single responsibility:
+
+     - BaseOperations: Connection management, basic CRUD
+     - CacheManager: Local LRU cache layer
+     - DataSanitizer: PII scrubbing, secrets detection
+     - WorkingMemory: Stash/retrieve agent data
+     - PatternStaging: Stage/promote/reject patterns
+     - ConflictNegotiation: Conflict resolution workflow
+     - SessionManager: Collaboration sessions
+     - BatchOperations: Bulk stash/retrieve
+     - Pagination: SCAN-based key iteration
+     - PubSubManager: Real-time messaging
+     - StreamManager: Ordered event logs
+     - TimelineManager: Time-window queries
+     - QueueManager: Task queues
+     - TransactionManager: Atomic operations
+     - CrossSessionManager: Cross-session coordination
+
+     Example:
+         >>> from attune.memory.short_term import RedisShortTermMemory
+         >>> from attune.memory.types import RedisConfig
+         >>> config = RedisConfig(use_mock=True)
+         >>> memory = RedisShortTermMemory(config=config)
+         >>> memory.stash("key", {"data": 123}, credentials)
+         True
+         >>> memory.retrieve("key", credentials)
+         {'data': 123}
+     """
+
+     # Key prefixes for backward compatibility
+     # These match BaseOperations prefixes for tests that reference them directly
+     PREFIX_WORKING = "empathy:working:"
+     PREFIX_STAGED = "empathy:staged:"
+     PREFIX_CONFLICT = "empathy:conflict:"
+     PREFIX_SESSION = "empathy:session:"
+     PREFIX_PUBSUB = "empathy:pubsub:"
+     PREFIX_STREAM = "empathy:stream:"
+     PREFIX_TIMELINE = "empathy:timeline:"
+     PREFIX_QUEUE = "empathy:queue:"
+
+     def __init__(
+         self,
+         host: str = "localhost",
+         port: int = 6379,
+         db: int = 0,
+         password: str | None = None,
+         use_mock: bool = False,
+         config: RedisConfig | None = None,
+         enable_local_cache: bool = True,
+         local_cache_max_size: int = 1000,
+     ) -> None:
+         """Initialize Redis short-term memory facade.
+
+         Args:
+             host: Redis host
+             port: Redis port
+             db: Redis database number
+             password: Redis password (optional)
+             use_mock: Use in-memory mock for testing
+             config: Full RedisConfig for advanced settings (overrides other args)
+             enable_local_cache: Enable local LRU cache layer
+             local_cache_max_size: Maximum entries in local cache
+         """
+         # Initialize base operations (handles connection, basic CRUD)
+         self._base = BaseOperations(
+             host=host,
+             port=port,
+             db=db,
+             password=password,
+             use_mock=use_mock,
+             config=config,
+         )
+
+         # Initialize local cache layer
+         self._cache = CacheManager(
+             enabled=enable_local_cache,
+             max_size=local_cache_max_size,
+         )
+
+         # Initialize security sanitizer
+         self._security = DataSanitizer(self._base)
+
+         # Initialize working memory (stash/retrieve)
+         self._working = WorkingMemory(self._base, self._security)
+
+         # Initialize pattern staging
+         self._patterns = PatternStaging(self._base)
+
+         # Initialize conflict negotiation
+         self._conflicts = ConflictNegotiation(self._base)
+
+         # Initialize session management
+         self._sessions = SessionManager(self._base)
+
+         # Initialize batch operations
+         self._batch = BatchOperations(self._base)
+
+         # Initialize pagination
+         self._pagination = Pagination(self._base)
+
+         # Initialize pub/sub
+         self._pubsub = PubSubManager(self._base)
+
+         # Initialize streams
+         self._streams = StreamManager(self._base)
+
+         # Initialize timelines
+         self._timelines = TimelineManager(self._base)
+
+         # Initialize queues
+         self._queues = QueueManager(self._base)
+
+         # Initialize transactions (needs cache for invalidation)
+         self._transactions = TransactionManager(self._base, self._cache)
+
+         # Initialize cross-session coordination
+         self._cross_session = CrossSessionManager(self._base)
+
+     # =========================================================================
+     # Properties - delegate to base operations
+     # =========================================================================
+
+     @property
+     def use_mock(self) -> bool:
+         """Whether using mock storage instead of Redis."""
+         return self._base.use_mock
+
+     @property
+     def client(self) -> Any:
+         """Get the Redis client instance."""
+         return self._base.client
+
+     @property
+     def _subscriptions(self) -> dict:
+         """Expose pubsub subscriptions for backward compatibility."""
+         return self._pubsub._subscriptions
+
+     # =========================================================================
+     # Working Memory Operations - delegate to WorkingMemory
+     # =========================================================================
+
+     def stash(
+         self,
+         key: str,
+         data: Any,
+         credentials: AgentCredentials,
+         ttl: TTLStrategy | None = None,
+         skip_sanitization: bool = False,
+     ) -> bool:
+         """Stash data in short-term memory."""
+         from attune.memory.types import TTLStrategy as TTL
+
+         effective_ttl = ttl if ttl is not None else TTL.WORKING_RESULTS
+         return self._working.stash(key, data, credentials, effective_ttl, skip_sanitization)
+
+     def retrieve(
+         self,
+         key: str,
+         credentials: AgentCredentials,
+         agent_id: str | None = None,
+     ) -> Any | None:
+         """Retrieve data from short-term memory."""
+         return self._working.retrieve(key, credentials, agent_id)
+
+     def clear_working_memory(self, credentials: AgentCredentials) -> int:
+         """Clear all working memory for an agent."""
+         return self._working.clear(credentials)
+
+     # =========================================================================
+     # Pattern Staging Operations - delegate to PatternStaging
+     # =========================================================================
+
+     def stage_pattern(
+         self,
+         pattern: StagedPattern,
+         credentials: AgentCredentials,
+     ) -> bool:
+         """Stage a pattern for validation."""
+         return self._patterns.stage_pattern(pattern, credentials)
+
+     def get_staged_pattern(
+         self,
+         pattern_id: str,
+         credentials: AgentCredentials,
+     ) -> StagedPattern | None:
+         """Retrieve a staged pattern."""
+         return self._patterns.get_staged_pattern(pattern_id, credentials)
+
+     def list_staged_patterns(
+         self,
+         credentials: AgentCredentials,
+     ) -> list[StagedPattern]:
+         """List all staged patterns awaiting validation."""
+         return self._patterns.list_staged_patterns(credentials)
+
+     def promote_pattern(
+         self,
+         pattern_id: str,
+         credentials: AgentCredentials,
+     ) -> StagedPattern | None:
+         """Promote staged pattern (remove from staging for library add)."""
+         return self._patterns.promote_pattern(pattern_id, credentials)
+
+     def reject_pattern(
+         self,
+         pattern_id: str,
+         credentials: AgentCredentials,
+         reason: str = "",
+     ) -> bool:
+         """Reject a staged pattern."""
+         return self._patterns.reject_pattern(pattern_id, credentials, reason)
+
+     # =========================================================================
+     # Conflict Negotiation Operations - delegate to ConflictNegotiation
+     # =========================================================================
+
+     def create_conflict_context(
+         self,
+         conflict_id: str,
+         agents: list[str],
+         credentials: AgentCredentials,
+         topic: str = "",
+     ) -> ConflictContext | None:
+         """Create a conflict negotiation context."""
+         return self._conflicts.create_conflict_context(
+             conflict_id, agents, credentials, topic
+         )
+
+     def get_conflict_context(
+         self,
+         conflict_id: str,
+         credentials: AgentCredentials,
+     ) -> ConflictContext | None:
+         """Retrieve a conflict context."""
+         return self._conflicts.get_conflict_context(conflict_id, credentials)
+
+     def resolve_conflict(
+         self,
+         conflict_id: str,
+         resolution: str,
+         credentials: AgentCredentials,
+     ) -> bool:
+         """Mark a conflict as resolved."""
+         return self._conflicts.resolve_conflict(conflict_id, resolution, credentials)
+
+     def list_active_conflicts(
+         self,
+         credentials: AgentCredentials,
+     ) -> list[ConflictContext]:
+         """List all active (unresolved) conflicts."""
+         return self._conflicts.list_active_conflicts(credentials)
+
+     # =========================================================================
+     # Session Management Operations - delegate to SessionManager
+     # =========================================================================
+
+     def create_session(
+         self,
+         session_id: str,
+         credentials: AgentCredentials,
+         metadata: dict | None = None,
+     ) -> CollaborationSession | None:
+         """Create a collaboration session."""
+         return self._sessions.create_session(session_id, credentials, metadata)
+
+     def join_session(
+         self,
+         session_id: str,
+         credentials: AgentCredentials,
+     ) -> bool:
+         """Join an existing session."""
+         return self._sessions.join_session(session_id, credentials)
+
+     def get_session(
+         self,
+         session_id: str,
+         credentials: AgentCredentials,
+     ) -> CollaborationSession | None:
+         """Get session details."""
+         return self._sessions.get_session(session_id, credentials)
+
+     def leave_session(
+         self,
+         session_id: str,
+         credentials: AgentCredentials,
+     ) -> bool:
+         """Leave a session."""
+         return self._sessions.leave_session(session_id, credentials)
+
+     def list_sessions(
+         self,
+         credentials: AgentCredentials,
+     ) -> list[CollaborationSession]:
+         """List all active sessions."""
+         return self._sessions.list_sessions(credentials)
+
+     # =========================================================================
+     # Base Operations - delegate to BaseOperations
+     # =========================================================================
+
+     def ping(self) -> bool:
+         """Check Redis connection health."""
+         return self._base.ping()
+
+     def get_stats(self) -> dict:
+         """Get memory statistics."""
+         stats = self._base.get_stats()
+         stats["local_cache"] = self._cache.get_stats()
+         return stats
+
+     def get_metrics(self) -> dict:
+         """Get operation metrics for observability."""
+         return self._base.get_metrics()
+
+     def reset_metrics(self) -> None:
+         """Reset all metrics to zero."""
+         self._base.reset_metrics()
+
+     def close(self) -> None:
+         """Close Redis connection and cleanup resources."""
+         self._pubsub.close()
+         self._base.close()
+
+     # =========================================================================
+     # Batch Operations - delegate to BatchOperations
+     # =========================================================================
+
+     def stash_batch(
+         self,
+         items: list[tuple[str, Any]],
+         credentials: AgentCredentials,
+         ttl: TTLStrategy | None = None,
+     ) -> int:
+         """Stash multiple items in a single operation."""
+         from attune.memory.types import TTLStrategy as TTL
+
+         effective_ttl = ttl if ttl is not None else TTL.WORKING_RESULTS
+         return self._batch.stash_batch(items, credentials, effective_ttl)
+
+     def retrieve_batch(
+         self,
+         keys: list[str],
+         credentials: AgentCredentials,
+         agent_id: str | None = None,
+     ) -> dict[str, Any]:
+         """Retrieve multiple items in a single operation."""
+         return self._batch.retrieve_batch(keys, credentials, agent_id)
+
+     # =========================================================================
+     # Pagination Operations - delegate to Pagination
+     # =========================================================================
+
+     def list_staged_patterns_paginated(
+         self,
+         credentials: AgentCredentials,
+         cursor: str = "0",
+         count: int = 100,
+     ) -> PaginatedResult:
+         """List staged patterns with pagination using SCAN."""
+         return self._pagination.list_staged_patterns_paginated(credentials, cursor, count)
+
+     def scan_keys(
+         self,
+         pattern: str,
+         cursor: str = "0",
+         count: int = 100,
+     ) -> PaginatedResult:
+         """Scan keys matching a pattern with pagination."""
+         return self._pagination.scan_keys(pattern, cursor, count)
+
+     # =========================================================================
+     # Pub/Sub Operations - delegate to PubSubManager
+     # =========================================================================
+
+     def publish(
+         self,
+         channel: str,
+         message: dict,
+         credentials: AgentCredentials,
+     ) -> int:
+         """Publish a message to a channel."""
+         return self._pubsub.publish(channel, message, credentials)
+
+     def subscribe(
+         self,
+         channel: str,
+         handler: Callable[[dict], None],
+         credentials: AgentCredentials | None = None,
+     ) -> bool:
+         """Subscribe to a channel for real-time notifications."""
+         return self._pubsub.subscribe(channel, handler, credentials)
+
+     def unsubscribe(self, channel: str) -> bool:
+         """Unsubscribe from a channel."""
+         return self._pubsub.unsubscribe(channel)
+
+     def close_pubsub(self) -> None:
+         """Close pubsub connection and stop listener thread."""
+         self._pubsub.close()
+
+     # =========================================================================
+     # Stream Operations - delegate to StreamManager
+     # =========================================================================
+
+     def stream_append(
+         self,
+         stream_name: str,
+         data: dict,
+         credentials: AgentCredentials,
+         max_len: int = 10000,
+     ) -> str | None:
+         """Append an entry to a Redis Stream."""
+         return self._streams.append(stream_name, data, credentials, max_len)
+
+     def stream_read(
+         self,
+         stream_name: str,
+         credentials: AgentCredentials,
+         start_id: str = "0",
+         count: int = 100,
+     ) -> list[tuple[str, dict]]:
+         """Read entries from a Redis Stream."""
+         return self._streams.read(stream_name, credentials, start_id, count)
+
+     def stream_read_new(
+         self,
+         stream_name: str,
+         credentials: AgentCredentials,
+         block_ms: int = 0,
+         count: int = 100,
+     ) -> list[tuple[str, dict]]:
+         """Read only new entries from a stream (blocking read)."""
+         return self._streams.read_new(stream_name, credentials, block_ms, count)
+
+     # =========================================================================
+     # Timeline Operations - delegate to TimelineManager
+     # =========================================================================
+
+     def timeline_add(
+         self,
+         timeline_name: str,
+         event_id: str,
+         data: dict,
+         credentials: AgentCredentials,
+         timestamp: datetime | None = None,
+     ) -> bool:
+         """Add an event to a timeline (sorted set by timestamp)."""
+         return self._timelines.add(timeline_name, event_id, data, credentials, timestamp)
+
+     def timeline_query(
+         self,
+         timeline_name: str,
+         credentials: AgentCredentials,
+         query: TimeWindowQuery | None = None,
+     ) -> list[dict]:
+         """Query events from a timeline within a time window."""
+         return self._timelines.query(timeline_name, credentials, query)
+
+     def timeline_count(
+         self,
+         timeline_name: str,
+         credentials: AgentCredentials,
+         query: TimeWindowQuery | None = None,
+     ) -> int:
+         """Count events in a timeline within a time window."""
+         return self._timelines.count(timeline_name, credentials, query)
+
+     # =========================================================================
+     # Queue Operations - delegate to QueueManager
+     # =========================================================================
+
+     def queue_push(
+         self,
+         queue_name: str,
+         task: dict,
+         credentials: AgentCredentials,
+         priority: bool = False,
+     ) -> int:
+         """Push a task to a queue."""
+         return self._queues.push(queue_name, task, credentials, priority)
+
+     def queue_pop(
+         self,
+         queue_name: str,
+         credentials: AgentCredentials,
+         timeout: int = 0,
+     ) -> dict | None:
+         """Pop a task from a queue."""
+         return self._queues.pop(queue_name, credentials, timeout)
+
+     def queue_length(self, queue_name: str) -> int:
+         """Get the length of a queue."""
+         return self._queues.length(queue_name)
+
+     def queue_peek(
+         self,
+         queue_name: str,
+         credentials: AgentCredentials,
+         count: int = 1,
+     ) -> list[dict]:
+         """Peek at tasks in a queue without removing them."""
+         return self._queues.peek(queue_name, credentials, count)
+
+     # =========================================================================
+     # Transaction Operations - delegate to TransactionManager
+     # =========================================================================
+
+     def atomic_promote_pattern(
+         self,
+         pattern_id: str,
+         credentials: AgentCredentials,
+         min_confidence: float = 0.0,
+     ) -> tuple[bool, StagedPattern | None, str]:
+         """Atomically promote a pattern with validation."""
+         return self._transactions.atomic_promote_pattern(
+             pattern_id, credentials, min_confidence
+         )
+
+     # =========================================================================
+     # Cross-Session Operations - delegate to CrossSessionManager
+     # =========================================================================
+
+     def enable_cross_session(
+         self,
+         session_id: str,
+         credentials: AgentCredentials,
+     ) -> bool:
+         """Enable cross-session data sharing."""
+         return self._cross_session.enable(session_id, credentials)
+
+     def cross_session_available(
+         self,
+         session_id: str,
+         credentials: AgentCredentials,
+     ) -> bool:
+         """Check if cross-session is available."""
+         return self._cross_session.available(session_id, credentials)
+
+     # =========================================================================
+     # Cache Operations - expose cache stats
+     # =========================================================================
+
+     def get_cache_stats(self) -> dict:
+         """Get local cache statistics."""
+         return self._cache.get_stats()
+
+     def clear_cache(self) -> int:
+         """Clear local cache."""
+         return self._cache.clear()
+
+     # =========================================================================
+     # Internal - for backward compatibility with tests
+     # =========================================================================
+
+     @property
+     def _mock_storage(self) -> dict:
+         """Access mock storage for testing."""
+         return self._base._mock_storage
+
+     @property
+     def _client(self) -> Any:
+         """Access Redis client for testing."""
+         return self._base._client
+
+     @property
+     def _metrics(self) -> Any:
+         """Access metrics for testing."""
+         return self._base._metrics
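
For orientation, a minimal usage sketch of the facade added in this release (attune/memory/short_term/facade.py). It is assembled only from the docstrings shown in the diff above: RedisConfig(use_mock=True) comes from the class Example, while the AgentCredentials(agent_id=...) constructor call is an assumption and may not match the real signature in attune.memory.types.

# Hedged sketch of the 2.2.0 facade API shown in the diff above.
# Assumption: AgentCredentials can be built with just an agent_id; that
# constructor is not shown in this diff and may differ in practice.
from attune.memory.short_term import RedisShortTermMemory
from attune.memory.types import AgentCredentials, RedisConfig

config = RedisConfig(use_mock=True)            # in-memory mock, no Redis server needed
memory = RedisShortTermMemory(config=config)
credentials = AgentCredentials(agent_id="agent-1")  # hypothetical constructor call

memory.stash("analysis:result", {"score": 0.92}, credentials)   # returns True on success
print(memory.retrieve("analysis:result", credentials))          # -> {'score': 0.92}
print(memory.get_stats()["local_cache"])                        # local LRU cache stats exposed by the facade
memory.close()                                                  # stops the pub/sub listener and closes the connection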