empathy-framework 2.4.0-py3-none-any.whl → 3.8.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (329)
  1. coach_wizards/__init__.py +13 -12
  2. coach_wizards/accessibility_wizard.py +12 -12
  3. coach_wizards/api_wizard.py +12 -12
  4. coach_wizards/base_wizard.py +26 -20
  5. coach_wizards/cicd_wizard.py +15 -13
  6. coach_wizards/code_reviewer_README.md +60 -0
  7. coach_wizards/code_reviewer_wizard.py +180 -0
  8. coach_wizards/compliance_wizard.py +12 -12
  9. coach_wizards/database_wizard.py +12 -12
  10. coach_wizards/debugging_wizard.py +12 -12
  11. coach_wizards/documentation_wizard.py +12 -12
  12. coach_wizards/generate_wizards.py +1 -2
  13. coach_wizards/localization_wizard.py +101 -19
  14. coach_wizards/migration_wizard.py +12 -12
  15. coach_wizards/monitoring_wizard.py +12 -12
  16. coach_wizards/observability_wizard.py +12 -12
  17. coach_wizards/performance_wizard.py +12 -12
  18. coach_wizards/prompt_engineering_wizard.py +661 -0
  19. coach_wizards/refactoring_wizard.py +12 -12
  20. coach_wizards/scaling_wizard.py +12 -12
  21. coach_wizards/security_wizard.py +12 -12
  22. coach_wizards/testing_wizard.py +12 -12
  23. empathy_framework-3.8.2.dist-info/METADATA +1176 -0
  24. empathy_framework-3.8.2.dist-info/RECORD +333 -0
  25. empathy_framework-3.8.2.dist-info/entry_points.txt +22 -0
  26. {empathy_framework-2.4.0.dist-info → empathy_framework-3.8.2.dist-info}/top_level.txt +5 -1
  27. empathy_healthcare_plugin/__init__.py +1 -2
  28. empathy_healthcare_plugin/monitors/__init__.py +9 -0
  29. empathy_healthcare_plugin/monitors/clinical_protocol_monitor.py +315 -0
  30. empathy_healthcare_plugin/monitors/monitoring/__init__.py +44 -0
  31. empathy_healthcare_plugin/monitors/monitoring/protocol_checker.py +300 -0
  32. empathy_healthcare_plugin/monitors/monitoring/protocol_loader.py +214 -0
  33. empathy_healthcare_plugin/monitors/monitoring/sensor_parsers.py +306 -0
  34. empathy_healthcare_plugin/monitors/monitoring/trajectory_analyzer.py +389 -0
  35. empathy_llm_toolkit/__init__.py +7 -7
  36. empathy_llm_toolkit/agent_factory/__init__.py +53 -0
  37. empathy_llm_toolkit/agent_factory/adapters/__init__.py +85 -0
  38. empathy_llm_toolkit/agent_factory/adapters/autogen_adapter.py +312 -0
  39. empathy_llm_toolkit/agent_factory/adapters/crewai_adapter.py +454 -0
  40. empathy_llm_toolkit/agent_factory/adapters/haystack_adapter.py +298 -0
  41. empathy_llm_toolkit/agent_factory/adapters/langchain_adapter.py +362 -0
  42. empathy_llm_toolkit/agent_factory/adapters/langgraph_adapter.py +333 -0
  43. empathy_llm_toolkit/agent_factory/adapters/native.py +228 -0
  44. empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +426 -0
  45. empathy_llm_toolkit/agent_factory/base.py +305 -0
  46. empathy_llm_toolkit/agent_factory/crews/__init__.py +67 -0
  47. empathy_llm_toolkit/agent_factory/crews/code_review.py +1113 -0
  48. empathy_llm_toolkit/agent_factory/crews/health_check.py +1246 -0
  49. empathy_llm_toolkit/agent_factory/crews/refactoring.py +1128 -0
  50. empathy_llm_toolkit/agent_factory/crews/security_audit.py +1018 -0
  51. empathy_llm_toolkit/agent_factory/decorators.py +286 -0
  52. empathy_llm_toolkit/agent_factory/factory.py +558 -0
  53. empathy_llm_toolkit/agent_factory/framework.py +192 -0
  54. empathy_llm_toolkit/agent_factory/memory_integration.py +324 -0
  55. empathy_llm_toolkit/agent_factory/resilient.py +320 -0
  56. empathy_llm_toolkit/claude_memory.py +14 -15
  57. empathy_llm_toolkit/cli/__init__.py +8 -0
  58. empathy_llm_toolkit/cli/sync_claude.py +487 -0
  59. empathy_llm_toolkit/code_health.py +186 -28
  60. empathy_llm_toolkit/config/__init__.py +29 -0
  61. empathy_llm_toolkit/config/unified.py +295 -0
  62. empathy_llm_toolkit/contextual_patterns.py +11 -12
  63. empathy_llm_toolkit/core.py +168 -53
  64. empathy_llm_toolkit/git_pattern_extractor.py +17 -13
  65. empathy_llm_toolkit/levels.py +6 -13
  66. empathy_llm_toolkit/pattern_confidence.py +14 -18
  67. empathy_llm_toolkit/pattern_resolver.py +10 -12
  68. empathy_llm_toolkit/pattern_summary.py +16 -14
  69. empathy_llm_toolkit/providers.py +194 -28
  70. empathy_llm_toolkit/routing/__init__.py +32 -0
  71. empathy_llm_toolkit/routing/model_router.py +362 -0
  72. empathy_llm_toolkit/security/IMPLEMENTATION_SUMMARY.md +413 -0
  73. empathy_llm_toolkit/security/PHASE2_COMPLETE.md +384 -0
  74. empathy_llm_toolkit/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +271 -0
  75. empathy_llm_toolkit/security/QUICK_REFERENCE.md +316 -0
  76. empathy_llm_toolkit/security/README.md +262 -0
  77. empathy_llm_toolkit/security/__init__.py +62 -0
  78. empathy_llm_toolkit/security/audit_logger.py +929 -0
  79. empathy_llm_toolkit/security/audit_logger_example.py +152 -0
  80. empathy_llm_toolkit/security/pii_scrubber.py +640 -0
  81. empathy_llm_toolkit/security/secrets_detector.py +678 -0
  82. empathy_llm_toolkit/security/secrets_detector_example.py +304 -0
  83. empathy_llm_toolkit/security/secure_memdocs.py +1192 -0
  84. empathy_llm_toolkit/security/secure_memdocs_example.py +278 -0
  85. empathy_llm_toolkit/session_status.py +20 -22
  86. empathy_llm_toolkit/state.py +28 -21
  87. empathy_llm_toolkit/wizards/__init__.py +38 -0
  88. empathy_llm_toolkit/wizards/base_wizard.py +364 -0
  89. empathy_llm_toolkit/wizards/customer_support_wizard.py +190 -0
  90. empathy_llm_toolkit/wizards/healthcare_wizard.py +362 -0
  91. empathy_llm_toolkit/wizards/patient_assessment_README.md +64 -0
  92. empathy_llm_toolkit/wizards/patient_assessment_wizard.py +193 -0
  93. empathy_llm_toolkit/wizards/technology_wizard.py +194 -0
  94. empathy_os/__init__.py +125 -84
  95. empathy_os/adaptive/__init__.py +13 -0
  96. empathy_os/adaptive/task_complexity.py +127 -0
  97. empathy_os/{monitoring.py → agent_monitoring.py} +28 -28
  98. empathy_os/cache/__init__.py +117 -0
  99. empathy_os/cache/base.py +166 -0
  100. empathy_os/cache/dependency_manager.py +253 -0
  101. empathy_os/cache/hash_only.py +248 -0
  102. empathy_os/cache/hybrid.py +390 -0
  103. empathy_os/cache/storage.py +282 -0
  104. empathy_os/cli.py +1516 -70
  105. empathy_os/cli_unified.py +597 -0
  106. empathy_os/config/__init__.py +63 -0
  107. empathy_os/config/xml_config.py +239 -0
  108. empathy_os/config.py +95 -37
  109. empathy_os/coordination.py +72 -68
  110. empathy_os/core.py +94 -107
  111. empathy_os/cost_tracker.py +74 -55
  112. empathy_os/dashboard/__init__.py +15 -0
  113. empathy_os/dashboard/server.py +743 -0
  114. empathy_os/discovery.py +17 -14
  115. empathy_os/emergence.py +21 -22
  116. empathy_os/exceptions.py +18 -30
  117. empathy_os/feedback_loops.py +30 -33
  118. empathy_os/levels.py +32 -35
  119. empathy_os/leverage_points.py +31 -32
  120. empathy_os/logging_config.py +19 -16
  121. empathy_os/memory/__init__.py +195 -0
  122. empathy_os/memory/claude_memory.py +466 -0
  123. empathy_os/memory/config.py +224 -0
  124. empathy_os/memory/control_panel.py +1298 -0
  125. empathy_os/memory/edges.py +179 -0
  126. empathy_os/memory/graph.py +567 -0
  127. empathy_os/memory/long_term.py +1194 -0
  128. empathy_os/memory/nodes.py +179 -0
  129. empathy_os/memory/redis_bootstrap.py +540 -0
  130. empathy_os/memory/security/__init__.py +31 -0
  131. empathy_os/memory/security/audit_logger.py +930 -0
  132. empathy_os/memory/security/pii_scrubber.py +640 -0
  133. empathy_os/memory/security/secrets_detector.py +678 -0
  134. empathy_os/memory/short_term.py +2119 -0
  135. empathy_os/memory/storage/__init__.py +15 -0
  136. empathy_os/memory/summary_index.py +583 -0
  137. empathy_os/memory/unified.py +619 -0
  138. empathy_os/metrics/__init__.py +12 -0
  139. empathy_os/metrics/prompt_metrics.py +190 -0
  140. empathy_os/models/__init__.py +136 -0
  141. empathy_os/models/__main__.py +13 -0
  142. empathy_os/models/cli.py +655 -0
  143. empathy_os/models/empathy_executor.py +354 -0
  144. empathy_os/models/executor.py +252 -0
  145. empathy_os/models/fallback.py +671 -0
  146. empathy_os/models/provider_config.py +563 -0
  147. empathy_os/models/registry.py +382 -0
  148. empathy_os/models/tasks.py +302 -0
  149. empathy_os/models/telemetry.py +548 -0
  150. empathy_os/models/token_estimator.py +378 -0
  151. empathy_os/models/validation.py +274 -0
  152. empathy_os/monitoring/__init__.py +52 -0
  153. empathy_os/monitoring/alerts.py +23 -0
  154. empathy_os/monitoring/alerts_cli.py +268 -0
  155. empathy_os/monitoring/multi_backend.py +271 -0
  156. empathy_os/monitoring/otel_backend.py +363 -0
  157. empathy_os/optimization/__init__.py +19 -0
  158. empathy_os/optimization/context_optimizer.py +272 -0
  159. empathy_os/pattern_library.py +30 -29
  160. empathy_os/persistence.py +35 -37
  161. empathy_os/platform_utils.py +261 -0
  162. empathy_os/plugins/__init__.py +28 -0
  163. empathy_os/plugins/base.py +361 -0
  164. empathy_os/plugins/registry.py +268 -0
  165. empathy_os/project_index/__init__.py +30 -0
  166. empathy_os/project_index/cli.py +335 -0
  167. empathy_os/project_index/crew_integration.py +430 -0
  168. empathy_os/project_index/index.py +425 -0
  169. empathy_os/project_index/models.py +501 -0
  170. empathy_os/project_index/reports.py +473 -0
  171. empathy_os/project_index/scanner.py +538 -0
  172. empathy_os/prompts/__init__.py +61 -0
  173. empathy_os/prompts/config.py +77 -0
  174. empathy_os/prompts/context.py +177 -0
  175. empathy_os/prompts/parser.py +285 -0
  176. empathy_os/prompts/registry.py +313 -0
  177. empathy_os/prompts/templates.py +208 -0
  178. empathy_os/redis_config.py +144 -58
  179. empathy_os/redis_memory.py +79 -77
  180. empathy_os/resilience/__init__.py +56 -0
  181. empathy_os/resilience/circuit_breaker.py +256 -0
  182. empathy_os/resilience/fallback.py +179 -0
  183. empathy_os/resilience/health.py +300 -0
  184. empathy_os/resilience/retry.py +209 -0
  185. empathy_os/resilience/timeout.py +135 -0
  186. empathy_os/routing/__init__.py +43 -0
  187. empathy_os/routing/chain_executor.py +433 -0
  188. empathy_os/routing/classifier.py +217 -0
  189. empathy_os/routing/smart_router.py +234 -0
  190. empathy_os/routing/wizard_registry.py +307 -0
  191. empathy_os/templates.py +19 -14
  192. empathy_os/trust/__init__.py +28 -0
  193. empathy_os/trust/circuit_breaker.py +579 -0
  194. empathy_os/trust_building.py +67 -58
  195. empathy_os/validation/__init__.py +19 -0
  196. empathy_os/validation/xml_validator.py +281 -0
  197. empathy_os/wizard_factory_cli.py +170 -0
  198. empathy_os/{workflows.py → workflow_commands.py} +131 -37
  199. empathy_os/workflows/__init__.py +360 -0
  200. empathy_os/workflows/base.py +1660 -0
  201. empathy_os/workflows/bug_predict.py +962 -0
  202. empathy_os/workflows/code_review.py +960 -0
  203. empathy_os/workflows/code_review_adapters.py +310 -0
  204. empathy_os/workflows/code_review_pipeline.py +720 -0
  205. empathy_os/workflows/config.py +600 -0
  206. empathy_os/workflows/dependency_check.py +648 -0
  207. empathy_os/workflows/document_gen.py +1069 -0
  208. empathy_os/workflows/documentation_orchestrator.py +1205 -0
  209. empathy_os/workflows/health_check.py +679 -0
  210. empathy_os/workflows/keyboard_shortcuts/__init__.py +39 -0
  211. empathy_os/workflows/keyboard_shortcuts/generators.py +386 -0
  212. empathy_os/workflows/keyboard_shortcuts/parsers.py +414 -0
  213. empathy_os/workflows/keyboard_shortcuts/prompts.py +295 -0
  214. empathy_os/workflows/keyboard_shortcuts/schema.py +193 -0
  215. empathy_os/workflows/keyboard_shortcuts/workflow.py +505 -0
  216. empathy_os/workflows/manage_documentation.py +804 -0
  217. empathy_os/workflows/new_sample_workflow1.py +146 -0
  218. empathy_os/workflows/new_sample_workflow1_README.md +150 -0
  219. empathy_os/workflows/perf_audit.py +687 -0
  220. empathy_os/workflows/pr_review.py +748 -0
  221. empathy_os/workflows/progress.py +445 -0
  222. empathy_os/workflows/progress_server.py +322 -0
  223. empathy_os/workflows/refactor_plan.py +693 -0
  224. empathy_os/workflows/release_prep.py +808 -0
  225. empathy_os/workflows/research_synthesis.py +404 -0
  226. empathy_os/workflows/secure_release.py +585 -0
  227. empathy_os/workflows/security_adapters.py +297 -0
  228. empathy_os/workflows/security_audit.py +1046 -0
  229. empathy_os/workflows/step_config.py +234 -0
  230. empathy_os/workflows/test5.py +125 -0
  231. empathy_os/workflows/test5_README.md +158 -0
  232. empathy_os/workflows/test_gen.py +1855 -0
  233. empathy_os/workflows/test_lifecycle.py +526 -0
  234. empathy_os/workflows/test_maintenance.py +626 -0
  235. empathy_os/workflows/test_maintenance_cli.py +590 -0
  236. empathy_os/workflows/test_maintenance_crew.py +821 -0
  237. empathy_os/workflows/xml_enhanced_crew.py +285 -0
  238. empathy_software_plugin/__init__.py +1 -2
  239. empathy_software_plugin/cli/__init__.py +120 -0
  240. empathy_software_plugin/cli/inspect.py +362 -0
  241. empathy_software_plugin/cli.py +49 -27
  242. empathy_software_plugin/plugin.py +4 -8
  243. empathy_software_plugin/wizards/__init__.py +42 -0
  244. empathy_software_plugin/wizards/advanced_debugging_wizard.py +392 -0
  245. empathy_software_plugin/wizards/agent_orchestration_wizard.py +511 -0
  246. empathy_software_plugin/wizards/ai_collaboration_wizard.py +503 -0
  247. empathy_software_plugin/wizards/ai_context_wizard.py +441 -0
  248. empathy_software_plugin/wizards/ai_documentation_wizard.py +503 -0
  249. empathy_software_plugin/wizards/base_wizard.py +288 -0
  250. empathy_software_plugin/wizards/book_chapter_wizard.py +519 -0
  251. empathy_software_plugin/wizards/code_review_wizard.py +606 -0
  252. empathy_software_plugin/wizards/debugging/__init__.py +50 -0
  253. empathy_software_plugin/wizards/debugging/bug_risk_analyzer.py +414 -0
  254. empathy_software_plugin/wizards/debugging/config_loaders.py +442 -0
  255. empathy_software_plugin/wizards/debugging/fix_applier.py +469 -0
  256. empathy_software_plugin/wizards/debugging/language_patterns.py +383 -0
  257. empathy_software_plugin/wizards/debugging/linter_parsers.py +470 -0
  258. empathy_software_plugin/wizards/debugging/verification.py +369 -0
  259. empathy_software_plugin/wizards/enhanced_testing_wizard.py +537 -0
  260. empathy_software_plugin/wizards/memory_enhanced_debugging_wizard.py +816 -0
  261. empathy_software_plugin/wizards/multi_model_wizard.py +501 -0
  262. empathy_software_plugin/wizards/pattern_extraction_wizard.py +422 -0
  263. empathy_software_plugin/wizards/pattern_retriever_wizard.py +400 -0
  264. empathy_software_plugin/wizards/performance/__init__.py +9 -0
  265. empathy_software_plugin/wizards/performance/bottleneck_detector.py +221 -0
  266. empathy_software_plugin/wizards/performance/profiler_parsers.py +278 -0
  267. empathy_software_plugin/wizards/performance/trajectory_analyzer.py +429 -0
  268. empathy_software_plugin/wizards/performance_profiling_wizard.py +305 -0
  269. empathy_software_plugin/wizards/prompt_engineering_wizard.py +425 -0
  270. empathy_software_plugin/wizards/rag_pattern_wizard.py +461 -0
  271. empathy_software_plugin/wizards/security/__init__.py +32 -0
  272. empathy_software_plugin/wizards/security/exploit_analyzer.py +290 -0
  273. empathy_software_plugin/wizards/security/owasp_patterns.py +241 -0
  274. empathy_software_plugin/wizards/security/vulnerability_scanner.py +604 -0
  275. empathy_software_plugin/wizards/security_analysis_wizard.py +322 -0
  276. empathy_software_plugin/wizards/security_learning_wizard.py +740 -0
  277. empathy_software_plugin/wizards/tech_debt_wizard.py +726 -0
  278. empathy_software_plugin/wizards/testing/__init__.py +27 -0
  279. empathy_software_plugin/wizards/testing/coverage_analyzer.py +459 -0
  280. empathy_software_plugin/wizards/testing/quality_analyzer.py +531 -0
  281. empathy_software_plugin/wizards/testing/test_suggester.py +533 -0
  282. empathy_software_plugin/wizards/testing_wizard.py +274 -0
  283. hot_reload/README.md +473 -0
  284. hot_reload/__init__.py +62 -0
  285. hot_reload/config.py +84 -0
  286. hot_reload/integration.py +228 -0
  287. hot_reload/reloader.py +298 -0
  288. hot_reload/watcher.py +179 -0
  289. hot_reload/websocket.py +176 -0
  290. scaffolding/README.md +589 -0
  291. scaffolding/__init__.py +35 -0
  292. scaffolding/__main__.py +14 -0
  293. scaffolding/cli.py +240 -0
  294. test_generator/__init__.py +38 -0
  295. test_generator/__main__.py +14 -0
  296. test_generator/cli.py +226 -0
  297. test_generator/generator.py +325 -0
  298. test_generator/risk_analyzer.py +216 -0
  299. workflow_patterns/__init__.py +33 -0
  300. workflow_patterns/behavior.py +249 -0
  301. workflow_patterns/core.py +76 -0
  302. workflow_patterns/output.py +99 -0
  303. workflow_patterns/registry.py +255 -0
  304. workflow_patterns/structural.py +288 -0
  305. workflow_scaffolding/__init__.py +11 -0
  306. workflow_scaffolding/__main__.py +12 -0
  307. workflow_scaffolding/cli.py +206 -0
  308. workflow_scaffolding/generator.py +265 -0
  309. agents/code_inspection/patterns/inspection/recurring_B112.json +0 -18
  310. agents/code_inspection/patterns/inspection/recurring_F541.json +0 -16
  311. agents/code_inspection/patterns/inspection/recurring_FORMAT.json +0 -25
  312. agents/code_inspection/patterns/inspection/recurring_bug_20250822_def456.json +0 -16
  313. agents/code_inspection/patterns/inspection/recurring_bug_20250915_abc123.json +0 -16
  314. agents/code_inspection/patterns/inspection/recurring_bug_20251212_3c5b9951.json +0 -16
  315. agents/code_inspection/patterns/inspection/recurring_bug_20251212_97c0f72f.json +0 -16
  316. agents/code_inspection/patterns/inspection/recurring_bug_20251212_a0871d53.json +0 -16
  317. agents/code_inspection/patterns/inspection/recurring_bug_20251212_a9b6ec41.json +0 -16
  318. agents/code_inspection/patterns/inspection/recurring_bug_null_001.json +0 -16
  319. agents/code_inspection/patterns/inspection/recurring_builtin.json +0 -16
  320. agents/compliance_anticipation_agent.py +0 -1427
  321. agents/epic_integration_wizard.py +0 -541
  322. agents/trust_building_behaviors.py +0 -891
  323. empathy_framework-2.4.0.dist-info/METADATA +0 -485
  324. empathy_framework-2.4.0.dist-info/RECORD +0 -102
  325. empathy_framework-2.4.0.dist-info/entry_points.txt +0 -6
  326. empathy_llm_toolkit/htmlcov/status.json +0 -1
  327. empathy_llm_toolkit/security/htmlcov/status.json +0 -1
  328. {empathy_framework-2.4.0.dist-info → empathy_framework-3.8.2.dist-info}/WHEEL +0 -0
  329. {empathy_framework-2.4.0.dist-info → empathy_framework-3.8.2.dist-info}/licenses/LICENSE +0 -0
empathy_os/memory/short_term.py (new file)
@@ -0,0 +1,2119 @@
+ """Redis Short-Term Memory for Empathy Framework
+
+ Per EMPATHY_PHILOSOPHY.md v1.1.0:
+ - Implements fast, TTL-based working memory for agent coordination
+ - Role-based access tiers for data integrity
+ - Pattern staging before validation
+ - Principled negotiation support
+
+ Enhanced Features (v2.0):
+ - Pub/Sub for real-time agent notifications
+ - Batch operations for high-throughput workflows
+ - SCAN-based pagination for large datasets
+ - Redis Streams for audit trails
+ - Connection retry with exponential backoff
+ - SSL/TLS support for managed Redis services
+ - Time-window queries with sorted sets
+ - Task queues with Lists
+ - Atomic transactions with MULTI/EXEC
+ - Comprehensive metrics tracking
+
+ Copyright 2025 Smart AI Memory, LLC
+ Licensed under Fair Source 0.9
+ """
+
+ import json
+ import threading
+ import time
+ from collections.abc import Callable
+ from dataclasses import dataclass, field
+ from datetime import datetime
+ from enum import Enum
+ from typing import Any
+
+ import structlog
+
+ logger = structlog.get_logger(__name__)
+
+ try:
+     import redis
+     from redis.exceptions import ConnectionError as RedisConnectionError
+     from redis.exceptions import TimeoutError as RedisTimeoutError
+
+     REDIS_AVAILABLE = True
+ except ImportError:
+     REDIS_AVAILABLE = False
+     RedisConnectionError = Exception  # type: ignore
+     RedisTimeoutError = Exception  # type: ignore
+
+
+ class AccessTier(Enum):
+     """Role-based access tiers per EMPATHY_PHILOSOPHY.md
+
+     Tier 1 - Observer: Read-only access to validated patterns
+     Tier 2 - Contributor: Can stage patterns for validation
+     Tier 3 - Validator: Can promote staged patterns to active
+     Tier 4 - Steward: Full access including deprecation and audit
+     """
+
+     OBSERVER = 1
+     CONTRIBUTOR = 2
+     VALIDATOR = 3
+     STEWARD = 4
+
+
+ class TTLStrategy(Enum):
+     """TTL strategies for different memory types
+
+     Per EMPATHY_PHILOSOPHY.md Section 9.3:
+     - Working results: 1 hour
+     - Staged patterns: 24 hours
+     - Coordination signals: 5 minutes
+     - Conflict context: Until resolution
+     """
+
+     WORKING_RESULTS = 3600  # 1 hour
+     STAGED_PATTERNS = 86400  # 24 hours
+     COORDINATION = 300  # 5 minutes
+     CONFLICT_CONTEXT = 604800  # 7 days (fallback for unresolved)
+     SESSION = 1800  # 30 minutes
+     STREAM_ENTRY = 86400 * 7  # 7 days for audit stream entries
+     TASK_QUEUE = 3600 * 4  # 4 hours for task queue items
+
+
+ @dataclass
+ class RedisConfig:
+     """Enhanced Redis configuration with SSL and retry support.
+
+     Supports:
+     - Standard connections (host:port)
+     - URL-based connections (redis://...)
+     - SSL/TLS for managed services (rediss://...)
+     - Sentinel for high availability
+     - Connection pooling
+     - Retry with exponential backoff
+     """
+
+     host: str = "localhost"
+     port: int = 6379
+     db: int = 0
+     password: str | None = None
+     use_mock: bool = False
+
+     # SSL/TLS settings
+     ssl: bool = False
+     ssl_cert_reqs: str | None = None  # "required", "optional", "none"
+     ssl_ca_certs: str | None = None
+     ssl_certfile: str | None = None
+     ssl_keyfile: str | None = None
+
+     # Connection pool settings
+     max_connections: int = 10
+     socket_timeout: float = 5.0
+     socket_connect_timeout: float = 5.0
+
+     # Retry settings
+     retry_on_timeout: bool = True
+     retry_max_attempts: int = 3
+     retry_base_delay: float = 0.1  # seconds
+     retry_max_delay: float = 2.0  # seconds
+
+     # Sentinel settings (for HA)
+     sentinel_hosts: list[tuple[str, int]] | None = None
+     sentinel_master_name: str | None = None
+
+     def to_redis_kwargs(self) -> dict:
+         """Convert to redis.Redis constructor kwargs."""
+         kwargs: dict[str, Any] = {
+             "host": self.host,
+             "port": self.port,
+             "db": self.db,
+             "password": self.password,
+             "decode_responses": True,
+             "socket_timeout": self.socket_timeout,
+             "socket_connect_timeout": self.socket_connect_timeout,
+             "retry_on_timeout": self.retry_on_timeout,
+         }
+
+         if self.ssl:
+             kwargs["ssl"] = True
+             if self.ssl_cert_reqs:
+                 kwargs["ssl_cert_reqs"] = self.ssl_cert_reqs
+             if self.ssl_ca_certs:
+                 kwargs["ssl_ca_certs"] = self.ssl_ca_certs
+             if self.ssl_certfile:
+                 kwargs["ssl_certfile"] = self.ssl_certfile
+             if self.ssl_keyfile:
+                 kwargs["ssl_keyfile"] = self.ssl_keyfile
+
+         return kwargs
+
+
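The SSL block above only forwards certificate options when ssl is enabled. A minimal sketch of how this config is meant to compose, assuming only the fields defined above (the host and password are hypothetical placeholders):

# Sketch: SSL-enabled config for a managed Redis service (values illustrative).
cfg = RedisConfig(
    host="cache.example.com",
    port=6380,
    password="example-secret",
    ssl=True,
    ssl_cert_reqs="required",
)
kwargs = cfg.to_redis_kwargs()
assert kwargs["ssl"] is True and kwargs["decode_responses"] is True
# client = redis.Redis(**kwargs)  # what _create_client_with_retry() does internally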
+ @dataclass
+ class RedisMetrics:
+     """Metrics for Redis operations."""
+
+     operations_total: int = 0
+     operations_success: int = 0
+     operations_failed: int = 0
+     retries_total: int = 0
+     latency_sum_ms: float = 0.0
+     latency_max_ms: float = 0.0
+
+     # Per-operation metrics
+     stash_count: int = 0
+     retrieve_count: int = 0
+     publish_count: int = 0
+     stream_append_count: int = 0
+
+     def record_operation(self, operation: str, latency_ms: float, success: bool = True) -> None:
+         """Record an operation metric."""
+         self.operations_total += 1
+         self.latency_sum_ms += latency_ms
+         self.latency_max_ms = max(self.latency_max_ms, latency_ms)
+
+         if success:
+             self.operations_success += 1
+         else:
+             self.operations_failed += 1
+
+         # Track by operation type
+         if operation == "stash":
+             self.stash_count += 1
+         elif operation == "retrieve":
+             self.retrieve_count += 1
+         elif operation == "publish":
+             self.publish_count += 1
+         elif operation == "stream_append":
+             self.stream_append_count += 1
+
+     @property
+     def latency_avg_ms(self) -> float:
+         """Average latency in milliseconds."""
+         if self.operations_total == 0:
+             return 0.0
+         return self.latency_sum_ms / self.operations_total
+
+     @property
+     def success_rate(self) -> float:
+         """Success rate as percentage."""
+         if self.operations_total == 0:
+             return 100.0
+         return (self.operations_success / self.operations_total) * 100
+
+     def to_dict(self) -> dict:
+         """Convert to dictionary for reporting."""
+         return {
+             "operations_total": self.operations_total,
+             "operations_success": self.operations_success,
+             "operations_failed": self.operations_failed,
+             "retries_total": self.retries_total,
+             "latency_avg_ms": round(self.latency_avg_ms, 2),
+             "latency_max_ms": round(self.latency_max_ms, 2),
+             "success_rate": round(self.success_rate, 2),
+             "by_operation": {
+                 "stash": self.stash_count,
+                 "retrieve": self.retrieve_count,
+                 "publish": self.publish_count,
+                 "stream_append": self.stream_append_count,
+             },
+         }
+
+
+ @dataclass
+ class PaginatedResult:
+     """Result of a paginated query."""
+
+     items: list[Any]
+     cursor: str
+     has_more: bool
+     total_scanned: int = 0
+
+
+ @dataclass
+ class TimeWindowQuery:
+     """Query parameters for time-window operations."""
+
+     start_time: datetime | None = None
+     end_time: datetime | None = None
+     limit: int = 100
+     offset: int = 0
+
+     @property
+     def start_score(self) -> float:
+         """Start timestamp as Redis score."""
+         if self.start_time is None:
+             return float("-inf")
+         return self.start_time.timestamp()
+
+     @property
+     def end_score(self) -> float:
+         """End timestamp as Redis score."""
+         if self.end_time is None:
+             return float("+inf")
+         return self.end_time.timestamp()
+
+
+ @dataclass
+ class AgentCredentials:
+     """Agent identity and access permissions"""
+
+     agent_id: str
+     tier: AccessTier
+     roles: list[str] = field(default_factory=list)
+     created_at: datetime = field(default_factory=datetime.now)
+
+     def can_read(self) -> bool:
+         """All tiers can read"""
+         return True
+
+     def can_stage(self) -> bool:
+         """Contributor+ can stage patterns"""
+         return self.tier.value >= AccessTier.CONTRIBUTOR.value
+
+     def can_validate(self) -> bool:
+         """Validator+ can promote patterns"""
+         return self.tier.value >= AccessTier.VALIDATOR.value
+
+     def can_administer(self) -> bool:
+         """Only Stewards have full admin access"""
+         return self.tier.value >= AccessTier.STEWARD.value
+
+
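Since AccessTier values are ordered integers, each permission check above reduces to a single comparison. A quick sketch of the tier ladder:

# Sketch: a CONTRIBUTOR can read and stage, but not validate or administer.
creds = AgentCredentials(agent_id="agent_1", tier=AccessTier.CONTRIBUTOR)
assert creds.can_read() and creds.can_stage()
assert not creds.can_validate() and not creds.can_administer()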
+ @dataclass
+ class StagedPattern:
+     """Pattern awaiting validation"""
+
+     pattern_id: str
+     agent_id: str
+     pattern_type: str
+     name: str
+     description: str
+     code: str | None = None
+     context: dict = field(default_factory=dict)
+     confidence: float = 0.5
+     staged_at: datetime = field(default_factory=datetime.now)
+     interests: list[str] = field(default_factory=list)  # For negotiation
+
+     def to_dict(self) -> dict:
+         return {
+             "pattern_id": self.pattern_id,
+             "agent_id": self.agent_id,
+             "pattern_type": self.pattern_type,
+             "name": self.name,
+             "description": self.description,
+             "code": self.code,
+             "context": self.context,
+             "confidence": self.confidence,
+             "staged_at": self.staged_at.isoformat(),
+             "interests": self.interests,
+         }
+
+     @classmethod
+     def from_dict(cls, data: dict) -> "StagedPattern":
+         return cls(
+             pattern_id=data["pattern_id"],
+             agent_id=data["agent_id"],
+             pattern_type=data["pattern_type"],
+             name=data["name"],
+             description=data["description"],
+             code=data.get("code"),
+             context=data.get("context", {}),
+             confidence=data.get("confidence", 0.5),
+             staged_at=datetime.fromisoformat(data["staged_at"]),
+             interests=data.get("interests", []),
+         )
+
+
+ @dataclass
+ class ConflictContext:
+     """Context for principled negotiation
+
+     Per Getting to Yes framework:
+     - Positions: What each party says they want
+     - Interests: Why they want it (underlying needs)
+     - BATNA: Best Alternative to Negotiated Agreement
+     """
+
+     conflict_id: str
+     positions: dict[str, Any]  # agent_id -> stated position
+     interests: dict[str, list[str]]  # agent_id -> underlying interests
+     batna: str | None = None  # Fallback strategy
+     created_at: datetime = field(default_factory=datetime.now)
+     resolved: bool = False
+     resolution: str | None = None
+
+     def to_dict(self) -> dict:
+         return {
+             "conflict_id": self.conflict_id,
+             "positions": self.positions,
+             "interests": self.interests,
+             "batna": self.batna,
+             "created_at": self.created_at.isoformat(),
+             "resolved": self.resolved,
+             "resolution": self.resolution,
+         }
+
+     @classmethod
+     def from_dict(cls, data: dict) -> "ConflictContext":
+         return cls(
+             conflict_id=data["conflict_id"],
+             positions=data["positions"],
+             interests=data["interests"],
+             batna=data.get("batna"),
+             created_at=datetime.fromisoformat(data["created_at"]),
+             resolved=data.get("resolved", False),
+             resolution=data.get("resolution"),
+         )
+
+
+ class RedisShortTermMemory:
+     """Redis-backed short-term memory for agent coordination
+
+     Features:
+     - Fast read/write with automatic TTL expiration
+     - Role-based access control
+     - Pattern staging workflow
+     - Conflict negotiation context
+     - Agent working memory
+
+     Enhanced Features (v2.0):
+     - Pub/Sub for real-time agent notifications
+     - Batch operations (stash_batch, retrieve_batch)
+     - SCAN-based pagination for large datasets
+     - Redis Streams for audit trails
+     - Time-window queries with sorted sets
+     - Task queues with Lists (LPUSH/RPOP)
+     - Atomic transactions with MULTI/EXEC
+     - Connection retry with exponential backoff
+     - Metrics tracking for observability
+
+     Example:
+         >>> memory = RedisShortTermMemory()
+         >>> creds = AgentCredentials("agent_1", AccessTier.CONTRIBUTOR)
+         >>> memory.stash("analysis_results", {"issues": 3}, creds)
+         >>> data = memory.retrieve("analysis_results", creds)
+
+         # Pub/Sub example
+         >>> memory.subscribe("agent_signals", lambda msg: print(msg))
+         >>> memory.publish("agent_signals", {"event": "task_complete"}, creds)
+
+         # Batch operations
+         >>> items = [("key1", {"data": 1}), ("key2", {"data": 2})]
+         >>> memory.stash_batch(items, creds)
+
+         # Pagination
+         >>> result = memory.list_staged_patterns_paginated(creds, cursor="0", count=10)
+
+     """
+
+     # Key prefixes for namespacing
+     PREFIX_WORKING = "empathy:working:"
+     PREFIX_STAGED = "empathy:staged:"
+     PREFIX_CONFLICT = "empathy:conflict:"
+     PREFIX_COORDINATION = "empathy:coord:"
+     PREFIX_SESSION = "empathy:session:"
+     PREFIX_PUBSUB = "empathy:pubsub:"
+     PREFIX_STREAM = "empathy:stream:"
+     PREFIX_TIMELINE = "empathy:timeline:"
+     PREFIX_QUEUE = "empathy:queue:"
+
+     def __init__(
+         self,
+         host: str = "localhost",
+         port: int = 6379,
+         db: int = 0,
+         password: str | None = None,
+         use_mock: bool = False,
+         config: RedisConfig | None = None,
+     ):
+         """Initialize Redis connection
+
+         Args:
+             host: Redis host
+             port: Redis port
+             db: Redis database number
+             password: Redis password (optional)
+             use_mock: Use in-memory mock for testing
+             config: Full RedisConfig for advanced settings (overrides other args)
+
+         """
+         # Use config if provided, otherwise build from individual args
+         if config is not None:
+             self._config = config
+         else:
+             self._config = RedisConfig(
+                 host=host,
+                 port=port,
+                 db=db,
+                 password=password,
+                 use_mock=use_mock,
+             )
+
+         self.use_mock = self._config.use_mock or not REDIS_AVAILABLE
+
+         # Initialize metrics
+         self._metrics = RedisMetrics()
+
+         # Pub/Sub state
+         self._pubsub: Any | None = None
+         self._pubsub_thread: threading.Thread | None = None
+         self._subscriptions: dict[str, list[Callable[[dict], None]]] = {}
+         self._pubsub_running = False
+
+         # Mock storage for testing
+         self._mock_storage: dict[str, tuple[Any, float | None]] = {}
+         self._mock_lists: dict[str, list[str]] = {}
+         self._mock_sorted_sets: dict[str, list[tuple[float, str]]] = {}
+         self._mock_streams: dict[str, list[tuple[str, dict]]] = {}
+         self._mock_pubsub_handlers: dict[str, list[Callable[[dict], None]]] = {}
+
+         if self.use_mock:
+             self._client = None
+         else:
+             self._client = self._create_client_with_retry()
+
+     def _create_client_with_retry(self) -> Any:
+         """Create Redis client with retry logic."""
+         max_attempts = self._config.retry_max_attempts
+         base_delay = self._config.retry_base_delay
+         max_delay = self._config.retry_max_delay
+
+         last_error: Exception | None = None
+
+         for attempt in range(max_attempts):
+             try:
+                 client = redis.Redis(**self._config.to_redis_kwargs())
+                 # Test connection
+                 client.ping()
+                 logger.info(
+                     "redis_connected",
+                     host=self._config.host,
+                     port=self._config.port,
+                     attempt=attempt + 1,
+                 )
+                 return client
+             except (RedisConnectionError, RedisTimeoutError) as e:
+                 last_error = e
+                 self._metrics.retries_total += 1
+
+                 if attempt < max_attempts - 1:
+                     delay = min(base_delay * (2**attempt), max_delay)
+                     logger.warning(
+                         "redis_connection_retry",
+                         attempt=attempt + 1,
+                         max_attempts=max_attempts,
+                         delay=delay,
+                         error=str(e),
+                     )
+                     time.sleep(delay)
+
+         # All retries failed
+         logger.error(
+             "redis_connection_failed",
+             max_attempts=max_attempts,
+             error=str(last_error),
+         )
+         raise last_error if last_error else ConnectionError("Failed to connect to Redis")
+
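With the default retry settings above (base delay 0.1 s, cap 2.0 s, three attempts), the backoff between attempts follows min(0.1 * 2**attempt, 2.0), so only the first two sleeps (0.1 s and 0.2 s) ever occur unless retry_max_attempts is raised; the cap takes over from the sixth attempt onward. A sketch of the schedule:

# Sketch: backoff schedule implied by min(base_delay * (2 ** attempt), max_delay).
base_delay, max_delay = 0.1, 2.0
delays = [min(base_delay * (2 ** a), max_delay) for a in range(8)]
# -> [0.1, 0.2, 0.4, 0.8, 1.6, 2.0, 2.0, 2.0]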
+     def _execute_with_retry(self, operation: Callable[[], Any], op_name: str = "operation") -> Any:
+         """Execute a Redis operation with retry logic."""
+         start_time = time.perf_counter()
+         max_attempts = self._config.retry_max_attempts
+         base_delay = self._config.retry_base_delay
+         max_delay = self._config.retry_max_delay
+
+         last_error: Exception | None = None
+
+         for attempt in range(max_attempts):
+             try:
+                 result = operation()
+                 latency_ms = (time.perf_counter() - start_time) * 1000
+                 self._metrics.record_operation(op_name, latency_ms, success=True)
+                 return result
+             except (RedisConnectionError, RedisTimeoutError) as e:
+                 last_error = e
+                 self._metrics.retries_total += 1
+
+                 if attempt < max_attempts - 1:
+                     delay = min(base_delay * (2**attempt), max_delay)
+                     logger.warning(
+                         "redis_operation_retry",
+                         operation=op_name,
+                         attempt=attempt + 1,
+                         delay=delay,
+                     )
+                     time.sleep(delay)
+
+         latency_ms = (time.perf_counter() - start_time) * 1000
+         self._metrics.record_operation(op_name, latency_ms, success=False)
+         raise last_error if last_error else ConnectionError("Redis operation failed")
+
+     def _get(self, key: str) -> str | None:
+         """Get value from Redis or mock"""
+         if self.use_mock:
+             if key in self._mock_storage:
+                 value, expires = self._mock_storage[key]
+                 if expires is None or datetime.now().timestamp() < expires:
+                     return str(value) if value is not None else None
+                 del self._mock_storage[key]
+             return None
+         if self._client is None:
+             return None
+         result = self._client.get(key)
+         return str(result) if result else None
+
+     def _set(self, key: str, value: str, ttl: int | None = None) -> bool:
+         """Set value in Redis or mock"""
+         if self.use_mock:
+             expires = datetime.now().timestamp() + ttl if ttl else None
+             self._mock_storage[key] = (value, expires)
+             return True
+         if self._client is None:
+             return False
+         if ttl:
+             self._client.setex(key, ttl, value)
+             return True
+         result = self._client.set(key, value)
+         return bool(result)
+
+     def _delete(self, key: str) -> bool:
+         """Delete key from Redis or mock"""
+         if self.use_mock:
+             if key in self._mock_storage:
+                 del self._mock_storage[key]
+                 return True
+             return False
+         if self._client is None:
+             return False
+         return bool(self._client.delete(key) > 0)
+
+     def _keys(self, pattern: str) -> list[str]:
+         """Get keys matching pattern"""
+         if self.use_mock:
+             import fnmatch
+
+             return [k for k in self._mock_storage.keys() if fnmatch.fnmatch(k, pattern)]
+         if self._client is None:
+             return []
+         keys = self._client.keys(pattern)
+         return [k.decode() if isinstance(k, bytes) else str(k) for k in keys]
+
+     # === Working Memory (Stash/Retrieve) ===
+
+     def stash(
+         self,
+         key: str,
+         data: Any,
+         credentials: AgentCredentials,
+         ttl: TTLStrategy = TTLStrategy.WORKING_RESULTS,
+     ) -> bool:
+         """Stash data in short-term memory
+
+         Args:
+             key: Unique key for the data
+             data: Data to store (will be JSON serialized)
+             credentials: Agent credentials
+             ttl: Time-to-live strategy
+
+         Returns:
+             True if successful
+
+         Example:
+             >>> memory.stash("analysis_v1", {"findings": [...]}, creds)
+
+         """
+         if not credentials.can_stage():
+             raise PermissionError(
+                 f"Agent {credentials.agent_id} (Tier {credentials.tier.name}) "
+                 "cannot write to memory. Requires CONTRIBUTOR or higher.",
+             )
+
+         full_key = f"{self.PREFIX_WORKING}{credentials.agent_id}:{key}"
+         payload = {
+             "data": data,
+             "agent_id": credentials.agent_id,
+             "stashed_at": datetime.now().isoformat(),
+         }
+         return self._set(full_key, json.dumps(payload), ttl.value)
+
+     def retrieve(
+         self,
+         key: str,
+         credentials: AgentCredentials,
+         agent_id: str | None = None,
+     ) -> Any | None:
+         """Retrieve data from short-term memory
+
+         Args:
+             key: Key to retrieve
+             credentials: Agent credentials
+             agent_id: Owner agent ID (defaults to credentials agent)
+
+         Returns:
+             Retrieved data or None if not found
+
+         Example:
+             >>> data = memory.retrieve("analysis_v1", creds)
+
+         """
+         owner = agent_id or credentials.agent_id
+         full_key = f"{self.PREFIX_WORKING}{owner}:{key}"
+         raw = self._get(full_key)
+
+         if raw is None:
+             return None
+
+         payload = json.loads(raw)
+         return payload.get("data")
+
+     def clear_working_memory(self, credentials: AgentCredentials) -> int:
+         """Clear all working memory for an agent
+
+         Args:
+             credentials: Agent credentials (must own the memory or be Steward)
+
+         Returns:
+             Number of keys deleted
+
+         """
+         pattern = f"{self.PREFIX_WORKING}{credentials.agent_id}:*"
+         keys = self._keys(pattern)
+         count = 0
+         for key in keys:
+             if self._delete(key):
+                 count += 1
+         return count
+
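Taken together, stash/retrieve/clear_working_memory give each agent a namespaced scratch space keyed under empathy:working:<agent_id>:. A minimal round trip, using the mock backend so no Redis server is assumed:

# Sketch: working-memory round trip on the in-memory mock backend.
memory = RedisShortTermMemory(use_mock=True)
contributor = AgentCredentials(agent_id="agent_1", tier=AccessTier.CONTRIBUTOR)
memory.stash("analysis_results", {"issues": 3}, contributor)
assert memory.retrieve("analysis_results", contributor) == {"issues": 3}
memory.clear_working_memory(contributor)  # returns the number of keys deleted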
+     # === Pattern Staging ===
+
+     def stage_pattern(
+         self,
+         pattern: StagedPattern,
+         credentials: AgentCredentials,
+     ) -> bool:
+         """Stage a pattern for validation
+
+         Per EMPATHY_PHILOSOPHY.md: Patterns must be staged before
+         being promoted to the active library.
+
+         Args:
+             pattern: Pattern to stage
+             credentials: Must be CONTRIBUTOR or higher
+
+         Returns:
+             True if staged successfully
+
+         """
+         if not credentials.can_stage():
+             raise PermissionError(
+                 f"Agent {credentials.agent_id} cannot stage patterns. "
+                 "Requires CONTRIBUTOR tier or higher.",
+             )
+
+         key = f"{self.PREFIX_STAGED}{pattern.pattern_id}"
+         return self._set(
+             key,
+             json.dumps(pattern.to_dict()),
+             TTLStrategy.STAGED_PATTERNS.value,
+         )
+
+     def get_staged_pattern(
+         self,
+         pattern_id: str,
+         credentials: AgentCredentials,
+     ) -> StagedPattern | None:
+         """Retrieve a staged pattern
+
+         Args:
+             pattern_id: Pattern ID
+             credentials: Any tier can read
+
+         Returns:
+             StagedPattern or None
+
+         """
+         key = f"{self.PREFIX_STAGED}{pattern_id}"
+         raw = self._get(key)
+
+         if raw is None:
+             return None
+
+         return StagedPattern.from_dict(json.loads(raw))
+
+     def list_staged_patterns(
+         self,
+         credentials: AgentCredentials,
+     ) -> list[StagedPattern]:
+         """List all staged patterns awaiting validation
+
+         Args:
+             credentials: Any tier can read
+
+         Returns:
+             List of staged patterns
+
+         """
+         pattern = f"{self.PREFIX_STAGED}*"
+         keys = self._keys(pattern)
+         patterns = []
+
+         for key in keys:
+             raw = self._get(key)
+             if raw:
+                 patterns.append(StagedPattern.from_dict(json.loads(raw)))
+
+         return patterns
+
+     def promote_pattern(
+         self,
+         pattern_id: str,
+         credentials: AgentCredentials,
+     ) -> StagedPattern | None:
+         """Promote staged pattern (remove from staging for library add)
+
+         Args:
+             pattern_id: Pattern to promote
+             credentials: Must be VALIDATOR or higher
+
+         Returns:
+             The promoted pattern (for adding to PatternLibrary)
+
+         """
+         if not credentials.can_validate():
+             raise PermissionError(
+                 f"Agent {credentials.agent_id} cannot promote patterns. "
+                 "Requires VALIDATOR tier or higher.",
+             )
+
+         pattern = self.get_staged_pattern(pattern_id, credentials)
+         if pattern:
+             key = f"{self.PREFIX_STAGED}{pattern_id}"
+             self._delete(key)
+         return pattern
+
+     def reject_pattern(
+         self,
+         pattern_id: str,
+         credentials: AgentCredentials,
+         reason: str = "",
+     ) -> bool:
+         """Reject a staged pattern
+
+         Args:
+             pattern_id: Pattern to reject
+             credentials: Must be VALIDATOR or higher
+             reason: Rejection reason (for audit)
+
+         Returns:
+             True if rejected
+
+         """
+         if not credentials.can_validate():
+             raise PermissionError(
+                 f"Agent {credentials.agent_id} cannot reject patterns. "
+                 "Requires VALIDATOR tier or higher.",
+             )
+
+         key = f"{self.PREFIX_STAGED}{pattern_id}"
+         return self._delete(key)
+
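The four staging methods compose into a stage → review → promote (or reject) flow. Continuing the mock-backend sketch above, with a hypothetical pattern:

# Sketch: stage a pattern as a CONTRIBUTOR, promote it as a VALIDATOR.
validator = AgentCredentials(agent_id="agent_2", tier=AccessTier.VALIDATOR)
staged = StagedPattern(
    pattern_id="p-001",
    agent_id="agent_1",
    pattern_type="refactor",
    name="extract-helper",
    description="Extract repeated parsing into a shared helper",
)
memory.stage_pattern(staged, contributor)
assert len(memory.list_staged_patterns(contributor)) == 1
promoted = memory.promote_pattern("p-001", validator)  # removed from staging
# promoted can now be added to the long-term PatternLibrary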
+     # === Conflict Negotiation ===
+
+     def create_conflict_context(
+         self,
+         conflict_id: str,
+         positions: dict[str, Any],
+         interests: dict[str, list[str]],
+         credentials: AgentCredentials,
+         batna: str | None = None,
+     ) -> ConflictContext:
+         """Create context for principled negotiation
+
+         Per Getting to Yes framework:
+         - Separate positions from interests
+         - Define BATNA before negotiating
+
+         Args:
+             conflict_id: Unique conflict identifier
+             positions: agent_id -> their stated position
+             interests: agent_id -> underlying interests
+             credentials: Must be CONTRIBUTOR or higher
+             batna: Best Alternative to Negotiated Agreement
+
+         Returns:
+             ConflictContext for resolution
+
+         """
+         if not credentials.can_stage():
+             raise PermissionError(
+                 f"Agent {credentials.agent_id} cannot create conflict context. "
+                 "Requires CONTRIBUTOR tier or higher.",
+             )
+
+         context = ConflictContext(
+             conflict_id=conflict_id,
+             positions=positions,
+             interests=interests,
+             batna=batna,
+         )
+
+         key = f"{self.PREFIX_CONFLICT}{conflict_id}"
+         self._set(
+             key,
+             json.dumps(context.to_dict()),
+             TTLStrategy.CONFLICT_CONTEXT.value,
+         )
+
+         return context
+
+     def get_conflict_context(
+         self,
+         conflict_id: str,
+         credentials: AgentCredentials,
+     ) -> ConflictContext | None:
+         """Retrieve conflict context
+
+         Args:
+             conflict_id: Conflict identifier
+             credentials: Any tier can read
+
+         Returns:
+             ConflictContext or None
+
+         """
+         key = f"{self.PREFIX_CONFLICT}{conflict_id}"
+         raw = self._get(key)
+
+         if raw is None:
+             return None
+
+         return ConflictContext.from_dict(json.loads(raw))
+
+     def resolve_conflict(
+         self,
+         conflict_id: str,
+         resolution: str,
+         credentials: AgentCredentials,
+     ) -> bool:
+         """Mark conflict as resolved
+
+         Args:
+             conflict_id: Conflict to resolve
+             resolution: How it was resolved
+             credentials: Must be VALIDATOR or higher
+
+         Returns:
+             True if resolved
+
+         """
+         if not credentials.can_validate():
+             raise PermissionError(
+                 f"Agent {credentials.agent_id} cannot resolve conflicts. "
+                 "Requires VALIDATOR tier or higher.",
+             )
+
+         context = self.get_conflict_context(conflict_id, credentials)
+         if context is None:
+             return False
+
+         context.resolved = True
+         context.resolution = resolution
+
+         key = f"{self.PREFIX_CONFLICT}{conflict_id}"
+         # Keep resolved conflicts longer for audit
+         self._set(key, json.dumps(context.to_dict()), TTLStrategy.CONFLICT_CONTEXT.value)
+         return True
+
+     # === Coordination Signals ===
+
+     def send_signal(
+         self,
+         signal_type: str,
+         data: Any,
+         credentials: AgentCredentials,
+         target_agent: str | None = None,
+     ) -> bool:
+         """Send coordination signal to other agents
+
+         Args:
+             signal_type: Type of signal (e.g., "ready", "blocking", "complete")
+             data: Signal payload
+             credentials: Must be CONTRIBUTOR or higher
+             target_agent: Specific agent to signal (None = broadcast)
+
+         Returns:
+             True if sent
+
+         """
+         if not credentials.can_stage():
+             raise PermissionError(
+                 f"Agent {credentials.agent_id} cannot send signals. "
+                 "Requires CONTRIBUTOR tier or higher.",
+             )
+
+         target = target_agent or "broadcast"
+         key = f"{self.PREFIX_COORDINATION}{signal_type}:{credentials.agent_id}:{target}"
+         payload = {
+             "signal_type": signal_type,
+             "from_agent": credentials.agent_id,
+             "to_agent": target_agent,
+             "data": data,
+             "sent_at": datetime.now().isoformat(),
+         }
+         return self._set(key, json.dumps(payload), TTLStrategy.COORDINATION.value)
+
+     def receive_signals(
+         self,
+         credentials: AgentCredentials,
+         signal_type: str | None = None,
+     ) -> list[dict]:
+         """Receive coordination signals
+
+         Args:
+             credentials: Agent receiving signals
+             signal_type: Filter by signal type (optional)
+
+         Returns:
+             List of signals
+
+         """
+         if signal_type:
+             pattern = f"{self.PREFIX_COORDINATION}{signal_type}:*:{credentials.agent_id}"
+         else:
+             pattern = f"{self.PREFIX_COORDINATION}*:{credentials.agent_id}"
+
+         # Also get broadcasts
+         broadcast_pattern = f"{self.PREFIX_COORDINATION}*:*:broadcast"
+
+         keys = set(self._keys(pattern)) | set(self._keys(broadcast_pattern))
+         signals = []
+
+         for key in keys:
+             raw = self._get(key)
+             if raw:
+                 signals.append(json.loads(raw))
+
+         return signals
+
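Signals are just short-TTL keys (five minutes, per TTLStrategy.COORDINATION), so a broadcast stays visible to any agent that polls before expiry. Continuing the sketch:

# Sketch: broadcast a completion signal, then poll for it from another agent.
memory.send_signal("complete", {"task": "lint"}, contributor)  # no target = broadcast
signals = memory.receive_signals(validator)
assert any(s["signal_type"] == "complete" for s in signals)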
+     # === Session Management ===
+
+     def create_session(
+         self,
+         session_id: str,
+         credentials: AgentCredentials,
+         metadata: dict | None = None,
+     ) -> bool:
+         """Create a collaboration session
+
+         Args:
+             session_id: Unique session identifier
+             credentials: Session creator
+             metadata: Optional session metadata
+
+         Returns:
+             True if created
+
+         """
+         key = f"{self.PREFIX_SESSION}{session_id}"
+         payload = {
+             "session_id": session_id,
+             "created_by": credentials.agent_id,
+             "created_at": datetime.now().isoformat(),
+             "participants": [credentials.agent_id],
+             "metadata": metadata or {},
+         }
+         return self._set(key, json.dumps(payload), TTLStrategy.SESSION.value)
+
+     def join_session(
+         self,
+         session_id: str,
+         credentials: AgentCredentials,
+     ) -> bool:
+         """Join an existing session
+
+         Args:
+             session_id: Session to join
+             credentials: Joining agent
+
+         Returns:
+             True if joined
+
+         """
+         key = f"{self.PREFIX_SESSION}{session_id}"
+         raw = self._get(key)
+
+         if raw is None:
+             return False
+
+         payload = json.loads(raw)
+         if credentials.agent_id not in payload["participants"]:
+             payload["participants"].append(credentials.agent_id)
+
+         return self._set(key, json.dumps(payload), TTLStrategy.SESSION.value)
+
+     def get_session(
+         self,
+         session_id: str,
+         credentials: AgentCredentials,
+     ) -> dict | None:
+         """Get session information
+
+         Args:
+             session_id: Session identifier
+             credentials: Any participant can read
+
+         Returns:
+             Session data or None
+
+         """
+         key = f"{self.PREFIX_SESSION}{session_id}"
+         raw = self._get(key)
+
+         if raw is None:
+             return None
+
+         result: dict = json.loads(raw)
+         return result
+
+     # === Health Check ===
+
+     def ping(self) -> bool:
+         """Check Redis connection health
+
+         Returns:
+             True if connected and responsive
+
+         """
+         if self.use_mock:
+             return True
+         if self._client is None:
+             return False
+         try:
+             return bool(self._client.ping())
+         except Exception:
+             return False
+
+     def get_stats(self) -> dict:
+         """Get memory statistics
+
+         Returns:
+             Dict with memory stats
+
+         """
+         if self.use_mock:
+             return {
+                 "mode": "mock",
+                 "total_keys": len(self._mock_storage),
+                 "working_keys": len(
+                     [k for k in self._mock_storage if k.startswith(self.PREFIX_WORKING)],
+                 ),
+                 "staged_keys": len(
+                     [k for k in self._mock_storage if k.startswith(self.PREFIX_STAGED)],
+                 ),
+                 "conflict_keys": len(
+                     [k for k in self._mock_storage if k.startswith(self.PREFIX_CONFLICT)],
+                 ),
+             }
+
+         if self._client is None:
+             return {"mode": "disconnected", "error": "No Redis client"}
+         info = self._client.info("memory")
+         return {
+             "mode": "redis",
+             "used_memory": info.get("used_memory_human"),
+             "peak_memory": info.get("used_memory_peak_human"),
+             "total_keys": self._client.dbsize(),
+             "working_keys": len(self._keys(f"{self.PREFIX_WORKING}*")),
+             "staged_keys": len(self._keys(f"{self.PREFIX_STAGED}*")),
+             "conflict_keys": len(self._keys(f"{self.PREFIX_CONFLICT}*")),
+         }
+
+     def get_metrics(self) -> dict:
+         """Get operation metrics for observability.
+
+         Returns:
+             Dict with operation counts, latencies, and success rates
+
+         """
+         return self._metrics.to_dict()
+
+     def reset_metrics(self) -> None:
+         """Reset all metrics to zero."""
+         self._metrics = RedisMetrics()
+
+     # =========================================================================
+     # BATCH OPERATIONS
+     # =========================================================================
+
+     def stash_batch(
+         self,
+         items: list[tuple[str, Any]],
+         credentials: AgentCredentials,
+         ttl: TTLStrategy = TTLStrategy.WORKING_RESULTS,
+     ) -> int:
+         """Stash multiple items in a single operation.
+
+         Uses Redis pipeline for efficiency (reduces network round-trips).
+
+         Args:
+             items: List of (key, data) tuples
+             credentials: Agent credentials
+             ttl: Time-to-live strategy (applied to all items)
+
+         Returns:
+             Number of items successfully stashed
+
+         Example:
+             >>> items = [("key1", {"a": 1}), ("key2", {"b": 2})]
+             >>> count = memory.stash_batch(items, creds)
+
+         """
+         if not credentials.can_stage():
+             raise PermissionError(
+                 f"Agent {credentials.agent_id} cannot write to memory. "
+                 "Requires CONTRIBUTOR tier or higher.",
+             )
+
+         if not items:
+             return 0
+
+         start_time = time.perf_counter()
+
+         if self.use_mock:
+             count = 0
+             for key, data in items:
+                 full_key = f"{self.PREFIX_WORKING}{credentials.agent_id}:{key}"
+                 payload = {
+                     "data": data,
+                     "agent_id": credentials.agent_id,
+                     "stashed_at": datetime.now().isoformat(),
+                 }
+                 expires = datetime.now().timestamp() + ttl.value
+                 self._mock_storage[full_key] = (json.dumps(payload), expires)
+                 count += 1
+             latency_ms = (time.perf_counter() - start_time) * 1000
+             self._metrics.record_operation("stash_batch", latency_ms)
+             return count
+
+         if self._client is None:
+             return 0
+
+         pipe = self._client.pipeline()
+         for key, data in items:
+             full_key = f"{self.PREFIX_WORKING}{credentials.agent_id}:{key}"
+             payload = {
+                 "data": data,
+                 "agent_id": credentials.agent_id,
+                 "stashed_at": datetime.now().isoformat(),
+             }
+             pipe.setex(full_key, ttl.value, json.dumps(payload))
+
+         results = pipe.execute()
+         count = sum(1 for r in results if r)
+         latency_ms = (time.perf_counter() - start_time) * 1000
+         self._metrics.record_operation("stash_batch", latency_ms)
+
+         logger.info("batch_stash_complete", count=count, total=len(items))
+         return count
+
+     def retrieve_batch(
+         self,
+         keys: list[str],
+         credentials: AgentCredentials,
+         agent_id: str | None = None,
+     ) -> dict[str, Any]:
+         """Retrieve multiple items in a single operation.
+
+         Args:
+             keys: List of keys to retrieve
+             credentials: Agent credentials
+             agent_id: Owner agent ID (defaults to credentials agent)
+
+         Returns:
+             Dict mapping key to data (missing keys omitted)
+
+         Example:
+             >>> data = memory.retrieve_batch(["key1", "key2"], creds)
+             >>> print(data["key1"])
+
+         """
+         if not keys:
+             return {}
+
+         start_time = time.perf_counter()
+         owner = agent_id or credentials.agent_id
+         results: dict[str, Any] = {}
+
+         if self.use_mock:
+             for key in keys:
+                 full_key = f"{self.PREFIX_WORKING}{owner}:{key}"
+                 if full_key in self._mock_storage:
+                     value, expires = self._mock_storage[full_key]
+                     if expires is None or datetime.now().timestamp() < expires:
+                         payload = json.loads(str(value))
+                         results[key] = payload.get("data")
+             latency_ms = (time.perf_counter() - start_time) * 1000
+             self._metrics.record_operation("retrieve_batch", latency_ms)
+             return results
+
+         if self._client is None:
+             return {}
+
+         full_keys = [f"{self.PREFIX_WORKING}{owner}:{key}" for key in keys]
+         values = self._client.mget(full_keys)
+
+         for key, value in zip(keys, values, strict=False):
+             if value:
+                 payload = json.loads(str(value))
+                 results[key] = payload.get("data")
+
+         latency_ms = (time.perf_counter() - start_time) * 1000
+         self._metrics.record_operation("retrieve_batch", latency_ms)
+         return results
+
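Batch writes go through a single pipeline (one network round trip) and batch reads through MGET. Continuing the mock sketch:

# Sketch: stash two keys in one call, read three back (missing keys omitted).
items = [("key1", {"a": 1}), ("key2", {"b": 2})]
assert memory.stash_batch(items, contributor) == 2
data = memory.retrieve_batch(["key1", "key2", "missing"], contributor)
assert data == {"key1": {"a": 1}, "key2": {"b": 2}}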
1275
+ # =========================================================================
1276
+ # SCAN-BASED PAGINATION
1277
+ # =========================================================================
1278
+
1279
+ def list_staged_patterns_paginated(
1280
+ self,
1281
+ credentials: AgentCredentials,
1282
+ cursor: str = "0",
1283
+ count: int = 100,
1284
+ ) -> PaginatedResult:
1285
+ """List staged patterns with pagination using SCAN.
1286
+
1287
+ More efficient than list_staged_patterns() for large datasets.
1288
+
1289
+ Args:
1290
+ credentials: Agent credentials
1291
+ cursor: Pagination cursor (start with "0")
1292
+ count: Maximum items per page
1293
+
1294
+ Returns:
1295
+ PaginatedResult with items, cursor, and has_more flag
1296
+
1297
+ Example:
1298
+ >>> result = memory.list_staged_patterns_paginated(creds, "0", 10)
1299
+ >>> for pattern in result.items:
1300
+ ... print(pattern.name)
1301
+ >>> if result.has_more:
1302
+ ... next_result = memory.list_staged_patterns_paginated(creds, result.cursor, 10)
1303
+
1304
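+            A full drain loop might look like this (illustrative sketch;
+            ``handle`` is a hypothetical consumer):
+
+            >>> cursor = "0"
+            >>> while True:
+            ...     page = memory.list_staged_patterns_paginated(creds, cursor, 100)
+            ...     for pattern in page.items:
+            ...         handle(pattern)
+            ...     if not page.has_more:
+            ...         break
+            ...     cursor = page.cursor
+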
+ """
1305
+ start_time = time.perf_counter()
1306
+ pattern = f"{self.PREFIX_STAGED}*"
1307
+
1308
+ if self.use_mock:
1309
+ import fnmatch
1310
+
1311
+ all_keys = [k for k in self._mock_storage.keys() if fnmatch.fnmatch(k, pattern)]
1312
+ start_idx = int(cursor)
1313
+ end_idx = start_idx + count
1314
+ page_keys = all_keys[start_idx:end_idx]
1315
+
1316
+ patterns = []
1317
+ for key in page_keys:
1318
+ raw_value, expires = self._mock_storage[key]
1319
+ if expires is None or datetime.now().timestamp() < expires:
1320
+ patterns.append(StagedPattern.from_dict(json.loads(str(raw_value))))
1321
+
1322
+ new_cursor = str(end_idx) if end_idx < len(all_keys) else "0"
1323
+ has_more = end_idx < len(all_keys)
1324
+
1325
+ latency_ms = (time.perf_counter() - start_time) * 1000
1326
+ self._metrics.record_operation("list_paginated", latency_ms)
1327
+
1328
+ return PaginatedResult(
1329
+ items=patterns,
1330
+ cursor=new_cursor,
1331
+ has_more=has_more,
1332
+ total_scanned=len(page_keys),
1333
+ )
1334
+
1335
+ if self._client is None:
1336
+ return PaginatedResult(items=[], cursor="0", has_more=False)
1337
+
1338
+ # Use SCAN for efficient iteration
1339
+ new_cursor, keys = self._client.scan(cursor=int(cursor), match=pattern, count=count)
1340
+
1341
+ patterns = []
1342
+ for key in keys:
1343
+ raw = self._client.get(key)
1344
+ if raw:
1345
+ patterns.append(StagedPattern.from_dict(json.loads(raw)))
1346
+
1347
+ has_more = new_cursor != 0
1348
+
1349
+ latency_ms = (time.perf_counter() - start_time) * 1000
1350
+ self._metrics.record_operation("list_paginated", latency_ms)
1351
+
1352
+ return PaginatedResult(
1353
+ items=patterns,
1354
+ cursor=str(new_cursor),
1355
+ has_more=has_more,
1356
+ total_scanned=len(keys),
1357
+ )
1358
+
1359
+ def scan_keys(
1360
+ self,
1361
+ pattern: str,
1362
+ cursor: str = "0",
1363
+ count: int = 100,
1364
+ ) -> PaginatedResult:
1365
+ """Scan keys matching a pattern with pagination.
1366
+
1367
+ Args:
1368
+ pattern: Key pattern (e.g., "empathy:working:*")
1369
+ cursor: Pagination cursor
1370
+ count: Items per page
1371
+
1372
+ Returns:
1373
+ PaginatedResult with key strings
1374
+
1375
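+        Example (illustrative sketch; assumes a connected instance ``memory``):
+            >>> page = memory.scan_keys("empathy:working:*", cursor="0", count=50)
+            >>> for key in page.items:
+            ...     print(key)
+            >>> if page.has_more:
+            ...     page = memory.scan_keys("empathy:working:*", page.cursor, 50)
+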
+ """
1376
+ if self.use_mock:
1377
+ import fnmatch
1378
+
1379
+ all_keys = [k for k in self._mock_storage.keys() if fnmatch.fnmatch(k, pattern)]
1380
+ start_idx = int(cursor)
1381
+ end_idx = start_idx + count
1382
+ page_keys = all_keys[start_idx:end_idx]
1383
+ new_cursor = str(end_idx) if end_idx < len(all_keys) else "0"
1384
+ has_more = end_idx < len(all_keys)
1385
+ return PaginatedResult(items=page_keys, cursor=new_cursor, has_more=has_more)
1386
+
1387
+ if self._client is None:
1388
+ return PaginatedResult(items=[], cursor="0", has_more=False)
1389
+
1390
+ new_cursor, keys = self._client.scan(cursor=int(cursor), match=pattern, count=count)
1391
+ return PaginatedResult(
1392
+ items=[str(k) for k in keys],
1393
+ cursor=str(new_cursor),
1394
+ has_more=new_cursor != 0,
1395
+ )
1396
+
1397
+ # =========================================================================
1398
+ # PUB/SUB FOR REAL-TIME NOTIFICATIONS
1399
+ # =========================================================================
1400
+
1401
+ def publish(
1402
+ self,
1403
+ channel: str,
1404
+ message: dict,
1405
+ credentials: AgentCredentials,
1406
+ ) -> int:
1407
+ """Publish a message to a channel for real-time notifications.
1408
+
1409
+ Args:
1410
+ channel: Channel name (will be prefixed)
1411
+ message: Message payload (dict)
1412
+ credentials: Agent credentials (must be CONTRIBUTOR+)
1413
+
1414
+ Returns:
1415
+ Number of subscribers that received the message
1416
+
1417
+ Example:
1418
+ >>> memory.publish("agent_signals", {"event": "task_complete", "task_id": "123"}, creds)
1419
+
1420
+ """
1421
+ if not credentials.can_stage():
1422
+ raise PermissionError(
1423
+ f"Agent {credentials.agent_id} cannot publish. Requires CONTRIBUTOR tier or higher.",
1424
+ )
1425
+
1426
+ start_time = time.perf_counter()
1427
+ full_channel = f"{self.PREFIX_PUBSUB}{channel}"
1428
+
1429
+ payload = {
1430
+ "channel": channel,
1431
+ "from_agent": credentials.agent_id,
1432
+ "timestamp": datetime.now().isoformat(),
1433
+ "data": message,
1434
+ }
1435
+
1436
+ if self.use_mock:
1437
+ handlers = self._mock_pubsub_handlers.get(full_channel, [])
1438
+ for handler in handlers:
1439
+ try:
1440
+ handler(payload)
1441
+ except Exception as e:
1442
+ logger.warning("pubsub_handler_error", channel=channel, error=str(e))
1443
+ latency_ms = (time.perf_counter() - start_time) * 1000
1444
+ self._metrics.record_operation("publish", latency_ms)
1445
+ return len(handlers)
1446
+
1447
+ if self._client is None:
1448
+ return 0
1449
+
1450
+ count = self._client.publish(full_channel, json.dumps(payload))
1451
+ latency_ms = (time.perf_counter() - start_time) * 1000
1452
+ self._metrics.record_operation("publish", latency_ms)
1453
+
1454
+ logger.debug("pubsub_published", channel=channel, subscribers=count)
1455
+ return int(count)
1456
+
1457
+ def subscribe(
1458
+ self,
1459
+ channel: str,
1460
+ handler: Callable[[dict], None],
1461
+ credentials: AgentCredentials | None = None,
1462
+ ) -> bool:
1463
+ """Subscribe to a channel for real-time notifications.
1464
+
1465
+ Args:
1466
+ channel: Channel name to subscribe to
1467
+ handler: Callback function receiving message dict
1468
+ credentials: Optional credentials (any tier can subscribe)
1469
+
1470
+ Returns:
1471
+ True if subscribed successfully
1472
+
1473
+ Example:
1474
+ >>> def on_message(msg):
1475
+ ... print(f"Received: {msg['data']}")
1476
+ >>> memory.subscribe("agent_signals", on_message)
1477
+
1478
+ """
1479
+ full_channel = f"{self.PREFIX_PUBSUB}{channel}"
1480
+
1481
+ if self.use_mock:
1482
+ if full_channel not in self._mock_pubsub_handlers:
1483
+ self._mock_pubsub_handlers[full_channel] = []
1484
+ self._mock_pubsub_handlers[full_channel].append(handler)
1485
+ logger.info("pubsub_subscribed_mock", channel=channel)
1486
+ return True
1487
+
1488
+ if self._client is None:
1489
+ return False
1490
+
1491
+ # Store handler
1492
+ if full_channel not in self._subscriptions:
1493
+ self._subscriptions[full_channel] = []
1494
+ self._subscriptions[full_channel].append(handler)
1495
+
1496
+ # Create pubsub if needed
1497
+ if self._pubsub is None:
1498
+ self._pubsub = self._client.pubsub()
1499
+
1500
+ # Subscribe
1501
+ self._pubsub.subscribe(**{full_channel: self._pubsub_message_handler})
1502
+
1503
+ # Start listener thread if not running
1504
+ if not self._pubsub_running:
1505
+ self._pubsub_running = True
1506
+ self._pubsub_thread = threading.Thread(
1507
+ target=self._pubsub_listener,
1508
+ daemon=True,
1509
+ name="redis-pubsub-listener",
1510
+ )
1511
+ self._pubsub_thread.start()
1512
+
1513
+ logger.info("pubsub_subscribed", channel=channel)
1514
+ return True
1515
+
1516
+ def _pubsub_message_handler(self, message: dict) -> None:
1517
+ """Internal handler for pubsub messages."""
1518
+ if message["type"] != "message":
1519
+ return
1520
+
1521
+ channel = message["channel"]
1522
+ if isinstance(channel, bytes):
1523
+ channel = channel.decode()
1524
+
1525
+ try:
1526
+ payload = json.loads(message["data"])
1527
+ except json.JSONDecodeError:
1528
+ payload = {"raw": message["data"]}
1529
+
1530
+ handlers = self._subscriptions.get(channel, [])
1531
+ for handler in handlers:
1532
+ try:
1533
+ handler(payload)
1534
+ except Exception as e:
1535
+ logger.warning("pubsub_handler_error", channel=channel, error=str(e))
1536
+
1537
+ def _pubsub_listener(self) -> None:
1538
+ """Background thread for listening to pubsub messages."""
1539
+ while self._pubsub_running and self._pubsub:
1540
+ try:
1541
+ self._pubsub.get_message(ignore_subscribe_messages=True, timeout=1.0)
1542
+ except Exception as e:
1543
+ logger.warning("pubsub_listener_error", error=str(e))
1544
+ time.sleep(1)
1545
+
1546
+ def unsubscribe(self, channel: str) -> bool:
1547
+ """Unsubscribe from a channel.
1548
+
1549
+ Args:
1550
+ channel: Channel name to unsubscribe from
1551
+
1552
+ Returns:
1553
+ True if unsubscribed successfully
1554
+
1555
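+        Example (illustrative; ``on_message`` as registered via subscribe()):
+            >>> memory.subscribe("agent_signals", on_message)
+            >>> memory.unsubscribe("agent_signals")
+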
+ """
1556
+ full_channel = f"{self.PREFIX_PUBSUB}{channel}"
1557
+
1558
+ if self.use_mock:
1559
+ self._mock_pubsub_handlers.pop(full_channel, None)
1560
+ return True
1561
+
1562
+ if self._pubsub is None:
1563
+ return False
1564
+
1565
+ self._pubsub.unsubscribe(full_channel)
1566
+ self._subscriptions.pop(full_channel, None)
1567
+ return True
1568
+
1569
+ def close_pubsub(self) -> None:
1570
+ """Close pubsub connection and stop listener thread."""
1571
+ self._pubsub_running = False
1572
+ if self._pubsub:
1573
+ self._pubsub.close()
1574
+ self._pubsub = None
1575
+ self._subscriptions.clear()
1576
+
1577
+ # =========================================================================
1578
+ # REDIS STREAMS FOR AUDIT TRAILS
1579
+ # =========================================================================
1580
+
1581
+ def stream_append(
1582
+ self,
1583
+ stream_name: str,
1584
+ data: dict,
1585
+ credentials: AgentCredentials,
1586
+ max_len: int = 10000,
1587
+ ) -> str | None:
1588
+ """Append an entry to a Redis Stream for audit trails.
1589
+
1590
+        Streams provide:
+        - Ordered, persistent event log
+        - Consumer groups for distributed processing
+        - Bounded retention (entries beyond ``max_len`` are trimmed)
+
+        Args:
+            stream_name: Name of the stream
+            data: Event data to append
+            credentials: Agent credentials (must be CONTRIBUTOR+)
+            max_len: Maximum stream length (older entries trimmed)
+
+        Returns:
+            Entry ID if successful, None otherwise
+
+        Example:
+            >>> entry_id = memory.stream_append("audit", {"action": "pattern_promoted", "pattern_id": "xyz"}, creds)
+
+        """
+        if not credentials.can_stage():
+            raise PermissionError(
+                f"Agent {credentials.agent_id} cannot write to stream. "
+                "Requires CONTRIBUTOR tier or higher.",
+            )
+
+        start_time = time.perf_counter()
+        full_stream = f"{self.PREFIX_STREAM}{stream_name}"
+
+        entry = {
+            "agent_id": credentials.agent_id,
+            "timestamp": datetime.now().isoformat(),
+            **{
+                str(k): json.dumps(v) if isinstance(v, dict | list) else str(v)
+                for k, v in data.items()
+            },
+        }
+
+        if self.use_mock:
+            if full_stream not in self._mock_streams:
+                self._mock_streams[full_stream] = []
+            entry_id = f"{int(datetime.now().timestamp() * 1000)}-0"
+            self._mock_streams[full_stream].append((entry_id, entry))
+            # Trim to max_len
+            if len(self._mock_streams[full_stream]) > max_len:
+                self._mock_streams[full_stream] = self._mock_streams[full_stream][-max_len:]
+            latency_ms = (time.perf_counter() - start_time) * 1000
+            self._metrics.record_operation("stream_append", latency_ms)
+            return entry_id
+
+        if self._client is None:
+            return None
+
+        entry_id = self._client.xadd(full_stream, entry, maxlen=max_len)
+        latency_ms = (time.perf_counter() - start_time) * 1000
+        self._metrics.record_operation("stream_append", latency_ms)
+
+        return str(entry_id) if entry_id else None
+
+    def stream_read(
+        self,
+        stream_name: str,
+        credentials: AgentCredentials,
+        start_id: str = "0",
+        count: int = 100,
+    ) -> list[tuple[str, dict]]:
+        """Read entries from a Redis Stream.
+
+        Args:
+            stream_name: Name of the stream
+            credentials: Agent credentials
+            start_id: Start reading from this ID ("0" = beginning)
+            count: Maximum entries to read
+
+        Returns:
+            List of (entry_id, data) tuples
+
+        Example:
+            >>> entries = memory.stream_read("audit", creds, count=50)
+            >>> for entry_id, data in entries:
+            ...     print(f"{entry_id}: {data}")
+
+        """
+        full_stream = f"{self.PREFIX_STREAM}{stream_name}"
+
+        if self.use_mock:
+            if full_stream not in self._mock_streams:
+                return []
+            entries = self._mock_streams[full_stream]
+            # Filter by start_id (simple comparison)
+            filtered = [(eid, data) for eid, data in entries if eid > start_id]
+            return filtered[:count]
+
+        if self._client is None:
+            return []
+
+        result = self._client.xrange(full_stream, min=start_id, count=count)
+        return [(str(entry_id), {str(k): v for k, v in data.items()}) for entry_id, data in result]
+
+    def stream_read_new(
+        self,
+        stream_name: str,
+        credentials: AgentCredentials,
+        block_ms: int = 0,
+        count: int = 100,
+    ) -> list[tuple[str, dict]]:
+        """Read only new entries from a stream (blocking read).
+
+        Args:
+            stream_name: Name of the stream
+            credentials: Agent credentials
+            block_ms: Milliseconds to block waiting (0 = no block)
+            count: Maximum entries to read
+
+        Returns:
+            List of (entry_id, data) tuples
+
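+        Example (illustrative sketch; assumes a live Redis connection):
+            >>> # Wait up to 5 seconds for entries appended after this call
+            >>> entries = memory.stream_read_new("audit", creds, block_ms=5000)
+            >>> for entry_id, data in entries:
+            ...     print(entry_id, data)
+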
+ """
1706
+ full_stream = f"{self.PREFIX_STREAM}{stream_name}"
1707
+
1708
+ if self.use_mock:
1709
+ return [] # Mock doesn't support blocking reads
1710
+
1711
+ if self._client is None:
1712
+ return []
1713
+
+        # redis-py treats block=0 as "block forever", so map the documented
+        # "0 = no block" to a non-blocking read. "$" reads only entries
+        # appended after this call.
+        result = self._client.xread(
+            {full_stream: "$"},
+            block=block_ms if block_ms > 0 else None,
+            count=count,
+        )
+        if not result:
+            return []
+
+        # Result format: [(stream_name, [(entry_id, data), ...])]
+        entries = []
+        for _stream, stream_entries in result:
+            for entry_id, data in stream_entries:
+                entries.append((str(entry_id), {str(k): v for k, v in data.items()}))
+        return entries
+
+    # =========================================================================
+    # TIME-WINDOW QUERIES (SORTED SETS)
+    # =========================================================================
+
+    def timeline_add(
+        self,
+        timeline_name: str,
+        event_id: str,
+        data: dict,
+        credentials: AgentCredentials,
+        timestamp: datetime | None = None,
+    ) -> bool:
+        """Add an event to a timeline (sorted set by timestamp).
+
+        Args:
+            timeline_name: Name of the timeline
+            event_id: Unique event identifier
+            data: Event data
+            credentials: Agent credentials
+            timestamp: Event timestamp (defaults to now)
+
+        Returns:
+            True if added successfully
+
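+        Example (illustrative; assumes CONTRIBUTOR-tier ``creds``):
+            >>> memory.timeline_add(
+            ...     "agent_events", "evt_001", {"event": "task_started"}, creds
+            ... )
+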
+ """
1750
+ if not credentials.can_stage():
1751
+ raise PermissionError(
1752
+ f"Agent {credentials.agent_id} cannot write to timeline. "
1753
+ "Requires CONTRIBUTOR tier or higher.",
1754
+ )
1755
+
1756
+ full_timeline = f"{self.PREFIX_TIMELINE}{timeline_name}"
1757
+ ts = timestamp or datetime.now()
1758
+ score = ts.timestamp()
1759
+
1760
+ payload = json.dumps(
1761
+ {
1762
+ "event_id": event_id,
1763
+ "timestamp": ts.isoformat(),
1764
+ "agent_id": credentials.agent_id,
1765
+ "data": data,
1766
+ },
1767
+ )
1768
+
1769
+ if self.use_mock:
1770
+ if full_timeline not in self._mock_sorted_sets:
1771
+ self._mock_sorted_sets[full_timeline] = []
1772
+ self._mock_sorted_sets[full_timeline].append((score, payload))
1773
+ self._mock_sorted_sets[full_timeline].sort(key=lambda x: x[0])
1774
+ return True
1775
+
1776
+ if self._client is None:
1777
+ return False
1778
+
1779
+ self._client.zadd(full_timeline, {payload: score})
1780
+ return True
1781
+
1782
+ def timeline_query(
1783
+ self,
1784
+ timeline_name: str,
1785
+ credentials: AgentCredentials,
1786
+ query: TimeWindowQuery | None = None,
1787
+ ) -> list[dict]:
1788
+ """Query events from a timeline within a time window.
1789
+
1790
+ Args:
1791
+ timeline_name: Name of the timeline
1792
+ credentials: Agent credentials
1793
+ query: Time window query parameters
1794
+
1795
+ Returns:
1796
+ List of events in the time window
1797
+
1798
+ Example:
1799
+ >>> from datetime import datetime, timedelta
1800
+ >>> query = TimeWindowQuery(
1801
+ ... start_time=datetime.now() - timedelta(hours=1),
1802
+ ... end_time=datetime.now(),
1803
+ ... limit=50
1804
+ ... )
1805
+ >>> events = memory.timeline_query("agent_events", creds, query)
1806
+
1807
+ """
1808
+ full_timeline = f"{self.PREFIX_TIMELINE}{timeline_name}"
1809
+ q = query or TimeWindowQuery()
1810
+
1811
+ if self.use_mock:
1812
+ if full_timeline not in self._mock_sorted_sets:
1813
+ return []
1814
+ entries = self._mock_sorted_sets[full_timeline]
1815
+ filtered = [
1816
+ json.loads(payload)
1817
+ for score, payload in entries
1818
+ if q.start_score <= score <= q.end_score
1819
+ ]
1820
+ return filtered[q.offset : q.offset + q.limit]
1821
+
1822
+ if self._client is None:
1823
+ return []
1824
+
1825
+ results = self._client.zrangebyscore(
1826
+ full_timeline,
1827
+ min=q.start_score,
1828
+ max=q.end_score,
1829
+ start=q.offset,
1830
+ num=q.limit,
1831
+ )
1832
+
1833
+ return [json.loads(r) for r in results]
1834
+
1835
+ def timeline_count(
1836
+ self,
1837
+ timeline_name: str,
1838
+ credentials: AgentCredentials,
1839
+ query: TimeWindowQuery | None = None,
1840
+ ) -> int:
1841
+ """Count events in a timeline within a time window.
1842
+
1843
+ Args:
1844
+ timeline_name: Name of the timeline
1845
+ credentials: Agent credentials
1846
+ query: Time window query parameters
1847
+
1848
+ Returns:
1849
+ Number of events in the time window
1850
+
1851
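+        Example (illustrative sketch; mirrors the timeline_query() window syntax):
+            >>> from datetime import datetime, timedelta
+            >>> window = TimeWindowQuery(
+            ...     start_time=datetime.now() - timedelta(hours=1),
+            ...     end_time=datetime.now(),
+            ... )
+            >>> n = memory.timeline_count("agent_events", creds, window)
+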
+ """
1852
+ full_timeline = f"{self.PREFIX_TIMELINE}{timeline_name}"
1853
+ q = query or TimeWindowQuery()
1854
+
1855
+ if self.use_mock:
1856
+ if full_timeline not in self._mock_sorted_sets:
1857
+ return 0
1858
+ entries = self._mock_sorted_sets[full_timeline]
1859
+ return len([1 for score, _ in entries if q.start_score <= score <= q.end_score])
1860
+
1861
+ if self._client is None:
1862
+ return 0
1863
+
1864
+ return int(self._client.zcount(full_timeline, q.start_score, q.end_score))
1865
+
1866
+ # =========================================================================
1867
+ # TASK QUEUES (LISTS)
1868
+ # =========================================================================
1869
+
1870
+ def queue_push(
1871
+ self,
1872
+ queue_name: str,
1873
+ task: dict,
1874
+ credentials: AgentCredentials,
1875
+ priority: bool = False,
1876
+ ) -> int:
1877
+ """Push a task to a queue.
1878
+
1879
+ Args:
1880
+ queue_name: Name of the queue
1881
+ task: Task data
1882
+ credentials: Agent credentials (must be CONTRIBUTOR+)
1883
+ priority: If True, push to front (high priority)
1884
+
1885
+ Returns:
1886
+ New queue length
1887
+
1888
+ Example:
1889
+ >>> task = {"type": "analyze", "file": "main.py"}
1890
+ >>> memory.queue_push("agent_tasks", task, creds)
1891
+
1892
+ """
1893
+ if not credentials.can_stage():
1894
+ raise PermissionError(
1895
+ f"Agent {credentials.agent_id} cannot push to queue. "
1896
+ "Requires CONTRIBUTOR tier or higher.",
1897
+ )
1898
+
1899
+ full_queue = f"{self.PREFIX_QUEUE}{queue_name}"
1900
+ payload = json.dumps(
1901
+ {
1902
+ "task": task,
1903
+ "queued_by": credentials.agent_id,
1904
+ "queued_at": datetime.now().isoformat(),
1905
+ },
1906
+ )
1907
+
1908
+ if self.use_mock:
1909
+ if full_queue not in self._mock_lists:
1910
+ self._mock_lists[full_queue] = []
1911
+ if priority:
1912
+ self._mock_lists[full_queue].insert(0, payload)
1913
+ else:
1914
+ self._mock_lists[full_queue].append(payload)
1915
+ return len(self._mock_lists[full_queue])
1916
+
1917
+ if self._client is None:
1918
+ return 0
1919
+
1920
+ if priority:
1921
+ return int(self._client.lpush(full_queue, payload))
1922
+ return int(self._client.rpush(full_queue, payload))
1923
+
1924
+ def queue_pop(
1925
+ self,
1926
+ queue_name: str,
1927
+ credentials: AgentCredentials,
1928
+ timeout: int = 0,
1929
+ ) -> dict | None:
1930
+ """Pop a task from a queue.
1931
+
1932
+ Args:
1933
+ queue_name: Name of the queue
1934
+ credentials: Agent credentials
1935
+ timeout: Seconds to block waiting (0 = no block)
1936
+
1937
+ Returns:
1938
+ Task data or None if queue empty
1939
+
1940
+ Example:
1941
+ >>> task = memory.queue_pop("agent_tasks", creds, timeout=5)
1942
+ >>> if task:
1943
+ ... process(task["task"])
1944
+
1945
+ """
1946
+ full_queue = f"{self.PREFIX_QUEUE}{queue_name}"
1947
+
1948
+ if self.use_mock:
1949
+ if full_queue not in self._mock_lists or not self._mock_lists[full_queue]:
1950
+ return None
1951
+ payload = self._mock_lists[full_queue].pop(0)
1952
+ data: dict = json.loads(payload)
1953
+ return data
1954
+
1955
+ if self._client is None:
1956
+ return None
1957
+
1958
+ if timeout > 0:
1959
+ result = self._client.blpop(full_queue, timeout=timeout)
1960
+ if result:
1961
+ data = json.loads(result[1])
1962
+ return data
1963
+ return None
1964
+
1965
+ result = self._client.lpop(full_queue)
1966
+ if result:
1967
+ data = json.loads(result)
1968
+ return data
1969
+ return None
1970
+
1971
+ def queue_length(self, queue_name: str) -> int:
1972
+ """Get the length of a queue.
1973
+
1974
+ Args:
1975
+ queue_name: Name of the queue
1976
+
1977
+ Returns:
1978
+ Number of items in the queue
1979
+
1980
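+        Example (illustrative):
+            >>> if memory.queue_length("agent_tasks") == 0:
+            ...     print("queue drained")
+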
+ """
1981
+ full_queue = f"{self.PREFIX_QUEUE}{queue_name}"
1982
+
1983
+ if self.use_mock:
1984
+ return len(self._mock_lists.get(full_queue, []))
1985
+
1986
+ if self._client is None:
1987
+ return 0
1988
+
1989
+ return int(self._client.llen(full_queue))
1990
+
1991
+ def queue_peek(
1992
+ self,
1993
+ queue_name: str,
1994
+ credentials: AgentCredentials,
1995
+ count: int = 1,
1996
+ ) -> list[dict]:
1997
+ """Peek at tasks in a queue without removing them.
1998
+
1999
+ Args:
2000
+ queue_name: Name of the queue
2001
+ credentials: Agent credentials
2002
+ count: Number of items to peek
2003
+
2004
+ Returns:
2005
+ List of task data
2006
+
2007
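+        Example (illustrative; payload fields follow queue_push()):
+            >>> upcoming = memory.queue_peek("agent_tasks", creds, count=3)
+            >>> for item in upcoming:
+            ...     print(item["queued_by"], item["task"])
+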
+ """
2008
+ full_queue = f"{self.PREFIX_QUEUE}{queue_name}"
2009
+
2010
+ if self.use_mock:
2011
+ items = self._mock_lists.get(full_queue, [])[:count]
2012
+ return [json.loads(item) for item in items]
2013
+
2014
+ if self._client is None:
2015
+ return []
2016
+
2017
+ items = self._client.lrange(full_queue, 0, count - 1)
2018
+ return [json.loads(item) for item in items]
2019
+
2020
+ # =========================================================================
2021
+ # ATOMIC TRANSACTIONS
2022
+ # =========================================================================
2023
+
2024
+ def atomic_promote_pattern(
2025
+ self,
2026
+ pattern_id: str,
2027
+ credentials: AgentCredentials,
2028
+ min_confidence: float = 0.0,
2029
+ ) -> tuple[bool, StagedPattern | None, str]:
2030
+ """Atomically promote a pattern with validation.
2031
+
2032
+ Uses Redis transaction (MULTI/EXEC) to ensure:
2033
+ - Pattern exists and meets confidence threshold
2034
+ - Pattern is removed from staging atomically
2035
+ - No race conditions with concurrent operations
2036
+
2037
+ Args:
2038
+ pattern_id: Pattern to promote
2039
+ credentials: Must be VALIDATOR or higher
2040
+ min_confidence: Minimum confidence threshold
2041
+
2042
+ Returns:
2043
+ Tuple of (success, pattern, message)
2044
+
2045
+ Example:
2046
+ >>> success, pattern, msg = memory.atomic_promote_pattern("pat_123", creds, min_confidence=0.7)
2047
+ >>> if success:
2048
+ ... library.add(pattern)
2049
+
2050
+ """
2051
+ if not credentials.can_validate():
2052
+ return False, None, "Requires VALIDATOR tier or higher"
2053
+
2054
+ key = f"{self.PREFIX_STAGED}{pattern_id}"
2055
+
2056
+ if self.use_mock:
2057
+ if key not in self._mock_storage:
2058
+ return False, None, "Pattern not found"
2059
+ value, expires = self._mock_storage[key]
2060
+ if expires and datetime.now().timestamp() >= expires:
2061
+ return False, None, "Pattern expired"
2062
+ pattern = StagedPattern.from_dict(json.loads(str(value)))
2063
+ if pattern.confidence < min_confidence:
2064
+ return (
2065
+ False,
2066
+ None,
2067
+ f"Confidence {pattern.confidence} below threshold {min_confidence}",
2068
+ )
2069
+ del self._mock_storage[key]
2070
+ return True, pattern, "Pattern promoted successfully"
2071
+
2072
+ if self._client is None:
2073
+ return False, None, "Redis not connected"
2074
+
2075
+        # Use WATCH for optimistic locking. In redis-py, WATCH/MULTI/EXEC
+        # must be issued on a pipeline object, not on the client itself.
+        pipe = self._client.pipeline()
+        try:
+            pipe.watch(key)
+            raw = pipe.get(key)  # immediate mode while watching
+
+            if raw is None:
+                return False, None, "Pattern not found"
+
+            pattern = StagedPattern.from_dict(json.loads(raw))
+
+            if pattern.confidence < min_confidence:
+                return (
+                    False,
+                    None,
+                    f"Confidence {pattern.confidence} below threshold {min_confidence}",
+                )
+
+            # Queue the delete and execute atomically; raises WatchError
+            # if the key changed since WATCH.
+            pipe.multi()
+            pipe.delete(key)
+            pipe.execute()
+
+            return True, pattern, "Pattern promoted successfully"
+
+        except redis.WatchError:
+            return False, None, "Pattern was modified by another process"
+        finally:
+            pipe.reset()
+
+    # =========================================================================
+    # CLEANUP AND LIFECYCLE
+    # =========================================================================
+
+    def close(self) -> None:
+        """Close all connections and cleanup resources."""
+        self.close_pubsub()
+        if self._client:
+            self._client.close()
+            self._client = None
+        logger.info("redis_connection_closed")