empathy-framework 5.3.0-py3-none-any.whl → 5.4.0-py3-none-any.whl

This diff shows the changes between publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
Files changed (458)
  1. empathy_framework-5.4.0.dist-info/METADATA +47 -0
  2. empathy_framework-5.4.0.dist-info/RECORD +8 -0
  3. {empathy_framework-5.3.0.dist-info → empathy_framework-5.4.0.dist-info}/top_level.txt +0 -1
  4. empathy_healthcare_plugin/__init__.py +12 -11
  5. empathy_llm_toolkit/__init__.py +12 -26
  6. empathy_os/__init__.py +12 -356
  7. empathy_software_plugin/__init__.py +12 -11
  8. empathy_framework-5.3.0.dist-info/METADATA +0 -1026
  9. empathy_framework-5.3.0.dist-info/RECORD +0 -456
  10. empathy_framework-5.3.0.dist-info/entry_points.txt +0 -26
  11. empathy_framework-5.3.0.dist-info/licenses/LICENSE +0 -201
  12. empathy_framework-5.3.0.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -101
  13. empathy_healthcare_plugin/monitors/__init__.py +0 -9
  14. empathy_healthcare_plugin/monitors/clinical_protocol_monitor.py +0 -315
  15. empathy_healthcare_plugin/monitors/monitoring/__init__.py +0 -44
  16. empathy_healthcare_plugin/monitors/monitoring/protocol_checker.py +0 -300
  17. empathy_healthcare_plugin/monitors/monitoring/protocol_loader.py +0 -214
  18. empathy_healthcare_plugin/monitors/monitoring/sensor_parsers.py +0 -306
  19. empathy_healthcare_plugin/monitors/monitoring/trajectory_analyzer.py +0 -389
  20. empathy_healthcare_plugin/protocols/cardiac.json +0 -93
  21. empathy_healthcare_plugin/protocols/post_operative.json +0 -92
  22. empathy_healthcare_plugin/protocols/respiratory.json +0 -92
  23. empathy_healthcare_plugin/protocols/sepsis.json +0 -141
  24. empathy_llm_toolkit/README.md +0 -553
  25. empathy_llm_toolkit/agent_factory/__init__.py +0 -53
  26. empathy_llm_toolkit/agent_factory/adapters/__init__.py +0 -85
  27. empathy_llm_toolkit/agent_factory/adapters/autogen_adapter.py +0 -312
  28. empathy_llm_toolkit/agent_factory/adapters/crewai_adapter.py +0 -483
  29. empathy_llm_toolkit/agent_factory/adapters/haystack_adapter.py +0 -298
  30. empathy_llm_toolkit/agent_factory/adapters/langchain_adapter.py +0 -362
  31. empathy_llm_toolkit/agent_factory/adapters/langgraph_adapter.py +0 -333
  32. empathy_llm_toolkit/agent_factory/adapters/native.py +0 -228
  33. empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +0 -423
  34. empathy_llm_toolkit/agent_factory/base.py +0 -305
  35. empathy_llm_toolkit/agent_factory/crews/__init__.py +0 -67
  36. empathy_llm_toolkit/agent_factory/crews/code_review.py +0 -1113
  37. empathy_llm_toolkit/agent_factory/crews/health_check.py +0 -1262
  38. empathy_llm_toolkit/agent_factory/crews/refactoring.py +0 -1128
  39. empathy_llm_toolkit/agent_factory/crews/security_audit.py +0 -1018
  40. empathy_llm_toolkit/agent_factory/decorators.py +0 -287
  41. empathy_llm_toolkit/agent_factory/factory.py +0 -558
  42. empathy_llm_toolkit/agent_factory/framework.py +0 -193
  43. empathy_llm_toolkit/agent_factory/memory_integration.py +0 -328
  44. empathy_llm_toolkit/agent_factory/resilient.py +0 -320
  45. empathy_llm_toolkit/agents_md/__init__.py +0 -22
  46. empathy_llm_toolkit/agents_md/loader.py +0 -218
  47. empathy_llm_toolkit/agents_md/parser.py +0 -271
  48. empathy_llm_toolkit/agents_md/registry.py +0 -307
  49. empathy_llm_toolkit/claude_memory.py +0 -466
  50. empathy_llm_toolkit/cli/__init__.py +0 -8
  51. empathy_llm_toolkit/cli/sync_claude.py +0 -487
  52. empathy_llm_toolkit/code_health.py +0 -1313
  53. empathy_llm_toolkit/commands/__init__.py +0 -51
  54. empathy_llm_toolkit/commands/context.py +0 -375
  55. empathy_llm_toolkit/commands/loader.py +0 -301
  56. empathy_llm_toolkit/commands/models.py +0 -231
  57. empathy_llm_toolkit/commands/parser.py +0 -371
  58. empathy_llm_toolkit/commands/registry.py +0 -429
  59. empathy_llm_toolkit/config/__init__.py +0 -29
  60. empathy_llm_toolkit/config/unified.py +0 -291
  61. empathy_llm_toolkit/context/__init__.py +0 -22
  62. empathy_llm_toolkit/context/compaction.py +0 -455
  63. empathy_llm_toolkit/context/manager.py +0 -434
  64. empathy_llm_toolkit/contextual_patterns.py +0 -361
  65. empathy_llm_toolkit/core.py +0 -907
  66. empathy_llm_toolkit/git_pattern_extractor.py +0 -435
  67. empathy_llm_toolkit/hooks/__init__.py +0 -24
  68. empathy_llm_toolkit/hooks/config.py +0 -306
  69. empathy_llm_toolkit/hooks/executor.py +0 -289
  70. empathy_llm_toolkit/hooks/registry.py +0 -302
  71. empathy_llm_toolkit/hooks/scripts/__init__.py +0 -39
  72. empathy_llm_toolkit/hooks/scripts/evaluate_session.py +0 -201
  73. empathy_llm_toolkit/hooks/scripts/first_time_init.py +0 -285
  74. empathy_llm_toolkit/hooks/scripts/pre_compact.py +0 -207
  75. empathy_llm_toolkit/hooks/scripts/session_end.py +0 -183
  76. empathy_llm_toolkit/hooks/scripts/session_start.py +0 -163
  77. empathy_llm_toolkit/hooks/scripts/suggest_compact.py +0 -225
  78. empathy_llm_toolkit/learning/__init__.py +0 -30
  79. empathy_llm_toolkit/learning/evaluator.py +0 -438
  80. empathy_llm_toolkit/learning/extractor.py +0 -514
  81. empathy_llm_toolkit/learning/storage.py +0 -560
  82. empathy_llm_toolkit/levels.py +0 -227
  83. empathy_llm_toolkit/pattern_confidence.py +0 -414
  84. empathy_llm_toolkit/pattern_resolver.py +0 -272
  85. empathy_llm_toolkit/pattern_summary.py +0 -350
  86. empathy_llm_toolkit/providers.py +0 -967
  87. empathy_llm_toolkit/routing/__init__.py +0 -32
  88. empathy_llm_toolkit/routing/model_router.py +0 -362
  89. empathy_llm_toolkit/security/IMPLEMENTATION_SUMMARY.md +0 -413
  90. empathy_llm_toolkit/security/PHASE2_COMPLETE.md +0 -384
  91. empathy_llm_toolkit/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +0 -271
  92. empathy_llm_toolkit/security/QUICK_REFERENCE.md +0 -316
  93. empathy_llm_toolkit/security/README.md +0 -262
  94. empathy_llm_toolkit/security/__init__.py +0 -62
  95. empathy_llm_toolkit/security/audit_logger.py +0 -929
  96. empathy_llm_toolkit/security/audit_logger_example.py +0 -152
  97. empathy_llm_toolkit/security/pii_scrubber.py +0 -640
  98. empathy_llm_toolkit/security/secrets_detector.py +0 -678
  99. empathy_llm_toolkit/security/secrets_detector_example.py +0 -304
  100. empathy_llm_toolkit/security/secure_memdocs.py +0 -1192
  101. empathy_llm_toolkit/security/secure_memdocs_example.py +0 -278
  102. empathy_llm_toolkit/session_status.py +0 -745
  103. empathy_llm_toolkit/state.py +0 -246
  104. empathy_llm_toolkit/utils/__init__.py +0 -5
  105. empathy_llm_toolkit/utils/tokens.py +0 -349
  106. empathy_os/adaptive/__init__.py +0 -13
  107. empathy_os/adaptive/task_complexity.py +0 -127
  108. empathy_os/agent_monitoring.py +0 -414
  109. empathy_os/cache/__init__.py +0 -117
  110. empathy_os/cache/base.py +0 -166
  111. empathy_os/cache/dependency_manager.py +0 -256
  112. empathy_os/cache/hash_only.py +0 -251
  113. empathy_os/cache/hybrid.py +0 -457
  114. empathy_os/cache/storage.py +0 -285
  115. empathy_os/cache_monitor.py +0 -356
  116. empathy_os/cache_stats.py +0 -298
  117. empathy_os/cli/__init__.py +0 -152
  118. empathy_os/cli/__main__.py +0 -12
  119. empathy_os/cli/commands/__init__.py +0 -1
  120. empathy_os/cli/commands/batch.py +0 -264
  121. empathy_os/cli/commands/cache.py +0 -248
  122. empathy_os/cli/commands/help.py +0 -331
  123. empathy_os/cli/commands/info.py +0 -140
  124. empathy_os/cli/commands/inspect.py +0 -436
  125. empathy_os/cli/commands/inspection.py +0 -57
  126. empathy_os/cli/commands/memory.py +0 -48
  127. empathy_os/cli/commands/metrics.py +0 -92
  128. empathy_os/cli/commands/orchestrate.py +0 -184
  129. empathy_os/cli/commands/patterns.py +0 -207
  130. empathy_os/cli/commands/profiling.py +0 -202
  131. empathy_os/cli/commands/provider.py +0 -98
  132. empathy_os/cli/commands/routing.py +0 -285
  133. empathy_os/cli/commands/setup.py +0 -96
  134. empathy_os/cli/commands/status.py +0 -235
  135. empathy_os/cli/commands/sync.py +0 -166
  136. empathy_os/cli/commands/tier.py +0 -121
  137. empathy_os/cli/commands/utilities.py +0 -114
  138. empathy_os/cli/commands/workflow.py +0 -579
  139. empathy_os/cli/core.py +0 -32
  140. empathy_os/cli/parsers/__init__.py +0 -68
  141. empathy_os/cli/parsers/batch.py +0 -118
  142. empathy_os/cli/parsers/cache.py +0 -65
  143. empathy_os/cli/parsers/help.py +0 -41
  144. empathy_os/cli/parsers/info.py +0 -26
  145. empathy_os/cli/parsers/inspect.py +0 -66
  146. empathy_os/cli/parsers/metrics.py +0 -42
  147. empathy_os/cli/parsers/orchestrate.py +0 -61
  148. empathy_os/cli/parsers/patterns.py +0 -54
  149. empathy_os/cli/parsers/provider.py +0 -40
  150. empathy_os/cli/parsers/routing.py +0 -110
  151. empathy_os/cli/parsers/setup.py +0 -42
  152. empathy_os/cli/parsers/status.py +0 -47
  153. empathy_os/cli/parsers/sync.py +0 -31
  154. empathy_os/cli/parsers/tier.py +0 -33
  155. empathy_os/cli/parsers/workflow.py +0 -77
  156. empathy_os/cli/utils/__init__.py +0 -1
  157. empathy_os/cli/utils/data.py +0 -242
  158. empathy_os/cli/utils/helpers.py +0 -68
  159. empathy_os/cli_legacy.py +0 -3957
  160. empathy_os/cli_minimal.py +0 -1159
  161. empathy_os/cli_router.py +0 -437
  162. empathy_os/cli_unified.py +0 -814
  163. empathy_os/config/__init__.py +0 -66
  164. empathy_os/config/xml_config.py +0 -286
  165. empathy_os/config.py +0 -545
  166. empathy_os/coordination.py +0 -870
  167. empathy_os/core.py +0 -1511
  168. empathy_os/core_modules/__init__.py +0 -15
  169. empathy_os/cost_tracker.py +0 -626
  170. empathy_os/dashboard/__init__.py +0 -41
  171. empathy_os/dashboard/app.py +0 -512
  172. empathy_os/dashboard/simple_server.py +0 -435
  173. empathy_os/dashboard/standalone_server.py +0 -547
  174. empathy_os/discovery.py +0 -306
  175. empathy_os/emergence.py +0 -306
  176. empathy_os/exceptions.py +0 -123
  177. empathy_os/feedback_loops.py +0 -373
  178. empathy_os/hot_reload/README.md +0 -473
  179. empathy_os/hot_reload/__init__.py +0 -62
  180. empathy_os/hot_reload/config.py +0 -83
  181. empathy_os/hot_reload/integration.py +0 -229
  182. empathy_os/hot_reload/reloader.py +0 -298
  183. empathy_os/hot_reload/watcher.py +0 -183
  184. empathy_os/hot_reload/websocket.py +0 -177
  185. empathy_os/levels.py +0 -577
  186. empathy_os/leverage_points.py +0 -441
  187. empathy_os/logging_config.py +0 -261
  188. empathy_os/mcp/__init__.py +0 -10
  189. empathy_os/mcp/server.py +0 -506
  190. empathy_os/memory/__init__.py +0 -237
  191. empathy_os/memory/claude_memory.py +0 -469
  192. empathy_os/memory/config.py +0 -224
  193. empathy_os/memory/control_panel.py +0 -1290
  194. empathy_os/memory/control_panel_support.py +0 -145
  195. empathy_os/memory/cross_session.py +0 -845
  196. empathy_os/memory/edges.py +0 -179
  197. empathy_os/memory/encryption.py +0 -159
  198. empathy_os/memory/file_session.py +0 -770
  199. empathy_os/memory/graph.py +0 -570
  200. empathy_os/memory/long_term.py +0 -913
  201. empathy_os/memory/long_term_types.py +0 -99
  202. empathy_os/memory/mixins/__init__.py +0 -25
  203. empathy_os/memory/mixins/backend_init_mixin.py +0 -249
  204. empathy_os/memory/mixins/capabilities_mixin.py +0 -208
  205. empathy_os/memory/mixins/handoff_mixin.py +0 -208
  206. empathy_os/memory/mixins/lifecycle_mixin.py +0 -49
  207. empathy_os/memory/mixins/long_term_mixin.py +0 -352
  208. empathy_os/memory/mixins/promotion_mixin.py +0 -109
  209. empathy_os/memory/mixins/short_term_mixin.py +0 -182
  210. empathy_os/memory/nodes.py +0 -179
  211. empathy_os/memory/redis_bootstrap.py +0 -540
  212. empathy_os/memory/security/__init__.py +0 -31
  213. empathy_os/memory/security/audit_logger.py +0 -932
  214. empathy_os/memory/security/pii_scrubber.py +0 -640
  215. empathy_os/memory/security/secrets_detector.py +0 -678
  216. empathy_os/memory/short_term.py +0 -2192
  217. empathy_os/memory/simple_storage.py +0 -302
  218. empathy_os/memory/storage/__init__.py +0 -15
  219. empathy_os/memory/storage_backend.py +0 -167
  220. empathy_os/memory/summary_index.py +0 -583
  221. empathy_os/memory/types.py +0 -446
  222. empathy_os/memory/unified.py +0 -182
  223. empathy_os/meta_workflows/__init__.py +0 -74
  224. empathy_os/meta_workflows/agent_creator.py +0 -248
  225. empathy_os/meta_workflows/builtin_templates.py +0 -567
  226. empathy_os/meta_workflows/cli_commands/__init__.py +0 -56
  227. empathy_os/meta_workflows/cli_commands/agent_commands.py +0 -321
  228. empathy_os/meta_workflows/cli_commands/analytics_commands.py +0 -442
  229. empathy_os/meta_workflows/cli_commands/config_commands.py +0 -232
  230. empathy_os/meta_workflows/cli_commands/memory_commands.py +0 -182
  231. empathy_os/meta_workflows/cli_commands/template_commands.py +0 -354
  232. empathy_os/meta_workflows/cli_commands/workflow_commands.py +0 -382
  233. empathy_os/meta_workflows/cli_meta_workflows.py +0 -59
  234. empathy_os/meta_workflows/form_engine.py +0 -292
  235. empathy_os/meta_workflows/intent_detector.py +0 -409
  236. empathy_os/meta_workflows/models.py +0 -569
  237. empathy_os/meta_workflows/pattern_learner.py +0 -738
  238. empathy_os/meta_workflows/plan_generator.py +0 -384
  239. empathy_os/meta_workflows/session_context.py +0 -397
  240. empathy_os/meta_workflows/template_registry.py +0 -229
  241. empathy_os/meta_workflows/workflow.py +0 -984
  242. empathy_os/metrics/__init__.py +0 -12
  243. empathy_os/metrics/collector.py +0 -31
  244. empathy_os/metrics/prompt_metrics.py +0 -194
  245. empathy_os/models/__init__.py +0 -172
  246. empathy_os/models/__main__.py +0 -13
  247. empathy_os/models/adaptive_routing.py +0 -437
  248. empathy_os/models/auth_cli.py +0 -444
  249. empathy_os/models/auth_strategy.py +0 -450
  250. empathy_os/models/cli.py +0 -655
  251. empathy_os/models/empathy_executor.py +0 -354
  252. empathy_os/models/executor.py +0 -257
  253. empathy_os/models/fallback.py +0 -762
  254. empathy_os/models/provider_config.py +0 -282
  255. empathy_os/models/registry.py +0 -472
  256. empathy_os/models/tasks.py +0 -359
  257. empathy_os/models/telemetry/__init__.py +0 -71
  258. empathy_os/models/telemetry/analytics.py +0 -594
  259. empathy_os/models/telemetry/backend.py +0 -196
  260. empathy_os/models/telemetry/data_models.py +0 -431
  261. empathy_os/models/telemetry/storage.py +0 -489
  262. empathy_os/models/token_estimator.py +0 -420
  263. empathy_os/models/validation.py +0 -280
  264. empathy_os/monitoring/__init__.py +0 -52
  265. empathy_os/monitoring/alerts.py +0 -946
  266. empathy_os/monitoring/alerts_cli.py +0 -448
  267. empathy_os/monitoring/multi_backend.py +0 -271
  268. empathy_os/monitoring/otel_backend.py +0 -362
  269. empathy_os/optimization/__init__.py +0 -19
  270. empathy_os/optimization/context_optimizer.py +0 -272
  271. empathy_os/orchestration/__init__.py +0 -67
  272. empathy_os/orchestration/agent_templates.py +0 -707
  273. empathy_os/orchestration/config_store.py +0 -499
  274. empathy_os/orchestration/execution_strategies.py +0 -2111
  275. empathy_os/orchestration/meta_orchestrator.py +0 -1168
  276. empathy_os/orchestration/pattern_learner.py +0 -696
  277. empathy_os/orchestration/real_tools.py +0 -931
  278. empathy_os/pattern_cache.py +0 -187
  279. empathy_os/pattern_library.py +0 -542
  280. empathy_os/patterns/debugging/all_patterns.json +0 -81
  281. empathy_os/patterns/debugging/workflow_20260107_1770825e.json +0 -77
  282. empathy_os/patterns/refactoring_memory.json +0 -89
  283. empathy_os/persistence.py +0 -564
  284. empathy_os/platform_utils.py +0 -265
  285. empathy_os/plugins/__init__.py +0 -28
  286. empathy_os/plugins/base.py +0 -361
  287. empathy_os/plugins/registry.py +0 -268
  288. empathy_os/project_index/__init__.py +0 -32
  289. empathy_os/project_index/cli.py +0 -335
  290. empathy_os/project_index/index.py +0 -667
  291. empathy_os/project_index/models.py +0 -504
  292. empathy_os/project_index/reports.py +0 -474
  293. empathy_os/project_index/scanner.py +0 -777
  294. empathy_os/project_index/scanner_parallel.py +0 -291
  295. empathy_os/prompts/__init__.py +0 -61
  296. empathy_os/prompts/config.py +0 -77
  297. empathy_os/prompts/context.py +0 -177
  298. empathy_os/prompts/parser.py +0 -285
  299. empathy_os/prompts/registry.py +0 -313
  300. empathy_os/prompts/templates.py +0 -208
  301. empathy_os/redis_config.py +0 -302
  302. empathy_os/redis_memory.py +0 -799
  303. empathy_os/resilience/__init__.py +0 -56
  304. empathy_os/resilience/circuit_breaker.py +0 -256
  305. empathy_os/resilience/fallback.py +0 -179
  306. empathy_os/resilience/health.py +0 -300
  307. empathy_os/resilience/retry.py +0 -209
  308. empathy_os/resilience/timeout.py +0 -135
  309. empathy_os/routing/__init__.py +0 -43
  310. empathy_os/routing/chain_executor.py +0 -433
  311. empathy_os/routing/classifier.py +0 -217
  312. empathy_os/routing/smart_router.py +0 -234
  313. empathy_os/routing/workflow_registry.py +0 -343
  314. empathy_os/scaffolding/README.md +0 -589
  315. empathy_os/scaffolding/__init__.py +0 -35
  316. empathy_os/scaffolding/__main__.py +0 -14
  317. empathy_os/scaffolding/cli.py +0 -240
  318. empathy_os/socratic/__init__.py +0 -256
  319. empathy_os/socratic/ab_testing.py +0 -958
  320. empathy_os/socratic/blueprint.py +0 -533
  321. empathy_os/socratic/cli.py +0 -703
  322. empathy_os/socratic/collaboration.py +0 -1114
  323. empathy_os/socratic/domain_templates.py +0 -924
  324. empathy_os/socratic/embeddings.py +0 -738
  325. empathy_os/socratic/engine.py +0 -794
  326. empathy_os/socratic/explainer.py +0 -682
  327. empathy_os/socratic/feedback.py +0 -772
  328. empathy_os/socratic/forms.py +0 -629
  329. empathy_os/socratic/generator.py +0 -732
  330. empathy_os/socratic/llm_analyzer.py +0 -637
  331. empathy_os/socratic/mcp_server.py +0 -702
  332. empathy_os/socratic/session.py +0 -312
  333. empathy_os/socratic/storage.py +0 -667
  334. empathy_os/socratic/success.py +0 -730
  335. empathy_os/socratic/visual_editor.py +0 -860
  336. empathy_os/socratic/web_ui.py +0 -958
  337. empathy_os/telemetry/__init__.py +0 -39
  338. empathy_os/telemetry/agent_coordination.py +0 -475
  339. empathy_os/telemetry/agent_tracking.py +0 -367
  340. empathy_os/telemetry/approval_gates.py +0 -545
  341. empathy_os/telemetry/cli.py +0 -1231
  342. empathy_os/telemetry/commands/__init__.py +0 -14
  343. empathy_os/telemetry/commands/dashboard_commands.py +0 -696
  344. empathy_os/telemetry/event_streaming.py +0 -409
  345. empathy_os/telemetry/feedback_loop.py +0 -567
  346. empathy_os/telemetry/usage_tracker.py +0 -591
  347. empathy_os/templates.py +0 -754
  348. empathy_os/test_generator/__init__.py +0 -38
  349. empathy_os/test_generator/__main__.py +0 -14
  350. empathy_os/test_generator/cli.py +0 -234
  351. empathy_os/test_generator/generator.py +0 -355
  352. empathy_os/test_generator/risk_analyzer.py +0 -216
  353. empathy_os/tier_recommender.py +0 -384
  354. empathy_os/tools.py +0 -183
  355. empathy_os/trust/__init__.py +0 -28
  356. empathy_os/trust/circuit_breaker.py +0 -579
  357. empathy_os/trust_building.py +0 -527
  358. empathy_os/validation/__init__.py +0 -19
  359. empathy_os/validation/xml_validator.py +0 -281
  360. empathy_os/vscode_bridge.py +0 -173
  361. empathy_os/workflow_commands.py +0 -780
  362. empathy_os/workflow_patterns/__init__.py +0 -33
  363. empathy_os/workflow_patterns/behavior.py +0 -249
  364. empathy_os/workflow_patterns/core.py +0 -76
  365. empathy_os/workflow_patterns/output.py +0 -99
  366. empathy_os/workflow_patterns/registry.py +0 -255
  367. empathy_os/workflow_patterns/structural.py +0 -288
  368. empathy_os/workflows/__init__.py +0 -539
  369. empathy_os/workflows/autonomous_test_gen.py +0 -1268
  370. empathy_os/workflows/base.py +0 -2667
  371. empathy_os/workflows/batch_processing.py +0 -342
  372. empathy_os/workflows/bug_predict.py +0 -1084
  373. empathy_os/workflows/builder.py +0 -273
  374. empathy_os/workflows/caching.py +0 -253
  375. empathy_os/workflows/code_review.py +0 -1048
  376. empathy_os/workflows/code_review_adapters.py +0 -312
  377. empathy_os/workflows/code_review_pipeline.py +0 -722
  378. empathy_os/workflows/config.py +0 -645
  379. empathy_os/workflows/dependency_check.py +0 -644
  380. empathy_os/workflows/document_gen/__init__.py +0 -25
  381. empathy_os/workflows/document_gen/config.py +0 -30
  382. empathy_os/workflows/document_gen/report_formatter.py +0 -162
  383. empathy_os/workflows/document_gen/workflow.py +0 -1426
  384. empathy_os/workflows/document_manager.py +0 -216
  385. empathy_os/workflows/document_manager_README.md +0 -134
  386. empathy_os/workflows/documentation_orchestrator.py +0 -1205
  387. empathy_os/workflows/history.py +0 -510
  388. empathy_os/workflows/keyboard_shortcuts/__init__.py +0 -39
  389. empathy_os/workflows/keyboard_shortcuts/generators.py +0 -391
  390. empathy_os/workflows/keyboard_shortcuts/parsers.py +0 -416
  391. empathy_os/workflows/keyboard_shortcuts/prompts.py +0 -295
  392. empathy_os/workflows/keyboard_shortcuts/schema.py +0 -193
  393. empathy_os/workflows/keyboard_shortcuts/workflow.py +0 -509
  394. empathy_os/workflows/llm_base.py +0 -363
  395. empathy_os/workflows/manage_docs.py +0 -87
  396. empathy_os/workflows/manage_docs_README.md +0 -134
  397. empathy_os/workflows/manage_documentation.py +0 -821
  398. empathy_os/workflows/new_sample_workflow1.py +0 -149
  399. empathy_os/workflows/new_sample_workflow1_README.md +0 -150
  400. empathy_os/workflows/orchestrated_health_check.py +0 -849
  401. empathy_os/workflows/orchestrated_release_prep.py +0 -600
  402. empathy_os/workflows/output.py +0 -413
  403. empathy_os/workflows/perf_audit.py +0 -863
  404. empathy_os/workflows/pr_review.py +0 -762
  405. empathy_os/workflows/progress.py +0 -785
  406. empathy_os/workflows/progress_server.py +0 -322
  407. empathy_os/workflows/progressive/README 2.md +0 -454
  408. empathy_os/workflows/progressive/README.md +0 -454
  409. empathy_os/workflows/progressive/__init__.py +0 -82
  410. empathy_os/workflows/progressive/cli.py +0 -219
  411. empathy_os/workflows/progressive/core.py +0 -488
  412. empathy_os/workflows/progressive/orchestrator.py +0 -723
  413. empathy_os/workflows/progressive/reports.py +0 -520
  414. empathy_os/workflows/progressive/telemetry.py +0 -274
  415. empathy_os/workflows/progressive/test_gen.py +0 -495
  416. empathy_os/workflows/progressive/workflow.py +0 -589
  417. empathy_os/workflows/refactor_plan.py +0 -694
  418. empathy_os/workflows/release_prep.py +0 -895
  419. empathy_os/workflows/release_prep_crew.py +0 -969
  420. empathy_os/workflows/research_synthesis.py +0 -404
  421. empathy_os/workflows/routing.py +0 -168
  422. empathy_os/workflows/secure_release.py +0 -593
  423. empathy_os/workflows/security_adapters.py +0 -297
  424. empathy_os/workflows/security_audit.py +0 -1329
  425. empathy_os/workflows/security_audit_phase3.py +0 -355
  426. empathy_os/workflows/seo_optimization.py +0 -633
  427. empathy_os/workflows/step_config.py +0 -234
  428. empathy_os/workflows/telemetry_mixin.py +0 -269
  429. empathy_os/workflows/test5.py +0 -125
  430. empathy_os/workflows/test5_README.md +0 -158
  431. empathy_os/workflows/test_coverage_boost_crew.py +0 -849
  432. empathy_os/workflows/test_gen/__init__.py +0 -52
  433. empathy_os/workflows/test_gen/ast_analyzer.py +0 -249
  434. empathy_os/workflows/test_gen/config.py +0 -88
  435. empathy_os/workflows/test_gen/data_models.py +0 -38
  436. empathy_os/workflows/test_gen/report_formatter.py +0 -289
  437. empathy_os/workflows/test_gen/test_templates.py +0 -381
  438. empathy_os/workflows/test_gen/workflow.py +0 -655
  439. empathy_os/workflows/test_gen.py +0 -54
  440. empathy_os/workflows/test_gen_behavioral.py +0 -477
  441. empathy_os/workflows/test_gen_parallel.py +0 -341
  442. empathy_os/workflows/test_lifecycle.py +0 -526
  443. empathy_os/workflows/test_maintenance.py +0 -627
  444. empathy_os/workflows/test_maintenance_cli.py +0 -590
  445. empathy_os/workflows/test_maintenance_crew.py +0 -840
  446. empathy_os/workflows/test_runner.py +0 -622
  447. empathy_os/workflows/tier_tracking.py +0 -531
  448. empathy_os/workflows/xml_enhanced_crew.py +0 -285
  449. empathy_software_plugin/SOFTWARE_PLUGIN_README.md +0 -57
  450. empathy_software_plugin/cli/__init__.py +0 -120
  451. empathy_software_plugin/cli/inspect.py +0 -362
  452. empathy_software_plugin/cli.py +0 -574
  453. empathy_software_plugin/plugin.py +0 -188
  454. workflow_scaffolding/__init__.py +0 -11
  455. workflow_scaffolding/__main__.py +0 -12
  456. workflow_scaffolding/cli.py +0 -206
  457. workflow_scaffolding/generator.py +0 -265
  458. {empathy_framework-5.3.0.dist-info → empathy_framework-5.4.0.dist-info}/WHEEL +0 -0
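
To sanity-check a file list like the one above against the published artifacts, the two wheels can be compared locally. The sketch below is illustrative only and is not part of empathy-framework; it assumes both wheels have already been downloaded into the working directory (for example with pip download empathy-framework==5.3.0 --no-deps) and uses only the Python standard library, since a wheel is a zip archive.

import zipfile

def wheel_files(path: str) -> set[str]:
    """Return the set of file paths packaged inside a wheel (.whl is a zip)."""
    with zipfile.ZipFile(path) as wheel:
        return set(wheel.namelist())

old = wheel_files("empathy_framework-5.3.0-py3-none-any.whl")
new = wheel_files("empathy_framework-5.4.0-py3-none-any.whl")

# Files present only in 5.3.0 (removed in 5.4.0), then files new in 5.4.0.
for path in sorted(old - new):
    print(f"- {path}")
for path in sorted(new - old):
    print(f"+ {path}")

This reproduces the added/removed file names but not the per-line counts shown above; those come from diffing the extracted file contents.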
empathy_os/memory/short_term.py
@@ -1,2192 +0,0 @@
- """Redis Short-Term Memory for Empathy Framework
-
- Per EMPATHY_PHILOSOPHY.md v1.1.0:
- - Implements fast, TTL-based working memory for agent coordination
- - Role-based access tiers for data integrity
- - Pattern staging before validation
- - Principled negotiation support
-
- Enhanced Features (v2.0):
- - Pub/Sub for real-time agent notifications
- - Batch operations for high-throughput workflows
- - SCAN-based pagination for large datasets
- - Redis Streams for audit trails
- - Connection retry with exponential backoff
- - SSL/TLS support for managed Redis services
- - Time-window queries with sorted sets
- - Task queues with Lists
- - Atomic transactions with MULTI/EXEC
- - Comprehensive metrics tracking
-
- Copyright 2025 Smart AI Memory, LLC
- Licensed under Fair Source 0.9
- """
-
- import json
- import os
- import threading
- import time
- from collections.abc import Callable
- from datetime import datetime
- from typing import Any
-
- import structlog
-
- from .security.pii_scrubber import PIIScrubber
- from .security.secrets_detector import SecretsDetector
- from .security.secrets_detector import Severity as SecretSeverity
-
- # Import types from dedicated module
- from .types import (
-     AccessTier,
-     AgentCredentials,
-     ConflictContext,
-     PaginatedResult,
-     RedisConfig,
-     RedisMetrics,
-     SecurityError,
-     StagedPattern,
-     TimeWindowQuery,
-     TTLStrategy,
- )
-
- logger = structlog.get_logger(__name__)
-
- try:
-     import redis
-     from redis.exceptions import ConnectionError as RedisConnectionError
-     from redis.exceptions import TimeoutError as RedisTimeoutError
-
-     REDIS_AVAILABLE = True
- except ImportError:
-     REDIS_AVAILABLE = False
-     RedisConnectionError = Exception  # type: ignore
-     RedisTimeoutError = Exception  # type: ignore
-
-
- class RedisShortTermMemory:
-     """Redis-backed short-term memory for agent coordination
-
-     Features:
-     - Fast read/write with automatic TTL expiration
-     - Role-based access control
-     - Pattern staging workflow
-     - Conflict negotiation context
-     - Agent working memory
-
-     Enhanced Features (v2.0):
-     - Pub/Sub for real-time agent notifications
-     - Batch operations (stash_batch, retrieve_batch)
-     - SCAN-based pagination for large datasets
-     - Redis Streams for audit trails
-     - Time-window queries with sorted sets
-     - Task queues with Lists (LPUSH/RPOP)
-     - Atomic transactions with MULTI/EXEC
-     - Connection retry with exponential backoff
-     - Metrics tracking for observability
-
-     Example:
-         >>> memory = RedisShortTermMemory()
-         >>> creds = AgentCredentials("agent_1", AccessTier.CONTRIBUTOR)
-         >>> memory.stash("analysis_results", {"issues": 3}, creds)
-         >>> data = memory.retrieve("analysis_results", creds)
-
-         # Pub/Sub example
-         >>> memory.subscribe("agent_signals", lambda msg: print(msg))
-         >>> memory.publish("agent_signals", {"event": "task_complete"}, creds)
-
-         # Batch operations
-         >>> items = [("key1", {"data": 1}), ("key2", {"data": 2})]
-         >>> memory.stash_batch(items, creds)
-
-         # Pagination
-         >>> result = memory.list_staged_patterns_paginated(creds, cursor="0", count=10)
-
-     """
-
-     # Key prefixes for namespacing
-     PREFIX_WORKING = "empathy:working:"
-     PREFIX_STAGED = "empathy:staged:"
-     PREFIX_CONFLICT = "empathy:conflict:"
-     # PREFIX_COORDINATION removed in v5.0 - use empathy_os.telemetry.CoordinationSignals
-     PREFIX_SESSION = "empathy:session:"
-     PREFIX_PUBSUB = "empathy:pubsub:"
-     PREFIX_STREAM = "empathy:stream:"
-     PREFIX_TIMELINE = "empathy:timeline:"
-     PREFIX_QUEUE = "empathy:queue:"
-
-     def __init__(
-         self,
-         host: str = "localhost",
-         port: int = 6379,
-         db: int = 0,
-         password: str | None = None,
-         use_mock: bool = False,
-         config: RedisConfig | None = None,
-     ):
-         """Initialize Redis connection
-
-         Args:
-             host: Redis host
-             port: Redis port
-             db: Redis database number
-             password: Redis password (optional)
-             use_mock: Use in-memory mock for testing
-             config: Full RedisConfig for advanced settings (overrides other args)
-
-         """
-         # Use config if provided, otherwise build from individual args
-         if config is not None:
-             self._config = config
-         else:
-             # Check environment variable for Redis enablement (default: disabled)
-             redis_enabled = os.getenv("REDIS_ENABLED", "false").lower() in ("true", "1", "yes")
-
-             # Use environment variables for configuration if available
-             env_host = os.getenv("REDIS_HOST", host)
-             env_port = int(os.getenv("REDIS_PORT", str(port)))
-             env_db = int(os.getenv("REDIS_DB", str(db)))
-             env_password = os.getenv("REDIS_PASSWORD", password)
-
-             # If Redis is not enabled via env var, force mock mode
-             if not redis_enabled and not use_mock:
-                 use_mock = True
-                 logger.info("redis_disabled_via_env", message="Redis not enabled in environment, using mock mode")
-
-             self._config = RedisConfig(
-                 host=env_host,
-                 port=env_port,
-                 db=env_db,
-                 password=env_password if env_password else None,
-                 use_mock=use_mock,
-             )
-
-         self.use_mock = self._config.use_mock or not REDIS_AVAILABLE
-
-         # Initialize metrics
-         self._metrics = RedisMetrics()
-
-         # Pub/Sub state
-         self._pubsub: Any | None = None
-         self._pubsub_thread: threading.Thread | None = None
-         self._subscriptions: dict[str, list[Callable[[dict], None]]] = {}
-         self._pubsub_running = False
-
-         # Mock storage for testing
-         self._mock_storage: dict[str, tuple[Any, float | None]] = {}
-         self._mock_lists: dict[str, list[str]] = {}
-         self._mock_sorted_sets: dict[str, list[tuple[float, str]]] = {}
-         self._mock_streams: dict[str, list[tuple[str, dict]]] = {}
-         self._mock_pubsub_handlers: dict[str, list[Callable[[dict], None]]] = {}
-
-         # Local LRU cache for two-tier caching (memory + Redis)
-         # Reduces network I/O from 37ms to <0.001ms for frequently accessed keys
-         self._local_cache_enabled = self._config.local_cache_enabled
-         self._local_cache_max_size = self._config.local_cache_size
-         self._local_cache: dict[str, tuple[str, float, float]] = {}  # key -> (value, timestamp, last_access)
-         self._local_cache_hits = 0
-         self._local_cache_misses = 0
-
-         # Security: Initialize PII scrubber and secrets detector
-         self._pii_scrubber: PIIScrubber | None = None
-         self._secrets_detector: SecretsDetector | None = None
-
-         if self._config.pii_scrub_enabled:
-             self._pii_scrubber = PIIScrubber(enable_name_detection=False)
-             logger.debug(
-                 "pii_scrubber_enabled", message="PII scrubbing active for short-term memory"
-             )
-
-         if self._config.secrets_detection_enabled:
-             self._secrets_detector = SecretsDetector()
-             logger.debug(
-                 "secrets_detector_enabled", message="Secrets detection active for short-term memory"
-             )
-
-         if self.use_mock:
-             self._client = None
-         else:
-             self._client = self._create_client_with_retry()
-
-     @property
-     def client(self) -> Any:
-         """Get the Redis client instance.
-
-         Returns:
-             Redis client instance or None if using mock mode
-
-         Example:
-             >>> memory = RedisShortTermMemory()
-             >>> if memory.client:
-             ...     print("Redis connected")
-         """
-         return self._client
-
-     @property
-     def metrics(self) -> "RedisMetrics":
-         """Get Redis metrics instance.
-
-         Returns:
-             RedisMetrics instance with connection and operation statistics
-
-         Example:
-             >>> memory = RedisShortTermMemory()
-             >>> print(f"Retries: {memory.metrics.retries_total}")
-         """
-         return self._metrics
-
-     def _create_client_with_retry(self) -> Any:
-         """Create Redis client with retry logic."""
-         max_attempts = self._config.retry_max_attempts
-         base_delay = self._config.retry_base_delay
-         max_delay = self._config.retry_max_delay
-
-         last_error: Exception | None = None
-
-         for attempt in range(max_attempts):
-             try:
-                 client = redis.Redis(**self._config.to_redis_kwargs())
-                 # Test connection
-                 client.ping()
-                 logger.info(
-                     "redis_connected",
-                     host=self._config.host,
-                     port=self._config.port,
-                     attempt=attempt + 1,
-                 )
-                 return client
-             except (RedisConnectionError, RedisTimeoutError) as e:
-                 last_error = e
-                 self._metrics.retries_total += 1
-
-                 if attempt < max_attempts - 1:
-                     delay = min(base_delay * (2**attempt), max_delay)
-                     logger.warning(
-                         "redis_connection_retry",
-                         attempt=attempt + 1,
-                         max_attempts=max_attempts,
-                         delay=delay,
-                         error=str(e),
-                     )
-                     time.sleep(delay)
-
-         # All retries failed
-         logger.error(
-             "redis_connection_failed",
-             max_attempts=max_attempts,
-             error=str(last_error),
-         )
-         raise last_error if last_error else ConnectionError("Failed to connect to Redis")
-
-     def _execute_with_retry(self, operation: Callable[[], Any], op_name: str = "operation") -> Any:
-         """Execute a Redis operation with retry logic."""
-         start_time = time.perf_counter()
-         max_attempts = self._config.retry_max_attempts
-         base_delay = self._config.retry_base_delay
-         max_delay = self._config.retry_max_delay
-
-         last_error: Exception | None = None
-
-         for attempt in range(max_attempts):
-             try:
-                 result = operation()
-                 latency_ms = (time.perf_counter() - start_time) * 1000
-                 self._metrics.record_operation(op_name, latency_ms, success=True)
-                 return result
-             except (RedisConnectionError, RedisTimeoutError) as e:
-                 last_error = e
-                 self._metrics.retries_total += 1
-
-                 if attempt < max_attempts - 1:
-                     delay = min(base_delay * (2**attempt), max_delay)
-                     logger.warning(
-                         "redis_operation_retry",
-                         operation=op_name,
-                         attempt=attempt + 1,
-                         delay=delay,
-                     )
-                     time.sleep(delay)
-
-         latency_ms = (time.perf_counter() - start_time) * 1000
-         self._metrics.record_operation(op_name, latency_ms, success=False)
-         raise last_error if last_error else ConnectionError("Redis operation failed")
-
-     def _get(self, key: str) -> str | None:
-         """Get value from Redis or mock with two-tier caching (local + Redis)"""
-         # Check local cache first (0.001ms vs 37ms for Redis/mock)
-         # This works for BOTH mock and real Redis modes
-         if self._local_cache_enabled and key in self._local_cache:
-             value, timestamp, last_access = self._local_cache[key]
-             now = time.time()
-
-             # Update last access time for LRU
-             self._local_cache[key] = (value, timestamp, now)
-             self._local_cache_hits += 1
-
-             return value
-
-         # Cache miss - fetch from storage (mock or Redis)
-         self._local_cache_misses += 1
-
-         # Mock mode path
-         if self.use_mock:
-             if key in self._mock_storage:
-                 value, expires = self._mock_storage[key]
-                 if expires is None or datetime.now().timestamp() < expires:
-                     result = str(value) if value is not None else None
-                     # Add to local cache for next access
-                     if result and self._local_cache_enabled:
-                         self._add_to_local_cache(key, result)
-                     return result
-                 del self._mock_storage[key]
-             return None
-
-         # Real Redis path
-         if self._client is None:
-             return None
-
-         result = self._client.get(key)
-
-         # Add to local cache if successful
-         if result and self._local_cache_enabled:
-             self._add_to_local_cache(key, str(result))
-
-         return str(result) if result else None
-
-     def _set(self, key: str, value: str, ttl: int | None = None) -> bool:
-         """Set value in Redis or mock with two-tier caching"""
-         # Mock mode path
-         if self.use_mock:
-             expires = datetime.now().timestamp() + ttl if ttl else None
-             self._mock_storage[key] = (value, expires)
-
-             # Update local cache in mock mode too
-             if self._local_cache_enabled:
-                 self._add_to_local_cache(key, value)
-
-             return True
-
-         # Real Redis path
-         if self._client is None:
-             return False
-
-         # Set in Redis
-         if ttl:
-             self._client.setex(key, ttl, value)
-         else:
-             result = self._client.set(key, value)
-             if not result:
-                 return False
-
-         # Update local cache if enabled
-         if self._local_cache_enabled:
-             self._add_to_local_cache(key, value)
-
-         return True
-
-     def _delete(self, key: str) -> bool:
-         """Delete key from Redis or mock and local cache"""
-         # Mock mode path
-         if self.use_mock:
-             deleted = False
-             if key in self._mock_storage:
-                 del self._mock_storage[key]
-                 deleted = True
-
-             # Remove from local cache if present
-             if self._local_cache_enabled and key in self._local_cache:
-                 del self._local_cache[key]
-
-             return deleted
-
-         # Real Redis path
-         if self._client is None:
-             return False
-
-         # Delete from Redis
-         result = bool(self._client.delete(key) > 0)
-
-         # Also remove from local cache if present
-         if self._local_cache_enabled and key in self._local_cache:
-             del self._local_cache[key]
-
-         return result
-
-     def _keys(self, pattern: str) -> list[str]:
-         """Get keys matching pattern"""
-         if self.use_mock:
-             import fnmatch
-
-             # Use list comp for small result sets (typical <1000 keys)
-             return [k for k in self._mock_storage.keys() if fnmatch.fnmatch(k, pattern)]
-         if self._client is None:
-             return []
-         keys = self._client.keys(pattern)
-         # Convert bytes to strings - needed for API return type
-         return [k.decode() if isinstance(k, bytes) else str(k) for k in keys]
-
-     # === Local LRU Cache Methods ===
-
-     def _add_to_local_cache(self, key: str, value: str) -> None:
-         """Add entry to local cache with LRU eviction.
-
-         Args:
-             key: Cache key
-             value: Value to cache
-         """
-         now = time.time()
-
-         # Evict oldest entry if cache is full
-         if len(self._local_cache) >= self._local_cache_max_size:
-             # Find key with oldest last_access time
-             oldest_key = min(self._local_cache, key=lambda k: self._local_cache[k][2])
-             del self._local_cache[oldest_key]
-
-         # Add new entry: (value, timestamp, last_access)
-         self._local_cache[key] = (value, now, now)
-
-     def clear_local_cache(self) -> int:
-         """Clear all entries from local cache.
-
-         Returns:
-             Number of entries cleared
-         """
-         count = len(self._local_cache)
-         self._local_cache.clear()
-         self._local_cache_hits = 0
-         self._local_cache_misses = 0
-         logger.info("local_cache_cleared", entries_cleared=count)
-         return count
-
-     def get_local_cache_stats(self) -> dict:
-         """Get local cache performance statistics.
-
-         Returns:
-             Dict with cache stats (hits, misses, hit_rate, size)
-         """
-         total = self._local_cache_hits + self._local_cache_misses
-         hit_rate = (self._local_cache_hits / total * 100) if total > 0 else 0.0
-
-         return {
-             "enabled": self._local_cache_enabled,
-             "size": len(self._local_cache),
-             "max_size": self._local_cache_max_size,
-             "hits": self._local_cache_hits,
-             "misses": self._local_cache_misses,
-             "hit_rate": hit_rate,
-             "total_requests": total,
-         }
-
-     # === Security Methods ===
-
-     def _sanitize_data(self, data: Any) -> tuple[Any, int]:
-         """Sanitize data by scrubbing PII and checking for secrets.
-
-         Args:
-             data: Data to sanitize (dict, list, or str)
-
-         Returns:
-             Tuple of (sanitized_data, pii_count)
-
-         Raises:
-             SecurityError: If secrets are detected and blocking is enabled
-
-         """
-         pii_count = 0
-
-         if data is None:
-             return data, 0
-
-         # Convert data to string for scanning
-         if isinstance(data, dict):
-             data_str = json.dumps(data)
-         elif isinstance(data, list):
-             data_str = json.dumps(data)
-         elif isinstance(data, str):
-             data_str = data
-         else:
-             # For other types, convert to string
-             data_str = str(data)
-
-         # Check for secrets first (before modifying data)
-         if self._secrets_detector is not None:
-             detections = self._secrets_detector.detect(data_str)
-             # Block critical and high severity secrets
-             critical_secrets = [
-                 d
-                 for d in detections
-                 if d.severity in (SecretSeverity.CRITICAL, SecretSeverity.HIGH)
-             ]
-             if critical_secrets:
-                 self._metrics.secrets_blocked_total += len(critical_secrets)
-                 secret_types = [d.secret_type.value for d in critical_secrets]
-                 logger.warning(
-                     "secrets_detected_blocked",
-                     secret_types=secret_types,
-                     count=len(critical_secrets),
-                 )
-                 raise SecurityError(
-                     f"Cannot store data containing secrets: {secret_types}. "
-                     "Remove sensitive credentials before storing."
-                 )
-
-         # Scrub PII
-         if self._pii_scrubber is not None:
-             sanitized_str, pii_detections = self._pii_scrubber.scrub(data_str)
-             pii_count = len(pii_detections)
-
-             if pii_count > 0:
-                 self._metrics.pii_scrubbed_total += pii_count
-                 self._metrics.pii_scrub_operations += 1
-                 logger.debug(
-                     "pii_scrubbed",
-                     pii_count=pii_count,
-                     pii_types=[d.pii_type for d in pii_detections],
-                 )
-
-             # Convert back to original type
-             if isinstance(data, dict):
-                 try:
-                     return json.loads(sanitized_str), pii_count
-                 except json.JSONDecodeError:
-                     # If PII scrubbing broke JSON structure, return original
-                     # This can happen if regex matches part of JSON syntax
-                     logger.warning("pii_scrubbing_broke_json_returning_original")
-                     return data, 0
-             elif isinstance(data, list):
-                 try:
-                     return json.loads(sanitized_str), pii_count
-                 except json.JSONDecodeError:
-                     logger.warning("pii_scrubbing_broke_json_returning_original")
-                     return data, 0
-             else:
-                 return sanitized_str, pii_count
-
-         return data, pii_count
-
-     # === Working Memory (Stash/Retrieve) ===
-
-     def stash(
-         self,
-         key: str,
-         data: Any,
-         credentials: AgentCredentials,
-         ttl: TTLStrategy = TTLStrategy.WORKING_RESULTS,
-         skip_sanitization: bool = False,
-     ) -> bool:
-         """Stash data in short-term memory
-
-         Args:
-             key: Unique key for the data
-             data: Data to store (will be JSON serialized)
-             credentials: Agent credentials
-             ttl: Time-to-live strategy
-             skip_sanitization: Skip PII scrubbing and secrets detection (use with caution)
-
-         Returns:
-             True if successful
-
-         Raises:
-             ValueError: If key is empty or invalid
-             PermissionError: If credentials lack write access
-             SecurityError: If secrets are detected in data (when secrets_detection_enabled)
-
-         Note:
-             PII (emails, SSNs, phone numbers, etc.) is automatically scrubbed
-             before storage unless skip_sanitization=True or pii_scrub_enabled=False.
-             Secrets (API keys, passwords, etc.) will block storage by default.
-
-         Example:
-             >>> memory.stash("analysis_v1", {"findings": [...]}, creds)
-
-         """
-         # Pattern 1: String ID validation
-         if not key or not key.strip():
-             raise ValueError(f"key cannot be empty. Got: {key!r}")
-
-         if not credentials.can_stage():
-             raise PermissionError(
-                 f"Agent {credentials.agent_id} (Tier {credentials.tier.name}) "
-                 "cannot write to memory. Requires CONTRIBUTOR or higher.",
-             )
-
-         # Sanitize data (PII scrubbing + secrets detection)
-         if not skip_sanitization:
-             data, pii_count = self._sanitize_data(data)
-             if pii_count > 0:
-                 logger.info(
-                     "stash_pii_scrubbed",
-                     key=key,
-                     agent_id=credentials.agent_id,
-                     pii_count=pii_count,
-                 )
-
-         full_key = f"{self.PREFIX_WORKING}{credentials.agent_id}:{key}"
-         payload = {
-             "data": data,
-             "agent_id": credentials.agent_id,
-             "stashed_at": datetime.now().isoformat(),
-         }
-         return self._set(full_key, json.dumps(payload), ttl.value)
-
-     def retrieve(
-         self,
-         key: str,
-         credentials: AgentCredentials,
-         agent_id: str | None = None,
-     ) -> Any | None:
-         """Retrieve data from short-term memory
-
-         Args:
-             key: Key to retrieve
-             credentials: Agent credentials
-             agent_id: Owner agent ID (defaults to credentials agent)
-
-         Returns:
-             Retrieved data or None if not found
-
-         Raises:
-             ValueError: If key is empty or invalid
-
-         Example:
-             >>> data = memory.retrieve("analysis_v1", creds)
-
-         """
-         # Pattern 1: String ID validation
-         if not key or not key.strip():
-             raise ValueError(f"key cannot be empty. Got: {key!r}")
-
-         owner = agent_id or credentials.agent_id
-         full_key = f"{self.PREFIX_WORKING}{owner}:{key}"
-         raw = self._get(full_key)
-
-         if raw is None:
-             return None
-
-         payload = json.loads(raw)
-         return payload.get("data")
-
-     def clear_working_memory(self, credentials: AgentCredentials) -> int:
-         """Clear all working memory for an agent
-
-         Args:
-             credentials: Agent credentials (must own the memory or be Steward)
-
-         Returns:
-             Number of keys deleted
-
-         """
-         pattern = f"{self.PREFIX_WORKING}{credentials.agent_id}:*"
-         keys = self._keys(pattern)
-         count = 0
-         for key in keys:
-             if self._delete(key):
-                 count += 1
-         return count
-
-     # === Pattern Staging ===
-
-     def stage_pattern(
-         self,
-         pattern: StagedPattern,
-         credentials: AgentCredentials,
-     ) -> bool:
-         """Stage a pattern for validation
-
-         Per EMPATHY_PHILOSOPHY.md: Patterns must be staged before
-         being promoted to the active library.
-
-         Args:
-             pattern: Pattern to stage
-             credentials: Must be CONTRIBUTOR or higher
-
-         Returns:
-             True if staged successfully
-
-         Raises:
-             TypeError: If pattern is not StagedPattern
-             PermissionError: If credentials lack staging access
-
-         """
-         # Pattern 5: Type validation
-         if not isinstance(pattern, StagedPattern):
-             raise TypeError(f"pattern must be StagedPattern, got {type(pattern).__name__}")
-
-         if not credentials.can_stage():
-             raise PermissionError(
-                 f"Agent {credentials.agent_id} cannot stage patterns. "
-                 "Requires CONTRIBUTOR tier or higher.",
-             )
-
-         key = f"{self.PREFIX_STAGED}{pattern.pattern_id}"
-         return self._set(
-             key,
-             json.dumps(pattern.to_dict()),
-             TTLStrategy.STAGED_PATTERNS.value,
-         )
-
-     def get_staged_pattern(
-         self,
-         pattern_id: str,
-         credentials: AgentCredentials,
-     ) -> StagedPattern | None:
-         """Retrieve a staged pattern
-
-         Args:
-             pattern_id: Pattern ID
-             credentials: Any tier can read
-
-         Returns:
-             StagedPattern or None
-
-         Raises:
-             ValueError: If pattern_id is empty
-
-         """
-         # Pattern 1: String ID validation
-         if not pattern_id or not pattern_id.strip():
-             raise ValueError(f"pattern_id cannot be empty. Got: {pattern_id!r}")
-
-         key = f"{self.PREFIX_STAGED}{pattern_id}"
-         raw = self._get(key)
-
-         if raw is None:
-             return None
-
-         return StagedPattern.from_dict(json.loads(raw))
-
-     def list_staged_patterns(
-         self,
-         credentials: AgentCredentials,
-     ) -> list[StagedPattern]:
-         """List all staged patterns awaiting validation
-
-         Args:
-             credentials: Any tier can read
-
-         Returns:
-             List of staged patterns
-
-         """
-         pattern = f"{self.PREFIX_STAGED}*"
-         keys = self._keys(pattern)
-         patterns = []
-
-         for key in keys:
-             raw = self._get(key)
-             if raw:
-                 patterns.append(StagedPattern.from_dict(json.loads(raw)))
-
-         return patterns
-
-     def promote_pattern(
-         self,
-         pattern_id: str,
-         credentials: AgentCredentials,
-     ) -> StagedPattern | None:
-         """Promote staged pattern (remove from staging for library add)
-
-         Args:
-             pattern_id: Pattern to promote
-             credentials: Must be VALIDATOR or higher
-
-         Returns:
-             The promoted pattern (for adding to PatternLibrary)
-
-         """
-         if not credentials.can_validate():
-             raise PermissionError(
-                 f"Agent {credentials.agent_id} cannot promote patterns. "
-                 "Requires VALIDATOR tier or higher.",
-             )
-
-         pattern = self.get_staged_pattern(pattern_id, credentials)
-         if pattern:
-             key = f"{self.PREFIX_STAGED}{pattern_id}"
-             self._delete(key)
-         return pattern
-
-     def reject_pattern(
-         self,
-         pattern_id: str,
-         credentials: AgentCredentials,
-         reason: str = "",
-     ) -> bool:
-         """Reject a staged pattern
-
-         Args:
-             pattern_id: Pattern to reject
-             credentials: Must be VALIDATOR or higher
-             reason: Rejection reason (for audit)
-
-         Returns:
-             True if rejected
-
-         """
-         if not credentials.can_validate():
-             raise PermissionError(
-                 f"Agent {credentials.agent_id} cannot reject patterns. "
-                 "Requires VALIDATOR tier or higher.",
-             )
-
-         key = f"{self.PREFIX_STAGED}{pattern_id}"
-         return self._delete(key)
835
- # === Conflict Negotiation ===
836
-
837
- def create_conflict_context(
838
- self,
839
- conflict_id: str,
840
- positions: dict[str, Any],
841
- interests: dict[str, list[str]],
842
- credentials: AgentCredentials,
843
- batna: str | None = None,
844
- ) -> ConflictContext:
845
- """Create context for principled negotiation
846
-
847
- Per Getting to Yes framework:
848
- - Separate positions from interests
849
- - Define BATNA before negotiating
850
-
851
- Args:
852
- conflict_id: Unique conflict identifier
853
- positions: agent_id -> their stated position
854
- interests: agent_id -> underlying interests
855
- credentials: Must be CONTRIBUTOR or higher
856
- batna: Best Alternative to Negotiated Agreement
857
-
858
- Returns:
859
- ConflictContext for resolution
860
-
861
- Raises:
862
- ValueError: If conflict_id is empty
863
- TypeError: If positions or interests are not dicts
864
- PermissionError: If credentials lack permission
865
-
866
- """
867
- # Pattern 1: String ID validation
868
- if not conflict_id or not conflict_id.strip():
869
- raise ValueError(f"conflict_id cannot be empty. Got: {conflict_id!r}")
870
-
871
- # Pattern 5: Type validation
872
- if not isinstance(positions, dict):
873
- raise TypeError(f"positions must be dict, got {type(positions).__name__}")
874
- if not isinstance(interests, dict):
875
- raise TypeError(f"interests must be dict, got {type(interests).__name__}")
876
-
877
- if not credentials.can_stage():
878
- raise PermissionError(
879
- f"Agent {credentials.agent_id} cannot create conflict context. "
880
- "Requires CONTRIBUTOR tier or higher.",
881
- )
882
-
883
- context = ConflictContext(
884
- conflict_id=conflict_id,
885
- positions=positions,
886
- interests=interests,
887
- batna=batna,
888
- )
889
-
890
- key = f"{self.PREFIX_CONFLICT}{conflict_id}"
891
- self._set(
892
- key,
893
- json.dumps(context.to_dict()),
894
- TTLStrategy.CONFLICT_CONTEXT.value,
895
- )
896
-
897
- return context
898
-
899
- def get_conflict_context(
900
- self,
901
- conflict_id: str,
902
- credentials: AgentCredentials,
903
- ) -> ConflictContext | None:
904
- """Retrieve conflict context
905
-
906
- Args:
907
- conflict_id: Conflict identifier
908
- credentials: Any tier can read
909
-
910
- Returns:
911
- ConflictContext or None
912
-
913
- Raises:
914
- ValueError: If conflict_id is empty
915
-
916
- """
917
- # Pattern 1: String ID validation
918
- if not conflict_id or not conflict_id.strip():
919
- raise ValueError(f"conflict_id cannot be empty. Got: {conflict_id!r}")
920
-
921
- key = f"{self.PREFIX_CONFLICT}{conflict_id}"
922
- raw = self._get(key)
923
-
924
- if raw is None:
925
- return None
926
-
927
- return ConflictContext.from_dict(json.loads(raw))
928
-
929
- def resolve_conflict(
930
- self,
931
- conflict_id: str,
932
- resolution: str,
933
- credentials: AgentCredentials,
934
- ) -> bool:
935
- """Mark conflict as resolved
936
-
937
- Args:
938
- conflict_id: Conflict to resolve
939
- resolution: How it was resolved
940
- credentials: Must be VALIDATOR or higher
941
-
942
- Returns:
943
- True if resolved
944
-
945
- """
946
- if not credentials.can_validate():
947
- raise PermissionError(
948
- f"Agent {credentials.agent_id} cannot resolve conflicts. "
949
- "Requires VALIDATOR tier or higher.",
950
- )
951
-
952
- context = self.get_conflict_context(conflict_id, credentials)
953
- if context is None:
954
- return False
955
-
956
- context.resolved = True
957
- context.resolution = resolution
958
-
959
- key = f"{self.PREFIX_CONFLICT}{conflict_id}"
960
- # Keep resolved conflicts longer for audit
961
- self._set(key, json.dumps(context.to_dict()), TTLStrategy.CONFLICT_CONTEXT.value)
962
- return True
963
-
964
- # === Coordination Signals ===
965
- # REMOVED in v5.0 - Use empathy_os.telemetry.CoordinationSignals instead
966
- # - send_signal() → CoordinationSignals.signal()
967
- # - receive_signals() → CoordinationSignals.get_pending_signals()
968
-
969
- # === Session Management ===
970
-
971
- def create_session(
972
- self,
973
- session_id: str,
974
- credentials: AgentCredentials,
975
- metadata: dict | None = None,
976
- ) -> bool:
977
- """Create a collaboration session
978
-
979
- Args:
980
- session_id: Unique session identifier
981
- credentials: Session creator
982
- metadata: Optional session metadata
983
-
984
- Returns:
985
- True if created
986
-
987
- Raises:
988
- ValueError: If session_id is empty
989
- TypeError: If metadata is not dict
990
-
991
- """
992
- # Pattern 1: String ID validation
993
- if not session_id or not session_id.strip():
994
- raise ValueError(f"session_id cannot be empty. Got: {session_id!r}")
995
-
996
- # Pattern 5: Type validation
997
- if metadata is not None and not isinstance(metadata, dict):
998
- raise TypeError(f"metadata must be dict, got {type(metadata).__name__}")
999
-
1000
- key = f"{self.PREFIX_SESSION}{session_id}"
1001
- payload = {
1002
- "session_id": session_id,
1003
- "created_by": credentials.agent_id,
1004
- "created_at": datetime.now().isoformat(),
1005
- "participants": [credentials.agent_id],
1006
- "metadata": metadata or {},
1007
- }
1008
- return self._set(key, json.dumps(payload), TTLStrategy.SESSION.value)
1009
-
1010
-    def join_session(
-        self,
-        session_id: str,
-        credentials: AgentCredentials,
-    ) -> bool:
-        """Join an existing session
-
-        Args:
-            session_id: Session to join
-            credentials: Joining agent
-
-        Returns:
-            True if joined
-
-        Raises:
-            ValueError: If session_id is empty
-
-        """
-        # Pattern 1: String ID validation
-        if not session_id or not session_id.strip():
-            raise ValueError(f"session_id cannot be empty. Got: {session_id!r}")
-
-        key = f"{self.PREFIX_SESSION}{session_id}"
-        raw = self._get(key)
-
-        if raw is None:
-            return False
-
-        payload = json.loads(raw)
-        if credentials.agent_id not in payload["participants"]:
-            payload["participants"].append(credentials.agent_id)
-
-        return self._set(key, json.dumps(payload), TTLStrategy.SESSION.value)
-
-    def get_session(
-        self,
-        session_id: str,
-        credentials: AgentCredentials,
-    ) -> dict | None:
-        """Get session information
-
-        Args:
-            session_id: Session identifier
-            credentials: Any participant can read
-
-        Returns:
-            Session data or None
-
-        """
-        key = f"{self.PREFIX_SESSION}{session_id}"
-        raw = self._get(key)
-
-        if raw is None:
-            return None
-
-        result: dict = json.loads(raw)
-        return result
-
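The three session methods compose into a simple lifecycle: one agent creates, others join, anyone reads. A minimal sketch, assuming memory is an instance of this class and creds_a / creds_b are AgentCredentials for two agents (placeholder names):

    def demo_session_lifecycle(memory, creds_a, creds_b):
        # Agent A opens the session; the creator is the first participant.
        memory.create_session("review-123", creds_a, metadata={"topic": "refactor"})
        # Agent B joins; re-joining is idempotent because of the membership check.
        memory.join_session("review-123", creds_b)
        session = memory.get_session("review-123", creds_b)
        assert set(session["participants"]) == {creds_a.agent_id, creds_b.agent_id}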
-    # === Health Check ===
-
-    def ping(self) -> bool:
-        """Check Redis connection health
-
-        Returns:
-            True if connected and responsive
-
-        """
-        if self.use_mock:
-            return True
-        if self._client is None:
-            return False
-        try:
-            return bool(self._client.ping())
-        except Exception:
-            return False
-
-    def get_stats(self) -> dict:
-        """Get memory statistics
-
-        Returns:
-            Dict with memory stats
-
-        """
-        if self.use_mock:
-            # Use generator expressions for memory-efficient counting
-            return {
-                "mode": "mock",
-                "total_keys": len(self._mock_storage),
-                "working_keys": sum(
-                    1 for k in self._mock_storage if k.startswith(self.PREFIX_WORKING)
-                ),
-                "staged_keys": sum(
-                    1 for k in self._mock_storage if k.startswith(self.PREFIX_STAGED)
-                ),
-                "conflict_keys": sum(
-                    1 for k in self._mock_storage if k.startswith(self.PREFIX_CONFLICT)
-                ),
-            }
-
-        if self._client is None:
-            return {"mode": "disconnected", "error": "No Redis client"}
-        info = self._client.info("memory")
-        return {
-            "mode": "redis",
-            "used_memory": info.get("used_memory_human"),
-            "peak_memory": info.get("used_memory_peak_human"),
-            "total_keys": self._client.dbsize(),
-            "working_keys": len(self._keys(f"{self.PREFIX_WORKING}*")),
-            "staged_keys": len(self._keys(f"{self.PREFIX_STAGED}*")),
-            "conflict_keys": len(self._keys(f"{self.PREFIX_CONFLICT}*")),
-        }
-
-    def get_metrics(self) -> dict:
-        """Get operation metrics for observability.
-
-        Returns:
-            Dict with operation counts, latencies, and success rates
-
-        """
-        return self._metrics.to_dict()
-
-    def reset_metrics(self) -> None:
-        """Reset all metrics to zero."""
-        self._metrics = RedisMetrics()
-
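Together, ping(), get_stats(), and get_metrics() give a cheap liveness-plus-usage probe. A minimal health-report sketch, assuming memory is an instance of this class (placeholder name):

    def health_report(memory):
        # ping() is safe to call in mock mode and when no client is connected.
        report = {"alive": memory.ping(), "stats": memory.get_stats()}
        # Operation counts and latencies accumulated since the last reset_metrics().
        report["metrics"] = memory.get_metrics()
        return report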
-    # =========================================================================
-    # BATCH OPERATIONS
-    # =========================================================================
-
-    def stash_batch(
-        self,
-        items: list[tuple[str, Any]],
-        credentials: AgentCredentials,
-        ttl: TTLStrategy = TTLStrategy.WORKING_RESULTS,
-    ) -> int:
-        """Stash multiple items in a single operation.
-
-        Uses Redis pipeline for efficiency (reduces network round-trips).
-
-        Args:
-            items: List of (key, data) tuples
-            credentials: Agent credentials
-            ttl: Time-to-live strategy (applied to all items)
-
-        Returns:
-            Number of items successfully stashed
-
-        Raises:
-            TypeError: If items is not a list
-            PermissionError: If credentials lack write access
-
-        Example:
-            >>> items = [("key1", {"a": 1}), ("key2", {"b": 2})]
-            >>> count = memory.stash_batch(items, creds)
-
-        """
-        # Pattern 5: Type validation
-        if not isinstance(items, list):
-            raise TypeError(f"items must be list, got {type(items).__name__}")
-
-        if not credentials.can_stage():
-            raise PermissionError(
-                f"Agent {credentials.agent_id} cannot write to memory. "
-                "Requires CONTRIBUTOR tier or higher.",
-            )
-
-        if not items:
-            return 0
-
-        start_time = time.perf_counter()
-
-        if self.use_mock:
-            count = 0
-            for key, data in items:
-                full_key = f"{self.PREFIX_WORKING}{credentials.agent_id}:{key}"
-                payload = {
-                    "data": data,
-                    "agent_id": credentials.agent_id,
-                    "stashed_at": datetime.now().isoformat(),
-                }
-                expires = datetime.now().timestamp() + ttl.value
-                self._mock_storage[full_key] = (json.dumps(payload), expires)
-                count += 1
-            latency_ms = (time.perf_counter() - start_time) * 1000
-            self._metrics.record_operation("stash_batch", latency_ms)
-            return count
-
-        if self._client is None:
-            return 0
-
-        pipe = self._client.pipeline()
-        for key, data in items:
-            full_key = f"{self.PREFIX_WORKING}{credentials.agent_id}:{key}"
-            payload = {
-                "data": data,
-                "agent_id": credentials.agent_id,
-                "stashed_at": datetime.now().isoformat(),
-            }
-            pipe.setex(full_key, ttl.value, json.dumps(payload))
-
-        results = pipe.execute()
-        count = sum(1 for r in results if r)
-        latency_ms = (time.perf_counter() - start_time) * 1000
-        self._metrics.record_operation("stash_batch", latency_ms)
-
-        logger.info("batch_stash_complete", count=count, total=len(items))
-        return count
-
-    def retrieve_batch(
-        self,
-        keys: list[str],
-        credentials: AgentCredentials,
-        agent_id: str | None = None,
-    ) -> dict[str, Any]:
-        """Retrieve multiple items in a single operation.
-
-        Args:
-            keys: List of keys to retrieve
-            credentials: Agent credentials
-            agent_id: Owner agent ID (defaults to credentials agent)
-
-        Returns:
-            Dict mapping key to data (missing keys omitted)
-
-        Example:
-            >>> data = memory.retrieve_batch(["key1", "key2"], creds)
-            >>> print(data["key1"])
-
-        """
-        if not keys:
-            return {}
-
-        start_time = time.perf_counter()
-        owner = agent_id or credentials.agent_id
-        results: dict[str, Any] = {}
-
-        if self.use_mock:
-            for key in keys:
-                full_key = f"{self.PREFIX_WORKING}{owner}:{key}"
-                if full_key in self._mock_storage:
-                    value, expires = self._mock_storage[full_key]
-                    if expires is None or datetime.now().timestamp() < expires:
-                        payload = json.loads(str(value))
-                        results[key] = payload.get("data")
-            latency_ms = (time.perf_counter() - start_time) * 1000
-            self._metrics.record_operation("retrieve_batch", latency_ms)
-            return results
-
-        if self._client is None:
-            return {}
-
-        full_keys = [f"{self.PREFIX_WORKING}{owner}:{key}" for key in keys]
-        values = self._client.mget(full_keys)
-
-        for key, value in zip(keys, values, strict=False):
-            if value:
-                payload = json.loads(str(value))
-                results[key] = payload.get("data")
-
-        latency_ms = (time.perf_counter() - start_time) * 1000
-        self._metrics.record_operation("retrieve_batch", latency_ms)
-        return results
-
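The two batch calls are symmetric: stash_batch() writes through one pipeline, retrieve_batch() reads back through one MGET. A minimal round-trip sketch, assuming memory and creds as in the docstring examples above (e.g. a mock-mode instance and CONTRIBUTOR-tier credentials):

    def demo_batch_roundtrip(memory, creds):
        items = [("key1", {"a": 1}), ("key2", {"b": 2})]
        stored = memory.stash_batch(items, creds)
        # Missing or expired keys are simply omitted from the result dict.
        data = memory.retrieve_batch(["key1", "key2", "key3"], creds)
        assert stored == 2 and data["key1"] == {"a": 1} and "key3" not in data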
-    # =========================================================================
-    # SCAN-BASED PAGINATION
-    # =========================================================================
-
-    def list_staged_patterns_paginated(
-        self,
-        credentials: AgentCredentials,
-        cursor: str = "0",
-        count: int = 100,
-    ) -> PaginatedResult:
-        """List staged patterns with pagination using SCAN.
-
-        More efficient than list_staged_patterns() for large datasets.
-
-        Args:
-            credentials: Agent credentials
-            cursor: Pagination cursor (start with "0")
-            count: Maximum items per page
-
-        Returns:
-            PaginatedResult with items, cursor, and has_more flag
-
-        Example:
-            >>> result = memory.list_staged_patterns_paginated(creds, "0", 10)
-            >>> for pattern in result.items:
-            ...     print(pattern.name)
-            >>> if result.has_more:
-            ...     next_result = memory.list_staged_patterns_paginated(creds, result.cursor, 10)
-
-        """
-        start_time = time.perf_counter()
-        pattern = f"{self.PREFIX_STAGED}*"
-
-        if self.use_mock:
-            import fnmatch
-
-            all_keys = [k for k in self._mock_storage.keys() if fnmatch.fnmatch(k, pattern)]
-            start_idx = int(cursor)
-            end_idx = start_idx + count
-            page_keys = all_keys[start_idx:end_idx]
-
-            patterns = []
-            for key in page_keys:
-                raw_value, expires = self._mock_storage[key]
-                if expires is None or datetime.now().timestamp() < expires:
-                    patterns.append(StagedPattern.from_dict(json.loads(str(raw_value))))
-
-            new_cursor = str(end_idx) if end_idx < len(all_keys) else "0"
-            has_more = end_idx < len(all_keys)
-
-            latency_ms = (time.perf_counter() - start_time) * 1000
-            self._metrics.record_operation("list_paginated", latency_ms)
-
-            return PaginatedResult(
-                items=patterns,
-                cursor=new_cursor,
-                has_more=has_more,
-                total_scanned=len(page_keys),
-            )
-
-        if self._client is None:
-            return PaginatedResult(items=[], cursor="0", has_more=False)
-
-        # Use SCAN for efficient iteration
-        new_cursor, keys = self._client.scan(cursor=int(cursor), match=pattern, count=count)
-
-        patterns = []
-        for key in keys:
-            raw = self._client.get(key)
-            if raw:
-                patterns.append(StagedPattern.from_dict(json.loads(raw)))
-
-        has_more = new_cursor != 0
-
-        latency_ms = (time.perf_counter() - start_time) * 1000
-        self._metrics.record_operation("list_paginated", latency_ms)
-
-        return PaginatedResult(
-            items=patterns,
-            cursor=str(new_cursor),
-            has_more=has_more,
-            total_scanned=len(keys),
-        )
-
-    def scan_keys(
-        self,
-        pattern: str,
-        cursor: str = "0",
-        count: int = 100,
-    ) -> PaginatedResult:
-        """Scan keys matching a pattern with pagination.
-
-        Args:
-            pattern: Key pattern (e.g., "empathy:working:*")
-            cursor: Pagination cursor
-            count: Items per page
-
-        Returns:
-            PaginatedResult with key strings
-
-        """
-        if self.use_mock:
-            import fnmatch
-
-            all_keys = [k for k in self._mock_storage.keys() if fnmatch.fnmatch(k, pattern)]
-            start_idx = int(cursor)
-            end_idx = start_idx + count
-            page_keys = all_keys[start_idx:end_idx]
-            new_cursor = str(end_idx) if end_idx < len(all_keys) else "0"
-            has_more = end_idx < len(all_keys)
-            return PaginatedResult(items=page_keys, cursor=new_cursor, has_more=has_more)
-
-        if self._client is None:
-            return PaginatedResult(items=[], cursor="0", has_more=False)
-
-        new_cursor, keys = self._client.scan(cursor=int(cursor), match=pattern, count=count)
-        return PaginatedResult(
-            items=[str(k) for k in keys],
-            cursor=str(new_cursor),
-            has_more=new_cursor != 0,
-        )
-
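Because SCAN only signals completion by returning a zero cursor, callers should loop on has_more rather than counting pages. A minimal drain loop, assuming memory and creds as above:

    def drain_staged_patterns(memory, creds):
        patterns, cursor = [], "0"
        while True:
            page = memory.list_staged_patterns_paginated(creds, cursor, 100)
            patterns.extend(page.items)
            if not page.has_more:
                return patterns
            cursor = page.cursor  # resume where the last SCAN left off

Note that Redis SCAN guarantees every key present for the whole iteration is returned at least once but may return duplicates, so de-duplication is the caller's responsibility.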
-    # =========================================================================
-    # PUB/SUB FOR REAL-TIME NOTIFICATIONS
-    # =========================================================================
-
-    def publish(
-        self,
-        channel: str,
-        message: dict,
-        credentials: AgentCredentials,
-    ) -> int:
-        """Publish a message to a channel for real-time notifications.
-
-        Args:
-            channel: Channel name (will be prefixed)
-            message: Message payload (dict)
-            credentials: Agent credentials (must be CONTRIBUTOR+)
-
-        Returns:
-            Number of subscribers that received the message
-
-        Example:
-            >>> memory.publish("agent_signals", {"event": "task_complete", "task_id": "123"}, creds)
-
-        """
-        if not credentials.can_stage():
-            raise PermissionError(
-                f"Agent {credentials.agent_id} cannot publish. Requires CONTRIBUTOR tier or higher.",
-            )
-
-        start_time = time.perf_counter()
-        full_channel = f"{self.PREFIX_PUBSUB}{channel}"
-
-        payload = {
-            "channel": channel,
-            "from_agent": credentials.agent_id,
-            "timestamp": datetime.now().isoformat(),
-            "data": message,
-        }
-
-        if self.use_mock:
-            handlers = self._mock_pubsub_handlers.get(full_channel, [])
-            for handler in handlers:
-                try:
-                    handler(payload)
-                except Exception as e:
-                    logger.warning("pubsub_handler_error", channel=channel, error=str(e))
-            latency_ms = (time.perf_counter() - start_time) * 1000
-            self._metrics.record_operation("publish", latency_ms)
-            return len(handlers)
-
-        if self._client is None:
-            return 0
-
-        count = self._client.publish(full_channel, json.dumps(payload))
-        latency_ms = (time.perf_counter() - start_time) * 1000
-        self._metrics.record_operation("publish", latency_ms)
-
-        logger.debug("pubsub_published", channel=channel, subscribers=count)
-        return int(count)
-
-    def subscribe(
-        self,
-        channel: str,
-        handler: Callable[[dict], None],
-        credentials: AgentCredentials | None = None,
-    ) -> bool:
-        """Subscribe to a channel for real-time notifications.
-
-        Args:
-            channel: Channel name to subscribe to
-            handler: Callback function receiving message dict
-            credentials: Optional credentials (any tier can subscribe)
-
-        Returns:
-            True if subscribed successfully
-
-        Example:
-            >>> def on_message(msg):
-            ...     print(f"Received: {msg['data']}")
-            >>> memory.subscribe("agent_signals", on_message)
-
-        """
-        full_channel = f"{self.PREFIX_PUBSUB}{channel}"
-
-        if self.use_mock:
-            if full_channel not in self._mock_pubsub_handlers:
-                self._mock_pubsub_handlers[full_channel] = []
-            self._mock_pubsub_handlers[full_channel].append(handler)
-            logger.info("pubsub_subscribed_mock", channel=channel)
-            return True
-
-        if self._client is None:
-            return False
-
-        # Store handler
-        if full_channel not in self._subscriptions:
-            self._subscriptions[full_channel] = []
-        self._subscriptions[full_channel].append(handler)
-
-        # Create pubsub if needed
-        if self._pubsub is None:
-            self._pubsub = self._client.pubsub()
-
-        # Subscribe
-        self._pubsub.subscribe(**{full_channel: self._pubsub_message_handler})
-
-        # Start listener thread if not running
-        if not self._pubsub_running:
-            self._pubsub_running = True
-            self._pubsub_thread = threading.Thread(
-                target=self._pubsub_listener,
-                daemon=True,
-                name="redis-pubsub-listener",
-            )
-            self._pubsub_thread.start()
-
-        logger.info("pubsub_subscribed", channel=channel)
-        return True
-
-    def _pubsub_message_handler(self, message: dict) -> None:
-        """Internal handler for pubsub messages."""
-        if message["type"] != "message":
-            return
-
-        channel = message["channel"]
-        if isinstance(channel, bytes):
-            channel = channel.decode()
-
-        try:
-            payload = json.loads(message["data"])
-        except json.JSONDecodeError:
-            payload = {"raw": message["data"]}
-
-        handlers = self._subscriptions.get(channel, [])
-        for handler in handlers:
-            try:
-                handler(payload)
-            except Exception as e:
-                logger.warning("pubsub_handler_error", channel=channel, error=str(e))
-
-    def _pubsub_listener(self) -> None:
-        """Background thread for listening to pubsub messages."""
-        while self._pubsub_running and self._pubsub:
-            try:
-                self._pubsub.get_message(ignore_subscribe_messages=True, timeout=1.0)
-            except Exception as e:
-                logger.warning("pubsub_listener_error", error=str(e))
-                time.sleep(1)
-
-    def unsubscribe(self, channel: str) -> bool:
-        """Unsubscribe from a channel.
-
-        Args:
-            channel: Channel name to unsubscribe from
-
-        Returns:
-            True if unsubscribed successfully
-
-        """
-        full_channel = f"{self.PREFIX_PUBSUB}{channel}"
-
-        if self.use_mock:
-            self._mock_pubsub_handlers.pop(full_channel, None)
-            return True
-
-        if self._pubsub is None:
-            return False
-
-        self._pubsub.unsubscribe(full_channel)
-        self._subscriptions.pop(full_channel, None)
-        return True
-
-    def close_pubsub(self) -> None:
-        """Close pubsub connection and stop listener thread."""
-        self._pubsub_running = False
-        if self._pubsub:
-            self._pubsub.close()
-            self._pubsub = None
-        self._subscriptions.clear()
-
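In mock mode the publish/subscribe pair is synchronous (handlers run inline inside publish()), which makes it easy to exercise in tests. A minimal sketch, assuming memory is a mock-mode instance and creds is CONTRIBUTOR-tier:

    def demo_pubsub(memory, creds):
        seen = []
        memory.subscribe("agent_signals", seen.append)
        delivered = memory.publish("agent_signals", {"event": "task_complete"}, creds)
        # In mock mode the handler has already run by the time publish() returns.
        assert delivered == 1 and seen[0]["data"]["event"] == "task_complete"
        memory.unsubscribe("agent_signals")

Against real Redis the same calls are asynchronous: delivery happens on the daemon listener thread, so tests must poll or wait rather than assert immediately.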
-    # =========================================================================
-    # REDIS STREAMS FOR AUDIT TRAILS
-    # =========================================================================
-
-    def stream_append(
-        self,
-        stream_name: str,
-        data: dict,
-        credentials: AgentCredentials,
-        max_len: int = 10000,
-    ) -> str | None:
-        """Append an entry to a Redis Stream for audit trails.
-
-        Streams provide:
-        - Ordered, persistent event log
-        - Consumer groups for distributed processing
-        - Time-based retention
-
-        Args:
-            stream_name: Name of the stream
-            data: Event data to append
-            credentials: Agent credentials (must be CONTRIBUTOR+)
-            max_len: Maximum stream length (older entries trimmed)
-
-        Returns:
-            Entry ID if successful, None otherwise
-
-        Example:
-            >>> entry_id = memory.stream_append("audit", {"action": "pattern_promoted", "pattern_id": "xyz"}, creds)
-
-        """
-        if not credentials.can_stage():
-            raise PermissionError(
-                f"Agent {credentials.agent_id} cannot write to stream. "
-                "Requires CONTRIBUTOR tier or higher.",
-            )
-
-        start_time = time.perf_counter()
-        full_stream = f"{self.PREFIX_STREAM}{stream_name}"
-
-        entry = {
-            "agent_id": credentials.agent_id,
-            "timestamp": datetime.now().isoformat(),
-            **{
-                str(k): json.dumps(v) if isinstance(v, dict | list) else str(v)
-                for k, v in data.items()
-            },
-        }
-
-        if self.use_mock:
-            if full_stream not in self._mock_streams:
-                self._mock_streams[full_stream] = []
-            entry_id = f"{int(datetime.now().timestamp() * 1000)}-0"
-            self._mock_streams[full_stream].append((entry_id, entry))
-            # Trim to max_len
-            if len(self._mock_streams[full_stream]) > max_len:
-                self._mock_streams[full_stream] = self._mock_streams[full_stream][-max_len:]
-            latency_ms = (time.perf_counter() - start_time) * 1000
-            self._metrics.record_operation("stream_append", latency_ms)
-            return entry_id
-
-        if self._client is None:
-            return None
-
-        entry_id = self._client.xadd(full_stream, entry, maxlen=max_len)
-        latency_ms = (time.perf_counter() - start_time) * 1000
-        self._metrics.record_operation("stream_append", latency_ms)
-
-        return str(entry_id) if entry_id else None
-
-    def stream_read(
-        self,
-        stream_name: str,
-        credentials: AgentCredentials,
-        start_id: str = "0",
-        count: int = 100,
-    ) -> list[tuple[str, dict]]:
-        """Read entries from a Redis Stream.
-
-        Args:
-            stream_name: Name of the stream
-            credentials: Agent credentials
-            start_id: Start reading from this ID ("0" = beginning)
-            count: Maximum entries to read
-
-        Returns:
-            List of (entry_id, data) tuples
-
-        Example:
-            >>> entries = memory.stream_read("audit", creds, count=50)
-            >>> for entry_id, data in entries:
-            ...     print(f"{entry_id}: {data}")
-
-        """
-        full_stream = f"{self.PREFIX_STREAM}{stream_name}"
-
-        if self.use_mock:
-            if full_stream not in self._mock_streams:
-                return []
-            entries = self._mock_streams[full_stream]
-            # Filter by start_id (simple comparison)
-            filtered = [(eid, data) for eid, data in entries if eid > start_id]
-            return filtered[:count]
-
-        if self._client is None:
-            return []
-
-        result = self._client.xrange(full_stream, min=start_id, count=count)
-        return [(str(entry_id), {str(k): v for k, v in data.items()}) for entry_id, data in result]
-
-    def stream_read_new(
-        self,
-        stream_name: str,
-        credentials: AgentCredentials,
-        block_ms: int = 0,
-        count: int = 100,
-    ) -> list[tuple[str, dict]]:
-        """Read only new entries from a stream (blocking read).
-
-        Args:
-            stream_name: Name of the stream
-            credentials: Agent credentials
-            block_ms: Milliseconds to block waiting (0 = no block)
-            count: Maximum entries to read
-
-        Returns:
-            List of (entry_id, data) tuples
-
-        """
-        full_stream = f"{self.PREFIX_STREAM}{stream_name}"
-
-        if self.use_mock:
-            return []  # Mock doesn't support blocking reads
-
-        if self._client is None:
-            return []
-
-        # Redis treats BLOCK 0 as "block forever", so map 0 to None (non-blocking)
-        result = self._client.xread({full_stream: "$"}, block=block_ms if block_ms > 0 else None, count=count)
-        if not result:
-            return []
-
-        # Result format: [(stream_name, [(entry_id, data), ...])]
-        entries = []
-        for _stream, stream_entries in result:
-            for entry_id, data in stream_entries:
-                entries.append((str(entry_id), {str(k): v for k, v in data.items()}))
-        return entries
-
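stream_append() flattens nested values to strings before XADD, so audit consumers should expect string fields and decode JSON where needed. A minimal audit-trail round trip, assuming memory and creds as above:

    def demo_audit_trail(memory, creds):
        memory.stream_append("audit", {"action": "pattern_promoted", "pattern_id": "xyz"}, creds)
        for entry_id, fields in memory.stream_read("audit", creds, count=10):
            # Non-string values were stringified on write; dict/list fields need json.loads().
            print(entry_id, fields.get("action"), fields.get("agent_id"))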
-    # =========================================================================
-    # TIME-WINDOW QUERIES (SORTED SETS)
-    # =========================================================================
-
-    def timeline_add(
-        self,
-        timeline_name: str,
-        event_id: str,
-        data: dict,
-        credentials: AgentCredentials,
-        timestamp: datetime | None = None,
-    ) -> bool:
-        """Add an event to a timeline (sorted set by timestamp).
-
-        Args:
-            timeline_name: Name of the timeline
-            event_id: Unique event identifier
-            data: Event data
-            credentials: Agent credentials
-            timestamp: Event timestamp (defaults to now)
-
-        Returns:
-            True if added successfully
-
-        """
-        if not credentials.can_stage():
-            raise PermissionError(
-                f"Agent {credentials.agent_id} cannot write to timeline. "
-                "Requires CONTRIBUTOR tier or higher.",
-            )
-
-        full_timeline = f"{self.PREFIX_TIMELINE}{timeline_name}"
-        ts = timestamp or datetime.now()
-        score = ts.timestamp()
-
-        payload = json.dumps(
-            {
-                "event_id": event_id,
-                "timestamp": ts.isoformat(),
-                "agent_id": credentials.agent_id,
-                "data": data,
-            },
-        )
-
-        if self.use_mock:
-            if full_timeline not in self._mock_sorted_sets:
-                self._mock_sorted_sets[full_timeline] = []
-            self._mock_sorted_sets[full_timeline].append((score, payload))
-            self._mock_sorted_sets[full_timeline].sort(key=lambda x: x[0])
-            return True
-
-        if self._client is None:
-            return False
-
-        self._client.zadd(full_timeline, {payload: score})
-        return True
-
-    def timeline_query(
-        self,
-        timeline_name: str,
-        credentials: AgentCredentials,
-        query: TimeWindowQuery | None = None,
-    ) -> list[dict]:
-        """Query events from a timeline within a time window.
-
-        Args:
-            timeline_name: Name of the timeline
-            credentials: Agent credentials
-            query: Time window query parameters
-
-        Returns:
-            List of events in the time window
-
-        Example:
-            >>> from datetime import datetime, timedelta
-            >>> query = TimeWindowQuery(
-            ...     start_time=datetime.now() - timedelta(hours=1),
-            ...     end_time=datetime.now(),
-            ...     limit=50
-            ... )
-            >>> events = memory.timeline_query("agent_events", creds, query)
-
-        """
-        full_timeline = f"{self.PREFIX_TIMELINE}{timeline_name}"
-        q = query or TimeWindowQuery()
-
-        if self.use_mock:
-            if full_timeline not in self._mock_sorted_sets:
-                return []
-            entries = self._mock_sorted_sets[full_timeline]
-            filtered = [
-                json.loads(payload)
-                for score, payload in entries
-                if q.start_score <= score <= q.end_score
-            ]
-            return filtered[q.offset : q.offset + q.limit]
-
-        if self._client is None:
-            return []
-
-        results = self._client.zrangebyscore(
-            full_timeline,
-            min=q.start_score,
-            max=q.end_score,
-            start=q.offset,
-            num=q.limit,
-        )
-
-        return [json.loads(r) for r in results]
-
-    def timeline_count(
-        self,
-        timeline_name: str,
-        credentials: AgentCredentials,
-        query: TimeWindowQuery | None = None,
-    ) -> int:
-        """Count events in a timeline within a time window.
-
-        Args:
-            timeline_name: Name of the timeline
-            credentials: Agent credentials
-            query: Time window query parameters
-
-        Returns:
-            Number of events in the time window
-
-        """
-        full_timeline = f"{self.PREFIX_TIMELINE}{timeline_name}"
-        q = query or TimeWindowQuery()
-
-        if self.use_mock:
-            if full_timeline not in self._mock_sorted_sets:
-                return 0
-            entries = self._mock_sorted_sets[full_timeline]
-            return len([1 for score, _ in entries if q.start_score <= score <= q.end_score])
-
-        if self._client is None:
-            return 0
-
-        return int(self._client.zcount(full_timeline, q.start_score, q.end_score))
-
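Because events are scored by POSIX timestamp, a time window is just a score range over the sorted set. A minimal "events in the last hour" sketch, assuming memory and creds as above and the TimeWindowQuery constructor shown in the timeline_query docstring:

    from datetime import datetime, timedelta

    def events_last_hour(memory, creds):
        q = TimeWindowQuery(
            start_time=datetime.now() - timedelta(hours=1),
            end_time=datetime.now(),
            limit=100,
        )
        # zcount gives the total cheaply; zrangebyscore pages through the events.
        total = memory.timeline_count("agent_events", creds, q)
        return total, memory.timeline_query("agent_events", creds, q)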
-    # =========================================================================
-    # TASK QUEUES (LISTS)
-    # =========================================================================
-
-    def queue_push(
-        self,
-        queue_name: str,
-        task: dict,
-        credentials: AgentCredentials,
-        priority: bool = False,
-    ) -> int:
-        """Push a task to a queue.
-
-        Args:
-            queue_name: Name of the queue
-            task: Task data
-            credentials: Agent credentials (must be CONTRIBUTOR+)
-            priority: If True, push to front (high priority)
-
-        Returns:
-            New queue length
-
-        Example:
-            >>> task = {"type": "analyze", "file": "main.py"}
-            >>> memory.queue_push("agent_tasks", task, creds)
-
-        """
-        if not credentials.can_stage():
-            raise PermissionError(
-                f"Agent {credentials.agent_id} cannot push to queue. "
-                "Requires CONTRIBUTOR tier or higher.",
-            )
-
-        full_queue = f"{self.PREFIX_QUEUE}{queue_name}"
-        payload = json.dumps(
-            {
-                "task": task,
-                "queued_by": credentials.agent_id,
-                "queued_at": datetime.now().isoformat(),
-            },
-        )
-
-        if self.use_mock:
-            if full_queue not in self._mock_lists:
-                self._mock_lists[full_queue] = []
-            if priority:
-                self._mock_lists[full_queue].insert(0, payload)
-            else:
-                self._mock_lists[full_queue].append(payload)
-            return len(self._mock_lists[full_queue])
-
-        if self._client is None:
-            return 0
-
-        if priority:
-            return int(self._client.lpush(full_queue, payload))
-        return int(self._client.rpush(full_queue, payload))
-
-    def queue_pop(
-        self,
-        queue_name: str,
-        credentials: AgentCredentials,
-        timeout: int = 0,
-    ) -> dict | None:
-        """Pop a task from a queue.
-
-        Args:
-            queue_name: Name of the queue
-            credentials: Agent credentials
-            timeout: Seconds to block waiting (0 = no block)
-
-        Returns:
-            Task data or None if queue empty
-
-        Example:
-            >>> task = memory.queue_pop("agent_tasks", creds, timeout=5)
-            >>> if task:
-            ...     process(task["task"])
-
-        """
-        full_queue = f"{self.PREFIX_QUEUE}{queue_name}"
-
-        if self.use_mock:
-            # Mock mode never blocks, even when timeout > 0
-            if full_queue not in self._mock_lists or not self._mock_lists[full_queue]:
-                return None
-            payload = self._mock_lists[full_queue].pop(0)
-            data: dict = json.loads(payload)
-            return data
-
-        if self._client is None:
-            return None
-
-        if timeout > 0:
-            result = self._client.blpop(full_queue, timeout=timeout)
-            if result:
-                data = json.loads(result[1])
-                return data
-            return None
-
-        result = self._client.lpop(full_queue)
-        if result:
-            data = json.loads(result)
-            return data
-        return None
-
-    def queue_length(self, queue_name: str) -> int:
-        """Get the length of a queue.
-
-        Args:
-            queue_name: Name of the queue
-
-        Returns:
-            Number of items in the queue
-
-        """
-        full_queue = f"{self.PREFIX_QUEUE}{queue_name}"
-
-        if self.use_mock:
-            return len(self._mock_lists.get(full_queue, []))
-
-        if self._client is None:
-            return 0
-
-        return int(self._client.llen(full_queue))
-
-    def queue_peek(
-        self,
-        queue_name: str,
-        credentials: AgentCredentials,
-        count: int = 1,
-    ) -> list[dict]:
-        """Peek at tasks in a queue without removing them.
-
-        Args:
-            queue_name: Name of the queue
-            credentials: Agent credentials
-            count: Number of items to peek
-
-        Returns:
-            List of task data
-
-        """
-        full_queue = f"{self.PREFIX_QUEUE}{queue_name}"
-
-        if self.use_mock:
-            items = self._mock_lists.get(full_queue, [])[:count]
-            return [json.loads(item) for item in items]
-
-        if self._client is None:
-            return []
-
-        items = self._client.lrange(full_queue, 0, count - 1)
-        return [json.loads(item) for item in items]
-
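The queue is a plain Redis list: normal pushes go to the tail (RPUSH), priority pushes to the head (LPUSH), and pops always take from the head, so priority tasks jump the line. A minimal producer/consumer sketch, assuming memory and creds as above:

    def demo_queue(memory, creds):
        memory.queue_push("agent_tasks", {"type": "analyze", "file": "main.py"}, creds)
        memory.queue_push("agent_tasks", {"type": "hotfix"}, creds, priority=True)
        first = memory.queue_pop("agent_tasks", creds)
        # The priority task comes out first; the envelope records who queued it and when.
        assert first["task"]["type"] == "hotfix" and "queued_by" in first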
-    # =========================================================================
-    # ATOMIC TRANSACTIONS
-    # =========================================================================
-
-    def atomic_promote_pattern(
-        self,
-        pattern_id: str,
-        credentials: AgentCredentials,
-        min_confidence: float = 0.0,
-    ) -> tuple[bool, StagedPattern | None, str]:
-        """Atomically promote a pattern with validation.
-
-        Uses Redis transaction (MULTI/EXEC) to ensure:
-        - Pattern exists and meets confidence threshold
-        - Pattern is removed from staging atomically
-        - No race conditions with concurrent operations
-
-        Args:
-            pattern_id: Pattern to promote
-            credentials: Must be VALIDATOR or higher
-            min_confidence: Minimum confidence threshold
-
-        Returns:
-            Tuple of (success, pattern, message)
-
-        Raises:
-            ValueError: If pattern_id is empty or min_confidence out of range
-
-        Example:
-            >>> success, pattern, msg = memory.atomic_promote_pattern("pat_123", creds, min_confidence=0.7)
-            >>> if success:
-            ...     library.add(pattern)
-
-        """
-        # Pattern 1: String ID validation
-        if not pattern_id or not pattern_id.strip():
-            raise ValueError(f"pattern_id cannot be empty. Got: {pattern_id!r}")
-
-        # Pattern 4: Range validation
-        if not 0.0 <= min_confidence <= 1.0:
-            raise ValueError(f"min_confidence must be between 0.0 and 1.0, got {min_confidence}")
-
-        if not credentials.can_validate():
-            return False, None, "Requires VALIDATOR tier or higher"
-
-        key = f"{self.PREFIX_STAGED}{pattern_id}"
-
-        if self.use_mock:
-            if key not in self._mock_storage:
-                return False, None, "Pattern not found"
-            value, expires = self._mock_storage[key]
-            if expires and datetime.now().timestamp() >= expires:
-                return False, None, "Pattern expired"
-            pattern = StagedPattern.from_dict(json.loads(str(value)))
-            if pattern.confidence < min_confidence:
-                return (
-                    False,
-                    None,
-                    f"Confidence {pattern.confidence} below threshold {min_confidence}",
-                )
-            del self._mock_storage[key]
-            # Also invalidate local cache
-            if key in self._local_cache:
-                del self._local_cache[key]
-            return True, pattern, "Pattern promoted successfully"
-
-        if self._client is None:
-            return False, None, "Redis not connected"
-
-        # Use WATCH for optimistic locking. WATCH, MULTI, and EXEC must all run
-        # on the same pipeline connection, otherwise the watch protects nothing.
-        with self._client.pipeline() as pipe:
-            try:
-                pipe.watch(key)
-                raw = pipe.get(key)
-
-                if raw is None:
-                    pipe.unwatch()
-                    return False, None, "Pattern not found"
-
-                pattern = StagedPattern.from_dict(json.loads(raw))
-
-                if pattern.confidence < min_confidence:
-                    pipe.unwatch()
-                    return (
-                        False,
-                        None,
-                        f"Confidence {pattern.confidence} below threshold {min_confidence}",
-                    )
-
-                # Execute atomic delete; raises WatchError if the key changed
-                pipe.multi()
-                pipe.delete(key)
-                pipe.execute()
-
-                # Also invalidate local cache
-                if key in self._local_cache:
-                    del self._local_cache[key]
-
-                return True, pattern, "Pattern promoted successfully"
-
-            except redis.WatchError:
-                return False, None, "Pattern was modified by another process"
-
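Callers can treat the "modified by another process" message as a retriable condition, since a WATCH conflict just means another writer won the race. A minimal retry wrapper, assuming memory and VALIDATOR-tier creds as above (the helper name is illustrative):

    def promote_with_retry(memory, creds, pattern_id, attempts=3):
        for _ in range(attempts):
            ok, pattern, msg = memory.atomic_promote_pattern(pattern_id, creds, min_confidence=0.7)
            # Only a WATCH conflict is worth retrying; other failures are final.
            if ok or msg != "Pattern was modified by another process":
                return ok, pattern, msg
        return False, None, "Gave up after repeated write conflicts"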
-    # =========================================================================
-    # CROSS-SESSION COMMUNICATION
-    # =========================================================================
-
-    def enable_cross_session(
-        self,
-        access_tier: AccessTier = AccessTier.CONTRIBUTOR,
-        auto_announce: bool = True,
-    ) -> "CrossSessionCoordinator":
-        """Enable cross-session communication for this memory instance.
-
-        This allows agents in different Claude Code sessions to communicate
-        and coordinate via Redis.
-
-        Args:
-            access_tier: Access tier for this session
-            auto_announce: Whether to announce presence automatically
-
-        Returns:
-            CrossSessionCoordinator instance
-
-        Raises:
-            ValueError: If in mock mode (Redis required for cross-session)
-
-        Example:
-            >>> memory = RedisShortTermMemory()
-            >>> coordinator = memory.enable_cross_session(AccessTier.CONTRIBUTOR)
-            >>> print(f"Session ID: {coordinator.agent_id}")
-            >>> sessions = coordinator.get_active_sessions()
-
-        """
-        if self.use_mock:
-            raise ValueError(
-                "Cross-session communication requires Redis. "
-                "Set REDIS_HOST/REDIS_PORT or disable mock mode."
-            )
-
-        from .cross_session import CrossSessionCoordinator, SessionType
-
-        coordinator = CrossSessionCoordinator(
-            memory=self,
-            session_type=SessionType.CLAUDE,
-            access_tier=access_tier,
-            auto_announce=auto_announce,
-        )
-
-        return coordinator
-
-    def cross_session_available(self) -> bool:
-        """Check if cross-session communication is available.
-
-        Returns:
-            True if Redis is connected (not mock mode)
-
-        """
-        return not self.use_mock and self._client is not None
-
-    # =========================================================================
-    # CLEANUP AND LIFECYCLE
-    # =========================================================================
-
-    def close(self) -> None:
-        """Close all connections and cleanup resources."""
-        self.close_pubsub()
-        if self._client:
-            self._client.close()
-            self._client = None
-        logger.info("redis_connection_closed")
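
Since close() tears down the pub/sub listener thread and then the client connection, setup should always be paired with teardown, and enable_cross_session() should be guarded by cross_session_available() because it raises in mock mode. A minimal lifecycle sketch, assuming the RedisShortTermMemory constructor shown in the enable_cross_session docstring:

    def run_session():
        memory = RedisShortTermMemory()
        try:
            if memory.cross_session_available():
                coordinator = memory.enable_cross_session()
            # ... do coordinated work ...
        finally:
            memory.close()  # stops the listener thread, then closes the client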