empathy-framework 5.2.1-py3-none-any.whl → 5.4.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (480)
  1. empathy_framework-5.4.0.dist-info/METADATA +47 -0
  2. empathy_framework-5.4.0.dist-info/RECORD +8 -0
  3. {empathy_framework-5.2.1.dist-info → empathy_framework-5.4.0.dist-info}/top_level.txt +0 -1
  4. empathy_healthcare_plugin/__init__.py +12 -11
  5. empathy_llm_toolkit/__init__.py +12 -26
  6. empathy_os/__init__.py +12 -356
  7. empathy_software_plugin/__init__.py +12 -11
  8. empathy_framework-5.2.1.dist-info/METADATA +0 -1002
  9. empathy_framework-5.2.1.dist-info/RECORD +0 -478
  10. empathy_framework-5.2.1.dist-info/entry_points.txt +0 -26
  11. empathy_framework-5.2.1.dist-info/licenses/LICENSE +0 -201
  12. empathy_framework-5.2.1.dist-info/licenses/LICENSE_CHANGE_ANNOUNCEMENT.md +0 -101
  13. empathy_healthcare_plugin/monitors/__init__.py +0 -9
  14. empathy_healthcare_plugin/monitors/clinical_protocol_monitor.py +0 -315
  15. empathy_healthcare_plugin/monitors/monitoring/__init__.py +0 -44
  16. empathy_healthcare_plugin/monitors/monitoring/protocol_checker.py +0 -300
  17. empathy_healthcare_plugin/monitors/monitoring/protocol_loader.py +0 -214
  18. empathy_healthcare_plugin/monitors/monitoring/sensor_parsers.py +0 -306
  19. empathy_healthcare_plugin/monitors/monitoring/trajectory_analyzer.py +0 -389
  20. empathy_healthcare_plugin/protocols/cardiac.json +0 -93
  21. empathy_healthcare_plugin/protocols/post_operative.json +0 -92
  22. empathy_healthcare_plugin/protocols/respiratory.json +0 -92
  23. empathy_healthcare_plugin/protocols/sepsis.json +0 -141
  24. empathy_llm_toolkit/README.md +0 -553
  25. empathy_llm_toolkit/agent_factory/__init__.py +0 -53
  26. empathy_llm_toolkit/agent_factory/adapters/__init__.py +0 -85
  27. empathy_llm_toolkit/agent_factory/adapters/autogen_adapter.py +0 -312
  28. empathy_llm_toolkit/agent_factory/adapters/crewai_adapter.py +0 -483
  29. empathy_llm_toolkit/agent_factory/adapters/haystack_adapter.py +0 -298
  30. empathy_llm_toolkit/agent_factory/adapters/langchain_adapter.py +0 -362
  31. empathy_llm_toolkit/agent_factory/adapters/langgraph_adapter.py +0 -333
  32. empathy_llm_toolkit/agent_factory/adapters/native.py +0 -228
  33. empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +0 -423
  34. empathy_llm_toolkit/agent_factory/base.py +0 -305
  35. empathy_llm_toolkit/agent_factory/crews/__init__.py +0 -67
  36. empathy_llm_toolkit/agent_factory/crews/code_review.py +0 -1113
  37. empathy_llm_toolkit/agent_factory/crews/health_check.py +0 -1262
  38. empathy_llm_toolkit/agent_factory/crews/refactoring.py +0 -1128
  39. empathy_llm_toolkit/agent_factory/crews/security_audit.py +0 -1018
  40. empathy_llm_toolkit/agent_factory/decorators.py +0 -287
  41. empathy_llm_toolkit/agent_factory/factory.py +0 -558
  42. empathy_llm_toolkit/agent_factory/framework.py +0 -193
  43. empathy_llm_toolkit/agent_factory/memory_integration.py +0 -328
  44. empathy_llm_toolkit/agent_factory/resilient.py +0 -320
  45. empathy_llm_toolkit/agents_md/__init__.py +0 -22
  46. empathy_llm_toolkit/agents_md/loader.py +0 -218
  47. empathy_llm_toolkit/agents_md/parser.py +0 -271
  48. empathy_llm_toolkit/agents_md/registry.py +0 -307
  49. empathy_llm_toolkit/claude_memory.py +0 -466
  50. empathy_llm_toolkit/cli/__init__.py +0 -8
  51. empathy_llm_toolkit/cli/sync_claude.py +0 -487
  52. empathy_llm_toolkit/code_health.py +0 -1313
  53. empathy_llm_toolkit/commands/__init__.py +0 -51
  54. empathy_llm_toolkit/commands/context.py +0 -375
  55. empathy_llm_toolkit/commands/loader.py +0 -301
  56. empathy_llm_toolkit/commands/models.py +0 -231
  57. empathy_llm_toolkit/commands/parser.py +0 -371
  58. empathy_llm_toolkit/commands/registry.py +0 -429
  59. empathy_llm_toolkit/config/__init__.py +0 -29
  60. empathy_llm_toolkit/config/unified.py +0 -291
  61. empathy_llm_toolkit/context/__init__.py +0 -22
  62. empathy_llm_toolkit/context/compaction.py +0 -455
  63. empathy_llm_toolkit/context/manager.py +0 -434
  64. empathy_llm_toolkit/contextual_patterns.py +0 -361
  65. empathy_llm_toolkit/core.py +0 -907
  66. empathy_llm_toolkit/git_pattern_extractor.py +0 -435
  67. empathy_llm_toolkit/hooks/__init__.py +0 -24
  68. empathy_llm_toolkit/hooks/config.py +0 -306
  69. empathy_llm_toolkit/hooks/executor.py +0 -289
  70. empathy_llm_toolkit/hooks/registry.py +0 -302
  71. empathy_llm_toolkit/hooks/scripts/__init__.py +0 -39
  72. empathy_llm_toolkit/hooks/scripts/evaluate_session.py +0 -201
  73. empathy_llm_toolkit/hooks/scripts/first_time_init.py +0 -285
  74. empathy_llm_toolkit/hooks/scripts/pre_compact.py +0 -207
  75. empathy_llm_toolkit/hooks/scripts/session_end.py +0 -183
  76. empathy_llm_toolkit/hooks/scripts/session_start.py +0 -163
  77. empathy_llm_toolkit/hooks/scripts/suggest_compact.py +0 -225
  78. empathy_llm_toolkit/learning/__init__.py +0 -30
  79. empathy_llm_toolkit/learning/evaluator.py +0 -438
  80. empathy_llm_toolkit/learning/extractor.py +0 -514
  81. empathy_llm_toolkit/learning/storage.py +0 -560
  82. empathy_llm_toolkit/levels.py +0 -227
  83. empathy_llm_toolkit/pattern_confidence.py +0 -414
  84. empathy_llm_toolkit/pattern_resolver.py +0 -272
  85. empathy_llm_toolkit/pattern_summary.py +0 -350
  86. empathy_llm_toolkit/providers.py +0 -967
  87. empathy_llm_toolkit/routing/__init__.py +0 -32
  88. empathy_llm_toolkit/routing/model_router.py +0 -362
  89. empathy_llm_toolkit/security/IMPLEMENTATION_SUMMARY.md +0 -413
  90. empathy_llm_toolkit/security/PHASE2_COMPLETE.md +0 -384
  91. empathy_llm_toolkit/security/PHASE2_SECRETS_DETECTOR_COMPLETE.md +0 -271
  92. empathy_llm_toolkit/security/QUICK_REFERENCE.md +0 -316
  93. empathy_llm_toolkit/security/README.md +0 -262
  94. empathy_llm_toolkit/security/__init__.py +0 -62
  95. empathy_llm_toolkit/security/audit_logger.py +0 -929
  96. empathy_llm_toolkit/security/audit_logger_example.py +0 -152
  97. empathy_llm_toolkit/security/pii_scrubber.py +0 -640
  98. empathy_llm_toolkit/security/secrets_detector.py +0 -678
  99. empathy_llm_toolkit/security/secrets_detector_example.py +0 -304
  100. empathy_llm_toolkit/security/secure_memdocs.py +0 -1192
  101. empathy_llm_toolkit/security/secure_memdocs_example.py +0 -278
  102. empathy_llm_toolkit/session_status.py +0 -745
  103. empathy_llm_toolkit/state.py +0 -246
  104. empathy_llm_toolkit/utils/__init__.py +0 -5
  105. empathy_llm_toolkit/utils/tokens.py +0 -349
  106. empathy_os/adaptive/__init__.py +0 -13
  107. empathy_os/adaptive/task_complexity.py +0 -127
  108. empathy_os/agent_monitoring.py +0 -414
  109. empathy_os/cache/__init__.py +0 -117
  110. empathy_os/cache/base.py +0 -166
  111. empathy_os/cache/dependency_manager.py +0 -256
  112. empathy_os/cache/hash_only.py +0 -251
  113. empathy_os/cache/hybrid.py +0 -453
  114. empathy_os/cache/storage.py +0 -285
  115. empathy_os/cache_monitor.py +0 -356
  116. empathy_os/cache_stats.py +0 -298
  117. empathy_os/cli/__init__.py +0 -152
  118. empathy_os/cli/__main__.py +0 -12
  119. empathy_os/cli/commands/__init__.py +0 -1
  120. empathy_os/cli/commands/batch.py +0 -256
  121. empathy_os/cli/commands/cache.py +0 -248
  122. empathy_os/cli/commands/help.py +0 -331
  123. empathy_os/cli/commands/info.py +0 -140
  124. empathy_os/cli/commands/inspect.py +0 -436
  125. empathy_os/cli/commands/inspection.py +0 -57
  126. empathy_os/cli/commands/memory.py +0 -48
  127. empathy_os/cli/commands/metrics.py +0 -92
  128. empathy_os/cli/commands/orchestrate.py +0 -184
  129. empathy_os/cli/commands/patterns.py +0 -207
  130. empathy_os/cli/commands/profiling.py +0 -198
  131. empathy_os/cli/commands/provider.py +0 -98
  132. empathy_os/cli/commands/routing.py +0 -285
  133. empathy_os/cli/commands/setup.py +0 -96
  134. empathy_os/cli/commands/status.py +0 -235
  135. empathy_os/cli/commands/sync.py +0 -166
  136. empathy_os/cli/commands/tier.py +0 -121
  137. empathy_os/cli/commands/utilities.py +0 -114
  138. empathy_os/cli/commands/workflow.py +0 -575
  139. empathy_os/cli/core.py +0 -32
  140. empathy_os/cli/parsers/__init__.py +0 -68
  141. empathy_os/cli/parsers/batch.py +0 -118
  142. empathy_os/cli/parsers/cache 2.py +0 -65
  143. empathy_os/cli/parsers/cache.py +0 -65
  144. empathy_os/cli/parsers/help.py +0 -41
  145. empathy_os/cli/parsers/info.py +0 -26
  146. empathy_os/cli/parsers/inspect.py +0 -66
  147. empathy_os/cli/parsers/metrics.py +0 -42
  148. empathy_os/cli/parsers/orchestrate.py +0 -61
  149. empathy_os/cli/parsers/patterns.py +0 -54
  150. empathy_os/cli/parsers/provider.py +0 -40
  151. empathy_os/cli/parsers/routing.py +0 -110
  152. empathy_os/cli/parsers/setup.py +0 -42
  153. empathy_os/cli/parsers/status.py +0 -47
  154. empathy_os/cli/parsers/sync.py +0 -31
  155. empathy_os/cli/parsers/tier.py +0 -33
  156. empathy_os/cli/parsers/workflow.py +0 -77
  157. empathy_os/cli/utils/__init__.py +0 -1
  158. empathy_os/cli/utils/data.py +0 -242
  159. empathy_os/cli/utils/helpers.py +0 -68
  160. empathy_os/cli_legacy.py +0 -3957
  161. empathy_os/cli_minimal.py +0 -1159
  162. empathy_os/cli_router 2.py +0 -416
  163. empathy_os/cli_router.py +0 -437
  164. empathy_os/cli_unified.py +0 -814
  165. empathy_os/config/__init__.py +0 -66
  166. empathy_os/config/xml_config.py +0 -286
  167. empathy_os/config.py +0 -532
  168. empathy_os/coordination.py +0 -870
  169. empathy_os/core.py +0 -1511
  170. empathy_os/core_modules/__init__.py +0 -15
  171. empathy_os/cost_tracker.py +0 -626
  172. empathy_os/dashboard/__init__.py +0 -41
  173. empathy_os/dashboard/app 2.py +0 -512
  174. empathy_os/dashboard/app.py +0 -512
  175. empathy_os/dashboard/simple_server 2.py +0 -403
  176. empathy_os/dashboard/simple_server.py +0 -403
  177. empathy_os/dashboard/standalone_server 2.py +0 -536
  178. empathy_os/dashboard/standalone_server.py +0 -547
  179. empathy_os/discovery.py +0 -306
  180. empathy_os/emergence.py +0 -306
  181. empathy_os/exceptions.py +0 -123
  182. empathy_os/feedback_loops.py +0 -373
  183. empathy_os/hot_reload/README.md +0 -473
  184. empathy_os/hot_reload/__init__.py +0 -62
  185. empathy_os/hot_reload/config.py +0 -83
  186. empathy_os/hot_reload/integration.py +0 -229
  187. empathy_os/hot_reload/reloader.py +0 -298
  188. empathy_os/hot_reload/watcher.py +0 -183
  189. empathy_os/hot_reload/websocket.py +0 -177
  190. empathy_os/levels.py +0 -577
  191. empathy_os/leverage_points.py +0 -441
  192. empathy_os/logging_config.py +0 -261
  193. empathy_os/mcp/__init__.py +0 -10
  194. empathy_os/mcp/server.py +0 -506
  195. empathy_os/memory/__init__.py +0 -237
  196. empathy_os/memory/claude_memory.py +0 -469
  197. empathy_os/memory/config.py +0 -224
  198. empathy_os/memory/control_panel.py +0 -1290
  199. empathy_os/memory/control_panel_support.py +0 -145
  200. empathy_os/memory/cross_session.py +0 -845
  201. empathy_os/memory/edges.py +0 -179
  202. empathy_os/memory/encryption.py +0 -159
  203. empathy_os/memory/file_session.py +0 -770
  204. empathy_os/memory/graph.py +0 -570
  205. empathy_os/memory/long_term.py +0 -913
  206. empathy_os/memory/long_term_types.py +0 -99
  207. empathy_os/memory/mixins/__init__.py +0 -25
  208. empathy_os/memory/mixins/backend_init_mixin.py +0 -244
  209. empathy_os/memory/mixins/capabilities_mixin.py +0 -199
  210. empathy_os/memory/mixins/handoff_mixin.py +0 -208
  211. empathy_os/memory/mixins/lifecycle_mixin.py +0 -49
  212. empathy_os/memory/mixins/long_term_mixin.py +0 -352
  213. empathy_os/memory/mixins/promotion_mixin.py +0 -109
  214. empathy_os/memory/mixins/short_term_mixin.py +0 -182
  215. empathy_os/memory/nodes.py +0 -179
  216. empathy_os/memory/redis_bootstrap.py +0 -540
  217. empathy_os/memory/security/__init__.py +0 -31
  218. empathy_os/memory/security/audit_logger.py +0 -932
  219. empathy_os/memory/security/pii_scrubber.py +0 -640
  220. empathy_os/memory/security/secrets_detector.py +0 -678
  221. empathy_os/memory/short_term.py +0 -2150
  222. empathy_os/memory/simple_storage.py +0 -302
  223. empathy_os/memory/storage/__init__.py +0 -15
  224. empathy_os/memory/storage_backend.py +0 -167
  225. empathy_os/memory/summary_index.py +0 -583
  226. empathy_os/memory/types.py +0 -441
  227. empathy_os/memory/unified.py +0 -182
  228. empathy_os/meta_workflows/__init__.py +0 -74
  229. empathy_os/meta_workflows/agent_creator.py +0 -248
  230. empathy_os/meta_workflows/builtin_templates.py +0 -567
  231. empathy_os/meta_workflows/cli_commands/__init__.py +0 -56
  232. empathy_os/meta_workflows/cli_commands/agent_commands.py +0 -321
  233. empathy_os/meta_workflows/cli_commands/analytics_commands.py +0 -442
  234. empathy_os/meta_workflows/cli_commands/config_commands.py +0 -232
  235. empathy_os/meta_workflows/cli_commands/memory_commands.py +0 -182
  236. empathy_os/meta_workflows/cli_commands/template_commands.py +0 -354
  237. empathy_os/meta_workflows/cli_commands/workflow_commands.py +0 -382
  238. empathy_os/meta_workflows/cli_meta_workflows.py +0 -59
  239. empathy_os/meta_workflows/form_engine.py +0 -292
  240. empathy_os/meta_workflows/intent_detector.py +0 -409
  241. empathy_os/meta_workflows/models.py +0 -569
  242. empathy_os/meta_workflows/pattern_learner.py +0 -738
  243. empathy_os/meta_workflows/plan_generator.py +0 -384
  244. empathy_os/meta_workflows/session_context.py +0 -397
  245. empathy_os/meta_workflows/template_registry.py +0 -229
  246. empathy_os/meta_workflows/workflow.py +0 -984
  247. empathy_os/metrics/__init__.py +0 -12
  248. empathy_os/metrics/collector.py +0 -31
  249. empathy_os/metrics/prompt_metrics.py +0 -194
  250. empathy_os/models/__init__.py +0 -172
  251. empathy_os/models/__main__.py +0 -13
  252. empathy_os/models/adaptive_routing 2.py +0 -437
  253. empathy_os/models/adaptive_routing.py +0 -437
  254. empathy_os/models/auth_cli.py +0 -444
  255. empathy_os/models/auth_strategy.py +0 -450
  256. empathy_os/models/cli.py +0 -655
  257. empathy_os/models/empathy_executor.py +0 -354
  258. empathy_os/models/executor.py +0 -257
  259. empathy_os/models/fallback.py +0 -762
  260. empathy_os/models/provider_config.py +0 -282
  261. empathy_os/models/registry.py +0 -472
  262. empathy_os/models/tasks.py +0 -359
  263. empathy_os/models/telemetry/__init__.py +0 -71
  264. empathy_os/models/telemetry/analytics.py +0 -594
  265. empathy_os/models/telemetry/backend.py +0 -196
  266. empathy_os/models/telemetry/data_models.py +0 -431
  267. empathy_os/models/telemetry/storage.py +0 -489
  268. empathy_os/models/token_estimator.py +0 -420
  269. empathy_os/models/validation.py +0 -280
  270. empathy_os/monitoring/__init__.py +0 -52
  271. empathy_os/monitoring/alerts.py +0 -946
  272. empathy_os/monitoring/alerts_cli.py +0 -448
  273. empathy_os/monitoring/multi_backend.py +0 -271
  274. empathy_os/monitoring/otel_backend.py +0 -362
  275. empathy_os/optimization/__init__.py +0 -19
  276. empathy_os/optimization/context_optimizer.py +0 -272
  277. empathy_os/orchestration/__init__.py +0 -67
  278. empathy_os/orchestration/agent_templates.py +0 -707
  279. empathy_os/orchestration/config_store.py +0 -499
  280. empathy_os/orchestration/execution_strategies.py +0 -2111
  281. empathy_os/orchestration/meta_orchestrator.py +0 -1168
  282. empathy_os/orchestration/pattern_learner.py +0 -696
  283. empathy_os/orchestration/real_tools.py +0 -931
  284. empathy_os/pattern_cache.py +0 -187
  285. empathy_os/pattern_library.py +0 -542
  286. empathy_os/patterns/debugging/all_patterns.json +0 -81
  287. empathy_os/patterns/debugging/workflow_20260107_1770825e.json +0 -77
  288. empathy_os/patterns/refactoring_memory.json +0 -89
  289. empathy_os/persistence.py +0 -564
  290. empathy_os/platform_utils.py +0 -265
  291. empathy_os/plugins/__init__.py +0 -28
  292. empathy_os/plugins/base.py +0 -361
  293. empathy_os/plugins/registry.py +0 -268
  294. empathy_os/project_index/__init__.py +0 -32
  295. empathy_os/project_index/cli.py +0 -335
  296. empathy_os/project_index/index.py +0 -667
  297. empathy_os/project_index/models.py +0 -504
  298. empathy_os/project_index/reports.py +0 -474
  299. empathy_os/project_index/scanner.py +0 -777
  300. empathy_os/project_index/scanner_parallel 2.py +0 -291
  301. empathy_os/project_index/scanner_parallel.py +0 -291
  302. empathy_os/prompts/__init__.py +0 -61
  303. empathy_os/prompts/config.py +0 -77
  304. empathy_os/prompts/context.py +0 -177
  305. empathy_os/prompts/parser.py +0 -285
  306. empathy_os/prompts/registry.py +0 -313
  307. empathy_os/prompts/templates.py +0 -208
  308. empathy_os/redis_config.py +0 -302
  309. empathy_os/redis_memory.py +0 -799
  310. empathy_os/resilience/__init__.py +0 -56
  311. empathy_os/resilience/circuit_breaker.py +0 -256
  312. empathy_os/resilience/fallback.py +0 -179
  313. empathy_os/resilience/health.py +0 -300
  314. empathy_os/resilience/retry.py +0 -209
  315. empathy_os/resilience/timeout.py +0 -135
  316. empathy_os/routing/__init__.py +0 -43
  317. empathy_os/routing/chain_executor.py +0 -433
  318. empathy_os/routing/classifier.py +0 -217
  319. empathy_os/routing/smart_router.py +0 -234
  320. empathy_os/routing/workflow_registry.py +0 -343
  321. empathy_os/scaffolding/README.md +0 -589
  322. empathy_os/scaffolding/__init__.py +0 -35
  323. empathy_os/scaffolding/__main__.py +0 -14
  324. empathy_os/scaffolding/cli.py +0 -240
  325. empathy_os/socratic/__init__.py +0 -256
  326. empathy_os/socratic/ab_testing.py +0 -958
  327. empathy_os/socratic/blueprint.py +0 -533
  328. empathy_os/socratic/cli.py +0 -703
  329. empathy_os/socratic/collaboration.py +0 -1114
  330. empathy_os/socratic/domain_templates.py +0 -924
  331. empathy_os/socratic/embeddings.py +0 -738
  332. empathy_os/socratic/engine.py +0 -794
  333. empathy_os/socratic/explainer.py +0 -682
  334. empathy_os/socratic/feedback.py +0 -772
  335. empathy_os/socratic/forms.py +0 -629
  336. empathy_os/socratic/generator.py +0 -732
  337. empathy_os/socratic/llm_analyzer.py +0 -637
  338. empathy_os/socratic/mcp_server.py +0 -702
  339. empathy_os/socratic/session.py +0 -312
  340. empathy_os/socratic/storage.py +0 -667
  341. empathy_os/socratic/success.py +0 -730
  342. empathy_os/socratic/visual_editor.py +0 -860
  343. empathy_os/socratic/web_ui.py +0 -958
  344. empathy_os/telemetry/__init__.py +0 -39
  345. empathy_os/telemetry/agent_coordination 2.py +0 -478
  346. empathy_os/telemetry/agent_coordination.py +0 -476
  347. empathy_os/telemetry/agent_tracking 2.py +0 -350
  348. empathy_os/telemetry/agent_tracking.py +0 -348
  349. empathy_os/telemetry/approval_gates 2.py +0 -563
  350. empathy_os/telemetry/approval_gates.py +0 -551
  351. empathy_os/telemetry/cli.py +0 -1231
  352. empathy_os/telemetry/commands/__init__.py +0 -14
  353. empathy_os/telemetry/commands/dashboard_commands.py +0 -696
  354. empathy_os/telemetry/event_streaming 2.py +0 -405
  355. empathy_os/telemetry/event_streaming.py +0 -405
  356. empathy_os/telemetry/feedback_loop 2.py +0 -557
  357. empathy_os/telemetry/feedback_loop.py +0 -554
  358. empathy_os/telemetry/usage_tracker.py +0 -591
  359. empathy_os/templates.py +0 -754
  360. empathy_os/test_generator/__init__.py +0 -38
  361. empathy_os/test_generator/__main__.py +0 -14
  362. empathy_os/test_generator/cli.py +0 -234
  363. empathy_os/test_generator/generator.py +0 -355
  364. empathy_os/test_generator/risk_analyzer.py +0 -216
  365. empathy_os/tier_recommender.py +0 -384
  366. empathy_os/tools.py +0 -183
  367. empathy_os/trust/__init__.py +0 -28
  368. empathy_os/trust/circuit_breaker.py +0 -579
  369. empathy_os/trust_building.py +0 -527
  370. empathy_os/validation/__init__.py +0 -19
  371. empathy_os/validation/xml_validator.py +0 -281
  372. empathy_os/vscode_bridge 2.py +0 -173
  373. empathy_os/vscode_bridge.py +0 -173
  374. empathy_os/workflow_commands.py +0 -780
  375. empathy_os/workflow_patterns/__init__.py +0 -33
  376. empathy_os/workflow_patterns/behavior.py +0 -249
  377. empathy_os/workflow_patterns/core.py +0 -76
  378. empathy_os/workflow_patterns/output.py +0 -99
  379. empathy_os/workflow_patterns/registry.py +0 -255
  380. empathy_os/workflow_patterns/structural.py +0 -288
  381. empathy_os/workflows/__init__.py +0 -539
  382. empathy_os/workflows/autonomous_test_gen.py +0 -1268
  383. empathy_os/workflows/base.py +0 -2667
  384. empathy_os/workflows/batch_processing.py +0 -342
  385. empathy_os/workflows/bug_predict.py +0 -1084
  386. empathy_os/workflows/builder.py +0 -273
  387. empathy_os/workflows/caching.py +0 -253
  388. empathy_os/workflows/code_review.py +0 -1048
  389. empathy_os/workflows/code_review_adapters.py +0 -312
  390. empathy_os/workflows/code_review_pipeline.py +0 -722
  391. empathy_os/workflows/config.py +0 -645
  392. empathy_os/workflows/dependency_check.py +0 -644
  393. empathy_os/workflows/document_gen/__init__.py +0 -25
  394. empathy_os/workflows/document_gen/config.py +0 -30
  395. empathy_os/workflows/document_gen/report_formatter.py +0 -162
  396. empathy_os/workflows/document_gen/workflow.py +0 -1426
  397. empathy_os/workflows/document_gen.py +0 -29
  398. empathy_os/workflows/document_manager.py +0 -216
  399. empathy_os/workflows/document_manager_README.md +0 -134
  400. empathy_os/workflows/documentation_orchestrator.py +0 -1205
  401. empathy_os/workflows/history.py +0 -510
  402. empathy_os/workflows/keyboard_shortcuts/__init__.py +0 -39
  403. empathy_os/workflows/keyboard_shortcuts/generators.py +0 -391
  404. empathy_os/workflows/keyboard_shortcuts/parsers.py +0 -416
  405. empathy_os/workflows/keyboard_shortcuts/prompts.py +0 -295
  406. empathy_os/workflows/keyboard_shortcuts/schema.py +0 -193
  407. empathy_os/workflows/keyboard_shortcuts/workflow.py +0 -509
  408. empathy_os/workflows/llm_base.py +0 -363
  409. empathy_os/workflows/manage_docs.py +0 -87
  410. empathy_os/workflows/manage_docs_README.md +0 -134
  411. empathy_os/workflows/manage_documentation.py +0 -821
  412. empathy_os/workflows/new_sample_workflow1.py +0 -149
  413. empathy_os/workflows/new_sample_workflow1_README.md +0 -150
  414. empathy_os/workflows/orchestrated_health_check.py +0 -849
  415. empathy_os/workflows/orchestrated_release_prep.py +0 -600
  416. empathy_os/workflows/output.py +0 -410
  417. empathy_os/workflows/perf_audit.py +0 -863
  418. empathy_os/workflows/pr_review.py +0 -762
  419. empathy_os/workflows/progress.py +0 -779
  420. empathy_os/workflows/progress_server.py +0 -322
  421. empathy_os/workflows/progressive/README 2.md +0 -454
  422. empathy_os/workflows/progressive/README.md +0 -454
  423. empathy_os/workflows/progressive/__init__ 2.py +0 -92
  424. empathy_os/workflows/progressive/__init__.py +0 -82
  425. empathy_os/workflows/progressive/cli 2.py +0 -242
  426. empathy_os/workflows/progressive/cli.py +0 -219
  427. empathy_os/workflows/progressive/core 2.py +0 -488
  428. empathy_os/workflows/progressive/core.py +0 -488
  429. empathy_os/workflows/progressive/orchestrator 2.py +0 -701
  430. empathy_os/workflows/progressive/orchestrator.py +0 -723
  431. empathy_os/workflows/progressive/reports 2.py +0 -528
  432. empathy_os/workflows/progressive/reports.py +0 -520
  433. empathy_os/workflows/progressive/telemetry 2.py +0 -280
  434. empathy_os/workflows/progressive/telemetry.py +0 -274
  435. empathy_os/workflows/progressive/test_gen 2.py +0 -514
  436. empathy_os/workflows/progressive/test_gen.py +0 -495
  437. empathy_os/workflows/progressive/workflow 2.py +0 -628
  438. empathy_os/workflows/progressive/workflow.py +0 -589
  439. empathy_os/workflows/refactor_plan.py +0 -694
  440. empathy_os/workflows/release_prep.py +0 -895
  441. empathy_os/workflows/release_prep_crew.py +0 -969
  442. empathy_os/workflows/research_synthesis.py +0 -404
  443. empathy_os/workflows/routing.py +0 -168
  444. empathy_os/workflows/secure_release.py +0 -593
  445. empathy_os/workflows/security_adapters.py +0 -297
  446. empathy_os/workflows/security_audit.py +0 -1329
  447. empathy_os/workflows/security_audit_phase3.py +0 -355
  448. empathy_os/workflows/seo_optimization.py +0 -633
  449. empathy_os/workflows/step_config.py +0 -234
  450. empathy_os/workflows/telemetry_mixin.py +0 -269
  451. empathy_os/workflows/test5.py +0 -125
  452. empathy_os/workflows/test5_README.md +0 -158
  453. empathy_os/workflows/test_coverage_boost_crew.py +0 -849
  454. empathy_os/workflows/test_gen/__init__.py +0 -52
  455. empathy_os/workflows/test_gen/ast_analyzer.py +0 -249
  456. empathy_os/workflows/test_gen/config.py +0 -88
  457. empathy_os/workflows/test_gen/data_models.py +0 -38
  458. empathy_os/workflows/test_gen/report_formatter.py +0 -289
  459. empathy_os/workflows/test_gen/test_templates.py +0 -381
  460. empathy_os/workflows/test_gen/workflow.py +0 -655
  461. empathy_os/workflows/test_gen.py +0 -54
  462. empathy_os/workflows/test_gen_behavioral.py +0 -477
  463. empathy_os/workflows/test_gen_parallel.py +0 -341
  464. empathy_os/workflows/test_lifecycle.py +0 -526
  465. empathy_os/workflows/test_maintenance.py +0 -627
  466. empathy_os/workflows/test_maintenance_cli.py +0 -590
  467. empathy_os/workflows/test_maintenance_crew.py +0 -840
  468. empathy_os/workflows/test_runner.py +0 -622
  469. empathy_os/workflows/tier_tracking.py +0 -531
  470. empathy_os/workflows/xml_enhanced_crew.py +0 -285
  471. empathy_software_plugin/SOFTWARE_PLUGIN_README.md +0 -57
  472. empathy_software_plugin/cli/__init__.py +0 -120
  473. empathy_software_plugin/cli/inspect.py +0 -362
  474. empathy_software_plugin/cli.py +0 -574
  475. empathy_software_plugin/plugin.py +0 -188
  476. workflow_scaffolding/__init__.py +0 -11
  477. workflow_scaffolding/__main__.py +0 -12
  478. workflow_scaffolding/cli.py +0 -206
  479. workflow_scaffolding/generator.py +0 -265
  480. {empathy_framework-5.2.1.dist-info → empathy_framework-5.4.0.dist-info}/WHEEL +0 -0
empathy_os/memory/short_term.py
@@ -1,2150 +0,0 @@
- """Redis Short-Term Memory for Empathy Framework
-
- Per EMPATHY_PHILOSOPHY.md v1.1.0:
- - Implements fast, TTL-based working memory for agent coordination
- - Role-based access tiers for data integrity
- - Pattern staging before validation
- - Principled negotiation support
-
- Enhanced Features (v2.0):
- - Pub/Sub for real-time agent notifications
- - Batch operations for high-throughput workflows
- - SCAN-based pagination for large datasets
- - Redis Streams for audit trails
- - Connection retry with exponential backoff
- - SSL/TLS support for managed Redis services
- - Time-window queries with sorted sets
- - Task queues with Lists
- - Atomic transactions with MULTI/EXEC
- - Comprehensive metrics tracking
-
- Copyright 2025 Smart AI Memory, LLC
- Licensed under Fair Source 0.9
- """
-
- import json
- import threading
- import time
- from collections.abc import Callable
- from datetime import datetime
- from typing import Any
-
- import structlog
-
- from .security.pii_scrubber import PIIScrubber
- from .security.secrets_detector import SecretsDetector
- from .security.secrets_detector import Severity as SecretSeverity
-
- # Import types from dedicated module
- from .types import (
-     AccessTier,
-     AgentCredentials,
-     ConflictContext,
-     PaginatedResult,
-     RedisConfig,
-     RedisMetrics,
-     SecurityError,
-     StagedPattern,
-     TimeWindowQuery,
-     TTLStrategy,
- )
-
- logger = structlog.get_logger(__name__)
-
- try:
-     import redis
-     from redis.exceptions import ConnectionError as RedisConnectionError
-     from redis.exceptions import TimeoutError as RedisTimeoutError
-
-     REDIS_AVAILABLE = True
- except ImportError:
-     REDIS_AVAILABLE = False
-     RedisConnectionError = Exception  # type: ignore
-     RedisTimeoutError = Exception  # type: ignore
-
-
- class RedisShortTermMemory:
-     """Redis-backed short-term memory for agent coordination
-
-     Features:
-     - Fast read/write with automatic TTL expiration
-     - Role-based access control
-     - Pattern staging workflow
-     - Conflict negotiation context
-     - Agent working memory
-
-     Enhanced Features (v2.0):
-     - Pub/Sub for real-time agent notifications
-     - Batch operations (stash_batch, retrieve_batch)
-     - SCAN-based pagination for large datasets
-     - Redis Streams for audit trails
-     - Time-window queries with sorted sets
-     - Task queues with Lists (LPUSH/RPOP)
-     - Atomic transactions with MULTI/EXEC
-     - Connection retry with exponential backoff
-     - Metrics tracking for observability
-
-     Example:
-         >>> memory = RedisShortTermMemory()
-         >>> creds = AgentCredentials("agent_1", AccessTier.CONTRIBUTOR)
-         >>> memory.stash("analysis_results", {"issues": 3}, creds)
-         >>> data = memory.retrieve("analysis_results", creds)
-
-         # Pub/Sub example
-         >>> memory.subscribe("agent_signals", lambda msg: print(msg))
-         >>> memory.publish("agent_signals", {"event": "task_complete"}, creds)
-
-         # Batch operations
-         >>> items = [("key1", {"data": 1}), ("key2", {"data": 2})]
-         >>> memory.stash_batch(items, creds)
-
-         # Pagination
-         >>> result = memory.list_staged_patterns_paginated(creds, cursor="0", count=10)
-
-     """
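
For orientation while reading the removed class, here is a minimal usage sketch assembled from the docstring examples above. It assumes mock mode (use_mock=True) so no Redis server is needed; the credential and tier names come straight from the imports shown earlier.

    memory = RedisShortTermMemory(use_mock=True)  # in-memory mock backend
    creds = AgentCredentials("agent_1", AccessTier.CONTRIBUTOR)
    memory.stash("analysis_results", {"issues": 3}, creds)  # JSON-serialized, TTL-bound
    data = memory.retrieve("analysis_results", creds)       # -> {"issues": 3}
    memory.stash_batch([("key1", {"data": 1}), ("key2", {"data": 2})], creds)
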
-
-     # Key prefixes for namespacing
-     PREFIX_WORKING = "empathy:working:"
-     PREFIX_STAGED = "empathy:staged:"
-     PREFIX_CONFLICT = "empathy:conflict:"
-     # PREFIX_COORDINATION removed in v5.0 - use empathy_os.telemetry.CoordinationSignals
-     PREFIX_SESSION = "empathy:session:"
-     PREFIX_PUBSUB = "empathy:pubsub:"
-     PREFIX_STREAM = "empathy:stream:"
-     PREFIX_TIMELINE = "empathy:timeline:"
-     PREFIX_QUEUE = "empathy:queue:"
-
-     def __init__(
-         self,
-         host: str = "localhost",
-         port: int = 6379,
-         db: int = 0,
-         password: str | None = None,
-         use_mock: bool = False,
-         config: RedisConfig | None = None,
-     ):
-         """Initialize Redis connection
-
-         Args:
-             host: Redis host
-             port: Redis port
-             db: Redis database number
-             password: Redis password (optional)
-             use_mock: Use in-memory mock for testing
-             config: Full RedisConfig for advanced settings (overrides other args)
-
-         """
-         # Use config if provided, otherwise build from individual args
-         if config is not None:
-             self._config = config
-         else:
-             self._config = RedisConfig(
-                 host=host,
-                 port=port,
-                 db=db,
-                 password=password,
-                 use_mock=use_mock,
-             )
-
-         self.use_mock = self._config.use_mock or not REDIS_AVAILABLE
-
-         # Initialize metrics
-         self._metrics = RedisMetrics()
-
-         # Pub/Sub state
-         self._pubsub: Any | None = None
-         self._pubsub_thread: threading.Thread | None = None
-         self._subscriptions: dict[str, list[Callable[[dict], None]]] = {}
-         self._pubsub_running = False
-
-         # Mock storage for testing
-         self._mock_storage: dict[str, tuple[Any, float | None]] = {}
-         self._mock_lists: dict[str, list[str]] = {}
-         self._mock_sorted_sets: dict[str, list[tuple[float, str]]] = {}
-         self._mock_streams: dict[str, list[tuple[str, dict]]] = {}
-         self._mock_pubsub_handlers: dict[str, list[Callable[[dict], None]]] = {}
-
-         # Local LRU cache for two-tier caching (memory + Redis)
-         # Reduces network I/O from 37ms to <0.001ms for frequently accessed keys
-         self._local_cache_enabled = self._config.local_cache_enabled
-         self._local_cache_max_size = self._config.local_cache_size
-         self._local_cache: dict[str, tuple[str, float, float]] = {}  # key -> (value, timestamp, last_access)
-         self._local_cache_hits = 0
-         self._local_cache_misses = 0
-
-         # Security: Initialize PII scrubber and secrets detector
-         self._pii_scrubber: PIIScrubber | None = None
-         self._secrets_detector: SecretsDetector | None = None
-
-         if self._config.pii_scrub_enabled:
-             self._pii_scrubber = PIIScrubber(enable_name_detection=False)
-             logger.debug(
-                 "pii_scrubber_enabled", message="PII scrubbing active for short-term memory"
-             )
-
-         if self._config.secrets_detection_enabled:
-             self._secrets_detector = SecretsDetector()
-             logger.debug(
-                 "secrets_detector_enabled", message="Secrets detection active for short-term memory"
-             )
-
-         if self.use_mock:
-             self._client = None
-         else:
-             self._client = self._create_client_with_retry()
-
-     def _create_client_with_retry(self) -> Any:
-         """Create Redis client with retry logic."""
-         max_attempts = self._config.retry_max_attempts
-         base_delay = self._config.retry_base_delay
-         max_delay = self._config.retry_max_delay
-
-         last_error: Exception | None = None
-
-         for attempt in range(max_attempts):
-             try:
-                 client = redis.Redis(**self._config.to_redis_kwargs())
-                 # Test connection
-                 client.ping()
-                 logger.info(
-                     "redis_connected",
-                     host=self._config.host,
-                     port=self._config.port,
-                     attempt=attempt + 1,
-                 )
-                 return client
-             except (RedisConnectionError, RedisTimeoutError) as e:
-                 last_error = e
-                 self._metrics.retries_total += 1
-
-                 if attempt < max_attempts - 1:
-                     delay = min(base_delay * (2**attempt), max_delay)
-                     logger.warning(
-                         "redis_connection_retry",
-                         attempt=attempt + 1,
-                         max_attempts=max_attempts,
-                         delay=delay,
-                         error=str(e),
-                     )
-                     time.sleep(delay)
-
-         # All retries failed
-         logger.error(
-             "redis_connection_failed",
-             max_attempts=max_attempts,
-             error=str(last_error),
-         )
-         raise last_error if last_error else ConnectionError("Failed to connect to Redis")
-
-     def _execute_with_retry(self, operation: Callable[[], Any], op_name: str = "operation") -> Any:
-         """Execute a Redis operation with retry logic."""
-         start_time = time.perf_counter()
-         max_attempts = self._config.retry_max_attempts
-         base_delay = self._config.retry_base_delay
-         max_delay = self._config.retry_max_delay
-
-         last_error: Exception | None = None
-
-         for attempt in range(max_attempts):
-             try:
-                 result = operation()
-                 latency_ms = (time.perf_counter() - start_time) * 1000
-                 self._metrics.record_operation(op_name, latency_ms, success=True)
-                 return result
-             except (RedisConnectionError, RedisTimeoutError) as e:
-                 last_error = e
-                 self._metrics.retries_total += 1
-
-                 if attempt < max_attempts - 1:
-                     delay = min(base_delay * (2**attempt), max_delay)
-                     logger.warning(
-                         "redis_operation_retry",
-                         operation=op_name,
-                         attempt=attempt + 1,
-                         delay=delay,
-                     )
-                     time.sleep(delay)
-
-         latency_ms = (time.perf_counter() - start_time) * 1000
-         self._metrics.record_operation(op_name, latency_ms, success=False)
-         raise last_error if last_error else ConnectionError("Redis operation failed")
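
Both retry paths above use the same truncated exponential backoff, delay = min(base_delay * 2**attempt, max_delay), with no sleep after the final attempt. A worked example of the schedule, assuming hypothetical config values retry_max_attempts=5, retry_base_delay=0.5, and retry_max_delay=4.0:

    for attempt in range(5 - 1):  # sleeps happen between attempts only
        print(min(0.5 * (2 ** attempt), 4.0))  # 0.5, 1.0, 2.0, 4.0 seconds
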
-
-     def _get(self, key: str) -> str | None:
-         """Get value from Redis or mock with two-tier caching (local + Redis)"""
-         # Check local cache first (0.001ms vs 37ms for Redis/mock)
-         # This works for BOTH mock and real Redis modes
-         if self._local_cache_enabled and key in self._local_cache:
-             value, timestamp, last_access = self._local_cache[key]
-             now = time.time()
-
-             # Update last access time for LRU
-             self._local_cache[key] = (value, timestamp, now)
-             self._local_cache_hits += 1
-
-             return value
-
-         # Cache miss - fetch from storage (mock or Redis)
-         self._local_cache_misses += 1
-
-         # Mock mode path
-         if self.use_mock:
-             if key in self._mock_storage:
-                 value, expires = self._mock_storage[key]
-                 if expires is None or datetime.now().timestamp() < expires:
-                     result = str(value) if value is not None else None
-                     # Add to local cache for next access
-                     if result and self._local_cache_enabled:
-                         self._add_to_local_cache(key, result)
-                     return result
-                 del self._mock_storage[key]
-             return None
-
-         # Real Redis path
-         if self._client is None:
-             return None
-
-         result = self._client.get(key)
-
-         # Add to local cache if successful
-         if result and self._local_cache_enabled:
-             self._add_to_local_cache(key, str(result))
-
-         return str(result) if result else None
-
-     def _set(self, key: str, value: str, ttl: int | None = None) -> bool:
-         """Set value in Redis or mock with two-tier caching"""
-         # Mock mode path
-         if self.use_mock:
-             expires = datetime.now().timestamp() + ttl if ttl else None
-             self._mock_storage[key] = (value, expires)
-
-             # Update local cache in mock mode too
-             if self._local_cache_enabled:
-                 self._add_to_local_cache(key, value)
-
-             return True
-
-         # Real Redis path
-         if self._client is None:
-             return False
-
-         # Set in Redis
-         if ttl:
-             self._client.setex(key, ttl, value)
-         else:
-             result = self._client.set(key, value)
-             if not result:
-                 return False
-
-         # Update local cache if enabled
-         if self._local_cache_enabled:
-             self._add_to_local_cache(key, value)
-
-         return True
-
-     def _delete(self, key: str) -> bool:
-         """Delete key from Redis or mock and local cache"""
-         # Mock mode path
-         if self.use_mock:
-             deleted = False
-             if key in self._mock_storage:
-                 del self._mock_storage[key]
-                 deleted = True
-
-             # Remove from local cache if present
-             if self._local_cache_enabled and key in self._local_cache:
-                 del self._local_cache[key]
-
-             return deleted
-
-         # Real Redis path
-         if self._client is None:
-             return False
-
-         # Delete from Redis
-         result = bool(self._client.delete(key) > 0)
-
-         # Also remove from local cache if present
-         if self._local_cache_enabled and key in self._local_cache:
-             del self._local_cache[key]
-
-         return result
-
-     def _keys(self, pattern: str) -> list[str]:
-         """Get keys matching pattern"""
-         if self.use_mock:
-             import fnmatch
-
-             # Use list comp for small result sets (typical <1000 keys)
-             return [k for k in self._mock_storage.keys() if fnmatch.fnmatch(k, pattern)]
-         if self._client is None:
-             return []
-         keys = self._client.keys(pattern)
-         # Convert bytes to strings - needed for API return type
-         return [k.decode() if isinstance(k, bytes) else str(k) for k in keys]
-
-     # === Local LRU Cache Methods ===
-
-     def _add_to_local_cache(self, key: str, value: str) -> None:
-         """Add entry to local cache with LRU eviction.
-
-         Args:
-             key: Cache key
-             value: Value to cache
-         """
-         now = time.time()
-
-         # Evict oldest entry if cache is full
-         if len(self._local_cache) >= self._local_cache_max_size:
-             # Find key with oldest last_access time
-             oldest_key = min(self._local_cache, key=lambda k: self._local_cache[k][2])
-             del self._local_cache[oldest_key]
-
-         # Add new entry: (value, timestamp, last_access)
-         self._local_cache[key] = (value, now, now)
-
-     def clear_local_cache(self) -> int:
-         """Clear all entries from local cache.
-
-         Returns:
-             Number of entries cleared
-         """
-         count = len(self._local_cache)
-         self._local_cache.clear()
-         self._local_cache_hits = 0
-         self._local_cache_misses = 0
-         logger.info("local_cache_cleared", entries_cleared=count)
-         return count
-
-     def get_local_cache_stats(self) -> dict:
-         """Get local cache performance statistics.
-
-         Returns:
-             Dict with cache stats (hits, misses, hit_rate, size)
-         """
-         total = self._local_cache_hits + self._local_cache_misses
-         hit_rate = (self._local_cache_hits / total * 100) if total > 0 else 0.0
-
-         return {
-             "enabled": self._local_cache_enabled,
-             "size": len(self._local_cache),
-             "max_size": self._local_cache_max_size,
-             "hits": self._local_cache_hits,
-             "misses": self._local_cache_misses,
-             "hit_rate": hit_rate,
-             "total_requests": total,
-         }
-
-     # === Security Methods ===
-
-     def _sanitize_data(self, data: Any) -> tuple[Any, int]:
-         """Sanitize data by scrubbing PII and checking for secrets.
-
-         Args:
-             data: Data to sanitize (dict, list, or str)
-
-         Returns:
-             Tuple of (sanitized_data, pii_count)
-
-         Raises:
-             SecurityError: If secrets are detected and blocking is enabled
-
-         """
-         pii_count = 0
-
-         if data is None:
-             return data, 0
-
-         # Convert data to string for scanning
-         if isinstance(data, dict):
-             data_str = json.dumps(data)
-         elif isinstance(data, list):
-             data_str = json.dumps(data)
-         elif isinstance(data, str):
-             data_str = data
-         else:
-             # For other types, convert to string
-             data_str = str(data)
-
-         # Check for secrets first (before modifying data)
-         if self._secrets_detector is not None:
-             detections = self._secrets_detector.detect(data_str)
-             # Block critical and high severity secrets
-             critical_secrets = [
-                 d
-                 for d in detections
-                 if d.severity in (SecretSeverity.CRITICAL, SecretSeverity.HIGH)
-             ]
-             if critical_secrets:
-                 self._metrics.secrets_blocked_total += len(critical_secrets)
-                 secret_types = [d.secret_type.value for d in critical_secrets]
-                 logger.warning(
-                     "secrets_detected_blocked",
-                     secret_types=secret_types,
-                     count=len(critical_secrets),
-                 )
-                 raise SecurityError(
-                     f"Cannot store data containing secrets: {secret_types}. "
-                     "Remove sensitive credentials before storing."
-                 )
-
-         # Scrub PII
-         if self._pii_scrubber is not None:
-             sanitized_str, pii_detections = self._pii_scrubber.scrub(data_str)
-             pii_count = len(pii_detections)
-
-             if pii_count > 0:
-                 self._metrics.pii_scrubbed_total += pii_count
-                 self._metrics.pii_scrub_operations += 1
-                 logger.debug(
-                     "pii_scrubbed",
-                     pii_count=pii_count,
-                     pii_types=[d.pii_type for d in pii_detections],
-                 )
-
-             # Convert back to original type
-             if isinstance(data, dict):
-                 try:
-                     return json.loads(sanitized_str), pii_count
-                 except json.JSONDecodeError:
-                     # If PII scrubbing broke JSON structure, return original
-                     # This can happen if regex matches part of JSON syntax
-                     logger.warning("pii_scrubbing_broke_json_returning_original")
-                     return data, 0
-             elif isinstance(data, list):
-                 try:
-                     return json.loads(sanitized_str), pii_count
-                 except json.JSONDecodeError:
-                     logger.warning("pii_scrubbing_broke_json_returning_original")
-                     return data, 0
-             else:
-                 return sanitized_str, pii_count
-
-         return data, pii_count
-
-     # === Working Memory (Stash/Retrieve) ===
-
-     def stash(
-         self,
-         key: str,
-         data: Any,
-         credentials: AgentCredentials,
-         ttl: TTLStrategy = TTLStrategy.WORKING_RESULTS,
-         skip_sanitization: bool = False,
-     ) -> bool:
-         """Stash data in short-term memory
-
-         Args:
-             key: Unique key for the data
-             data: Data to store (will be JSON serialized)
-             credentials: Agent credentials
-             ttl: Time-to-live strategy
-             skip_sanitization: Skip PII scrubbing and secrets detection (use with caution)
-
-         Returns:
-             True if successful
-
-         Raises:
-             ValueError: If key is empty or invalid
-             PermissionError: If credentials lack write access
-             SecurityError: If secrets are detected in data (when secrets_detection_enabled)
-
-         Note:
-             PII (emails, SSNs, phone numbers, etc.) is automatically scrubbed
-             before storage unless skip_sanitization=True or pii_scrub_enabled=False.
-             Secrets (API keys, passwords, etc.) will block storage by default.
-
-         Example:
-             >>> memory.stash("analysis_v1", {"findings": [...]}, creds)
-
-         """
-         # Pattern 1: String ID validation
-         if not key or not key.strip():
-             raise ValueError("key cannot be empty")
-
-         if not credentials.can_stage():
-             raise PermissionError(
-                 f"Agent {credentials.agent_id} (Tier {credentials.tier.name}) "
-                 "cannot write to memory. Requires CONTRIBUTOR or higher.",
-             )
-
-         # Sanitize data (PII scrubbing + secrets detection)
-         if not skip_sanitization:
-             data, pii_count = self._sanitize_data(data)
-             if pii_count > 0:
-                 logger.info(
-                     "stash_pii_scrubbed",
-                     key=key,
-                     agent_id=credentials.agent_id,
-                     pii_count=pii_count,
-                 )
-
-         full_key = f"{self.PREFIX_WORKING}{credentials.agent_id}:{key}"
-         payload = {
-             "data": data,
-             "agent_id": credentials.agent_id,
-             "stashed_at": datetime.now().isoformat(),
-         }
-         return self._set(full_key, json.dumps(payload), ttl.value)
-
590
- def retrieve(
591
- self,
592
- key: str,
593
- credentials: AgentCredentials,
594
- agent_id: str | None = None,
595
- ) -> Any | None:
596
- """Retrieve data from short-term memory
597
-
598
- Args:
599
- key: Key to retrieve
600
- credentials: Agent credentials
601
- agent_id: Owner agent ID (defaults to credentials agent)
602
-
603
- Returns:
604
- Retrieved data or None if not found
605
-
606
- Raises:
607
- ValueError: If key is empty or invalid
608
-
609
- Example:
610
- >>> data = memory.retrieve("analysis_v1", creds)
611
-
612
- """
613
- # Pattern 1: String ID validation
614
- if not key or not key.strip():
615
- raise ValueError("key cannot be empty")
616
-
617
- owner = agent_id or credentials.agent_id
618
- full_key = f"{self.PREFIX_WORKING}{owner}:{key}"
619
- raw = self._get(full_key)
620
-
621
- if raw is None:
622
- return None
623
-
624
- payload = json.loads(raw)
625
- return payload.get("data")
626
-
627
- def clear_working_memory(self, credentials: AgentCredentials) -> int:
628
- """Clear all working memory for an agent
629
-
630
- Args:
631
- credentials: Agent credentials (must own the memory or be Steward)
632
-
633
- Returns:
634
- Number of keys deleted
635
-
636
- """
637
- pattern = f"{self.PREFIX_WORKING}{credentials.agent_id}:*"
638
- keys = self._keys(pattern)
639
- count = 0
640
- for key in keys:
641
- if self._delete(key):
642
- count += 1
643
- return count
644
-
645
- # === Pattern Staging ===
646
-
647
- def stage_pattern(
648
- self,
649
- pattern: StagedPattern,
650
- credentials: AgentCredentials,
651
- ) -> bool:
652
- """Stage a pattern for validation
653
-
654
- Per EMPATHY_PHILOSOPHY.md: Patterns must be staged before
655
- being promoted to the active library.
656
-
657
- Args:
658
- pattern: Pattern to stage
659
- credentials: Must be CONTRIBUTOR or higher
660
-
661
- Returns:
662
- True if staged successfully
663
-
664
- Raises:
665
- TypeError: If pattern is not StagedPattern
666
- PermissionError: If credentials lack staging access
667
-
668
- """
669
- # Pattern 5: Type validation
670
- if not isinstance(pattern, StagedPattern):
671
- raise TypeError(f"pattern must be StagedPattern, got {type(pattern).__name__}")
672
-
673
- if not credentials.can_stage():
674
- raise PermissionError(
675
- f"Agent {credentials.agent_id} cannot stage patterns. "
676
- "Requires CONTRIBUTOR tier or higher.",
677
- )
678
-
679
- key = f"{self.PREFIX_STAGED}{pattern.pattern_id}"
680
- return self._set(
681
- key,
682
- json.dumps(pattern.to_dict()),
683
- TTLStrategy.STAGED_PATTERNS.value,
684
- )
685
-
686
- def get_staged_pattern(
687
- self,
688
- pattern_id: str,
689
- credentials: AgentCredentials,
690
- ) -> StagedPattern | None:
691
- """Retrieve a staged pattern
692
-
693
- Args:
694
- pattern_id: Pattern ID
695
- credentials: Any tier can read
696
-
697
- Returns:
698
- StagedPattern or None
699
-
700
- Raises:
701
- ValueError: If pattern_id is empty
702
-
703
- """
704
- # Pattern 1: String ID validation
705
- if not pattern_id or not pattern_id.strip():
706
- raise ValueError("pattern_id cannot be empty")
707
-
708
- key = f"{self.PREFIX_STAGED}{pattern_id}"
709
- raw = self._get(key)
710
-
711
- if raw is None:
712
- return None
713
-
714
- return StagedPattern.from_dict(json.loads(raw))
715
-
716
- def list_staged_patterns(
717
- self,
718
- credentials: AgentCredentials,
719
- ) -> list[StagedPattern]:
720
- """List all staged patterns awaiting validation
721
-
722
- Args:
723
- credentials: Any tier can read
724
-
725
- Returns:
726
- List of staged patterns
727
-
728
- """
729
- pattern = f"{self.PREFIX_STAGED}*"
730
- keys = self._keys(pattern)
731
- patterns = []
732
-
733
- for key in keys:
734
- raw = self._get(key)
735
- if raw:
736
- patterns.append(StagedPattern.from_dict(json.loads(raw)))
737
-
738
- return patterns
739
-
740
- def promote_pattern(
741
- self,
742
- pattern_id: str,
743
- credentials: AgentCredentials,
744
- ) -> StagedPattern | None:
745
- """Promote staged pattern (remove from staging for library add)
746
-
747
- Args:
748
- pattern_id: Pattern to promote
749
- credentials: Must be VALIDATOR or higher
750
-
751
- Returns:
752
- The promoted pattern (for adding to PatternLibrary)
753
-
754
- """
755
- if not credentials.can_validate():
756
- raise PermissionError(
757
- f"Agent {credentials.agent_id} cannot promote patterns. "
758
- "Requires VALIDATOR tier or higher.",
759
- )
760
-
761
- pattern = self.get_staged_pattern(pattern_id, credentials)
762
- if pattern:
763
- key = f"{self.PREFIX_STAGED}{pattern_id}"
764
- self._delete(key)
765
- return pattern
766
-
767
- def reject_pattern(
768
- self,
769
- pattern_id: str,
770
- credentials: AgentCredentials,
771
- reason: str = "",
772
- ) -> bool:
773
- """Reject a staged pattern
774
-
775
- Args:
776
- pattern_id: Pattern to reject
777
- credentials: Must be VALIDATOR or higher
778
- reason: Rejection reason (for audit)
779
-
780
- Returns:
781
- True if rejected
782
-
783
- """
784
- if not credentials.can_validate():
785
- raise PermissionError(
786
- f"Agent {credentials.agent_id} cannot reject patterns. "
787
- "Requires VALIDATOR tier or higher.",
788
- )
789
-
790
- key = f"{self.PREFIX_STAGED}{pattern_id}"
791
- return self._delete(key)
792
-
793
- # === Conflict Negotiation ===
794
-
795
- def create_conflict_context(
796
- self,
797
- conflict_id: str,
798
- positions: dict[str, Any],
799
- interests: dict[str, list[str]],
800
- credentials: AgentCredentials,
801
- batna: str | None = None,
802
- ) -> ConflictContext:
803
- """Create context for principled negotiation
804
-
805
- Per Getting to Yes framework:
806
- - Separate positions from interests
807
- - Define BATNA before negotiating
808
-
809
- Args:
810
- conflict_id: Unique conflict identifier
811
- positions: agent_id -> their stated position
812
- interests: agent_id -> underlying interests
813
- credentials: Must be CONTRIBUTOR or higher
814
- batna: Best Alternative to Negotiated Agreement
815
-
816
- Returns:
817
- ConflictContext for resolution
818
-
819
- Raises:
820
- ValueError: If conflict_id is empty
821
- TypeError: If positions or interests are not dicts
822
- PermissionError: If credentials lack permission
823
-
824
- """
825
- # Pattern 1: String ID validation
826
- if not conflict_id or not conflict_id.strip():
827
- raise ValueError("conflict_id cannot be empty")
828
-
829
- # Pattern 5: Type validation
830
- if not isinstance(positions, dict):
831
- raise TypeError(f"positions must be dict, got {type(positions).__name__}")
832
- if not isinstance(interests, dict):
833
- raise TypeError(f"interests must be dict, got {type(interests).__name__}")
834
-
835
- if not credentials.can_stage():
836
- raise PermissionError(
837
- f"Agent {credentials.agent_id} cannot create conflict context. "
838
- "Requires CONTRIBUTOR tier or higher.",
839
- )
840
-
841
- context = ConflictContext(
842
- conflict_id=conflict_id,
843
- positions=positions,
844
- interests=interests,
845
- batna=batna,
846
- )
847
-
848
- key = f"{self.PREFIX_CONFLICT}{conflict_id}"
849
- self._set(
850
- key,
851
- json.dumps(context.to_dict()),
852
- TTLStrategy.CONFLICT_CONTEXT.value,
853
- )
854
-
855
- return context
856
-
857
- def get_conflict_context(
858
- self,
859
- conflict_id: str,
860
- credentials: AgentCredentials,
861
- ) -> ConflictContext | None:
862
- """Retrieve conflict context
863
-
864
- Args:
865
- conflict_id: Conflict identifier
866
- credentials: Any tier can read
867
-
868
- Returns:
869
- ConflictContext or None
870
-
871
- Raises:
872
- ValueError: If conflict_id is empty
873
-
874
- """
875
- # Pattern 1: String ID validation
876
- if not conflict_id or not conflict_id.strip():
877
- raise ValueError("conflict_id cannot be empty")
878
-
879
- key = f"{self.PREFIX_CONFLICT}{conflict_id}"
880
- raw = self._get(key)
881
-
882
- if raw is None:
883
- return None
884
-
885
- return ConflictContext.from_dict(json.loads(raw))
886
-
887
- def resolve_conflict(
888
- self,
889
- conflict_id: str,
890
- resolution: str,
891
- credentials: AgentCredentials,
892
- ) -> bool:
893
- """Mark conflict as resolved
894
-
895
- Args:
896
- conflict_id: Conflict to resolve
897
- resolution: How it was resolved
898
- credentials: Must be VALIDATOR or higher
899
-
900
- Returns:
901
- True if resolved
902
-
903
- """
904
- if not credentials.can_validate():
905
- raise PermissionError(
906
- f"Agent {credentials.agent_id} cannot resolve conflicts. "
907
- "Requires VALIDATOR tier or higher.",
908
- )
909
-
910
- context = self.get_conflict_context(conflict_id, credentials)
911
- if context is None:
912
- return False
913
-
914
- context.resolved = True
915
- context.resolution = resolution
916
-
917
- key = f"{self.PREFIX_CONFLICT}{conflict_id}"
918
- # Keep resolved conflicts longer for audit
919
- self._set(key, json.dumps(context.to_dict()), TTLStrategy.CONFLICT_CONTEXT.value)
920
- return True
921
-
922
- # === Coordination Signals ===
923
- # REMOVED in v5.0 - Use empathy_os.telemetry.CoordinationSignals instead
924
- # - send_signal() → CoordinationSignals.signal()
925
- # - receive_signals() → CoordinationSignals.get_pending_signals()
926
-
927
- # === Session Management ===
928
-
929
- def create_session(
930
- self,
931
- session_id: str,
932
- credentials: AgentCredentials,
933
- metadata: dict | None = None,
934
- ) -> bool:
935
- """Create a collaboration session
936
-
937
- Args:
938
- session_id: Unique session identifier
939
- credentials: Session creator
940
- metadata: Optional session metadata
941
-
942
- Returns:
943
- True if created
944
-
945
- Raises:
946
- ValueError: If session_id is empty
947
- TypeError: If metadata is not dict
948
-
949
- """
950
- # Pattern 1: String ID validation
951
- if not session_id or not session_id.strip():
952
- raise ValueError("session_id cannot be empty")
953
-
954
- # Pattern 5: Type validation
955
- if metadata is not None and not isinstance(metadata, dict):
956
- raise TypeError(f"metadata must be dict, got {type(metadata).__name__}")
957
-
958
- key = f"{self.PREFIX_SESSION}{session_id}"
959
- payload = {
960
- "session_id": session_id,
961
- "created_by": credentials.agent_id,
962
- "created_at": datetime.now().isoformat(),
963
- "participants": [credentials.agent_id],
964
- "metadata": metadata or {},
965
- }
966
- return self._set(key, json.dumps(payload), TTLStrategy.SESSION.value)
967
-
968
- def join_session(
969
- self,
970
- session_id: str,
971
- credentials: AgentCredentials,
972
- ) -> bool:
973
- """Join an existing session
974
-
975
- Args:
976
- session_id: Session to join
977
- credentials: Joining agent
978
-
979
- Returns:
980
- True if joined
981
-
982
- Raises:
983
- ValueError: If session_id is empty
984
-
985
- """
986
- # Pattern 1: String ID validation
987
- if not session_id or not session_id.strip():
988
- raise ValueError("session_id cannot be empty")
989
-
990
- key = f"{self.PREFIX_SESSION}{session_id}"
991
- raw = self._get(key)
992
-
993
- if raw is None:
994
- return False
995
-
996
- payload = json.loads(raw)
997
- if credentials.agent_id not in payload["participants"]:
998
- payload["participants"].append(credentials.agent_id)
999
-
1000
- return self._set(key, json.dumps(payload), TTLStrategy.SESSION.value)
1001
-
1002
- def get_session(
1003
- self,
1004
- session_id: str,
1005
- credentials: AgentCredentials,
1006
- ) -> dict | None:
1007
- """Get session information
1008
-
1009
- Args:
1010
- session_id: Session identifier
1011
- credentials: Agent credentials (not currently enforced; any agent can read)
1012
-
1013
- Returns:
1014
- Session data or None
1015
-
1016
- """
1017
- key = f"{self.PREFIX_SESSION}{session_id}"
1018
- raw = self._get(key)
1019
-
1020
- if raw is None:
1021
- return None
1022
-
1023
- result: dict = json.loads(raw)
1024
- return result
1025
-
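The three session methods compose into a create, join, inspect flow. A sketch under the same assumptions as above, with `creator` and `reviewer` as pre-built `AgentCredentials`:

```python
# creator and reviewer are AgentCredentials built as in the earlier sketch.
memory.create_session("review-2024-06", creator, metadata={"topic": "refactor"})
memory.join_session("review-2024-06", reviewer)

session = memory.get_session("review-2024-06", reviewer)
if session is not None:
    print(session["participants"])  # [creator's agent_id, reviewer's agent_id]
```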
1026
- # === Health Check ===
1027
-
1028
- def ping(self) -> bool:
1029
- """Check Redis connection health
1030
-
1031
- Returns:
1032
- True if connected and responsive
1033
-
1034
- """
1035
- if self.use_mock:
1036
- return True
1037
- if self._client is None:
1038
- return False
1039
- try:
1040
- return bool(self._client.ping())
1041
- except Exception:
1042
- return False
1043
-
1044
- def get_stats(self) -> dict:
1045
- """Get memory statistics
1046
-
1047
- Returns:
1048
- Dict with memory stats
1049
-
1050
- """
1051
- if self.use_mock:
1052
- # Use generator expressions for memory-efficient counting
1053
- return {
1054
- "mode": "mock",
1055
- "total_keys": len(self._mock_storage),
1056
- "working_keys": sum(
1057
- 1 for k in self._mock_storage if k.startswith(self.PREFIX_WORKING)
1058
- ),
1059
- "staged_keys": sum(
1060
- 1 for k in self._mock_storage if k.startswith(self.PREFIX_STAGED)
1061
- ),
1062
- "conflict_keys": sum(
1063
- 1 for k in self._mock_storage if k.startswith(self.PREFIX_CONFLICT)
1064
- ),
1065
- }
1066
-
1067
- if self._client is None:
1068
- return {"mode": "disconnected", "error": "No Redis client"}
1069
- info = self._client.info("memory")
1070
- return {
1071
- "mode": "redis",
1072
- "used_memory": info.get("used_memory_human"),
1073
- "peak_memory": info.get("used_memory_peak_human"),
1074
- "total_keys": self._client.dbsize(),
1075
- "working_keys": len(self._keys(f"{self.PREFIX_WORKING}*")),
1076
- "staged_keys": len(self._keys(f"{self.PREFIX_STAGED}*")),
1077
- "conflict_keys": len(self._keys(f"{self.PREFIX_CONFLICT}*")),
1078
- }
1079
-
1080
- def get_metrics(self) -> dict:
1081
- """Get operation metrics for observability.
1082
-
1083
- Returns:
1084
- Dict with operation counts, latencies, and success rates
1085
-
1086
- """
1087
- return self._metrics.to_dict()
1088
-
1089
- def reset_metrics(self) -> None:
1090
- """Reset all metrics to zero."""
1091
- self._metrics = RedisMetrics()
1092
-
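A typical liveness probe combines `ping` with the stats and metrics accessors. A sketch of a periodic health log, using field names from the dicts built above:

```python
if memory.ping():
    stats = memory.get_stats()      # mode plus per-prefix key counts
    metrics = memory.get_metrics()  # operation counts, latencies, success rates
    print(stats["mode"], stats["total_keys"], metrics)
else:
    print("short-term memory backend unreachable")
```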
1093
- # =========================================================================
1094
- # BATCH OPERATIONS
1095
- # =========================================================================
1096
-
1097
- def stash_batch(
1098
- self,
1099
- items: list[tuple[str, Any]],
1100
- credentials: AgentCredentials,
1101
- ttl: TTLStrategy = TTLStrategy.WORKING_RESULTS,
1102
- ) -> int:
1103
- """Stash multiple items in a single operation.
1104
-
1105
- Uses Redis pipeline for efficiency (reduces network round-trips).
1106
-
1107
- Args:
1108
- items: List of (key, data) tuples
1109
- credentials: Agent credentials
1110
- ttl: Time-to-live strategy (applied to all items)
1111
-
1112
- Returns:
1113
- Number of items successfully stashed
1114
-
1115
- Raises:
1116
- TypeError: If items is not a list
1117
- PermissionError: If credentials lack write access
1118
-
1119
- Example:
1120
- >>> items = [("key1", {"a": 1}), ("key2", {"b": 2})]
1121
- >>> count = memory.stash_batch(items, creds)
1122
-
1123
- """
1124
- # Pattern 5: Type validation
1125
- if not isinstance(items, list):
1126
- raise TypeError(f"items must be list, got {type(items).__name__}")
1127
-
1128
- if not credentials.can_stage():
1129
- raise PermissionError(
1130
- f"Agent {credentials.agent_id} cannot write to memory. "
1131
- "Requires CONTRIBUTOR tier or higher.",
1132
- )
1133
-
1134
- if not items:
1135
- return 0
1136
-
1137
- start_time = time.perf_counter()
1138
-
1139
- if self.use_mock:
1140
- count = 0
1141
- for key, data in items:
1142
- full_key = f"{self.PREFIX_WORKING}{credentials.agent_id}:{key}"
1143
- payload = {
1144
- "data": data,
1145
- "agent_id": credentials.agent_id,
1146
- "stashed_at": datetime.now().isoformat(),
1147
- }
1148
- expires = datetime.now().timestamp() + ttl.value
1149
- self._mock_storage[full_key] = (json.dumps(payload), expires)
1150
- count += 1
1151
- latency_ms = (time.perf_counter() - start_time) * 1000
1152
- self._metrics.record_operation("stash_batch", latency_ms)
1153
- return count
1154
-
1155
- if self._client is None:
1156
- return 0
1157
-
1158
- pipe = self._client.pipeline()
1159
- for key, data in items:
1160
- full_key = f"{self.PREFIX_WORKING}{credentials.agent_id}:{key}"
1161
- payload = {
1162
- "data": data,
1163
- "agent_id": credentials.agent_id,
1164
- "stashed_at": datetime.now().isoformat(),
1165
- }
1166
- pipe.setex(full_key, ttl.value, json.dumps(payload))
1167
-
1168
- results = pipe.execute()
1169
- count = sum(1 for r in results if r)
1170
- latency_ms = (time.perf_counter() - start_time) * 1000
1171
- self._metrics.record_operation("stash_batch", latency_ms)
1172
-
1173
- logger.info("batch_stash_complete", count=count, total=len(items))
1174
- return count
1175
-
1176
- def retrieve_batch(
1177
- self,
1178
- keys: list[str],
1179
- credentials: AgentCredentials,
1180
- agent_id: str | None = None,
1181
- ) -> dict[str, Any]:
1182
- """Retrieve multiple items in a single operation.
1183
-
1184
- Args:
1185
- keys: List of keys to retrieve
1186
- credentials: Agent credentials
1187
- agent_id: Owner agent ID (defaults to credentials agent)
1188
-
1189
- Returns:
1190
- Dict mapping key to data (missing keys omitted)
1191
-
1192
- Example:
1193
- >>> data = memory.retrieve_batch(["key1", "key2"], creds)
1194
- >>> print(data["key1"])
1195
-
1196
- """
1197
- if not keys:
1198
- return {}
1199
-
1200
- start_time = time.perf_counter()
1201
- owner = agent_id or credentials.agent_id
1202
- results: dict[str, Any] = {}
1203
-
1204
- if self.use_mock:
1205
- for key in keys:
1206
- full_key = f"{self.PREFIX_WORKING}{owner}:{key}"
1207
- if full_key in self._mock_storage:
1208
- value, expires = self._mock_storage[full_key]
1209
- if expires is None or datetime.now().timestamp() < expires:
1210
- payload = json.loads(str(value))
1211
- results[key] = payload.get("data")
1212
- latency_ms = (time.perf_counter() - start_time) * 1000
1213
- self._metrics.record_operation("retrieve_batch", latency_ms)
1214
- return results
1215
-
1216
- if self._client is None:
1217
- return {}
1218
-
1219
- full_keys = [f"{self.PREFIX_WORKING}{owner}:{key}" for key in keys]
1220
- values = self._client.mget(full_keys)
1221
-
1222
- for key, value in zip(keys, values, strict=False):
1223
- if value:
1224
- payload = json.loads(value)  # json.loads accepts str or bytes; str() would mangle bytes
1225
- results[key] = payload.get("data")
1226
-
1227
- latency_ms = (time.perf_counter() - start_time) * 1000
1228
- self._metrics.record_operation("retrieve_batch", latency_ms)
1229
- return results
1230
-
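The two batch methods round-trip: `stash_batch` namespaces each key under the caller's agent ID and writes in one pipeline, and `retrieve_batch` reads the same namespace back with a single MGET. Sketch, reusing an assumed `creds` object:

```python
items = [("analysis:1", {"score": 0.9}), ("analysis:2", {"score": 0.4})]
stored = memory.stash_batch(items, creds)  # one pipelined write
found = memory.retrieve_batch(["analysis:1", "analysis:2"], creds)
assert stored == 2
assert found["analysis:1"] == {"score": 0.9}
```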
1231
- # =========================================================================
1232
- # SCAN-BASED PAGINATION
1233
- # =========================================================================
1234
-
1235
- def list_staged_patterns_paginated(
1236
- self,
1237
- credentials: AgentCredentials,
1238
- cursor: str = "0",
1239
- count: int = 100,
1240
- ) -> PaginatedResult:
1241
- """List staged patterns with pagination using SCAN.
1242
-
1243
- More efficient than list_staged_patterns() for large datasets.
1244
-
1245
- Args:
1246
- credentials: Agent credentials
1247
- cursor: Pagination cursor (start with "0")
1248
- count: Page-size hint (Redis SCAN may return more or fewer per page)
1249
-
1250
- Returns:
1251
- PaginatedResult with items, cursor, and has_more flag
1252
-
1253
- Example:
1254
- >>> result = memory.list_staged_patterns_paginated(creds, "0", 10)
1255
- >>> for pattern in result.items:
1256
- ... print(pattern.name)
1257
- >>> if result.has_more:
1258
- ... next_result = memory.list_staged_patterns_paginated(creds, result.cursor, 10)
1259
-
1260
- """
1261
- start_time = time.perf_counter()
1262
- pattern = f"{self.PREFIX_STAGED}*"
1263
-
1264
- if self.use_mock:
1265
- import fnmatch
1266
-
1267
- all_keys = [k for k in self._mock_storage.keys() if fnmatch.fnmatch(k, pattern)]
1268
- start_idx = int(cursor)
1269
- end_idx = start_idx + count
1270
- page_keys = all_keys[start_idx:end_idx]
1271
-
1272
- patterns = []
1273
- for key in page_keys:
1274
- raw_value, expires = self._mock_storage[key]
1275
- if expires is None or datetime.now().timestamp() < expires:
1276
- patterns.append(StagedPattern.from_dict(json.loads(str(raw_value))))
1277
-
1278
- new_cursor = str(end_idx) if end_idx < len(all_keys) else "0"
1279
- has_more = end_idx < len(all_keys)
1280
-
1281
- latency_ms = (time.perf_counter() - start_time) * 1000
1282
- self._metrics.record_operation("list_paginated", latency_ms)
1283
-
1284
- return PaginatedResult(
1285
- items=patterns,
1286
- cursor=new_cursor,
1287
- has_more=has_more,
1288
- total_scanned=len(page_keys),
1289
- )
1290
-
1291
- if self._client is None:
1292
- return PaginatedResult(items=[], cursor="0", has_more=False)
1293
-
1294
- # Use SCAN for efficient iteration
1295
- new_cursor, keys = self._client.scan(cursor=int(cursor), match=pattern, count=count)
1296
-
1297
- patterns = []
1298
- for key in keys:
1299
- raw = self._client.get(key)
1300
- if raw:
1301
- patterns.append(StagedPattern.from_dict(json.loads(raw)))
1302
-
1303
- has_more = new_cursor != 0
1304
-
1305
- latency_ms = (time.perf_counter() - start_time) * 1000
1306
- self._metrics.record_operation("list_paginated", latency_ms)
1307
-
1308
- return PaginatedResult(
1309
- items=patterns,
1310
- cursor=str(new_cursor),
1311
- has_more=has_more,
1312
- total_scanned=len(keys),
1313
- )
1314
-
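Because the cursor comes back as "0" once the scan completes, draining every page is a loop on `has_more`. Sketch, with `handle` as a hypothetical per-pattern callback:

```python
cursor = "0"
while True:
    page = memory.list_staged_patterns_paginated(creds, cursor, count=100)
    for pattern in page.items:
        handle(pattern)  # hypothetical callback
    if not page.has_more:
        break
    cursor = page.cursor
```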
1315
- def scan_keys(
1316
- self,
1317
- pattern: str,
1318
- cursor: str = "0",
1319
- count: int = 100,
1320
- ) -> PaginatedResult:
1321
- """Scan keys matching a pattern with pagination.
1322
-
1323
- Args:
1324
- pattern: Key pattern (e.g., "empathy:working:*")
1325
- cursor: Pagination cursor
1326
- count: Page-size hint passed to SCAN
1327
-
1328
- Returns:
1329
- PaginatedResult with key strings
1330
-
1331
- """
1332
- if self.use_mock:
1333
- import fnmatch
1334
-
1335
- all_keys = [k for k in self._mock_storage.keys() if fnmatch.fnmatch(k, pattern)]
1336
- start_idx = int(cursor)
1337
- end_idx = start_idx + count
1338
- page_keys = all_keys[start_idx:end_idx]
1339
- new_cursor = str(end_idx) if end_idx < len(all_keys) else "0"
1340
- has_more = end_idx < len(all_keys)
1341
- return PaginatedResult(items=page_keys, cursor=new_cursor, has_more=has_more)
1342
-
1343
- if self._client is None:
1344
- return PaginatedResult(items=[], cursor="0", has_more=False)
1345
-
1346
- new_cursor, keys = self._client.scan(cursor=int(cursor), match=pattern, count=count)
1347
- return PaginatedResult(
1348
- items=[str(k) for k in keys],
1349
- cursor=str(new_cursor),
1350
- has_more=new_cursor != 0,
1351
- )
1352
-
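The same cursor protocol applies to raw key scans, which makes prefix audits cheap compared with a blocking KEYS call. A sketch that counts working-memory keys:

```python
total, cursor = 0, "0"
while True:
    page = memory.scan_keys("empathy:working:*", cursor, count=500)
    total += len(page.items)
    if not page.has_more:
        break
    cursor = page.cursor
print(f"{total} working keys")
```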
1353
- # =========================================================================
1354
- # PUB/SUB FOR REAL-TIME NOTIFICATIONS
1355
- # =========================================================================
1356
-
1357
- def publish(
1358
- self,
1359
- channel: str,
1360
- message: dict,
1361
- credentials: AgentCredentials,
1362
- ) -> int:
1363
- """Publish a message to a channel for real-time notifications.
1364
-
1365
- Args:
1366
- channel: Channel name (will be prefixed)
1367
- message: Message payload (dict)
1368
- credentials: Agent credentials (must be CONTRIBUTOR+)
1369
-
1370
- Returns:
1371
- Number of subscribers that received the message
1372
-
1373
- Example:
1374
- >>> memory.publish("agent_signals", {"event": "task_complete", "task_id": "123"}, creds)
1375
-
1376
- """
1377
- if not credentials.can_stage():
1378
- raise PermissionError(
1379
- f"Agent {credentials.agent_id} cannot publish. Requires CONTRIBUTOR tier or higher.",
1380
- )
1381
-
1382
- start_time = time.perf_counter()
1383
- full_channel = f"{self.PREFIX_PUBSUB}{channel}"
1384
-
1385
- payload = {
1386
- "channel": channel,
1387
- "from_agent": credentials.agent_id,
1388
- "timestamp": datetime.now().isoformat(),
1389
- "data": message,
1390
- }
1391
-
1392
- if self.use_mock:
1393
- handlers = self._mock_pubsub_handlers.get(full_channel, [])
1394
- for handler in handlers:
1395
- try:
1396
- handler(payload)
1397
- except Exception as e:
1398
- logger.warning("pubsub_handler_error", channel=channel, error=str(e))
1399
- latency_ms = (time.perf_counter() - start_time) * 1000
1400
- self._metrics.record_operation("publish", latency_ms)
1401
- return len(handlers)
1402
-
1403
- if self._client is None:
1404
- return 0
1405
-
1406
- count = self._client.publish(full_channel, json.dumps(payload))
1407
- latency_ms = (time.perf_counter() - start_time) * 1000
1408
- self._metrics.record_operation("publish", latency_ms)
1409
-
1410
- logger.debug("pubsub_published", channel=channel, subscribers=count)
1411
- return int(count)
1412
-
1413
- def subscribe(
1414
- self,
1415
- channel: str,
1416
- handler: Callable[[dict], None],
1417
- credentials: AgentCredentials | None = None,
1418
- ) -> bool:
1419
- """Subscribe to a channel for real-time notifications.
1420
-
1421
- Args:
1422
- channel: Channel name to subscribe to
1423
- handler: Callback function receiving message dict
1424
- credentials: Optional credentials (any tier can subscribe)
1425
-
1426
- Returns:
1427
- True if subscribed successfully
1428
-
1429
- Example:
1430
- >>> def on_message(msg):
1431
- ... print(f"Received: {msg['data']}")
1432
- >>> memory.subscribe("agent_signals", on_message)
1433
-
1434
- """
1435
- full_channel = f"{self.PREFIX_PUBSUB}{channel}"
1436
-
1437
- if self.use_mock:
1438
- if full_channel not in self._mock_pubsub_handlers:
1439
- self._mock_pubsub_handlers[full_channel] = []
1440
- self._mock_pubsub_handlers[full_channel].append(handler)
1441
- logger.info("pubsub_subscribed_mock", channel=channel)
1442
- return True
1443
-
1444
- if self._client is None:
1445
- return False
1446
-
1447
- # Store handler
1448
- if full_channel not in self._subscriptions:
1449
- self._subscriptions[full_channel] = []
1450
- self._subscriptions[full_channel].append(handler)
1451
-
1452
- # Create pubsub if needed
1453
- if self._pubsub is None:
1454
- self._pubsub = self._client.pubsub()
1455
-
1456
- # Subscribe
1457
- self._pubsub.subscribe(**{full_channel: self._pubsub_message_handler})
1458
-
1459
- # Start listener thread if not running
1460
- if not self._pubsub_running:
1461
- self._pubsub_running = True
1462
- self._pubsub_thread = threading.Thread(
1463
- target=self._pubsub_listener,
1464
- daemon=True,
1465
- name="redis-pubsub-listener",
1466
- )
1467
- self._pubsub_thread.start()
1468
-
1469
- logger.info("pubsub_subscribed", channel=channel)
1470
- return True
1471
-
1472
- def _pubsub_message_handler(self, message: dict) -> None:
1473
- """Internal handler for pubsub messages."""
1474
- if message["type"] != "message":
1475
- return
1476
-
1477
- channel = message["channel"]
1478
- if isinstance(channel, bytes):
1479
- channel = channel.decode()
1480
-
1481
- try:
1482
- payload = json.loads(message["data"])
1483
- except json.JSONDecodeError:
1484
- payload = {"raw": message["data"]}
1485
-
1486
- handlers = self._subscriptions.get(channel, [])
1487
- for handler in handlers:
1488
- try:
1489
- handler(payload)
1490
- except Exception as e:
1491
- logger.warning("pubsub_handler_error", channel=channel, error=str(e))
1492
-
1493
- def _pubsub_listener(self) -> None:
1494
- """Background thread for listening to pubsub messages."""
1495
- while self._pubsub_running and self._pubsub:
1496
- try:
1497
- self._pubsub.get_message(ignore_subscribe_messages=True, timeout=1.0)
1498
- except Exception as e:
1499
- logger.warning("pubsub_listener_error", error=str(e))
1500
- time.sleep(1)
1501
-
1502
- def unsubscribe(self, channel: str) -> bool:
1503
- """Unsubscribe from a channel.
1504
-
1505
- Args:
1506
- channel: Channel name to unsubscribe from
1507
-
1508
- Returns:
1509
- True if unsubscribed successfully
1510
-
1511
- """
1512
- full_channel = f"{self.PREFIX_PUBSUB}{channel}"
1513
-
1514
- if self.use_mock:
1515
- self._mock_pubsub_handlers.pop(full_channel, None)
1516
- return True
1517
-
1518
- if self._pubsub is None:
1519
- return False
1520
-
1521
- self._pubsub.unsubscribe(full_channel)
1522
- self._subscriptions.pop(full_channel, None)
1523
- return True
1524
-
1525
- def close_pubsub(self) -> None:
1526
- """Close pubsub connection and stop listener thread."""
1527
- self._pubsub_running = False
1528
- if self._pubsub:
1529
- self._pubsub.close()
1530
- self._pubsub = None
1531
- self._subscriptions.clear()
1532
-
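End to end, a subscriber registers a callback and a publisher fans a dict out to it; delivery is synchronous in mock mode but arrives on the daemon listener thread in Redis mode. Sketch:

```python
received = []

def on_signal(msg: dict) -> None:
    received.append(msg["data"])  # publisher's payload sits under "data"

memory.subscribe("agent_signals", on_signal)
memory.publish("agent_signals", {"event": "task_complete"}, creds)
memory.close_pubsub()  # stop the listener thread when done
```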
1533
- # =========================================================================
1534
- # REDIS STREAMS FOR AUDIT TRAILS
1535
- # =========================================================================
1536
-
1537
- def stream_append(
1538
- self,
1539
- stream_name: str,
1540
- data: dict,
1541
- credentials: AgentCredentials,
1542
- max_len: int = 10000,
1543
- ) -> str | None:
1544
- """Append an entry to a Redis Stream for audit trails.
1545
-
1546
- Streams provide:
1547
- - Ordered, persistent event log
1548
- - Consumer groups for distributed processing
1549
- - Length-capped retention (older entries trimmed via MAXLEN)
1550
-
1551
- Args:
1552
- stream_name: Name of the stream
1553
- data: Event data to append
1554
- credentials: Agent credentials (must be CONTRIBUTOR+)
1555
- max_len: Maximum stream length (older entries trimmed)
1556
-
1557
- Returns:
1558
- Entry ID if successful, None otherwise
1559
-
1560
- Example:
1561
- >>> entry_id = memory.stream_append("audit", {"action": "pattern_promoted", "pattern_id": "xyz"}, creds)
1562
-
1563
- """
1564
- if not credentials.can_stage():
1565
- raise PermissionError(
1566
- f"Agent {credentials.agent_id} cannot write to stream. "
1567
- "Requires CONTRIBUTOR tier or higher.",
1568
- )
1569
-
1570
- start_time = time.perf_counter()
1571
- full_stream = f"{self.PREFIX_STREAM}{stream_name}"
1572
-
1573
- entry = {
1574
- "agent_id": credentials.agent_id,
1575
- "timestamp": datetime.now().isoformat(),
1576
- **{
1577
- str(k): json.dumps(v) if isinstance(v, dict | list) else str(v)
1578
- for k, v in data.items()
1579
- },
1580
- }
1581
-
1582
- if self.use_mock:
1583
- if full_stream not in self._mock_streams:
1584
- self._mock_streams[full_stream] = []
1585
- entry_id = f"{int(datetime.now().timestamp() * 1000)}-0"
1586
- self._mock_streams[full_stream].append((entry_id, entry))
1587
- # Trim to max_len
1588
- if len(self._mock_streams[full_stream]) > max_len:
1589
- self._mock_streams[full_stream] = self._mock_streams[full_stream][-max_len:]
1590
- latency_ms = (time.perf_counter() - start_time) * 1000
1591
- self._metrics.record_operation("stream_append", latency_ms)
1592
- return entry_id
1593
-
1594
- if self._client is None:
1595
- return None
1596
-
1597
- entry_id = self._client.xadd(full_stream, entry, maxlen=max_len)
1598
- latency_ms = (time.perf_counter() - start_time) * 1000
1599
- self._metrics.record_operation("stream_append", latency_ms)
1600
-
1601
- return str(entry_id) if entry_id else None
1602
-
1603
- def stream_read(
1604
- self,
1605
- stream_name: str,
1606
- credentials: AgentCredentials,
1607
- start_id: str = "0",
1608
- count: int = 100,
1609
- ) -> list[tuple[str, dict]]:
1610
- """Read entries from a Redis Stream.
1611
-
1612
- Args:
1613
- stream_name: Name of the stream
1614
- credentials: Agent credentials
1615
- start_id: Start reading from this ID ("0" = beginning)
1616
- count: Maximum entries to read
1617
-
1618
- Returns:
1619
- List of (entry_id, data) tuples
1620
-
1621
- Example:
1622
- >>> entries = memory.stream_read("audit", creds, count=50)
1623
- >>> for entry_id, data in entries:
1624
- ... print(f"{entry_id}: {data}")
1625
-
1626
- """
1627
- full_stream = f"{self.PREFIX_STREAM}{stream_name}"
1628
-
1629
- if self.use_mock:
1630
- if full_stream not in self._mock_streams:
1631
- return []
1632
- entries = self._mock_streams[full_stream]
1633
- # Filter by start_id (lexicographic and exclusive; Redis XRANGE's min bound is inclusive)
1634
- filtered = [(eid, data) for eid, data in entries if eid > start_id]
1635
- return filtered[:count]
1636
-
1637
- if self._client is None:
1638
- return []
1639
-
1640
- result = self._client.xrange(full_stream, min=start_id, count=count)
1641
- return [(str(entry_id), {str(k): v for k, v in data.items()}) for entry_id, data in result]
1642
-
1643
- def stream_read_new(
1644
- self,
1645
- stream_name: str,
1646
- credentials: AgentCredentials,
1647
- block_ms: int = 0,
1648
- count: int = 100,
1649
- ) -> list[tuple[str, dict]]:
1650
- """Read only new entries from a stream (blocking read).
1651
-
1652
- Args:
1653
- stream_name: Name of the stream
1654
- credentials: Agent credentials
1655
- block_ms: Milliseconds to block waiting (0 = no block)
1656
- count: Maximum entries to read
1657
-
1658
- Returns:
1659
- List of (entry_id, data) tuples
1660
-
1661
- """
1662
- full_stream = f"{self.PREFIX_STREAM}{stream_name}"
1663
-
1664
- if self.use_mock:
1665
- return [] # Mock doesn't support blocking reads
1666
-
1667
- if self._client is None:
1668
- return []
1669
-
1670
- result = self._client.xread({full_stream: "$"}, block=block_ms or None, count=count)  # block=0 would block forever in redis-py
1671
- if not result:
1672
- return []
1673
-
1674
- # Result format: [(stream_name, [(entry_id, data), ...])]
1675
- entries = []
1676
- for _stream, stream_entries in result:
1677
- for entry_id, data in stream_entries:
1678
- entries.append((str(entry_id), {str(k): v for k, v in data.items()}))
1679
- return entries
1680
-
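For audit trails, each append returns a stream entry ID and `stream_read` replays entries from an ID onward, so a consumer can poll incrementally. Sketch:

```python
memory.stream_append(
    "audit", {"action": "pattern_promoted", "pattern_id": "p1"}, creds
)
for entry_id, event in memory.stream_read("audit", creds, start_id="0", count=50):
    print(entry_id, event.get("action"))
```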
1681
- # =========================================================================
1682
- # TIME-WINDOW QUERIES (SORTED SETS)
1683
- # =========================================================================
1684
-
1685
- def timeline_add(
1686
- self,
1687
- timeline_name: str,
1688
- event_id: str,
1689
- data: dict,
1690
- credentials: AgentCredentials,
1691
- timestamp: datetime | None = None,
1692
- ) -> bool:
1693
- """Add an event to a timeline (sorted set by timestamp).
1694
-
1695
- Args:
1696
- timeline_name: Name of the timeline
1697
- event_id: Unique event identifier
1698
- data: Event data
1699
- credentials: Agent credentials
1700
- timestamp: Event timestamp (defaults to now)
1701
-
1702
- Returns:
1703
- True if added successfully
1704
-
1705
- """
1706
- if not credentials.can_stage():
1707
- raise PermissionError(
1708
- f"Agent {credentials.agent_id} cannot write to timeline. "
1709
- "Requires CONTRIBUTOR tier or higher.",
1710
- )
1711
-
1712
- full_timeline = f"{self.PREFIX_TIMELINE}{timeline_name}"
1713
- ts = timestamp or datetime.now()
1714
- score = ts.timestamp()
1715
-
1716
- payload = json.dumps(
1717
- {
1718
- "event_id": event_id,
1719
- "timestamp": ts.isoformat(),
1720
- "agent_id": credentials.agent_id,
1721
- "data": data,
1722
- },
1723
- )
1724
-
1725
- if self.use_mock:
1726
- if full_timeline not in self._mock_sorted_sets:
1727
- self._mock_sorted_sets[full_timeline] = []
1728
- self._mock_sorted_sets[full_timeline].append((score, payload))
1729
- self._mock_sorted_sets[full_timeline].sort(key=lambda x: x[0])
1730
- return True
1731
-
1732
- if self._client is None:
1733
- return False
1734
-
1735
- self._client.zadd(full_timeline, {payload: score})
1736
- return True
1737
-
1738
- def timeline_query(
1739
- self,
1740
- timeline_name: str,
1741
- credentials: AgentCredentials,
1742
- query: TimeWindowQuery | None = None,
1743
- ) -> list[dict]:
1744
- """Query events from a timeline within a time window.
1745
-
1746
- Args:
1747
- timeline_name: Name of the timeline
1748
- credentials: Agent credentials
1749
- query: Time window query parameters
1750
-
1751
- Returns:
1752
- List of events in the time window
1753
-
1754
- Example:
1755
- >>> from datetime import datetime, timedelta
1756
- >>> query = TimeWindowQuery(
1757
- ... start_time=datetime.now() - timedelta(hours=1),
1758
- ... end_time=datetime.now(),
1759
- ... limit=50
1760
- ... )
1761
- >>> events = memory.timeline_query("agent_events", creds, query)
1762
-
1763
- """
1764
- full_timeline = f"{self.PREFIX_TIMELINE}{timeline_name}"
1765
- q = query or TimeWindowQuery()
1766
-
1767
- if self.use_mock:
1768
- if full_timeline not in self._mock_sorted_sets:
1769
- return []
1770
- entries = self._mock_sorted_sets[full_timeline]
1771
- filtered = [
1772
- json.loads(payload)
1773
- for score, payload in entries
1774
- if q.start_score <= score <= q.end_score
1775
- ]
1776
- return filtered[q.offset : q.offset + q.limit]
1777
-
1778
- if self._client is None:
1779
- return []
1780
-
1781
- results = self._client.zrangebyscore(
1782
- full_timeline,
1783
- min=q.start_score,
1784
- max=q.end_score,
1785
- start=q.offset,
1786
- num=q.limit,
1787
- )
1788
-
1789
- return [json.loads(r) for r in results]
1790
-
1791
- def timeline_count(
1792
- self,
1793
- timeline_name: str,
1794
- credentials: AgentCredentials,
1795
- query: TimeWindowQuery | None = None,
1796
- ) -> int:
1797
- """Count events in a timeline within a time window.
1798
-
1799
- Args:
1800
- timeline_name: Name of the timeline
1801
- credentials: Agent credentials
1802
- query: Time window query parameters
1803
-
1804
- Returns:
1805
- Number of events in the time window
1806
-
1807
- """
1808
- full_timeline = f"{self.PREFIX_TIMELINE}{timeline_name}"
1809
- q = query or TimeWindowQuery()
1810
-
1811
- if self.use_mock:
1812
- if full_timeline not in self._mock_sorted_sets:
1813
- return 0
1814
- entries = self._mock_sorted_sets[full_timeline]
1815
- return sum(1 for score, _ in entries if q.start_score <= score <= q.end_score)
1816
-
1817
- if self._client is None:
1818
- return 0
1819
-
1820
- return int(self._client.zcount(full_timeline, q.start_score, q.end_score))
1821
-
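Timelines are sorted sets scored by timestamp, so window queries are score-range reads. A sketch that records an event and counts the last hour, using the `TimeWindowQuery` constructor arguments shown in the docstring above:

```python
from datetime import datetime, timedelta

memory.timeline_add("agent_events", "evt-1", {"kind": "handoff"}, creds)
window = TimeWindowQuery(
    start_time=datetime.now() - timedelta(hours=1),
    end_time=datetime.now(),
)
print(memory.timeline_count("agent_events", creds, window))
```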
1822
- # =========================================================================
1823
- # TASK QUEUES (LISTS)
1824
- # =========================================================================
1825
-
1826
- def queue_push(
1827
- self,
1828
- queue_name: str,
1829
- task: dict,
1830
- credentials: AgentCredentials,
1831
- priority: bool = False,
1832
- ) -> int:
1833
- """Push a task to a queue.
1834
-
1835
- Args:
1836
- queue_name: Name of the queue
1837
- task: Task data
1838
- credentials: Agent credentials (must be CONTRIBUTOR+)
1839
- priority: If True, push to front (high priority)
1840
-
1841
- Returns:
1842
- New queue length
1843
-
1844
- Example:
1845
- >>> task = {"type": "analyze", "file": "main.py"}
1846
- >>> memory.queue_push("agent_tasks", task, creds)
1847
-
1848
- """
1849
- if not credentials.can_stage():
1850
- raise PermissionError(
1851
- f"Agent {credentials.agent_id} cannot push to queue. "
1852
- "Requires CONTRIBUTOR tier or higher.",
1853
- )
1854
-
1855
- full_queue = f"{self.PREFIX_QUEUE}{queue_name}"
1856
- payload = json.dumps(
1857
- {
1858
- "task": task,
1859
- "queued_by": credentials.agent_id,
1860
- "queued_at": datetime.now().isoformat(),
1861
- },
1862
- )
1863
-
1864
- if self.use_mock:
1865
- if full_queue not in self._mock_lists:
1866
- self._mock_lists[full_queue] = []
1867
- if priority:
1868
- self._mock_lists[full_queue].insert(0, payload)
1869
- else:
1870
- self._mock_lists[full_queue].append(payload)
1871
- return len(self._mock_lists[full_queue])
1872
-
1873
- if self._client is None:
1874
- return 0
1875
-
1876
- if priority:
1877
- return int(self._client.lpush(full_queue, payload))
1878
- return int(self._client.rpush(full_queue, payload))
1879
-
1880
- def queue_pop(
1881
- self,
1882
- queue_name: str,
1883
- credentials: AgentCredentials,
1884
- timeout: int = 0,
1885
- ) -> dict | None:
1886
- """Pop a task from a queue.
1887
-
1888
- Args:
1889
- queue_name: Name of the queue
1890
- credentials: Agent credentials
1891
- timeout: Seconds to block waiting (0 = no block)
1892
-
1893
- Returns:
1894
- Task data or None if queue empty
1895
-
1896
- Example:
1897
- >>> task = memory.queue_pop("agent_tasks", creds, timeout=5)
1898
- >>> if task:
1899
- ... process(task["task"])
1900
-
1901
- """
1902
- full_queue = f"{self.PREFIX_QUEUE}{queue_name}"
1903
-
1904
- if self.use_mock:
1905
- if full_queue not in self._mock_lists or not self._mock_lists[full_queue]:
1906
- return None
1907
- payload = self._mock_lists[full_queue].pop(0)
1908
- data: dict = json.loads(payload)
1909
- return data
1910
-
1911
- if self._client is None:
1912
- return None
1913
-
1914
- if timeout > 0:
1915
- result = self._client.blpop(full_queue, timeout=timeout)
1916
- if result:
1917
- data = json.loads(result[1])
1918
- return data
1919
- return None
1920
-
1921
- result = self._client.lpop(full_queue)
1922
- if result:
1923
- data = json.loads(result)
1924
- return data
1925
- return None
1926
-
1927
- def queue_length(self, queue_name: str) -> int:
1928
- """Get the length of a queue.
1929
-
1930
- Args:
1931
- queue_name: Name of the queue
1932
-
1933
- Returns:
1934
- Number of items in the queue
1935
-
1936
- """
1937
- full_queue = f"{self.PREFIX_QUEUE}{queue_name}"
1938
-
1939
- if self.use_mock:
1940
- return len(self._mock_lists.get(full_queue, []))
1941
-
1942
- if self._client is None:
1943
- return 0
1944
-
1945
- return int(self._client.llen(full_queue))
1946
-
1947
- def queue_peek(
1948
- self,
1949
- queue_name: str,
1950
- credentials: AgentCredentials,
1951
- count: int = 1,
1952
- ) -> list[dict]:
1953
- """Peek at tasks in a queue without removing them.
1954
-
1955
- Args:
1956
- queue_name: Name of the queue
1957
- credentials: Agent credentials
1958
- count: Number of items to peek
1959
-
1960
- Returns:
1961
- List of task data
1962
-
1963
- """
1964
- full_queue = f"{self.PREFIX_QUEUE}{queue_name}"
1965
-
1966
- if self.use_mock:
1967
- items = self._mock_lists.get(full_queue, [])[:count]
1968
- return [json.loads(item) for item in items]
1969
-
1970
- if self._client is None:
1971
- return []
1972
-
1973
- items = self._client.lrange(full_queue, 0, count - 1)
1974
- return [json.loads(item) for item in items]
1975
-
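The queue methods implement a FIFO with an optional priority fast path (head insert) and blocking pops. A minimal producer/consumer sketch:

```python
memory.queue_push("agent_tasks", {"type": "analyze", "file": "main.py"}, creds)
memory.queue_push("agent_tasks", {"type": "triage"}, creds, priority=True)

while memory.queue_length("agent_tasks") > 0:
    item = memory.queue_pop("agent_tasks", creds)
    if item is not None:
        print(item["task"], "queued by", item["queued_by"])
```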
1976
- # =========================================================================
1977
- # ATOMIC TRANSACTIONS
1978
- # =========================================================================
1979
-
1980
- def atomic_promote_pattern(
1981
- self,
1982
- pattern_id: str,
1983
- credentials: AgentCredentials,
1984
- min_confidence: float = 0.0,
1985
- ) -> tuple[bool, StagedPattern | None, str]:
1986
- """Atomically promote a pattern with validation.
1987
-
1988
- Uses Redis transaction (MULTI/EXEC) to ensure:
1989
- - Pattern exists and meets confidence threshold
1990
- - Pattern is removed from staging atomically
1991
- - No race conditions with concurrent operations
1992
-
1993
- Args:
1994
- pattern_id: Pattern to promote
1995
- credentials: Must be VALIDATOR or higher
1996
- min_confidence: Minimum confidence threshold
1997
-
1998
- Returns:
1999
- Tuple of (success, pattern, message)
2000
-
2001
- Raises:
2002
- ValueError: If pattern_id is empty or min_confidence out of range
2003
-
2004
- Example:
2005
- >>> success, pattern, msg = memory.atomic_promote_pattern("pat_123", creds, min_confidence=0.7)
2006
- >>> if success:
2007
- ... library.add(pattern)
2008
-
2009
- """
2010
- # Pattern 1: String ID validation
2011
- if not pattern_id or not pattern_id.strip():
2012
- raise ValueError("pattern_id cannot be empty")
2013
-
2014
- # Pattern 4: Range validation
2015
- if not 0.0 <= min_confidence <= 1.0:
2016
- raise ValueError(f"min_confidence must be between 0.0 and 1.0, got {min_confidence}")
2017
-
2018
- if not credentials.can_validate():
2019
- return False, None, "Requires VALIDATOR tier or higher"
2020
-
2021
- key = f"{self.PREFIX_STAGED}{pattern_id}"
2022
-
2023
- if self.use_mock:
2024
- if key not in self._mock_storage:
2025
- return False, None, "Pattern not found"
2026
- value, expires = self._mock_storage[key]
2027
- if expires and datetime.now().timestamp() >= expires:
2028
- return False, None, "Pattern expired"
2029
- pattern = StagedPattern.from_dict(json.loads(str(value)))
2030
- if pattern.confidence < min_confidence:
2031
- return (
2032
- False,
2033
- None,
2034
- f"Confidence {pattern.confidence} below threshold {min_confidence}",
2035
- )
2036
- del self._mock_storage[key]
2037
- # Also invalidate local cache
2038
- if key in self._local_cache:
2039
- del self._local_cache[key]
2040
- return True, pattern, "Pattern promoted successfully"
2041
-
2042
- if self._client is None:
2043
- return False, None, "Redis not connected"
2044
-
2045
- # redis-py exposes WATCH only on a pipeline; run the check-and-delete
2046
- # under WATCH/MULTI/EXEC for optimistic locking
2047
- pipe = self._client.pipeline()
2048
- try:
2049
- pipe.watch(key)
2050
- # After watch(), the pipeline executes commands immediately until multi()
2051
- raw = pipe.get(key)
2052
-
2053
- if raw is None:
2054
- return False, None, "Pattern not found"
2055
-
2056
- pattern = StagedPattern.from_dict(json.loads(raw))
2057
-
2058
- if pattern.confidence < min_confidence:
2059
- return (
2060
- False,
2061
- None,
2062
- f"Confidence {pattern.confidence} below threshold {min_confidence}",
2063
- )
2064
-
2065
- # Atomic delete: EXEC aborts with WatchError if the key changed
2066
- pipe.multi()
2067
- pipe.delete(key)
2068
- pipe.execute()
2069
-
2070
- # Also invalidate local cache
2071
- if key in self._local_cache:
2072
- del self._local_cache[key]
2073
-
2074
- return True, pattern, "Pattern promoted successfully"
2075
-
2076
- except redis.WatchError:
2077
- return False, None, "Pattern was modified by another process"
2078
-
2079
- finally:
2080
- # reset() releases the WATCH and returns the connection to the pool
2081
- pipe.reset()
2082
-
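Because a concurrent writer surfaces as a failure message rather than an exception, a caller that wants promotion under contention can retry a bounded number of times. A sketch; matching on the message string is brittle and shown only for illustration, and `library` is the destination from the docstring example:

```python
for _attempt in range(3):
    ok, pattern, msg = memory.atomic_promote_pattern(
        "pat_123", creds, min_confidence=0.7
    )
    if ok:
        library.add(pattern)  # destination library, as in the docstring example
        break
    if msg != "Pattern was modified by another process":
        break  # not a race: missing, expired, or below threshold
```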
2083
- # =========================================================================
2084
- # CROSS-SESSION COMMUNICATION
2085
- # =========================================================================
2086
-
2087
- def enable_cross_session(
2088
- self,
2089
- access_tier: AccessTier = AccessTier.CONTRIBUTOR,
2090
- auto_announce: bool = True,
2091
- ) -> "CrossSessionCoordinator":
2092
- """Enable cross-session communication for this memory instance.
2093
-
2094
- This allows agents in different Claude Code sessions to communicate
2095
- and coordinate via Redis.
2096
-
2097
- Args:
2098
- access_tier: Access tier for this session
2099
- auto_announce: Whether to announce presence automatically
2100
-
2101
- Returns:
2102
- CrossSessionCoordinator instance
2103
-
2104
- Raises:
2105
- ValueError: If in mock mode (Redis required for cross-session)
2106
-
2107
- Example:
2108
- >>> memory = RedisShortTermMemory()
2109
- >>> coordinator = memory.enable_cross_session(AccessTier.CONTRIBUTOR)
2110
- >>> print(f"Session ID: {coordinator.agent_id}")
2111
- >>> sessions = coordinator.get_active_sessions()
2112
-
2113
- """
2114
- if self.use_mock:
2115
- raise ValueError(
2116
- "Cross-session communication requires Redis. "
2117
- "Set REDIS_HOST/REDIS_PORT or disable mock mode."
2118
- )
2119
-
2120
- from .cross_session import CrossSessionCoordinator, SessionType
2121
-
2122
- coordinator = CrossSessionCoordinator(
2123
- memory=self,
2124
- session_type=SessionType.CLAUDE,
2125
- access_tier=access_tier,
2126
- auto_announce=auto_announce,
2127
- )
2128
-
2129
- return coordinator
2130
-
2131
- def cross_session_available(self) -> bool:
2132
- """Check if cross-session communication is available.
2133
-
2134
- Returns:
2135
- True if Redis is connected (not mock mode)
2136
-
2137
- """
2138
- return not self.use_mock and self._client is not None
2139
-
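`cross_session_available` is the natural guard before `enable_cross_session`, since the latter raises in mock mode. Sketch, mirroring the docstring example:

```python
if memory.cross_session_available():
    coordinator = memory.enable_cross_session(AccessTier.CONTRIBUTOR)
    print("session id:", coordinator.agent_id)
else:
    print("Redis not connected; cross-session coordination disabled")
```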
2140
- # =========================================================================
2141
- # CLEANUP AND LIFECYCLE
2142
- # =========================================================================
2143
-
2144
- def close(self) -> None:
2145
- """Close all connections and cleanup resources."""
2146
- self.close_pubsub()
2147
- if self._client:
2148
- self._client.close()
2149
- self._client = None
2150
- logger.info("redis_connection_closed")
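Since `close` tears down both the pub/sub listener and the client connection, a try/finally keeps script lifecycles tidy. Sketch:

```python
memory = RedisShortTermMemory()
try:
    if memory.ping():
        ...  # work with the instance
finally:
    memory.close()  # stops the pub/sub thread and closes the Redis client
```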