empathy-framework 4.6.6-py3-none-any.whl → 4.7.1-py3-none-any.whl

This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (273)
  1. empathy_framework-4.7.1.dist-info/METADATA +690 -0
  2. empathy_framework-4.7.1.dist-info/RECORD +379 -0
  3. {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.1.dist-info}/top_level.txt +1 -2
  4. empathy_healthcare_plugin/monitors/monitoring/__init__.py +9 -9
  5. empathy_llm_toolkit/agent_factory/__init__.py +6 -6
  6. empathy_llm_toolkit/agent_factory/adapters/wizard_adapter.py +7 -10
  7. empathy_llm_toolkit/agents_md/__init__.py +22 -0
  8. empathy_llm_toolkit/agents_md/loader.py +218 -0
  9. empathy_llm_toolkit/agents_md/parser.py +271 -0
  10. empathy_llm_toolkit/agents_md/registry.py +307 -0
  11. empathy_llm_toolkit/commands/__init__.py +51 -0
  12. empathy_llm_toolkit/commands/context.py +375 -0
  13. empathy_llm_toolkit/commands/loader.py +301 -0
  14. empathy_llm_toolkit/commands/models.py +231 -0
  15. empathy_llm_toolkit/commands/parser.py +371 -0
  16. empathy_llm_toolkit/commands/registry.py +429 -0
  17. empathy_llm_toolkit/config/__init__.py +8 -8
  18. empathy_llm_toolkit/config/unified.py +3 -7
  19. empathy_llm_toolkit/context/__init__.py +22 -0
  20. empathy_llm_toolkit/context/compaction.py +455 -0
  21. empathy_llm_toolkit/context/manager.py +434 -0
  22. empathy_llm_toolkit/hooks/__init__.py +24 -0
  23. empathy_llm_toolkit/hooks/config.py +306 -0
  24. empathy_llm_toolkit/hooks/executor.py +289 -0
  25. empathy_llm_toolkit/hooks/registry.py +302 -0
  26. empathy_llm_toolkit/hooks/scripts/__init__.py +39 -0
  27. empathy_llm_toolkit/hooks/scripts/evaluate_session.py +201 -0
  28. empathy_llm_toolkit/hooks/scripts/first_time_init.py +285 -0
  29. empathy_llm_toolkit/hooks/scripts/pre_compact.py +207 -0
  30. empathy_llm_toolkit/hooks/scripts/session_end.py +183 -0
  31. empathy_llm_toolkit/hooks/scripts/session_start.py +163 -0
  32. empathy_llm_toolkit/hooks/scripts/suggest_compact.py +225 -0
  33. empathy_llm_toolkit/learning/__init__.py +30 -0
  34. empathy_llm_toolkit/learning/evaluator.py +438 -0
  35. empathy_llm_toolkit/learning/extractor.py +514 -0
  36. empathy_llm_toolkit/learning/storage.py +560 -0
  37. empathy_llm_toolkit/providers.py +4 -11
  38. empathy_llm_toolkit/security/__init__.py +17 -17
  39. empathy_llm_toolkit/utils/tokens.py +2 -5
  40. empathy_os/__init__.py +202 -70
  41. empathy_os/cache_monitor.py +5 -3
  42. empathy_os/cli/__init__.py +11 -55
  43. empathy_os/cli/__main__.py +29 -15
  44. empathy_os/cli/commands/inspection.py +21 -12
  45. empathy_os/cli/commands/memory.py +4 -12
  46. empathy_os/cli/commands/profiling.py +198 -0
  47. empathy_os/cli/commands/utilities.py +27 -7
  48. empathy_os/cli.py +28 -57
  49. empathy_os/cli_unified.py +525 -1164
  50. empathy_os/cost_tracker.py +9 -3
  51. empathy_os/dashboard/server.py +200 -2
  52. empathy_os/hot_reload/__init__.py +7 -7
  53. empathy_os/hot_reload/config.py +6 -7
  54. empathy_os/hot_reload/integration.py +35 -35
  55. empathy_os/hot_reload/reloader.py +57 -57
  56. empathy_os/hot_reload/watcher.py +28 -28
  57. empathy_os/hot_reload/websocket.py +2 -2
  58. empathy_os/memory/__init__.py +11 -4
  59. empathy_os/memory/claude_memory.py +1 -1
  60. empathy_os/memory/cross_session.py +8 -12
  61. empathy_os/memory/edges.py +6 -6
  62. empathy_os/memory/file_session.py +770 -0
  63. empathy_os/memory/graph.py +30 -30
  64. empathy_os/memory/nodes.py +6 -6
  65. empathy_os/memory/short_term.py +15 -9
  66. empathy_os/memory/unified.py +606 -140
  67. empathy_os/meta_workflows/agent_creator.py +3 -9
  68. empathy_os/meta_workflows/cli_meta_workflows.py +113 -53
  69. empathy_os/meta_workflows/form_engine.py +6 -18
  70. empathy_os/meta_workflows/intent_detector.py +64 -24
  71. empathy_os/meta_workflows/models.py +3 -1
  72. empathy_os/meta_workflows/pattern_learner.py +13 -31
  73. empathy_os/meta_workflows/plan_generator.py +55 -47
  74. empathy_os/meta_workflows/session_context.py +2 -3
  75. empathy_os/meta_workflows/workflow.py +20 -51
  76. empathy_os/models/cli.py +2 -2
  77. empathy_os/models/tasks.py +1 -2
  78. empathy_os/models/telemetry.py +4 -1
  79. empathy_os/models/token_estimator.py +3 -1
  80. empathy_os/monitoring/alerts.py +938 -9
  81. empathy_os/monitoring/alerts_cli.py +346 -183
  82. empathy_os/orchestration/execution_strategies.py +12 -29
  83. empathy_os/orchestration/pattern_learner.py +20 -26
  84. empathy_os/orchestration/real_tools.py +6 -15
  85. empathy_os/platform_utils.py +2 -1
  86. empathy_os/plugins/__init__.py +2 -2
  87. empathy_os/plugins/base.py +64 -64
  88. empathy_os/plugins/registry.py +32 -32
  89. empathy_os/project_index/index.py +49 -15
  90. empathy_os/project_index/models.py +1 -2
  91. empathy_os/project_index/reports.py +1 -1
  92. empathy_os/project_index/scanner.py +1 -0
  93. empathy_os/redis_memory.py +10 -7
  94. empathy_os/resilience/__init__.py +1 -1
  95. empathy_os/resilience/health.py +10 -10
  96. empathy_os/routing/__init__.py +7 -7
  97. empathy_os/routing/chain_executor.py +37 -37
  98. empathy_os/routing/classifier.py +36 -36
  99. empathy_os/routing/smart_router.py +40 -40
  100. empathy_os/routing/{wizard_registry.py → workflow_registry.py} +47 -47
  101. empathy_os/scaffolding/__init__.py +8 -8
  102. empathy_os/scaffolding/__main__.py +1 -1
  103. empathy_os/scaffolding/cli.py +28 -28
  104. empathy_os/socratic/__init__.py +3 -19
  105. empathy_os/socratic/ab_testing.py +25 -36
  106. empathy_os/socratic/blueprint.py +38 -38
  107. empathy_os/socratic/cli.py +34 -20
  108. empathy_os/socratic/collaboration.py +30 -28
  109. empathy_os/socratic/domain_templates.py +9 -1
  110. empathy_os/socratic/embeddings.py +17 -13
  111. empathy_os/socratic/engine.py +135 -70
  112. empathy_os/socratic/explainer.py +70 -60
  113. empathy_os/socratic/feedback.py +24 -19
  114. empathy_os/socratic/forms.py +15 -10
  115. empathy_os/socratic/generator.py +51 -35
  116. empathy_os/socratic/llm_analyzer.py +25 -23
  117. empathy_os/socratic/mcp_server.py +99 -159
  118. empathy_os/socratic/session.py +19 -13
  119. empathy_os/socratic/storage.py +98 -67
  120. empathy_os/socratic/success.py +38 -27
  121. empathy_os/socratic/visual_editor.py +51 -39
  122. empathy_os/socratic/web_ui.py +99 -66
  123. empathy_os/telemetry/cli.py +3 -1
  124. empathy_os/telemetry/usage_tracker.py +1 -3
  125. empathy_os/test_generator/__init__.py +3 -3
  126. empathy_os/test_generator/cli.py +28 -28
  127. empathy_os/test_generator/generator.py +64 -66
  128. empathy_os/test_generator/risk_analyzer.py +11 -11
  129. empathy_os/vscode_bridge 2.py +173 -0
  130. empathy_os/vscode_bridge.py +173 -0
  131. empathy_os/workflows/__init__.py +212 -120
  132. empathy_os/workflows/batch_processing.py +8 -24
  133. empathy_os/workflows/bug_predict.py +1 -1
  134. empathy_os/workflows/code_review.py +20 -5
  135. empathy_os/workflows/code_review_pipeline.py +13 -8
  136. empathy_os/workflows/keyboard_shortcuts/workflow.py +6 -2
  137. empathy_os/workflows/manage_documentation.py +1 -0
  138. empathy_os/workflows/orchestrated_health_check.py +6 -11
  139. empathy_os/workflows/orchestrated_release_prep.py +3 -3
  140. empathy_os/workflows/pr_review.py +18 -10
  141. empathy_os/workflows/progressive/README 2.md +454 -0
  142. empathy_os/workflows/progressive/__init__ 2.py +92 -0
  143. empathy_os/workflows/progressive/__init__.py +2 -12
  144. empathy_os/workflows/progressive/cli 2.py +242 -0
  145. empathy_os/workflows/progressive/cli.py +14 -37
  146. empathy_os/workflows/progressive/core 2.py +488 -0
  147. empathy_os/workflows/progressive/core.py +12 -12
  148. empathy_os/workflows/progressive/orchestrator 2.py +701 -0
  149. empathy_os/workflows/progressive/orchestrator.py +166 -144
  150. empathy_os/workflows/progressive/reports 2.py +528 -0
  151. empathy_os/workflows/progressive/reports.py +22 -31
  152. empathy_os/workflows/progressive/telemetry 2.py +280 -0
  153. empathy_os/workflows/progressive/telemetry.py +8 -14
  154. empathy_os/workflows/progressive/test_gen 2.py +514 -0
  155. empathy_os/workflows/progressive/test_gen.py +29 -48
  156. empathy_os/workflows/progressive/workflow 2.py +628 -0
  157. empathy_os/workflows/progressive/workflow.py +31 -70
  158. empathy_os/workflows/release_prep.py +21 -6
  159. empathy_os/workflows/release_prep_crew.py +1 -0
  160. empathy_os/workflows/secure_release.py +13 -6
  161. empathy_os/workflows/security_audit.py +8 -3
  162. empathy_os/workflows/test_coverage_boost_crew.py +3 -2
  163. empathy_os/workflows/test_maintenance_crew.py +1 -0
  164. empathy_os/workflows/test_runner.py +16 -12
  165. empathy_software_plugin/SOFTWARE_PLUGIN_README.md +25 -703
  166. empathy_software_plugin/cli.py +0 -122
  167. patterns/README.md +119 -0
  168. patterns/__init__.py +95 -0
  169. patterns/behavior.py +298 -0
  170. patterns/code_review_memory.json +441 -0
  171. patterns/core.py +97 -0
  172. patterns/debugging.json +3763 -0
  173. patterns/empathy.py +268 -0
  174. patterns/health_check_memory.json +505 -0
  175. patterns/input.py +161 -0
  176. patterns/memory_graph.json +8 -0
  177. patterns/refactoring_memory.json +1113 -0
  178. patterns/registry.py +663 -0
  179. patterns/security_memory.json +8 -0
  180. patterns/structural.py +415 -0
  181. patterns/validation.py +194 -0
  182. coach_wizards/__init__.py +0 -45
  183. coach_wizards/accessibility_wizard.py +0 -91
  184. coach_wizards/api_wizard.py +0 -91
  185. coach_wizards/base_wizard.py +0 -209
  186. coach_wizards/cicd_wizard.py +0 -91
  187. coach_wizards/code_reviewer_README.md +0 -60
  188. coach_wizards/code_reviewer_wizard.py +0 -180
  189. coach_wizards/compliance_wizard.py +0 -91
  190. coach_wizards/database_wizard.py +0 -91
  191. coach_wizards/debugging_wizard.py +0 -91
  192. coach_wizards/documentation_wizard.py +0 -91
  193. coach_wizards/generate_wizards.py +0 -347
  194. coach_wizards/localization_wizard.py +0 -173
  195. coach_wizards/migration_wizard.py +0 -91
  196. coach_wizards/monitoring_wizard.py +0 -91
  197. coach_wizards/observability_wizard.py +0 -91
  198. coach_wizards/performance_wizard.py +0 -91
  199. coach_wizards/prompt_engineering_wizard.py +0 -661
  200. coach_wizards/refactoring_wizard.py +0 -91
  201. coach_wizards/scaling_wizard.py +0 -90
  202. coach_wizards/security_wizard.py +0 -92
  203. coach_wizards/testing_wizard.py +0 -91
  204. empathy_framework-4.6.6.dist-info/METADATA +0 -1597
  205. empathy_framework-4.6.6.dist-info/RECORD +0 -410
  206. empathy_llm_toolkit/wizards/__init__.py +0 -43
  207. empathy_llm_toolkit/wizards/base_wizard.py +0 -364
  208. empathy_llm_toolkit/wizards/customer_support_wizard.py +0 -190
  209. empathy_llm_toolkit/wizards/healthcare_wizard.py +0 -378
  210. empathy_llm_toolkit/wizards/patient_assessment_README.md +0 -64
  211. empathy_llm_toolkit/wizards/patient_assessment_wizard.py +0 -193
  212. empathy_llm_toolkit/wizards/technology_wizard.py +0 -209
  213. empathy_os/wizard_factory_cli.py +0 -170
  214. empathy_software_plugin/wizards/__init__.py +0 -42
  215. empathy_software_plugin/wizards/advanced_debugging_wizard.py +0 -395
  216. empathy_software_plugin/wizards/agent_orchestration_wizard.py +0 -511
  217. empathy_software_plugin/wizards/ai_collaboration_wizard.py +0 -503
  218. empathy_software_plugin/wizards/ai_context_wizard.py +0 -441
  219. empathy_software_plugin/wizards/ai_documentation_wizard.py +0 -503
  220. empathy_software_plugin/wizards/base_wizard.py +0 -288
  221. empathy_software_plugin/wizards/book_chapter_wizard.py +0 -519
  222. empathy_software_plugin/wizards/code_review_wizard.py +0 -604
  223. empathy_software_plugin/wizards/debugging/__init__.py +0 -50
  224. empathy_software_plugin/wizards/debugging/bug_risk_analyzer.py +0 -414
  225. empathy_software_plugin/wizards/debugging/config_loaders.py +0 -446
  226. empathy_software_plugin/wizards/debugging/fix_applier.py +0 -469
  227. empathy_software_plugin/wizards/debugging/language_patterns.py +0 -385
  228. empathy_software_plugin/wizards/debugging/linter_parsers.py +0 -470
  229. empathy_software_plugin/wizards/debugging/verification.py +0 -369
  230. empathy_software_plugin/wizards/enhanced_testing_wizard.py +0 -537
  231. empathy_software_plugin/wizards/memory_enhanced_debugging_wizard.py +0 -816
  232. empathy_software_plugin/wizards/multi_model_wizard.py +0 -501
  233. empathy_software_plugin/wizards/pattern_extraction_wizard.py +0 -422
  234. empathy_software_plugin/wizards/pattern_retriever_wizard.py +0 -400
  235. empathy_software_plugin/wizards/performance/__init__.py +0 -9
  236. empathy_software_plugin/wizards/performance/bottleneck_detector.py +0 -221
  237. empathy_software_plugin/wizards/performance/profiler_parsers.py +0 -278
  238. empathy_software_plugin/wizards/performance/trajectory_analyzer.py +0 -429
  239. empathy_software_plugin/wizards/performance_profiling_wizard.py +0 -305
  240. empathy_software_plugin/wizards/prompt_engineering_wizard.py +0 -425
  241. empathy_software_plugin/wizards/rag_pattern_wizard.py +0 -461
  242. empathy_software_plugin/wizards/security/__init__.py +0 -32
  243. empathy_software_plugin/wizards/security/exploit_analyzer.py +0 -290
  244. empathy_software_plugin/wizards/security/owasp_patterns.py +0 -241
  245. empathy_software_plugin/wizards/security/vulnerability_scanner.py +0 -604
  246. empathy_software_plugin/wizards/security_analysis_wizard.py +0 -322
  247. empathy_software_plugin/wizards/security_learning_wizard.py +0 -740
  248. empathy_software_plugin/wizards/tech_debt_wizard.py +0 -726
  249. empathy_software_plugin/wizards/testing/__init__.py +0 -27
  250. empathy_software_plugin/wizards/testing/coverage_analyzer.py +0 -459
  251. empathy_software_plugin/wizards/testing/quality_analyzer.py +0 -525
  252. empathy_software_plugin/wizards/testing/test_suggester.py +0 -533
  253. empathy_software_plugin/wizards/testing_wizard.py +0 -274
  254. wizards/__init__.py +0 -82
  255. wizards/admission_assessment_wizard.py +0 -644
  256. wizards/care_plan.py +0 -321
  257. wizards/clinical_assessment.py +0 -769
  258. wizards/discharge_planning.py +0 -77
  259. wizards/discharge_summary_wizard.py +0 -468
  260. wizards/dosage_calculation.py +0 -497
  261. wizards/incident_report_wizard.py +0 -454
  262. wizards/medication_reconciliation.py +0 -85
  263. wizards/nursing_assessment.py +0 -171
  264. wizards/patient_education.py +0 -654
  265. wizards/quality_improvement.py +0 -705
  266. wizards/sbar_report.py +0 -324
  267. wizards/sbar_wizard.py +0 -608
  268. wizards/shift_handoff_wizard.py +0 -535
  269. wizards/soap_note_wizard.py +0 -679
  270. wizards/treatment_plan.py +0 -15
  271. {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.1.dist-info}/WHEEL +0 -0
  272. {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.1.dist-info}/entry_points.txt +0 -0
  273. {empathy_framework-4.6.6.dist-info → empathy_framework-4.7.1.dist-info}/licenses/LICENSE +0 -0
empathy_os/memory/unified.py

@@ -26,9 +26,11 @@ Copyright 2025 Smart AI Memory, LLC
  Licensed under Fair Source 0.9
  """

+ import heapq
  import json
  import os
  import uuid
+ from collections.abc import Iterator
  from dataclasses import dataclass, field
  from datetime import datetime
  from enum import Enum
@@ -39,6 +41,7 @@ import structlog

  from .claude_memory import ClaudeMemoryConfig
  from .config import get_redis_memory
+ from .file_session import FileSessionConfig, FileSessionMemory
  from .long_term import Classification, LongTermMemory, SecureMemDocsIntegration
  from .redis_bootstrap import RedisStartMethod, RedisStatus, ensure_redis
  from .short_term import (
@@ -67,12 +70,17 @@ class MemoryConfig:
  # Environment
  environment: Environment = Environment.DEVELOPMENT

- # Short-term memory settings
+ # File-first architecture settings (always available)
+ file_session_enabled: bool = True # Use file-based session as primary
+ file_session_dir: str = ".empathy" # Directory for file-based storage
+
+ # Short-term memory settings (Redis - optional enhancement)
  redis_url: str | None = None
  redis_host: str = "localhost"
  redis_port: int = 6379
  redis_mock: bool = False
- redis_auto_start: bool = True # Auto-start Redis if not running
+ redis_auto_start: bool = False # Changed to False - file-first by default
+ redis_required: bool = False # If True, fail without Redis
  default_ttl_seconds: int = 3600 # 1 hour

  # Long-term memory settings
@@ -88,14 +96,22 @@ class MemoryConfig:
  # Pattern promotion settings
  auto_promote_threshold: float = 0.8 # Confidence threshold for auto-promotion

+ # Compact state auto-generation
+ auto_generate_compact_state: bool = True
+ compact_state_path: str = ".claude/compact-state.md"
+
  @classmethod
  def from_environment(cls) -> "MemoryConfig":
  """Create configuration from environment variables.

  Environment Variables:
  EMPATHY_ENV: Environment (development/staging/production)
+ EMPATHY_FILE_SESSION: Enable file-based session (true/false, default: true)
+ EMPATHY_FILE_SESSION_DIR: Directory for file-based storage
  REDIS_URL: Redis connection URL
  EMPATHY_REDIS_MOCK: Use mock Redis (true/false)
+ EMPATHY_REDIS_AUTO_START: Auto-start Redis (true/false, default: false)
+ EMPATHY_REDIS_REQUIRED: Fail without Redis (true/false, default: false)
  EMPATHY_STORAGE_DIR: Long-term storage directory
  EMPATHY_ENCRYPTION: Enable encryption (true/false)
  """
@@ -108,14 +124,25 @@ class MemoryConfig:

  return cls(
  environment=environment,
+ # File-first settings (always available)
+ file_session_enabled=os.getenv("EMPATHY_FILE_SESSION", "true").lower() == "true",
+ file_session_dir=os.getenv("EMPATHY_FILE_SESSION_DIR", ".empathy"),
+ # Redis settings (optional)
  redis_url=os.getenv("REDIS_URL"),
  redis_host=os.getenv("EMPATHY_REDIS_HOST", "localhost"),
  redis_port=int(os.getenv("EMPATHY_REDIS_PORT", "6379")),
  redis_mock=os.getenv("EMPATHY_REDIS_MOCK", "").lower() == "true",
- redis_auto_start=os.getenv("EMPATHY_REDIS_AUTO_START", "true").lower() == "true",
+ redis_auto_start=os.getenv("EMPATHY_REDIS_AUTO_START", "false").lower() == "true",
+ redis_required=os.getenv("EMPATHY_REDIS_REQUIRED", "false").lower() == "true",
+ # Long-term storage
  storage_dir=os.getenv("EMPATHY_STORAGE_DIR", "./memdocs_storage"),
  encryption_enabled=os.getenv("EMPATHY_ENCRYPTION", "true").lower() == "true",
  claude_memory_enabled=os.getenv("EMPATHY_CLAUDE_MEMORY", "true").lower() == "true",
+ # Compact state
+ auto_generate_compact_state=os.getenv(
+ "EMPATHY_AUTO_COMPACT_STATE", "true"
+ ).lower() == "true",
+ compact_state_path=os.getenv("EMPATHY_COMPACT_STATE_PATH", ".claude/compact-state.md"),
  )

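For orientation, a minimal sketch of driving the new settings from the environment. It relies only on what this diff shows (MemoryConfig in empathy_os/memory/unified.py and the variable names above); the import path and values are illustrative, not authoritative.

# Illustrative sketch; the module path is inferred from the file list above.
import os

from empathy_os.memory.unified import MemoryConfig

os.environ["EMPATHY_FILE_SESSION"] = "true"        # file-first session storage (default)
os.environ["EMPATHY_FILE_SESSION_DIR"] = ".empathy"
os.environ["EMPATHY_REDIS_AUTO_START"] = "false"   # 4.7.1 no longer auto-starts Redis
os.environ["EMPATHY_REDIS_REQUIRED"] = "false"     # degrade gracefully without Redis

config = MemoryConfig.from_environment()
print(config.file_session_enabled, config.redis_auto_start, config.redis_required)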
@@ -135,22 +162,49 @@ class UnifiedMemory:
  access_tier: AccessTier = AccessTier.CONTRIBUTOR

  # Internal state
- _short_term: RedisShortTermMemory | None = field(default=None, init=False)
+ _file_session: FileSessionMemory | None = field(default=None, init=False) # Primary storage
+ _short_term: RedisShortTermMemory | None = field(default=None, init=False) # Optional Redis
  _long_term: SecureMemDocsIntegration | None = field(default=None, init=False)
  _simple_long_term: LongTermMemory | None = field(default=None, init=False)
  _redis_status: RedisStatus | None = field(default=None, init=False)
  _initialized: bool = field(default=False, init=False)
+ # LRU cache for pattern lookups (pattern_id -> pattern_data)
+ _pattern_cache: dict[str, dict[str, Any]] = field(default_factory=dict, init=False)
+ _pattern_cache_max_size: int = field(default=100, init=False)

  def __post_init__(self):
  """Initialize memory backends based on configuration."""
  self._initialize_backends()

  def _initialize_backends(self):
- """Initialize short-term and long-term memory backends."""
+ """Initialize short-term and long-term memory backends.
+
+ File-First Architecture:
+ 1. FileSessionMemory is always initialized (primary storage)
+ 2. Redis is optional (for real-time features like pub/sub)
+ 3. Falls back gracefully when Redis is unavailable
+ """
  if self._initialized:
  return

- # Initialize short-term memory (Redis)
+ # Initialize file-based session memory (PRIMARY - always available)
+ if self.config.file_session_enabled:
+ try:
+ file_config = FileSessionConfig(base_dir=self.config.file_session_dir)
+ self._file_session = FileSessionMemory(
+ user_id=self.user_id,
+ config=file_config,
+ )
+ logger.info(
+ "file_session_memory_initialized",
+ base_dir=self.config.file_session_dir,
+ session_id=self._file_session._state.session_id,
+ )
+ except Exception as e:
+ logger.error("file_session_memory_failed", error=str(e))
+ self._file_session = None
+
+ # Initialize Redis short-term memory (OPTIONAL - for real-time features)
  try:
  if self.config.redis_mock:
  self._short_term = RedisShortTermMemory(use_mock=True)
@@ -181,24 +235,57 @@ class UnifiedMemory:
  use_mock=False,
  )
  else:
- self._short_term = RedisShortTermMemory(use_mock=True)
+ # File session is primary, so Redis mock is not needed
+ self._short_term = None
+ self._redis_status = RedisStatus(
+ available=False,
+ method=RedisStartMethod.MOCK,
+ message="Redis unavailable, using file-based storage",
+ )
  else:
- self._short_term = get_redis_memory()
- self._redis_status = RedisStatus(
- available=True,
- method=RedisStartMethod.ALREADY_RUNNING,
- message="Connected to existing Redis",
- )
+ # Try to connect to existing Redis
+ try:
+ self._short_term = get_redis_memory()
+ if self._short_term.is_connected():
+ self._redis_status = RedisStatus(
+ available=True,
+ method=RedisStartMethod.ALREADY_RUNNING,
+ message="Connected to existing Redis",
+ )
+ else:
+ self._short_term = None
+ self._redis_status = RedisStatus(
+ available=False,
+ method=RedisStartMethod.MOCK,
+ message="Redis not available, using file-based storage",
+ )
+ except Exception:
+ self._short_term = None
+ self._redis_status = RedisStatus(
+ available=False,
+ method=RedisStartMethod.MOCK,
+ message="Redis not available, using file-based storage",
+ )

  logger.info(
  "short_term_memory_initialized",
- mock_mode=self.config.redis_mock or not self._redis_status.available,
- redis_method=self._redis_status.method.value if self._redis_status else "unknown",
+ redis_available=self._redis_status.available if self._redis_status else False,
+ file_session_available=self._file_session is not None,
+ redis_method=self._redis_status.method.value if self._redis_status else "none",
  environment=self.config.environment.value,
  )
+
+ # Fail if Redis is required but not available
+ if self.config.redis_required and not (
+ self._redis_status and self._redis_status.available
+ ):
+ raise RuntimeError("Redis is required but not available")
+
+ except RuntimeError:
+ raise # Re-raise required Redis error
  except Exception as e:
- logger.warning("short_term_memory_failed", error=str(e))
- self._short_term = RedisShortTermMemory(use_mock=True)
+ logger.warning("redis_initialization_failed", error=str(e))
+ self._short_term = None
  self._redis_status = RedisStatus(
  available=False,
  method=RedisStartMethod.MOCK,
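The new redis_required flag turns the quiet fallback above into a hard failure. A hedged sketch of opting in; the constructor keywords are assumptions based on the self.config and self.user_id references in this hunk, since the full signature is not shown in the diff.

# Sketch only; UnifiedMemory(config=..., user_id=...) is an assumption.
from empathy_os.memory.unified import MemoryConfig, UnifiedMemory

config = MemoryConfig.from_environment()
config.redis_required = True  # strict mode: do not fall back to file-only storage

try:
    memory = UnifiedMemory(config=config, user_id="demo-user")
except RuntimeError as exc:
    # _initialize_backends() raises when Redis is required but unreachable
    print(f"Redis required but unavailable: {exc}")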
@@ -297,7 +384,10 @@ class UnifiedMemory:
  # =========================================================================

  def stash(self, key: str, value: Any, ttl_seconds: int | None = None) -> bool:
- """Store data in short-term memory with TTL.
+ """Store data in working memory with TTL.
+
+ Uses file-based session as primary storage, with optional Redis for
+ real-time features. Data is persisted to disk automatically.

  Args:
  key: Storage key
@@ -308,29 +398,41 @@ class UnifiedMemory:
  True if stored successfully

  """
- if not self._short_term:
- logger.warning("short_term_memory_unavailable")
- return False
-
- # Map ttl_seconds to TTLStrategy (use WORKING_RESULTS as default)
- ttl_strategy = TTLStrategy.WORKING_RESULTS
- if ttl_seconds is not None:
- # Find closest TTL strategy or use working results
- if ttl_seconds <= TTLStrategy.COORDINATION.value:
- ttl_strategy = TTLStrategy.COORDINATION
- elif ttl_seconds <= TTLStrategy.SESSION.value:
- ttl_strategy = TTLStrategy.SESSION
- elif ttl_seconds <= TTLStrategy.WORKING_RESULTS.value:
- ttl_strategy = TTLStrategy.WORKING_RESULTS
- elif ttl_seconds <= TTLStrategy.STAGED_PATTERNS.value:
- ttl_strategy = TTLStrategy.STAGED_PATTERNS
- else:
- ttl_strategy = TTLStrategy.CONFLICT_CONTEXT
+ ttl = ttl_seconds or self.config.default_ttl_seconds
+
+ # Primary: File session memory (always available)
+ if self._file_session:
+ self._file_session.stash(key, value, ttl=ttl)
+
+ # Optional: Redis for real-time sync
+ if self._short_term and self._redis_status and self._redis_status.available:
+ # Map ttl_seconds to TTLStrategy
+ ttl_strategy = TTLStrategy.WORKING_RESULTS
+ if ttl_seconds is not None:
+ if ttl_seconds <= TTLStrategy.COORDINATION.value:
+ ttl_strategy = TTLStrategy.COORDINATION
+ elif ttl_seconds <= TTLStrategy.SESSION.value:
+ ttl_strategy = TTLStrategy.SESSION
+ elif ttl_seconds <= TTLStrategy.WORKING_RESULTS.value:
+ ttl_strategy = TTLStrategy.WORKING_RESULTS
+ elif ttl_seconds <= TTLStrategy.STAGED_PATTERNS.value:
+ ttl_strategy = TTLStrategy.STAGED_PATTERNS
+ else:
+ ttl_strategy = TTLStrategy.CONFLICT_CONTEXT
+
+ try:
+ self._short_term.stash(key, value, self.credentials, ttl_strategy)
+ except Exception as e:
+ logger.debug("redis_stash_failed", key=key, error=str(e))

- return self._short_term.stash(key, value, self.credentials, ttl_strategy)
+ # Return True if at least one backend succeeded
+ return self._file_session is not None

  def retrieve(self, key: str) -> Any | None:
- """Retrieve data from short-term memory.
+ """Retrieve data from working memory.
+
+ Checks Redis first (if available) for faster access, then falls back
+ to file-based session storage.

  Args:
  key: Storage key
@@ -339,10 +441,20 @@ class UnifiedMemory:
  Stored data or None if not found

  """
- if not self._short_term:
- return None
+ # Try Redis first (faster, if available)
+ if self._short_term and self._redis_status and self._redis_status.available:
+ try:
+ result = self._short_term.retrieve(key, self.credentials)
+ if result is not None:
+ return result
+ except Exception as e:
+ logger.debug("redis_retrieve_failed", key=key, error=str(e))
+
+ # Fall back to file session (primary storage)
+ if self._file_session:
+ return self._file_session.retrieve(key)

- return self._short_term.retrieve(key, self.credentials)
+ return None

  def stage_pattern(
  self,
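In short, the write path is now file-first with Redis as a best-effort mirror, and the read path prefers Redis before the file store. A small usage sketch, assuming a constructed UnifiedMemory instance named memory as in the earlier examples:

# Usage sketch; `memory` is assumed to be a constructed UnifiedMemory instance.
memory.stash("active_task", {"step": 3, "status": "in_progress"}, ttl_seconds=1800)

# retrieve() consults Redis first when connected, then the .empathy/ file store.
task = memory.retrieve("active_task")
print(task)  # expected to come back from the file store even with no Redis running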
@@ -455,16 +567,30 @@ class UnifiedMemory:
  logger.error("persist_pattern_failed", error=str(e))
  return None

+ def _cache_pattern(self, pattern_id: str, pattern: dict[str, Any]) -> None:
+ """Add pattern to LRU cache, evicting oldest if at capacity."""
+ # Simple LRU: remove oldest entry if at max size
+ if len(self._pattern_cache) >= self._pattern_cache_max_size:
+ # Remove first (oldest) item
+ oldest_key = next(iter(self._pattern_cache))
+ del self._pattern_cache[oldest_key]
+
+ self._pattern_cache[pattern_id] = pattern
+
  def recall_pattern(
  self,
  pattern_id: str,
  check_permissions: bool = True,
+ use_cache: bool = True,
  ) -> dict[str, Any] | None:
  """Retrieve a pattern from long-term memory.

+ Uses LRU cache for frequently accessed patterns to reduce I/O.
+
  Args:
  pattern_id: ID of pattern to retrieve
  check_permissions: Verify user has access to pattern
+ use_cache: Whether to use/update the pattern cache (default: True)

  Returns:
  Pattern data with content and metadata, or None if not found
@@ -474,16 +600,119 @@ class UnifiedMemory:
  logger.error("long_term_memory_unavailable")
  return None

+ # Check cache first (if enabled)
+ if use_cache and pattern_id in self._pattern_cache:
+ logger.debug("pattern_cache_hit", pattern_id=pattern_id)
+ return self._pattern_cache[pattern_id]
+
  try:
- return self._long_term.retrieve_pattern(
+ pattern = self._long_term.retrieve_pattern(
  pattern_id=pattern_id,
  user_id=self.user_id,
  check_permissions=check_permissions,
  )
+
+ # Cache the result (if enabled and pattern found)
+ if use_cache and pattern:
+ self._cache_pattern(pattern_id, pattern)
+
+ return pattern
  except Exception as e:
  logger.error("recall_pattern_failed", pattern_id=pattern_id, error=str(e))
  return None

+ def clear_pattern_cache(self) -> int:
+ """Clear the pattern lookup cache.
+
+ Returns:
+ Number of entries cleared
+ """
+ count = len(self._pattern_cache)
+ self._pattern_cache.clear()
+ logger.debug("pattern_cache_cleared", entries=count)
+ return count
+
+ def _score_pattern(
+ self,
+ pattern: dict[str, Any],
+ query_lower: str,
+ query_words: list[str],
+ ) -> float:
+ """Calculate relevance score for a pattern.
+
+ Args:
+ pattern: Pattern data dictionary
+ query_lower: Lowercase query string
+ query_words: Pre-split query words (length >= 3)
+
+ Returns:
+ Relevance score (0.0 if no match)
+ """
+ if not query_lower:
+ return 1.0 # No query - all patterns have equal score
+
+ content = str(pattern.get("content", "")).lower()
+ metadata_str = str(pattern.get("metadata", {})).lower()
+
+ score = 0.0
+
+ # Exact phrase match in content (highest score)
+ if query_lower in content:
+ score += 10.0
+
+ # Keyword matching (medium score)
+ for word in query_words:
+ if word in content:
+ score += 2.0
+ if word in metadata_str:
+ score += 1.0
+
+ return score
+
+ def _filter_and_score_patterns(
+ self,
+ query: str | None,
+ pattern_type: str | None,
+ classification: Classification | None,
+ ) -> Iterator[tuple[float, dict[str, Any]]]:
+ """Generator that filters and scores patterns.
+
+ Memory-efficient: yields (score, pattern) tuples one at a time.
+ Use with heapq.nlargest() for efficient top-N selection.
+
+ Args:
+ query: Search query (case-insensitive)
+ pattern_type: Filter by pattern type
+ classification: Filter by classification level
+
+ Yields:
+ Tuples of (score, pattern) for matching patterns
+ """
+ query_lower = query.lower() if query else ""
+ query_words = [w for w in query_lower.split() if len(w) >= 3] if query else []
+
+ for pattern in self._iter_all_patterns():
+ # Apply filters
+ if pattern_type and pattern.get("pattern_type") != pattern_type:
+ continue
+
+ if classification:
+ pattern_class = pattern.get("classification")
+ if isinstance(classification, Classification):
+ if pattern_class != classification.value:
+ continue
+ elif pattern_class != classification:
+ continue
+
+ # Calculate relevance score
+ score = self._score_pattern(pattern, query_lower, query_words)
+
+ # Skip if no matches found (when query is provided)
+ if query and score == 0.0:
+ continue
+
+ yield (score, pattern)
+
  def search_patterns(
  self,
  query: str | None = None,
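A note on the cache above: _cache_pattern evicts the first-inserted key and recall_pattern does not re-insert on a hit, so eviction follows insertion order (effectively FIFO) rather than strict least-recently-used. For comparison only, a small self-contained sketch of a recency-refreshing variant built on collections.OrderedDict; this is not the shipped implementation.

# Comparison sketch only; the framework's actual cache is the dict-based one above.
from collections import OrderedDict
from typing import Any


class LRUPatternCache:
    """Recency-aware cache: hits move entries to the back, evictions pop the front."""

    def __init__(self, max_size: int = 100) -> None:
        self._data: OrderedDict[str, dict[str, Any]] = OrderedDict()
        self._max_size = max_size

    def get(self, pattern_id: str) -> dict[str, Any] | None:
        pattern = self._data.get(pattern_id)
        if pattern is not None:
            self._data.move_to_end(pattern_id)  # refresh recency on hit
        return pattern

    def put(self, pattern_id: str, pattern: dict[str, Any]) -> None:
        if pattern_id in self._data:
            self._data.move_to_end(pattern_id)
        self._data[pattern_id] = pattern
        if len(self._data) > self._max_size:
            self._data.popitem(last=False)  # evict the least recently used entry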
@@ -499,6 +728,9 @@ class UnifiedMemory:
  3. Relevance scoring (exact matches rank higher)
  4. Results sorted by relevance

+ Memory-efficient: Uses generators and heapq.nlargest() to avoid
+ loading all patterns into memory. Only keeps top N results.
+
  Args:
  query: Text to search for in pattern content (case-insensitive)
  pattern_type: Filter by pattern type (e.g., "meta_workflow_execution")
@@ -520,65 +752,74 @@ class UnifiedMemory:
  return []

  try:
- # Get all patterns from storage
- all_patterns = self._get_all_patterns()
+ # Use heapq.nlargest for memory-efficient top-N selection
+ # This avoids loading all patterns into memory at once
+ scored_patterns = heapq.nlargest(
+ limit,
+ self._filter_and_score_patterns(query, pattern_type, classification),
+ key=lambda x: x[0],
+ )

- # Filter and score patterns
- scored_patterns = []
- query_lower = query.lower() if query else ""
+ # Return patterns without scores
+ return [pattern for _, pattern in scored_patterns]

- for pattern in all_patterns:
- # Apply filters
- if pattern_type and pattern.get("pattern_type") != pattern_type:
- continue
+ except Exception as e:
+ logger.error("pattern_search_failed", error=str(e))
+ return []

- if classification:
- pattern_class = pattern.get("classification")
- if isinstance(classification, Classification):
- if pattern_class != classification.value:
- continue
- elif pattern_class != classification:
- continue
+ def _get_storage_dir(self) -> Path | None:
+ """Get the storage directory from long-term memory backend.

- # Calculate relevance score
- score = 0.0
-
- if query:
- content = str(pattern.get("content", "")).lower()
- metadata_str = str(pattern.get("metadata", {})).lower()
-
- # Exact phrase match in content (highest score)
- if query_lower in content:
- score += 10.0
-
- # Keyword matching (medium score)
- query_words = query_lower.split()
- for word in query_words:
- if len(word) < 3: # Skip short words
- continue
- if word in content:
- score += 2.0
- if word in metadata_str:
- score += 1.0
-
- # Skip if no matches found
- if score == 0.0:
- continue
- else:
- # No query - all filtered patterns have equal score
- score = 1.0
+ Returns:
+ Path to storage directory, or None if unavailable.
+ """
+ if not self._long_term:
+ return None

- scored_patterns.append((score, pattern))
+ # Try different ways to access storage directory
+ if hasattr(self._long_term, "storage_dir"):
+ return Path(self._long_term.storage_dir)
+ elif hasattr(self._long_term, "storage"):
+ if hasattr(self._long_term.storage, "storage_dir"):
+ return Path(self._long_term.storage.storage_dir)
+ elif hasattr(self._long_term, "_storage"):
+ if hasattr(self._long_term._storage, "storage_dir"):
+ return Path(self._long_term._storage.storage_dir)

- # Sort by relevance (highest score first)
- scored_patterns.sort(key=lambda x: x[0], reverse=True)
+ return None

- # Return top N patterns (without scores)
- return [pattern for _, pattern in scored_patterns[:limit]]
+ def _iter_all_patterns(self) -> Iterator[dict[str, Any]]:
+ """Iterate over all patterns from long-term memory storage.

- except Exception as e:
- logger.error("pattern_search_failed", error=str(e))
- return []
+ Memory-efficient generator that yields patterns one at a time,
+ avoiding loading all patterns into memory simultaneously.
+
+ Yields:
+ Pattern data dictionaries
+
+ Note:
+ This is O(1) memory vs O(n) for _get_all_patterns().
+ Use this for large datasets or when streaming is acceptable.
+ """
+ storage_dir = self._get_storage_dir()
+ if not storage_dir:
+ logger.warning("cannot_access_storage_directory")
+ return
+
+ if not storage_dir.exists():
+ return
+
+ # Yield patterns one at a time (memory-efficient)
+ for pattern_file in storage_dir.rglob("*.json"):
+ try:
+ with pattern_file.open("r", encoding="utf-8") as f:
+ yield json.load(f)
+ except json.JSONDecodeError as e:
+ logger.debug("pattern_json_decode_failed", file=str(pattern_file), error=str(e))
+ continue
+ except Exception as e:
+ logger.debug("pattern_load_failed", file=str(pattern_file), error=str(e))
+ continue

  def _get_all_patterns(self) -> list[dict[str, Any]]:
  """Get all patterns from long-term memory storage.
@@ -595,57 +836,13 @@ class UnifiedMemory:
  List of all stored patterns

  Note:
- This performs a full scan and is O(n). For large datasets,
- use indexed storage backends.
+ This performs a full scan and is O(n) memory. For large datasets,
+ use _iter_all_patterns() generator instead.
  """
- if not self._long_term:
- return []
-
  try:
- # Get storage directory from long-term memory
- storage_dir = None
-
- # Try different ways to access storage directory
- if hasattr(self._long_term, 'storage_dir'):
- storage_dir = Path(self._long_term.storage_dir)
- elif hasattr(self._long_term, 'storage'):
- if hasattr(self._long_term.storage, 'storage_dir'):
- storage_dir = Path(self._long_term.storage.storage_dir)
- elif hasattr(self._long_term, '_storage'):
- if hasattr(self._long_term._storage, 'storage_dir'):
- storage_dir = Path(self._long_term._storage.storage_dir)
-
- if not storage_dir:
- logger.warning("cannot_access_storage_directory")
- return []
-
- patterns = []
-
- # Scan for pattern files (*.json)
- if storage_dir.exists():
- for pattern_file in storage_dir.rglob("*.json"):
- try:
- with pattern_file.open('r', encoding='utf-8') as f:
- pattern_data = json.load(f)
- patterns.append(pattern_data)
- except json.JSONDecodeError as e:
- logger.debug(
- "pattern_json_decode_failed",
- file=str(pattern_file),
- error=str(e)
- )
- continue
- except Exception as e:
- logger.debug(
- "pattern_load_failed",
- file=str(pattern_file),
- error=str(e)
- )
- continue
-
+ patterns = list(self._iter_all_patterns())
  logger.debug("patterns_loaded", count=len(patterns))
  return patterns
-
  except Exception as e:
  logger.error("get_all_patterns_failed", error=str(e))
  return []
@@ -792,6 +989,11 @@ class UnifiedMemory:
  redis_info["port"] = self._redis_status.port

  return {
+ "file_session": {
+ "available": self._file_session is not None,
+ "session_id": self._file_session._state.session_id if self._file_session else None,
+ "base_dir": self.config.file_session_dir,
+ },
  "short_term": redis_info,
  "long_term": {
  "available": self.has_long_term,
@@ -800,3 +1002,267 @@ class UnifiedMemory:
  },
  "environment": self.config.environment.value,
  }
+
+ # =========================================================================
+ # CAPABILITY DETECTION (File-First Architecture)
+ # =========================================================================
+
+ @property
+ def has_file_session(self) -> bool:
+ """Check if file-based session memory is available (always True if enabled)."""
+ return self._file_session is not None
+
+ @property
+ def file_session(self) -> FileSessionMemory:
+ """Get file session memory backend for direct access.
+
+ Returns:
+ FileSessionMemory instance
+
+ Raises:
+ RuntimeError: If file session memory is not initialized
+ """
+ if self._file_session is None:
+ raise RuntimeError("File session memory not initialized")
+ return self._file_session
+
+ def supports_realtime(self) -> bool:
+ """Check if real-time features are available (requires Redis).
+
+ Real-time features include:
+ - Pub/Sub messaging between agents
+ - Cross-session coordination
+ - Distributed task queues
+
+ Returns:
+ True if Redis is available and connected
+ """
+ return self.using_real_redis
+
+ def supports_distributed(self) -> bool:
+ """Check if distributed features are available (requires Redis).
+
+ Distributed features include:
+ - Multi-process coordination
+ - Cross-session state sharing
+ - Agent discovery
+
+ Returns:
+ True if Redis is available and connected
+ """
+ return self.using_real_redis
+
+ def supports_persistence(self) -> bool:
+ """Check if persistence is available (always True with file-first).
+
+ Returns:
+ True if file session or long-term memory is available
+ """
+ return self._file_session is not None or self._long_term is not None
+
+ def get_capabilities(self) -> dict[str, bool]:
+ """Get a summary of available memory capabilities.
+
+ Returns:
+ Dictionary mapping capability names to availability
+ """
+ return {
+ "file_session": self.has_file_session,
+ "redis": self.using_real_redis,
+ "long_term": self.has_long_term,
+ "persistence": self.supports_persistence(),
+ "realtime": self.supports_realtime(),
+ "distributed": self.supports_distributed(),
+ "encryption": self.config.encryption_enabled and self.has_long_term,
+ }
+
+ # =========================================================================
+ # COMPACT STATE GENERATION
+ # =========================================================================
+
+ def generate_compact_state(self) -> str:
+ """Generate SBAR-format compact state from current session.
+
+ Creates a human-readable summary of the current session state,
+ suitable for Claude Code's .claude/compact-state.md file.
+
+ Returns:
+ Markdown-formatted compact state string
+ """
+ from datetime import datetime
+
+ lines = [
+ "# Compact State - Session Handoff",
+ "",
+ f"**Generated:** {datetime.now().strftime('%Y-%m-%d %H:%M')}",
+ ]
+
+ # Add session info
+ if self._file_session:
+ session = self._file_session._state
+ lines.extend([
+ f"**Session ID:** {session.session_id}",
+ f"**User ID:** {session.user_id}",
+ "",
+ ])
+
+ lines.extend([
+ "## SBAR Handoff",
+ "",
+ "### Situation",
+ ])
+
+ # Get context from file session
+ context = {}
+ if self._file_session:
+ context = self._file_session.get_all_context()
+
+ situation = context.get("situation", "Session in progress.")
+ background = context.get("background", "No background information recorded.")
+ assessment = context.get("assessment", "No assessment recorded.")
+ recommendation = context.get("recommendation", "Continue with current task.")
+
+ lines.extend([
+ situation,
+ "",
+ "### Background",
+ background,
+ "",
+ "### Assessment",
+ assessment,
+ "",
+ "### Recommendation",
+ recommendation,
+ "",
+ ])
+
+ # Add working memory summary
+ if self._file_session:
+ working_keys = list(self._file_session._state.working_memory.keys())
+ if working_keys:
+ lines.extend([
+ "## Working Memory",
+ "",
+ f"**Active keys:** {len(working_keys)}",
+ "",
+ ])
+ for key in working_keys[:10]: # Show max 10
+ lines.append(f"- `{key}`")
+ if len(working_keys) > 10:
+ lines.append(f"- ... and {len(working_keys) - 10} more")
+ lines.append("")
+
+ # Add staged patterns summary
+ if self._file_session:
+ staged = list(self._file_session._state.staged_patterns.values())
+ if staged:
+ lines.extend([
+ "## Staged Patterns",
+ "",
+ f"**Pending validation:** {len(staged)}",
+ "",
+ ])
+ for pattern in staged[:5]: # Show max 5
+ lines.append(f"- {pattern.name} ({pattern.pattern_type}, conf: {pattern.confidence:.2f})")
+ if len(staged) > 5:
+ lines.append(f"- ... and {len(staged) - 5} more")
+ lines.append("")
+
+ # Add capabilities
+ caps = self.get_capabilities()
+ lines.extend([
+ "## Capabilities",
+ "",
+ f"- File session: {'Yes' if caps['file_session'] else 'No'}",
+ f"- Redis: {'Yes' if caps['redis'] else 'No'}",
+ f"- Long-term memory: {'Yes' if caps['long_term'] else 'No'}",
+ f"- Real-time sync: {'Yes' if caps['realtime'] else 'No'}",
+ "",
+ ])
+
+ return "\n".join(lines)
+
+ def export_to_claude_md(self, path: str | None = None) -> Path:
+ """Export current session state to Claude Code's compact-state.md.
+
+ Args:
+ path: Path to write to (defaults to config.compact_state_path)
+
+ Returns:
+ Path where state was written
+ """
+ from empathy_os.config import _validate_file_path
+
+ path = path or self.config.compact_state_path
+ validated_path = _validate_file_path(path)
+
+ # Ensure parent directory exists
+ validated_path.parent.mkdir(parents=True, exist_ok=True)
+
+ # Generate and write compact state
+ content = self.generate_compact_state()
+ validated_path.write_text(content, encoding="utf-8")
+
+ logger.info("compact_state_exported", path=str(validated_path))
+ return validated_path
+
+ def set_handoff(
+ self,
+ situation: str,
+ background: str,
+ assessment: str,
+ recommendation: str,
+ **extra_context,
+ ) -> None:
+ """Set SBAR handoff context for session continuity.
+
+ This data is used by generate_compact_state() and export_to_claude_md().
+
+ Args:
+ situation: Current situation summary
+ background: Relevant background information
+ assessment: Assessment of progress/state
+ recommendation: Recommended next steps
+ **extra_context: Additional context key-value pairs
+ """
+ if not self._file_session:
+ logger.warning("file_session_not_available")
+ return
+
+ self._file_session.set_context("situation", situation)
+ self._file_session.set_context("background", background)
+ self._file_session.set_context("assessment", assessment)
+ self._file_session.set_context("recommendation", recommendation)
+
+ for key, value in extra_context.items():
+ self._file_session.set_context(key, value)
+
+ # Auto-export if configured
+ if self.config.auto_generate_compact_state:
+ self.export_to_claude_md()
+
+ # =========================================================================
+ # LIFECYCLE
+ # =========================================================================
+
+ def save(self) -> None:
+ """Explicitly save all memory state."""
+ if self._file_session:
+ self._file_session.save()
+ logger.debug("memory_saved")
+
+ def close(self) -> None:
+ """Close all memory backends and save state."""
+ if self._file_session:
+ self._file_session.close()
+
+ if self._short_term and hasattr(self._short_term, "close"):
+ self._short_term.close()
+
+ logger.info("unified_memory_closed")
+
+ def __enter__(self) -> "UnifiedMemory":
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb) -> None:
+ self.close()
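Taken together, the new capability checks, SBAR handoff, and context-manager lifecycle compose roughly as follows. The constructor keywords are again assumptions (see the earlier sketches), and the handoff text is placeholder content.

# End-to-end sketch under the same constructor assumptions as the earlier examples.
from empathy_os.memory.unified import MemoryConfig, UnifiedMemory

config = MemoryConfig.from_environment()
with UnifiedMemory(config=config, user_id="demo-user") as memory:
    print(memory.get_capabilities())  # e.g. {'file_session': True, 'redis': False, ...}

    memory.stash("current_branch", "feature/file-first-memory")
    memory.set_handoff(
        situation="Migrating session storage to the file-first backend.",
        background="Redis is optional as of 4.7.1; sessions persist under .empathy/.",
        assessment="stash/retrieve paths migrated; pattern search now streams from disk.",
        recommendation="Re-run the integration tests with Redis stopped.",
    )
    # When auto_generate_compact_state is True (the default), set_handoff()
    # also writes .claude/compact-state.md via export_to_claude_md().
# __exit__ -> close(): saves the file-backed session and closes Redis if it was open.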