claude_memory 0.2.0 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (104)
  1. checksums.yaml +4 -4
  2. data/.claude/.mind.mv2.o2N83S +0 -0
  3. data/.claude/CLAUDE.md +1 -0
  4. data/.claude/rules/claude_memory.generated.md +28 -9
  5. data/.claude/settings.local.json +9 -1
  6. data/.claude/skills/check-memory/SKILL.md +77 -0
  7. data/.claude/skills/improve/SKILL.md +532 -0
  8. data/.claude/skills/improve/feature-patterns.md +1221 -0
  9. data/.claude/skills/quality-update/SKILL.md +229 -0
  10. data/.claude/skills/quality-update/implementation-guide.md +346 -0
  11. data/.claude/skills/review-commit/SKILL.md +199 -0
  12. data/.claude/skills/review-for-quality/SKILL.md +154 -0
  13. data/.claude/skills/review-for-quality/expert-checklists.md +79 -0
  14. data/.claude/skills/setup-memory/SKILL.md +168 -0
  15. data/.claude/skills/study-repo/SKILL.md +307 -0
  16. data/.claude/skills/study-repo/analysis-template.md +323 -0
  17. data/.claude/skills/study-repo/focus-examples.md +327 -0
  18. data/CHANGELOG.md +133 -0
  19. data/CLAUDE.md +130 -11
  20. data/README.md +117 -10
  21. data/db/migrations/001_create_initial_schema.rb +117 -0
  22. data/db/migrations/002_add_project_scoping.rb +33 -0
  23. data/db/migrations/003_add_session_metadata.rb +42 -0
  24. data/db/migrations/004_add_fact_embeddings.rb +20 -0
  25. data/db/migrations/005_add_incremental_sync.rb +21 -0
  26. data/db/migrations/006_add_operation_tracking.rb +40 -0
  27. data/db/migrations/007_add_ingestion_metrics.rb +26 -0
  28. data/docs/.claude/mind.mv2.lock +0 -0
  29. data/docs/GETTING_STARTED.md +587 -0
  30. data/docs/RELEASE_NOTES_v0.2.0.md +0 -1
  31. data/docs/RUBY_COMMUNITY_POST_v0.2.0.md +0 -2
  32. data/docs/architecture.md +9 -8
  33. data/docs/auto_init_design.md +230 -0
  34. data/docs/improvements.md +557 -731
  35. data/docs/influence/.gitkeep +13 -0
  36. data/docs/influence/grepai.md +933 -0
  37. data/docs/influence/qmd.md +2195 -0
  38. data/docs/plugin.md +257 -11
  39. data/docs/quality_review.md +472 -1273
  40. data/docs/remaining_improvements.md +330 -0
  41. data/lefthook.yml +13 -0
  42. data/lib/claude_memory/commands/checks/claude_md_check.rb +41 -0
  43. data/lib/claude_memory/commands/checks/database_check.rb +120 -0
  44. data/lib/claude_memory/commands/checks/hooks_check.rb +112 -0
  45. data/lib/claude_memory/commands/checks/reporter.rb +110 -0
  46. data/lib/claude_memory/commands/checks/snapshot_check.rb +30 -0
  47. data/lib/claude_memory/commands/doctor_command.rb +12 -129
  48. data/lib/claude_memory/commands/help_command.rb +1 -0
  49. data/lib/claude_memory/commands/hook_command.rb +9 -2
  50. data/lib/claude_memory/commands/index_command.rb +169 -0
  51. data/lib/claude_memory/commands/ingest_command.rb +1 -1
  52. data/lib/claude_memory/commands/init_command.rb +5 -197
  53. data/lib/claude_memory/commands/initializers/database_ensurer.rb +30 -0
  54. data/lib/claude_memory/commands/initializers/global_initializer.rb +85 -0
  55. data/lib/claude_memory/commands/initializers/hooks_configurator.rb +156 -0
  56. data/lib/claude_memory/commands/initializers/mcp_configurator.rb +56 -0
  57. data/lib/claude_memory/commands/initializers/memory_instructions_writer.rb +135 -0
  58. data/lib/claude_memory/commands/initializers/project_initializer.rb +111 -0
  59. data/lib/claude_memory/commands/recover_command.rb +75 -0
  60. data/lib/claude_memory/commands/registry.rb +5 -1
  61. data/lib/claude_memory/commands/stats_command.rb +239 -0
  62. data/lib/claude_memory/commands/uninstall_command.rb +226 -0
  63. data/lib/claude_memory/core/batch_loader.rb +32 -0
  64. data/lib/claude_memory/core/concept_ranker.rb +73 -0
  65. data/lib/claude_memory/core/embedding_candidate_builder.rb +37 -0
  66. data/lib/claude_memory/core/fact_collector.rb +51 -0
  67. data/lib/claude_memory/core/fact_query_builder.rb +154 -0
  68. data/lib/claude_memory/core/fact_ranker.rb +113 -0
  69. data/lib/claude_memory/core/result_builder.rb +54 -0
  70. data/lib/claude_memory/core/result_sorter.rb +25 -0
  71. data/lib/claude_memory/core/scope_filter.rb +61 -0
  72. data/lib/claude_memory/core/text_builder.rb +29 -0
  73. data/lib/claude_memory/embeddings/generator.rb +161 -0
  74. data/lib/claude_memory/embeddings/similarity.rb +69 -0
  75. data/lib/claude_memory/hook/handler.rb +4 -3
  76. data/lib/claude_memory/index/lexical_fts.rb +7 -2
  77. data/lib/claude_memory/infrastructure/operation_tracker.rb +158 -0
  78. data/lib/claude_memory/infrastructure/schema_validator.rb +206 -0
  79. data/lib/claude_memory/ingest/content_sanitizer.rb +6 -7
  80. data/lib/claude_memory/ingest/ingester.rb +99 -15
  81. data/lib/claude_memory/ingest/metadata_extractor.rb +57 -0
  82. data/lib/claude_memory/ingest/tool_extractor.rb +71 -0
  83. data/lib/claude_memory/mcp/response_formatter.rb +331 -0
  84. data/lib/claude_memory/mcp/server.rb +19 -0
  85. data/lib/claude_memory/mcp/setup_status_analyzer.rb +73 -0
  86. data/lib/claude_memory/mcp/tool_definitions.rb +279 -0
  87. data/lib/claude_memory/mcp/tool_helpers.rb +80 -0
  88. data/lib/claude_memory/mcp/tools.rb +330 -320
  89. data/lib/claude_memory/recall/dual_query_template.rb +63 -0
  90. data/lib/claude_memory/recall.rb +304 -237
  91. data/lib/claude_memory/resolve/resolver.rb +52 -49
  92. data/lib/claude_memory/store/sqlite_store.rb +210 -144
  93. data/lib/claude_memory/store/store_manager.rb +6 -6
  94. data/lib/claude_memory/sweep/sweeper.rb +6 -0
  95. data/lib/claude_memory/version.rb +1 -1
  96. data/lib/claude_memory.rb +35 -3
  97. metadata +71 -11
  98. data/.claude/.mind.mv2.aLCUZd +0 -0
  99. data/.claude/memory.sqlite3 +0 -0
  100. data/.mcp.json +0 -11
  101. data/docs/{feature_adoption_plan.md → plans/feature_adoption_plan.md} +0 -0
  102. data/docs/{feature_adoption_plan_revised.md → plans/feature_adoption_plan_revised.md} +0 -0
  103. data/docs/{plan.md → plans/plan.md} +0 -0
  104. data/docs/{updated_plan.md → plans/updated_plan.md} +0 -0
data/lib/claude_memory/ingest/ingester.rb (+99 -15)
@@ -5,44 +5,128 @@ require "digest"
 module ClaudeMemory
   module Ingest
     class Ingester
-      def initialize(store, fts: nil, env: ENV)
+      def initialize(store, fts: nil, env: ENV, metadata_extractor: nil, tool_extractor: nil)
        @store = store
        @fts = fts || Index::LexicalFTS.new(store)
-        @env = env
+        @config = Configuration.new(env)
+        @metadata_extractor = metadata_extractor || MetadataExtractor.new
+        @tool_extractor = tool_extractor || ToolExtractor.new
      end

      def ingest(source:, session_id:, transcript_path:, project_path: nil)
+        # Check if file has been modified since last ingestion (incremental sync)
+        unless should_ingest?(transcript_path)
+          return {status: :skipped, bytes_read: 0, reason: "unchanged"}
+        end
+
        current_offset = @store.get_delta_cursor(session_id, transcript_path) || 0
        delta, new_offset = TranscriptReader.read_delta(transcript_path, current_offset)

        return {status: :no_change, bytes_read: 0} if delta.nil?

+        # Extract session metadata and tool calls before sanitization
+        metadata = @metadata_extractor.extract(delta)
+        tool_calls = @tool_extractor.extract(delta)
+
        # Strip privacy tags before storing
        delta = ContentSanitizer.strip_tags(delta)

        resolved_project = project_path || detect_project_path

+        # Get source file mtime for incremental sync
+        source_mtime = File.exist?(transcript_path) ? File.mtime(transcript_path).utc.iso8601 : nil
+
        text_hash = Digest::SHA256.hexdigest(delta)
-        content_id = @store.upsert_content_item(
-          source: source,
-          session_id: session_id,
-          transcript_path: transcript_path,
-          project_path: resolved_project,
-          text_hash: text_hash,
-          byte_len: delta.bytesize,
-          raw_text: delta
-        )
-
-        @fts.index_content_item(content_id, delta)
-        @store.update_delta_cursor(session_id, transcript_path, new_offset)
+
+        # Wrap entire ingestion pipeline in transaction for atomicity
+        # If any step fails, cursor position is not updated, allowing retry
+        content_id = nil
+        begin
+          content_id = with_retry do
+            @store.db.transaction do
+              content_id = @store.upsert_content_item(
+                source: source,
+                session_id: session_id,
+                transcript_path: transcript_path,
+                project_path: resolved_project,
+                text_hash: text_hash,
+                byte_len: delta.bytesize,
+                raw_text: delta,
+                git_branch: metadata[:git_branch],
+                cwd: metadata[:cwd],
+                claude_version: metadata[:claude_version],
+                thinking_level: metadata[:thinking_level],
+                source_mtime: source_mtime
+              )
+
+              # Store tool calls if any were extracted
+              @store.insert_tool_calls(content_id, tool_calls) unless tool_calls.empty?
+
+              # FTS indexing (FTS5 supports transactions)
+              @fts.index_content_item(content_id, delta)
+
+              # Update cursor LAST - only after all other operations succeed
+              # This ensures that if any step fails, we can retry from the same offset
+              @store.update_delta_cursor(session_id, transcript_path, new_offset)
+
+              content_id
+            end
+          end
+        rescue Extralite::BusyError => e
+          # Re-raise BusyError with context after all retries exhausted
+          raise StandardError, "Ingestion failed for session #{session_id} after retries: #{e.message}"
+        rescue => e
+          # Re-raise other errors with context for better error messages
+          raise StandardError, "Ingestion failed for session #{session_id}: #{e.message}"
+        end

        {status: :ingested, content_id: content_id, bytes_read: delta.bytesize, project_path: resolved_project}
      end

      private

+      # Retry database operations with exponential backoff + jitter
+      # This handles concurrent access when MCP server and hooks both write simultaneously
+      # With busy_timeout=30000ms, each attempt waits up to 30s before raising BusyError
+      # Total potential wait time: 30s * 10 attempts + backoff delays = ~5 minutes max
+      def with_retry(max_attempts: 10, base_delay: 0.2, max_delay: 5.0)
+        attempt = 0
+        begin
+          attempt += 1
+          yield
+        rescue Extralite::BusyError, Sequel::DatabaseError => e
+          # Handle busy errors from extralite adapter
+          is_busy = e.is_a?(Extralite::BusyError) || e.message.include?("busy")
+          if is_busy && attempt < max_attempts
+            # Exponential backoff with jitter to avoid thundering herd
+            exponential_delay = [base_delay * (2**(attempt - 1)), max_delay].min
+            jitter = rand * exponential_delay * 0.5
+            total_delay = exponential_delay + jitter
+            sleep(total_delay)
+            retry
+          elsif is_busy
+            raise
+          else
+            # Not a busy error, re-raise immediately
+            raise
+          end
+        end
+      end
+
+      def should_ingest?(transcript_path)
+        return true unless File.exist?(transcript_path)
+
+        file_mtime = File.mtime(transcript_path).utc.iso8601
+
+        # Check if we've already processed this version of the file
+        existing = @store.content_item_by_transcript_and_mtime(transcript_path, file_mtime)
+
+        # Ingest if we haven't seen this version before
+        existing.nil?
+      end
+
      def detect_project_path
-        @env["CLAUDE_PROJECT_DIR"] || Dir.pwd
+        @config.project_dir
      end
    end
  end
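
For orientation (not part of the released diff): a minimal Ruby sketch of the backoff schedule the new with_retry defaults produce. A sleep only happens when another attempt follows, so at most nine backoffs occur before the tenth attempt re-raises; the "~5 minutes max" quoted in the comment comes from adding the up-to-30s busy_timeout each attempt can spend waiting.

    base_delay, max_delay = 0.2, 5.0
    delays = (1..9).map { |attempt| [base_delay * (2**(attempt - 1)), max_delay].min }
    # => [0.2, 0.4, 0.8, 1.6, 3.2, 5.0, 5.0, 5.0, 5.0]  (each padded with up to 50% random jitter)
    delays.sum  # => ~26.2s of pure backoff; busy_timeout adds up to 30s of waiting per attempt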
data/lib/claude_memory/ingest/metadata_extractor.rb (new file, +57)
@@ -0,0 +1,57 @@
+# frozen_string_literal: true
+
+require "json"
+
+module ClaudeMemory
+  module Ingest
+    # Extracts session metadata from JSONL transcript messages
+    # Captures git branch, working directory, Claude version, thinking level
+    class MetadataExtractor
+      # Extract metadata from raw transcript text
+      # @param raw_text [String] the raw JSONL transcript content
+      # @return [Hash] metadata hash with extracted values
+      def extract(raw_text)
+        return {} if raw_text.nil? || raw_text.empty?
+
+        # Parse first JSONL message for metadata
+        first_line = raw_text.lines.first
+        return {} unless first_line&.strip&.start_with?("{")
+
+        message = JSON.parse(first_line)
+        {
+          git_branch: extract_git_branch(message),
+          cwd: extract_cwd(message),
+          claude_version: extract_claude_version(message),
+          thinking_level: extract_thinking_level(message)
+        }.compact
+      rescue JSON::ParserError
+        {}
+      end
+
+      private
+
+      def extract_git_branch(message)
+        # Check for gitBranch in top-level or nested metadata
+        message["gitBranch"] || message.dig("metadata", "gitBranch")
+      end
+
+      def extract_cwd(message)
+        # Check for cwd or workingDirectory
+        message["cwd"] || message["workingDirectory"] ||
+          message.dig("metadata", "cwd") || message.dig("metadata", "workingDirectory")
+      end
+
+      def extract_claude_version(message)
+        # Check various version fields
+        message["version"] || message["claude_version"] ||
+          message.dig("metadata", "version") || message.dig("metadata", "claude_version")
+      end
+
+      def extract_thinking_level(message)
+        # Extract thinking metadata level
+        message.dig("thinkingMetadata", "level") ||
+          message.dig("metadata", "thinkingLevel")
+      end
+    end
+  end
+end
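
A hedged usage sketch of the new extractor; the JSONL line below is invented for illustration, and only the key names (gitBranch, cwd, version, thinkingMetadata.level) come from the code above.

    extractor = ClaudeMemory::Ingest::MetadataExtractor.new
    line = '{"gitBranch":"main","cwd":"/work/app","version":"1.0.3","thinkingMetadata":{"level":"high"}}'
    extractor.extract(line + "\n")
    # => {git_branch: "main", cwd: "/work/app", claude_version: "1.0.3", thinking_level: "high"}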
data/lib/claude_memory/ingest/tool_extractor.rb (new file, +71)
@@ -0,0 +1,71 @@
+# frozen_string_literal: true
+
+require "json"
+
+module ClaudeMemory
+  module Ingest
+    # Extracts tool usage information from JSONL transcript messages
+    # Tracks which tools were called during a session
+    class ToolExtractor
+      # Extract tool calls from raw transcript text
+      # @param raw_text [String] the raw JSONL transcript content
+      # @return [Array<Hash>] array of tool call hashes
+      def extract(raw_text)
+        return [] if raw_text.nil? || raw_text.empty?
+
+        tools = []
+
+        raw_text.lines.each do |line|
+          next unless line.strip.start_with?("{")
+
+          message = parse_message(line)
+          next unless message
+
+          extract_tools_from_message(message, tools)
+        end
+
+        tools
+      rescue
+        # If we encounter any parsing errors, return what we've collected so far
+        tools
+      end
+
+      private
+
+      def parse_message(line)
+        JSON.parse(line)
+      rescue JSON::ParserError
+        nil
+      end
+
+      def extract_tools_from_message(message, tools)
+        # Look for assistant messages with content blocks
+        return unless message["type"] == "assistant"
+
+        content = message.dig("message", "content")
+        return unless content.is_a?(Array)
+
+        timestamp = message["timestamp"] || Time.now.utc.iso8601
+
+        content.each do |block|
+          next unless block["type"] == "tool_use"
+
+          tools << {
+            tool_name: block["name"],
+            tool_input: serialize_tool_input(block["input"]),
+            timestamp: timestamp,
+            is_error: false
+          }
+        end
+      end
+
+      def serialize_tool_input(input)
+        return nil unless input
+
+        # Convert to JSON, truncating if too large
+        json = input.to_json
+        (json.length > 1000) ? json[0...1000] + "..." : json
+      end
+    end
+  end
+end
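
Likewise, a hedged sketch of ToolExtractor against a fabricated assistant message; the tool name and input are illustrative only.

    extractor = ClaudeMemory::Ingest::ToolExtractor.new
    line = '{"type":"assistant","timestamp":"2025-06-01T12:00:00Z",' \
           '"message":{"content":[{"type":"tool_use","name":"Read","input":{"file_path":"README.md"}}]}}'
    extractor.extract(line + "\n")
    # => [{tool_name: "Read", tool_input: "{\"file_path\":\"README.md\"}",
    #      timestamp: "2025-06-01T12:00:00Z", is_error: false}]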
data/lib/claude_memory/mcp/response_formatter.rb (new file, +331)
@@ -0,0 +1,331 @@
+# frozen_string_literal: true
+
+module ClaudeMemory
+  module MCP
+    # Pure logic for formatting domain objects into MCP tool responses
+    # Follows Functional Core pattern - no I/O, just transformations
+    class ResponseFormatter
+      # Format recall query results into MCP response
+      # @param results [Array<Hash>] Recall results with :fact and :receipts
+      # @return [Hash] MCP response with facts array
+      def self.format_recall_results(results)
+        {
+          facts: results.map { |r| format_recall_fact(r) }
+        }
+      end
+
+      # Format single recall fact result
+      # @param result [Hash] Single result with :fact, :receipts, :source
+      # @return [Hash] Formatted fact for MCP response
+      def self.format_recall_fact(result)
+        {
+          id: result[:fact][:id],
+          subject: result[:fact][:subject_name],
+          predicate: result[:fact][:predicate],
+          object: result[:fact][:object_literal],
+          status: result[:fact][:status],
+          source: result[:source],
+          receipts: result[:receipts].map { |p| format_receipt(p) }
+        }
+      end
+
+      # Format index query results with token estimates
+      # @param query [String] Original query
+      # @param scope [String] Scope used
+      # @param results [Array<Hash>] Index results with fact data
+      # @return [Hash] MCP response with metadata and facts
+      def self.format_index_results(query, scope, results)
+        total_tokens = results.sum { |r| r[:token_estimate] }
+
+        {
+          query: query,
+          scope: scope,
+          result_count: results.size,
+          total_estimated_tokens: total_tokens,
+          facts: results.map { |r| format_index_fact(r) }
+        }
+      end
+
+      # Format single index fact with preview
+      # @param result [Hash] Index result with fact data and token estimate
+      # @return [Hash] Formatted fact for index response
+      def self.format_index_fact(result)
+        {
+          id: result[:id],
+          subject: result[:subject],
+          predicate: result[:predicate],
+          object_preview: result[:object_preview],
+          status: result[:status],
+          scope: result[:scope],
+          confidence: result[:confidence],
+          tokens: result[:token_estimate],
+          source: result[:source]
+        }
+      end
+
+      # Format explanation with full fact details and relationships
+      # @param explanation [Hash] Explanation with :fact, :receipts, :supersedes, etc.
+      # @param scope [String] Source scope
+      # @return [Hash] MCP response with fact, receipts, and relationships
+      def self.format_explanation(explanation, scope)
+        {
+          fact: {
+            id: explanation[:fact][:id],
+            subject: explanation[:fact][:subject_name],
+            predicate: explanation[:fact][:predicate],
+            object: explanation[:fact][:object_literal],
+            status: explanation[:fact][:status],
+            valid_from: explanation[:fact][:valid_from],
+            valid_to: explanation[:fact][:valid_to]
+          },
+          source: scope,
+          receipts: explanation[:receipts].map { |p| format_receipt(p) },
+          supersedes: explanation[:supersedes],
+          superseded_by: explanation[:superseded_by],
+          conflicts: explanation[:conflicts].map { |c| c[:id] }
+        }
+      end
+
+      # Format detailed explanation for recall_details response
+      # @param explanation [Hash] Explanation with full relationships
+      # @return [Hash] Detailed fact response
+      def self.format_detailed_explanation(explanation)
+        {
+          fact: {
+            id: explanation[:fact][:id],
+            subject: explanation[:fact][:subject_name],
+            predicate: explanation[:fact][:predicate],
+            object: explanation[:fact][:object_literal],
+            status: explanation[:fact][:status],
+            confidence: explanation[:fact][:confidence],
+            scope: explanation[:fact][:scope],
+            valid_from: explanation[:fact][:valid_from],
+            valid_to: explanation[:fact][:valid_to]
+          },
+          receipts: explanation[:receipts].map { |r| format_detailed_receipt(r) },
+          relationships: {
+            supersedes: explanation[:supersedes],
+            superseded_by: explanation[:superseded_by],
+            conflicts: explanation[:conflicts].map { |c| {id: c[:id], status: c[:status]} }
+          }
+        }
+      end
+
+      # Format receipt (provenance) with minimal fields
+      # @param receipt [Hash] Receipt with :quote and :strength
+      # @return [Hash] Formatted receipt
+      def self.format_receipt(receipt)
+        {quote: receipt[:quote], strength: receipt[:strength]}
+      end
+
+      # Format detailed receipt with session and timestamp
+      # @param receipt [Hash] Receipt with full fields
+      # @return [Hash] Formatted detailed receipt
+      def self.format_detailed_receipt(receipt)
+        {
+          quote: receipt[:quote],
+          strength: receipt[:strength],
+          session_id: receipt[:session_id],
+          occurred_at: receipt[:occurred_at]
+        }
+      end
+
+      # Format changes list into MCP response
+      # @param since [String] ISO timestamp
+      # @param changes [Array<Hash>] Change records
+      # @return [Hash] MCP response with since and formatted changes
+      def self.format_changes(since, changes)
+        {
+          since: since,
+          changes: changes.map { |c| format_change(c) }
+        }
+      end
+
+      # Format single change record
+      # @param change [Hash] Change with fact fields
+      # @return [Hash] Formatted change
+      def self.format_change(change)
+        {
+          id: change[:id],
+          predicate: change[:predicate],
+          object: change[:object_literal],
+          status: change[:status],
+          created_at: change[:created_at],
+          source: change[:source]
+        }
+      end
+
+      # Format conflicts list into MCP response
+      # @param conflicts [Array<Hash>] Conflict records
+      # @return [Hash] MCP response with count and formatted conflicts
+      def self.format_conflicts(conflicts)
+        {
+          count: conflicts.size,
+          conflicts: conflicts.map { |c| format_conflict(c) }
+        }
+      end
+
+      # Format single conflict record
+      # @param conflict [Hash] Conflict with fact IDs
+      # @return [Hash] Formatted conflict
+      def self.format_conflict(conflict)
+        {
+          id: conflict[:id],
+          fact_a: conflict[:fact_a_id],
+          fact_b: conflict[:fact_b_id],
+          status: conflict[:status],
+          source: conflict[:source]
+        }
+      end
+
+      # Format sweep statistics into MCP response
+      # @param scope [String] Database scope swept
+      # @param stats [Hash] Sweeper stats
+      # @return [Hash] Formatted sweep response
+      def self.format_sweep_stats(scope, stats)
+        {
+          scope: scope,
+          proposed_expired: stats[:proposed_facts_expired],
+          disputed_expired: stats[:disputed_facts_expired],
+          orphaned_deleted: stats[:orphaned_provenance_deleted],
+          content_pruned: stats[:old_content_pruned],
+          elapsed_seconds: stats[:elapsed_seconds].round(3)
+        }
+      end
+
+      # Format semantic search results with similarity scores
+      # @param query [String] Search query
+      # @param mode [String] Search mode (vector, text, both)
+      # @param scope [String] Scope
+      # @param results [Array<Hash>] Results with similarity scores
+      # @return [Hash] Formatted semantic search response
+      def self.format_semantic_results(query, mode, scope, results)
+        {
+          query: query,
+          mode: mode,
+          scope: scope,
+          count: results.size,
+          facts: results.map { |r| format_semantic_fact(r) }
+        }
+      end
+
+      # Format single semantic search fact with similarity
+      # @param result [Hash] Result with fact, receipts, and similarity
+      # @return [Hash] Formatted fact with similarity
+      def self.format_semantic_fact(result)
+        {
+          id: result[:fact][:id],
+          subject: result[:fact][:subject_name],
+          predicate: result[:fact][:predicate],
+          object: result[:fact][:object_literal],
+          scope: result[:fact][:scope],
+          source: result[:source],
+          similarity: result[:similarity],
+          receipts: result[:receipts].map { |r| format_receipt(r) }
+        }
+      end
+
+      # Format concept search results
+      # @param concepts [Array<String>] Concepts searched
+      # @param scope [String] Scope
+      # @param results [Array<Hash>] Results with similarity scores
+      # @return [Hash] Formatted concept search response
+      def self.format_concept_results(concepts, scope, results)
+        {
+          concepts: concepts,
+          scope: scope,
+          count: results.size,
+          facts: results.map { |r| format_concept_fact(r) }
+        }
+      end
+
+      # Format single concept search fact with multi-concept similarity
+      # @param result [Hash] Result with average and per-concept similarities
+      # @return [Hash] Formatted fact with concept similarities
+      def self.format_concept_fact(result)
+        {
+          id: result[:fact][:id],
+          subject: result[:fact][:subject_name],
+          predicate: result[:fact][:predicate],
+          object: result[:fact][:object_literal],
+          scope: result[:fact][:scope],
+          source: result[:source],
+          average_similarity: result[:similarity],
+          concept_similarities: result[:concept_similarities],
+          receipts: result[:receipts].map { |r| format_receipt(r) }
+        }
+      end
+
+      # Format shortcut query results (decisions, architecture, etc.)
+      # @param category [String] Shortcut category name
+      # @param results [Array<Hash>] Query results
+      # @return [Hash] Formatted shortcut response
+      def self.format_shortcut_results(category, results)
+        {
+          category: category,
+          count: results.size,
+          facts: results.map { |r| format_shortcut_fact(r) }
+        }
+      end
+
+      # Format fact for shortcut queries (includes scope, no status)
+      # @param result [Hash] Result with fact data
+      # @return [Hash] Formatted fact
+      def self.format_shortcut_fact(result)
+        {
+          id: result[:fact][:id],
+          subject: result[:fact][:subject_name],
+          predicate: result[:fact][:predicate],
+          object: result[:fact][:object_literal],
+          scope: result[:fact][:scope],
+          source: result[:source]
+        }
+      end
+
+      # Format facts_by_tool query results
+      # @param tool_name [String] Tool name
+      # @param scope [String] Scope
+      # @param results [Array<Hash>] Query results
+      # @return [Hash] Formatted tool facts response
+      def self.format_tool_facts(tool_name, scope, results)
+        {
+          tool_name: tool_name,
+          scope: scope,
+          count: results.size,
+          facts: results.map { |r| format_generic_fact(r) }
+        }
+      end
+
+      # Format facts_by_context query results
+      # @param context_type [String] Type (git_branch, cwd)
+      # @param context_value [String] Value
+      # @param scope [String] Scope
+      # @param results [Array<Hash>] Query results
+      # @return [Hash] Formatted context facts response
+      def self.format_context_facts(context_type, context_value, scope, results)
+        {
+          context_type: context_type,
+          context_value: context_value,
+          scope: scope,
+          count: results.size,
+          facts: results.map { |r| format_generic_fact(r) }
+        }
+      end
+
+      # Format generic fact with scope and receipts
+      # @param result [Hash] Result with fact and receipts
+      # @return [Hash] Formatted fact
+      def self.format_generic_fact(result)
+        {
+          id: result[:fact][:id],
+          subject: result[:fact][:subject_name],
+          predicate: result[:fact][:predicate],
+          object: result[:fact][:object_literal],
+          scope: result[:fact][:scope],
+          source: result[:source],
+          receipts: result[:receipts].map { |r| format_receipt(r) }
+        }
+      end
+    end
+  end
+end
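
A hedged sketch of the formatter in use; the fact values are made up, and only the input shape (:fact, :source, :receipts) reflects what the methods above expect.

    result = {
      fact: {id: 42, subject_name: "app", predicate: "uses_orm", object_literal: "Sequel", status: "accepted"},
      source: "project",
      receipts: [{quote: "we settled on Sequel", strength: 0.9}]
    }
    ClaudeMemory::MCP::ResponseFormatter.format_recall_results([result])
    # => {facts: [{id: 42, subject: "app", predicate: "uses_orm", object: "Sequel", status: "accepted",
    #              source: "project", receipts: [{quote: "we settled on Sequel", strength: 0.9}]}]}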
data/lib/claude_memory/mcp/server.rb (+19 -0)
@@ -99,6 +99,11 @@ module ClaudeMemory

        result = @tools.call(name, arguments)

+        # Release database connections after each tool call
+        # This prevents lock contention with hook commands
+        # Connections are automatically reopened on next use
+        release_connections
+
        {
          jsonrpc: "2.0",
          id: id,
@@ -110,6 +115,20 @@ module ClaudeMemory
        }
      end

+      def release_connections
+        if @store_or_manager.is_a?(Store::StoreManager)
+          # Release both global and project store connections
+          @store_or_manager.global_store&.db&.disconnect
+          @store_or_manager.project_store&.db&.disconnect
+        elsif @store_or_manager.respond_to?(:db)
+          # Release single store connection (legacy)
+          @store_or_manager.db.disconnect
+        end
+      rescue
+        # Silently ignore disconnect errors
+        # Connection will be reopened automatically on next use
+      end
+
      def send_response(response)
        @output.puts(JSON.generate(response))
        @output.flush
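
The disconnect/reopen behavior leans on Sequel's connection pool; a rough illustration using the plain sqlite adapter for brevity (the gem itself runs on the extralite adapter, but the pooling behaves the same way, and the path and table name below are illustrative):

    require "sequel"

    DB = Sequel.sqlite("memory.sqlite3")  # illustrative database path
    DB[:facts].count   # checks a connection out of the pool
    DB.disconnect      # empties the pool, releasing SQLite file handles and locks
    DB[:facts].count   # the next query checks out a fresh connection transparently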