claude_memory 0.4.0 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. checksums.yaml +4 -4
  2. data/.claude/CLAUDE.md +1 -1
  3. data/.claude/rules/claude_memory.generated.md +14 -1
  4. data/.claude/skills/check-memory/SKILL.md +10 -0
  5. data/.claude/skills/improve/SKILL.md +12 -1
  6. data/.claude-plugin/plugin.json +1 -1
  7. data/CHANGELOG.md +70 -0
  8. data/db/migrations/008_add_provenance_line_range.rb +21 -0
  9. data/db/migrations/009_add_docid.rb +39 -0
  10. data/db/migrations/010_add_llm_cache.rb +30 -0
  11. data/docs/improvements.md +72 -1084
  12. data/docs/influence/claude-supermemory.md +498 -0
  13. data/docs/influence/qmd.md +424 -2022
  14. data/docs/quality_review.md +64 -705
  15. data/lib/claude_memory/commands/doctor_command.rb +45 -4
  16. data/lib/claude_memory/commands/explain_command.rb +11 -6
  17. data/lib/claude_memory/commands/stats_command.rb +1 -1
  18. data/lib/claude_memory/core/fact_graph.rb +122 -0
  19. data/lib/claude_memory/core/fact_query_builder.rb +34 -14
  20. data/lib/claude_memory/core/fact_ranker.rb +3 -20
  21. data/lib/claude_memory/core/relative_time.rb +45 -0
  22. data/lib/claude_memory/core/result_sorter.rb +2 -2
  23. data/lib/claude_memory/core/rr_fusion.rb +57 -0
  24. data/lib/claude_memory/core/snippet_extractor.rb +97 -0
  25. data/lib/claude_memory/domain/fact.rb +3 -1
  26. data/lib/claude_memory/index/index_query.rb +2 -0
  27. data/lib/claude_memory/index/lexical_fts.rb +18 -0
  28. data/lib/claude_memory/infrastructure/operation_tracker.rb +7 -21
  29. data/lib/claude_memory/infrastructure/schema_validator.rb +30 -25
  30. data/lib/claude_memory/ingest/content_sanitizer.rb +8 -1
  31. data/lib/claude_memory/ingest/ingester.rb +67 -56
  32. data/lib/claude_memory/ingest/tool_extractor.rb +1 -1
  33. data/lib/claude_memory/ingest/tool_filter.rb +55 -0
  34. data/lib/claude_memory/logging/logger.rb +112 -0
  35. data/lib/claude_memory/mcp/query_guide.rb +96 -0
  36. data/lib/claude_memory/mcp/response_formatter.rb +86 -23
  37. data/lib/claude_memory/mcp/server.rb +34 -4
  38. data/lib/claude_memory/mcp/text_summary.rb +257 -0
  39. data/lib/claude_memory/mcp/tool_definitions.rb +20 -4
  40. data/lib/claude_memory/mcp/tools.rb +133 -120
  41. data/lib/claude_memory/publish.rb +12 -2
  42. data/lib/claude_memory/recall/expansion_detector.rb +44 -0
  43. data/lib/claude_memory/recall.rb +93 -41
  44. data/lib/claude_memory/resolve/resolver.rb +72 -40
  45. data/lib/claude_memory/store/sqlite_store.rb +99 -24
  46. data/lib/claude_memory/sweep/sweeper.rb +6 -0
  47. data/lib/claude_memory/version.rb +1 -1
  48. data/lib/claude_memory.rb +21 -0
  49. metadata +14 -2
  50. data/docs/remaining_improvements.md +0 -330

data/lib/claude_memory/infrastructure/operation_tracker.rb

@@ -107,23 +107,7 @@ module ClaudeMemory
       count = stuck.count
       return 0 if count.zero?
 
-      now = Time.now.utc.iso8601
-      error_message = "Reset by recover command - operation exceeded 24h timeout"
-
-      # Fetch each stuck operation, update checkpoint in Ruby, then save
-      stuck.all.each do |op|
-        checkpoint = op[:checkpoint_data] ? JSON.parse(op[:checkpoint_data]) : {}
-        checkpoint["error"] = error_message
-
-        @store.db[:operation_progress]
-          .where(id: op[:id])
-          .update(
-            status: "failed",
-            completed_at: now,
-            checkpoint_data: JSON.generate(checkpoint)
-          )
-      end
-
+      fail_operations(stuck, "Reset by recover command - operation exceeded 24h timeout")
       count
     end
 
@@ -132,15 +116,17 @@ module ClaudeMemory
     # Mark stale operations as failed before starting new operation
     def cleanup_stale_operations!(operation_type, scope)
       threshold_time = (Time.now.utc - STALE_THRESHOLD_SECONDS).iso8601
-      now = Time.now.utc.iso8601
-      error_message = "Automatically marked as failed - operation exceeded 24h timeout"
 
       stale = @store.db[:operation_progress]
         .where(operation_type: operation_type, scope: scope, status: "running")
         .where { started_at < threshold_time }
 
-      # Fetch each stale operation, update checkpoint in Ruby, then save
-      stale.all.each do |op|
+      fail_operations(stale, "Automatically marked as failed - operation exceeded 24h timeout")
+    end
+
+    def fail_operations(dataset, error_message)
+      now = Time.now.utc.iso8601
+      dataset.all.each do |op|
        checkpoint = op[:checkpoint_data] ? JSON.parse(op[:checkpoint_data]) : {}
        checkpoint["error"] = error_message
 
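Both the recover path and the stale-operation sweep now funnel through the new fail_operations helper, so the per-row write happens in exactly one place. A sketch of what it does to each matching row (values illustrative):

    # before: status "running", checkpoint_data '{"step":3}'
    # after fail_operations(stuck, "Reset by recover command - operation exceeded 24h timeout"):
    #   status          => "failed"
    #   completed_at    => Time.now.utc.iso8601 at reset time
    #   checkpoint_data => '{"step":3,"error":"Reset by recover command - operation exceeded 24h timeout"}'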

data/lib/claude_memory/infrastructure/schema_validator.rb

@@ -33,57 +33,62 @@ module ClaudeMemory
     def validate
       issues = []
-
-      # Check tables exist
       tables = @store.db.tables
-      missing_tables = EXPECTED_TABLES - tables
-      missing_tables.each do |table|
+
+      check_tables(tables, issues)
+      check_columns(tables, issues)
+      check_indexes(issues)
+      check_orphaned_records(issues)
+      check_enum_values(issues)
+      check_embedding_dimensions(issues)
+
+      record_health_check(issues)
+
+      {
+        valid: issues.none? { |i| i[:severity] == "error" },
+        issues: issues
+      }
+    end
+
+    private
+
+    def check_tables(tables, issues)
+      (EXPECTED_TABLES - tables).each do |table|
         issues << {severity: "error", message: "Missing table: #{table}"}
       end
+    end
 
-      # Check critical columns exist
+    def check_columns(tables, issues)
       CRITICAL_COLUMNS.each do |table, columns|
         next unless tables.include?(table)
 
         existing_columns = @store.db.schema(table).map(&:first)
-        missing_columns = columns - existing_columns
-        missing_columns.each do |column|
+        (columns - existing_columns).each do |column|
           issues << {severity: "error", message: "Missing column #{table}.#{column}"}
         end
       end
+    end
 
-      # Check critical indexes exist
+    def check_indexes(issues)
       index_names = @store.db["SELECT name FROM sqlite_master WHERE type='index'"]
         .all.map { |r| r[:name] }
-      missing_indexes = CRITICAL_INDEXES - index_names.map(&:to_sym)
-      missing_indexes.each do |index|
+      (CRITICAL_INDEXES - index_names.map(&:to_sym)).each do |index|
         issues << {severity: "warning", message: "Missing index: #{index}"}
       end
+    end
 
-      # Check for orphaned records
+    def check_orphaned_records(issues)
       check_orphaned_provenance(issues)
       check_orphaned_fact_links(issues)
       check_orphaned_tool_calls(issues)
+    end
 
-      # Check for invalid enum values
+    def check_enum_values(issues)
       check_invalid_fact_scopes(issues)
       check_invalid_fact_status(issues)
       check_invalid_operation_status(issues)
-
-      # Check embedding dimensions
-      check_embedding_dimensions(issues)
-
-      # Record validation result
-      record_health_check(issues)
-
-      {
-        valid: issues.none? { |i| i[:severity] == "error" },
-        issues: issues
-      }
     end
 
-    private
-
     def check_orphaned_provenance(issues)
       orphaned = @store.db[:provenance]
         .left_join(:facts, id: :fact_id)
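The extraction into check_* helpers preserves validate's return contract: a hash with an overall flag plus every collected issue. A minimal calling sketch (constant path inferred from the file location; issue text illustrative):

    validator = ClaudeMemory::Infrastructure::SchemaValidator.new(store)
    result = validator.validate
    # => {valid: false,
    #     issues: [{severity: "error", message: "Missing table: facts"},
    #              {severity: "warning", message: "Missing index: idx_facts_scope"}]}
    result[:valid] # false only when at least one issue has severity "error"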

data/lib/claude_memory/ingest/content_sanitizer.rb

@@ -9,7 +9,14 @@ module ClaudeMemory
   # Performance is O(n) and excellent even with 1000+ tags (~0.6ms).
   # Long Claude sessions legitimately accumulate many tags (100-200+).
   class ContentSanitizer
-    SYSTEM_TAGS = ["claude-memory-context"].freeze
+    SYSTEM_TAGS = [
+      "claude-memory-context",
+      "system-reminder",
+      "local-command-caveat",
+      "command-message",
+      "command-name",
+      "command-args"
+    ].freeze
     USER_TAGS = ["private", "no-memory", "secret"].freeze
 
     def self.strip_tags(text)
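The expanded SYSTEM_TAGS list means ingestion now strips Claude Code's own injected blocks (system reminders and slash-command wrappers), not just claude-memory's context tag. A sketch of the intended effect, assuming strip_tags removes a tag pair together with its enclosed content:

    text = "keep this <system-reminder>injected noise</system-reminder> and this"
    ClaudeMemory::Ingest::ContentSanitizer.strip_tags(text)
    # => "keep this  and this" (illustrative; exact whitespace handling may differ)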

data/lib/claude_memory/ingest/ingester.rb

@@ -5,85 +5,91 @@ require "digest"
 module ClaudeMemory
   module Ingest
     class Ingester
-      def initialize(store, fts: nil, env: ENV, metadata_extractor: nil, tool_extractor: nil)
+      def initialize(store, fts: nil, env: ENV, metadata_extractor: nil, tool_extractor: nil, tool_filter: nil)
         @store = store
         @fts = fts || Index::LexicalFTS.new(store)
         @config = Configuration.new(env)
         @metadata_extractor = metadata_extractor || MetadataExtractor.new
         @tool_extractor = tool_extractor || ToolExtractor.new
+        @tool_filter = tool_filter || ToolFilter.new
       end
 
       def ingest(source:, session_id:, transcript_path:, project_path: nil)
-        # Check if file has been modified since last ingestion (incremental sync)
         unless should_ingest?(transcript_path)
+          ClaudeMemory.logger.debug("ingest", message: "Skipped unchanged file", transcript_path: transcript_path)
           return {status: :skipped, bytes_read: 0, reason: "unchanged"}
         end
 
-        current_offset = @store.get_delta_cursor(session_id, transcript_path) || 0
-        delta, new_offset = TranscriptReader.read_delta(transcript_path, current_offset)
+        prepared = prepare_delta(session_id, transcript_path, project_path)
+        return {status: :no_change, bytes_read: 0} if prepared.nil?
 
-        return {status: :no_change, bytes_read: 0} if delta.nil?
+        content_id = persist_content(source, session_id, transcript_path, prepared)
 
-        # Extract session metadata and tool calls before sanitization
-        metadata = @metadata_extractor.extract(delta)
-        tool_calls = @tool_extractor.extract(delta)
+        log_ingestion(content_id, prepared, session_id)
+        {status: :ingested, content_id: content_id, bytes_read: prepared[:delta].bytesize, project_path: prepared[:project_path]}
+      end
 
-        # Strip privacy tags before storing
-        delta = ContentSanitizer.strip_tags(delta)
+      private
 
-        resolved_project = project_path || detect_project_path
+      def prepare_delta(session_id, transcript_path, project_path)
+        current_offset = @store.get_delta_cursor(session_id, transcript_path) || 0
+        delta, new_offset = TranscriptReader.read_delta(transcript_path, current_offset)
+        return nil if delta.nil?
 
-        # Get source file mtime for incremental sync
-        source_mtime = File.exist?(transcript_path) ? File.mtime(transcript_path).utc.iso8601 : nil
+        metadata = @metadata_extractor.extract(delta)
+        tool_calls = @tool_filter.filter(@tool_extractor.extract(delta))
+        delta = ContentSanitizer.strip_tags(delta)
 
-        text_hash = Digest::SHA256.hexdigest(delta)
+        {
+          delta: delta,
+          new_offset: new_offset,
+          metadata: metadata,
+          tool_calls: tool_calls,
+          project_path: project_path || detect_project_path,
+          source_mtime: File.exist?(transcript_path) ? File.mtime(transcript_path).utc.iso8601 : nil,
+          text_hash: Digest::SHA256.hexdigest(delta)
+        }
+      end
 
-        # Wrap entire ingestion pipeline in transaction for atomicity
-        # If any step fails, cursor position is not updated, allowing retry
-        content_id = nil
-        begin
-          content_id = with_retry do
-            @store.db.transaction do
-              content_id = @store.upsert_content_item(
-                source: source,
-                session_id: session_id,
-                transcript_path: transcript_path,
-                project_path: resolved_project,
-                text_hash: text_hash,
-                byte_len: delta.bytesize,
-                raw_text: delta,
-                git_branch: metadata[:git_branch],
-                cwd: metadata[:cwd],
-                claude_version: metadata[:claude_version],
-                thinking_level: metadata[:thinking_level],
-                source_mtime: source_mtime
-              )
-
-              # Store tool calls if any were extracted
-              @store.insert_tool_calls(content_id, tool_calls) unless tool_calls.empty?
-
-              # FTS indexing (FTS5 supports transactions)
-              @fts.index_content_item(content_id, delta)
-
-              # Update cursor LAST - only after all other operations succeed
-              # This ensures that if any step fails, we can retry from the same offset
-              @store.update_delta_cursor(session_id, transcript_path, new_offset)
-
-              content_id
-            end
+      def persist_content(source, session_id, transcript_path, prepared)
+        with_retry do
+          @store.db.transaction do
+            content_id = @store.upsert_content_item(
+              source: source,
+              session_id: session_id,
+              transcript_path: transcript_path,
+              project_path: prepared[:project_path],
+              text_hash: prepared[:text_hash],
+              byte_len: prepared[:delta].bytesize,
+              raw_text: prepared[:delta],
+              git_branch: prepared[:metadata][:git_branch],
+              cwd: prepared[:metadata][:cwd],
+              claude_version: prepared[:metadata][:claude_version],
+              thinking_level: prepared[:metadata][:thinking_level],
+              source_mtime: prepared[:source_mtime]
+            )
+
+            @store.insert_tool_calls(content_id, prepared[:tool_calls]) unless prepared[:tool_calls].empty?
+            @fts.index_content_item(content_id, prepared[:delta])
+            @store.update_delta_cursor(session_id, transcript_path, prepared[:new_offset])
+
+            content_id
           end
-        rescue Extralite::BusyError => e
-          # Re-raise BusyError with context after all retries exhausted
-          raise StandardError, "Ingestion failed for session #{session_id} after retries: #{e.message}"
-        rescue => e
-          # Re-raise other errors with context for better error messages
-          raise StandardError, "Ingestion failed for session #{session_id}: #{e.message}"
         end
-
-        {status: :ingested, content_id: content_id, bytes_read: delta.bytesize, project_path: resolved_project}
+      rescue Extralite::BusyError => e
+        raise StandardError, "Ingestion failed for session #{session_id} after retries: #{e.message}"
+      rescue => e
+        raise StandardError, "Ingestion failed for session #{session_id}: #{e.message}"
       end
 
-      private
+      def log_ingestion(content_id, prepared, session_id)
+        ClaudeMemory.logger.info("ingest",
+          message: "Ingested content",
+          content_id: content_id,
+          bytes_read: prepared[:delta].bytesize,
+          session_id: session_id,
+          tool_calls: prepared[:tool_calls].size)
+      end
 
       # Retry database operations with exponential backoff + jitter
       # This handles concurrent access when MCP server and hooks both write simultaneously
@@ -105,6 +111,11 @@ module ClaudeMemory
           exponential_delay = [base_delay * (2**(attempt - 1)), max_delay].min
           jitter = rand * exponential_delay * 0.5
           total_delay = exponential_delay + jitter
+          ClaudeMemory.logger.warn("ingest",
+            message: "Database busy, retrying",
+            attempt: attempt,
+            max_attempts: max_attempts,
+            delay_seconds: total_delay.round(3))
           sleep(total_delay)
           retry
         elsif is_busy
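For reference, the delay computed above grows as min(base_delay * 2^(attempt - 1), max_delay) plus up to 50% random jitter. With illustrative constants (the real values are defined elsewhere in Ingester and are not shown in this hunk):

    base_delay, max_delay = 0.1, 2.0 # assumed values, for illustration only
    (1..6).map { |attempt| [base_delay * (2**(attempt - 1)), max_delay].min }
    # => [0.1, 0.2, 0.4, 0.8, 1.6, 2.0]; each delay then gains up to 50% jitter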

data/lib/claude_memory/ingest/tool_extractor.rb

@@ -25,7 +25,7 @@ module ClaudeMemory
         end
 
         tools
-      rescue
+      rescue JSON::ParserError
        # If we encounter any parsing errors, return what we've collected so far
        tools
      end

data/lib/claude_memory/ingest/tool_filter.rb (new file)

@@ -0,0 +1,55 @@
+# frozen_string_literal: true
+
+module ClaudeMemory
+  module Ingest
+    # Filters tool calls during ingestion to reduce noise
+    # Follows Functional Core pattern - no I/O, just predicate logic
+    #
+    # Supports two modes:
+    # - skip_tools (blacklist): Skip listed tools, capture everything else
+    # - capture_tools (whitelist): Only capture listed tools, skip everything else
+    #
+    # When both are empty, all tools are captured (no filtering).
+    class ToolFilter
+      # Default tools to skip - high-volume, read-only tools that add noise
+      DEFAULT_SKIP_TOOLS = %w[Read Glob Grep].freeze
+
+      attr_reader :skip_tools, :capture_tools
+
+      # @param skip_tools [Array<String>] Tool names to skip (blacklist mode)
+      # @param capture_tools [Array<String>] Tool names to capture exclusively (whitelist mode)
+      def initialize(skip_tools: nil, capture_tools: nil)
+        @skip_tools = skip_tools || DEFAULT_SKIP_TOOLS
+        @capture_tools = capture_tools
+      end
+
+      # Check if a tool call should be captured
+      # @param tool_name [String] Name of the tool
+      # @return [Boolean] true if the tool should be captured
+      def capture?(tool_name)
+        return false if tool_name.nil?
+
+        # Whitelist mode takes precedence
+        if @capture_tools && !@capture_tools.empty?
+          return @capture_tools.include?(tool_name)
+        end
+
+        # Blacklist mode
+        !@skip_tools.include?(tool_name)
+      end
+
+      # Filter an array of tool call hashes
+      # @param tool_calls [Array<Hash>] Tool calls with :tool_name key
+      # @return [Array<Hash>] Filtered tool calls
+      def filter(tool_calls)
+        tool_calls.select { |tc| capture?(tc[:tool_name]) }
+      end
+
+      # Create a filter with no filtering (captures all tools)
+      # @return [ToolFilter] Permissive filter
+      def self.allow_all
+        new(skip_tools: [], capture_tools: nil)
+      end
+    end
+  end
+end
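A usage sketch of the two modes, with tool-call hashes shaped the way filter expects (a :tool_name key):

    calls = [{tool_name: "Read"}, {tool_name: "Edit"}, {tool_name: "Bash"}]

    ClaudeMemory::Ingest::ToolFilter.new.filter(calls)
    # => [{tool_name: "Edit"}, {tool_name: "Bash"}] (default blacklist skips Read/Glob/Grep)

    ClaudeMemory::Ingest::ToolFilter.new(capture_tools: %w[Bash]).filter(calls)
    # => [{tool_name: "Bash"}] (a non-empty whitelist takes precedence over the blacklist)

    ClaudeMemory::Ingest::ToolFilter.allow_all.filter(calls).size
    # => 3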

data/lib/claude_memory/logging/logger.rb (new file)

@@ -0,0 +1,112 @@
+# frozen_string_literal: true
+
+require "json"
+
+module ClaudeMemory
+  module Logging
+    # Structured JSON logger for ClaudeMemory operations.
+    # Outputs machine-readable JSON log entries to a configurable stream.
+    #
+    # Log levels: DEBUG (0), INFO (1), WARN (2), ERROR (3)
+    # Configure via CLAUDE_MEMORY_LOG_LEVEL env var (default: WARN)
+    #
+    # @example Basic usage
+    #   logger = ClaudeMemory::Logging::Logger.new
+    #   logger.info("ingest", message: "Ingested 1024 bytes", content_id: 42)
+    #
+    # @example With custom output
+    #   logger = ClaudeMemory::Logging::Logger.new(output: StringIO.new, level: :debug)
+    #   logger.debug("recall", message: "Query executed", query: "test", results: 5)
+    class Logger
+      DEBUG = 0
+      INFO = 1
+      WARN = 2
+      ERROR = 3
+
+      LEVELS = {debug: DEBUG, info: INFO, warn: WARN, error: ERROR}.freeze
+
+      attr_reader :level
+
+      # @param output [IO] output stream for log entries (default: $stderr)
+      # @param level [Symbol, String] minimum log level (default: from env or :warn)
+      def initialize(output: $stderr, level: nil)
+        @output = output
+        @level = resolve_level(level)
+      end
+
+      def debug(component, **fields)
+        log(:debug, component, **fields)
+      end
+
+      def info(component, **fields)
+        log(:info, component, **fields)
+      end
+
+      def warn(component, **fields)
+        log(:warn, component, **fields)
+      end
+
+      def error(component, **fields)
+        log(:error, component, **fields)
+      end
+
+      def debug?
+        @level <= DEBUG
+      end
+
+      def info?
+        @level <= INFO
+      end
+
+      private
+
+      def log(level_sym, component, **fields)
+        return unless LEVELS[level_sym] >= @level
+
+        entry = {
+          timestamp: Time.now.utc.iso8601(3),
+          level: level_sym.to_s.upcase,
+          component: component
+        }.merge(fields)
+
+        @output.puts(JSON.generate(entry))
+      rescue IOError
+        # Silently ignore write failures (e.g., closed pipe)
+      end
+
+      def resolve_level(explicit_level)
+        if explicit_level
+          sym = explicit_level.to_s.downcase.to_sym
+          LEVELS.fetch(sym, WARN)
+        else
+          env_level = ENV["CLAUDE_MEMORY_LOG_LEVEL"]
+          if env_level
+            sym = env_level.downcase.to_sym
+            LEVELS.fetch(sym, WARN)
+          else
+            WARN
+          end
+        end
+      end
+    end
+
+    # Null logger that discards all output (for testing or silent operation)
+    class NullLogger
+      def debug(component, **fields) = nil
+
+      def info(component, **fields) = nil
+
+      def warn(component, **fields) = nil
+
+      def error(component, **fields) = nil
+
+      def debug? = false
+
+      def info? = false
+
+      def level
+        Logger::ERROR + 1
+      end
+    end
+  end
+end
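A wiring sketch; the ClaudeMemory.logger accessor used by Ingester presumably comes from the changes to data/lib/claude_memory.rb, which this page does not show:

    logger = ClaudeMemory::Logging::Logger.new(level: :info)
    logger.info("recall", message: "Query executed", results: 3)
    # emits one JSON object per line on stderr, e.g.:
    # {"timestamp":"2025-01-01T00:00:00.000Z","level":"INFO","component":"recall","message":"Query executed","results":3}

    logger.debug("recall", message: "dropped") # below the :info threshold; no output

    # Or configure via the environment instead: CLAUDE_MEMORY_LOG_LEVEL=debug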

data/lib/claude_memory/mcp/query_guide.rb (new file)

@@ -0,0 +1,96 @@
+# frozen_string_literal: true
+
+module ClaudeMemory
+  module MCP
+    # MCP prompt that teaches Claude when to use each memory tool.
+    # Registered as "memory_guide" via prompts/list.
+    module QueryGuide
+      PROMPT_NAME = "memory_guide"
+      PROMPT_DESCRIPTION = "Guide for choosing the right memory search tool"
+
+      PROMPT_TEXT = <<~GUIDE
+        # ClaudeMemory Search Strategy Guide
+
+        ## Tool Selection
+
+        **memory.recall** — Full-text keyword search (fastest)
+        - Use for: exact terms, known predicates, specific entity names
+        - Example: "PostgreSQL", "authentication", "deployment"
+        - Returns: facts with provenance receipts
+
+        **memory.recall_semantic** — Vector similarity search
+        - Use for: conceptual queries, paraphrased questions, "find things like X"
+        - Modes: `vector` (embeddings only), `text` (FTS only), `both` (hybrid, recommended)
+        - Example: "how does the app handle user sessions" (no exact keyword match needed)
+        - Returns: facts ranked by similarity score (0.0-1.0)
+
+        **memory.search_concepts** — Multi-concept AND query
+        - Use for: intersection of 2-5 concepts that must ALL be present
+        - Example: concepts=["authentication", "JWT", "middleware"]
+        - Returns: facts matching all concepts, ranked by average similarity
+
+        **memory.recall_index** → **memory.recall_details** — Progressive disclosure
+        - Use for: browsing large result sets efficiently
+        - Step 1: `recall_index` returns lightweight previews with token estimates
+        - Step 2: `recall_details` fetches full data for selected fact IDs
+        - Saves tokens when you only need a few facts from many matches
+
+        ## Shortcut Tools
+
+        **memory.decisions** — Architectural decisions and constraints
+        **memory.conventions** — Coding style preferences and rules
+        **memory.architecture** — Framework choices and patterns
+
+        ## Context-Aware Tools
+
+        **memory.facts_by_tool** — Facts discovered via specific tool (Read, Edit, Bash)
+        **memory.facts_by_context** — Facts from specific git branch or directory
+
+        ## Decision Tree
+
+        1. Know the exact keyword? → `memory.recall`
+        2. Conceptual/fuzzy question? → `memory.recall_semantic` (mode: both)
+        3. Need intersection of topics? → `memory.search_concepts`
+        4. Looking for decisions? → `memory.decisions`
+        5. Looking for conventions? → `memory.conventions`
+        6. Many results expected? → `memory.recall_index` then `memory.recall_details`
+        7. Need provenance? → `memory.explain` with fact ID
+
+        ## Score Interpretation (semantic search)
+
+        - **> 0.85**: Strong match, high confidence
+        - **0.70-0.85**: Good match, likely relevant
+        - **0.55-0.70**: Moderate match, may be tangentially related
+        - **< 0.55**: Weak match, probably not relevant
+
+        ## Scope Parameter
+
+        All query tools accept `scope`: `"all"` (default), `"global"`, or `"project"`.
+        - `global`: User-wide preferences and conventions
+        - `project`: Current project facts only
+        - `all`: Both (project facts take precedence)
+      GUIDE
+
+      def self.definition
+        {
+          name: PROMPT_NAME,
+          description: PROMPT_DESCRIPTION
+        }
+      end
+
+      def self.content
+        {
+          messages: [
+            {
+              role: "user",
+              content: {
+                type: "text",
+                text: PROMPT_TEXT
+              }
+            }
+          ]
+        }
+      end
+    end
+  end
+end
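The module is pure data plus two accessors, so serving the prompt is straightforward. What a prompts/list and prompts/get handler would read from it (values taken directly from the constants above; the actual registration lives in data/lib/claude_memory/mcp/server.rb):

    ClaudeMemory::MCP::QueryGuide.definition
    # => {name: "memory_guide", description: "Guide for choosing the right memory search tool"}

    ClaudeMemory::MCP::QueryGuide.content
    # => {messages: [{role: "user", content: {type: "text", text: PROMPT_TEXT}}]}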