claude_memory 0.9.1 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77) hide show
  1. checksums.yaml +4 -4
  2. data/.claude/memory.sqlite3 +0 -0
  3. data/.claude/skills/dashboard/SKILL.md +42 -0
  4. data/.claude-plugin/marketplace.json +1 -1
  5. data/.claude-plugin/plugin.json +1 -1
  6. data/CHANGELOG.md +130 -0
  7. data/CLAUDE.md +30 -6
  8. data/README.md +66 -2
  9. data/db/migrations/015_add_activity_events.rb +26 -0
  10. data/db/migrations/016_add_moment_feedback.rb +22 -0
  11. data/db/migrations/017_add_last_recalled_at.rb +15 -0
  12. data/docs/1_0_punchlist.md +371 -0
  13. data/docs/EXAMPLES.md +41 -2
  14. data/docs/GETTING_STARTED.md +33 -4
  15. data/docs/architecture.md +22 -7
  16. data/docs/audit-queries.md +131 -0
  17. data/docs/dashboard.md +192 -0
  18. data/docs/improvements.md +650 -9
  19. data/docs/influence/cq.md +187 -0
  20. data/docs/plugin.md +13 -6
  21. data/docs/quality_review.md +524 -172
  22. data/docs/reflection_memory_as_accumulating_judgment.md +67 -0
  23. data/lib/claude_memory/activity_log.rb +86 -0
  24. data/lib/claude_memory/commands/census_command.rb +210 -0
  25. data/lib/claude_memory/commands/completion_command.rb +3 -0
  26. data/lib/claude_memory/commands/dashboard_command.rb +54 -0
  27. data/lib/claude_memory/commands/dedupe_conflicts_command.rb +55 -0
  28. data/lib/claude_memory/commands/digest_command.rb +273 -0
  29. data/lib/claude_memory/commands/hook_command.rb +61 -2
  30. data/lib/claude_memory/commands/initializers/hooks_configurator.rb +7 -4
  31. data/lib/claude_memory/commands/reclassify_references_command.rb +56 -0
  32. data/lib/claude_memory/commands/registry.rb +7 -1
  33. data/lib/claude_memory/commands/show_command.rb +90 -0
  34. data/lib/claude_memory/commands/skills/distill-transcripts.md +13 -1
  35. data/lib/claude_memory/commands/stats_command.rb +131 -2
  36. data/lib/claude_memory/commands/sweep_command.rb +2 -0
  37. data/lib/claude_memory/configuration.rb +16 -0
  38. data/lib/claude_memory/core/relative_time.rb +9 -0
  39. data/lib/claude_memory/dashboard/api.rb +610 -0
  40. data/lib/claude_memory/dashboard/conflicts.rb +279 -0
  41. data/lib/claude_memory/dashboard/efficacy.rb +127 -0
  42. data/lib/claude_memory/dashboard/fact_presenter.rb +109 -0
  43. data/lib/claude_memory/dashboard/health.rb +175 -0
  44. data/lib/claude_memory/dashboard/index.html +2707 -0
  45. data/lib/claude_memory/dashboard/knowledge.rb +136 -0
  46. data/lib/claude_memory/dashboard/moments.rb +244 -0
  47. data/lib/claude_memory/dashboard/reuse.rb +97 -0
  48. data/lib/claude_memory/dashboard/scoped_fact_resolver.rb +95 -0
  49. data/lib/claude_memory/dashboard/server.rb +211 -0
  50. data/lib/claude_memory/dashboard/timeline.rb +68 -0
  51. data/lib/claude_memory/dashboard/trust.rb +454 -0
  52. data/lib/claude_memory/distill/bare_conclusion_detector.rb +71 -0
  53. data/lib/claude_memory/distill/reference_material_detector.rb +78 -0
  54. data/lib/claude_memory/hook/auto_memory_mirror.rb +112 -0
  55. data/lib/claude_memory/hook/context_injector.rb +97 -3
  56. data/lib/claude_memory/hook/handler.rb +191 -3
  57. data/lib/claude_memory/mcp/handlers/management_handlers.rb +8 -0
  58. data/lib/claude_memory/mcp/query_guide.rb +11 -0
  59. data/lib/claude_memory/mcp/text_summary.rb +29 -0
  60. data/lib/claude_memory/mcp/tool_definitions.rb +13 -0
  61. data/lib/claude_memory/mcp/tools.rb +148 -0
  62. data/lib/claude_memory/publish.rb +13 -21
  63. data/lib/claude_memory/recall/stale_detector.rb +67 -0
  64. data/lib/claude_memory/resolve/predicate_policy.rb +2 -0
  65. data/lib/claude_memory/resolve/resolver.rb +41 -11
  66. data/lib/claude_memory/store/llm_cache.rb +68 -0
  67. data/lib/claude_memory/store/metrics_aggregator.rb +96 -0
  68. data/lib/claude_memory/store/schema_manager.rb +1 -1
  69. data/lib/claude_memory/store/sqlite_store.rb +47 -143
  70. data/lib/claude_memory/store/store_manager.rb +29 -0
  71. data/lib/claude_memory/sweep/maintenance.rb +216 -0
  72. data/lib/claude_memory/sweep/recall_timestamp_refresher.rb +83 -0
  73. data/lib/claude_memory/sweep/sweeper.rb +2 -0
  74. data/lib/claude_memory/templates/hooks.example.json +5 -0
  75. data/lib/claude_memory/version.rb +1 -1
  76. data/lib/claude_memory.rb +24 -0
  77. metadata +51 -1
@@ -0,0 +1,67 @@
1
+ # Memory as Accumulating Judgment
2
+
3
+ *A reflection on what ClaudeMemory is really doing, and a simple formula for applying AI to engineering teams at any scale.*
4
+
5
+ ---
6
+
7
+ ## The one thing
8
+
9
+ **What accumulates, wins.** Intelligence is now cheap; context is the expensive part. Every AI interaction without memory pays full cost to re-derive what was already known. Memory converts ephemeral intelligence into accumulating judgment — the same thing that makes senior engineers valuable.
10
+
11
+ ## Thoughts on this project
12
+
13
+ ClaudeMemory is betting on the right axis. Not "make the model smarter" (commodified, Anthropic's job), but "make what's already known stop disappearing." The dual-database split (global vs project), provenance, supersession, and predicate vocabulary are all machinery in service of one goal: **judgment that persists past the session boundary**. The open conflicts and distiller-hallucination churn visible in this repo are the honest signal — accumulation is the hard problem, not retrieval.
14
+
15
+ ## 10 theories (in priority order)
16
+
17
+ 1. **Context-rebuild dominates cost.** Token count matters less than relevance ratio.
18
+ 2. **Decisions have half-lives.** Memory's job is slowing decay, not freezing truth.
19
+ 3. **Why > what.** Remembering the reason behind a decision outvalues remembering the decision.
20
+ 4. **Scope layering beats flat memory.** global → team → project → role → session.
21
+ 5. **Corrections compound.** Each remembered correction prevents N future repeat corrections.
22
+ 6. **Onboarding = context-rebuild.** Same problem, different substrate.
23
+ 7. **Small teams need memory more.** No tribal knowledge to fall back on.
24
+ 8. **Conflict is signal, not noise.** Unresolved conflicts mark where judgment is actually forming.
25
+ 9. **Provenance is trust.** Facts without sources are guesses at scale.
26
+ 10. **Surface area of trust is the real moat.** The more an AI remembers correctly, the more humans delegate.
27
+
28
+ ## Where examples break
29
+
30
+ - **Solo dev, greenfield project:** memory underperforms — nothing to remember yet. Theory 7 bends: memory needs *inputs* before it pays off.
31
+ - **Huge monorepo, 200 engineers:** global memory collides constantly. Theory 4 becomes load-bearing — without scope, memory becomes noise.
32
+ - **Short-lived prototypes:** accumulation cost > payoff. Theories 1 and 5 invert.
33
+ - **Rapidly-evolving codebase:** half-life is short (theory 2), memory goes stale faster than it's written. Supersession machinery has to outpace change.
34
+
35
+ **Pattern:** memory's value is a function of churn rate and team size, not raw code volume.
36
+
37
+ ## The formula
38
+
39
+ Strip it to one relation, Ohm's-Law style:
40
+
41
+ > **V = R / C**
42
+ >
43
+ > - `V` = value per interaction
44
+ > - `R` = judgment retained from prior interactions (corrections, decisions, why-reasons)
45
+ > - `C` = context that must be rebuilt from scratch each time
46
+
47
+ When `C → 0`, `V → ∞`. When `R → 0`, `V → 0`. Everything ClaudeMemory does — FTS5, semantic recall, provenance, supersession — is in service of raising `R` and lowering `C`.
48
+
49
+ ## At scale
50
+
51
+ Let `C = Σ Cᵢ` across scope layers (personal, project, team, org):
52
+
53
+ - **Solo:** `C ≈ C_personal`. Memory wins by remembering your preferences and past decisions. Small surface, high per-unit payoff.
54
+ - **Small team (2–10):** `C ≈ C_project + C_personal`. Memory wins by codifying conventions so the team doesn't re-litigate them weekly.
55
+ - **Org (50+):** `C ≈ C_org + C_team + C_project`. Memory must be scoped or it becomes noise. Value shifts from *remembering* to *routing* — the right fact to the right person at the right moment.
56
+
57
+ **Rule of thumb:** apply AI where `R/C` is highest. That's wherever the same context is re-established most often — code review, architecture decisions, onboarding, incident postmortems. Avoid where `R` can't accumulate (one-off scripts, throwaway prototypes).
58
+
59
+ ## The delivered value, simply
60
+
61
+ > **To whom:** engineers re-deciding things they already decided.
62
+ >
63
+ > **For whom:** the future version of themselves and their teammates.
64
+ >
65
+ > **What:** the elimination of re-derivation.
66
+
67
+ Memory is not a feature. It's the thing that makes AI-for-engineering a compounding asset instead of a rentable tool.
@@ -0,0 +1,86 @@
1
# frozen_string_literal: true

require "json"
require "time" # Time#iso8601 is defined by the "time" stdlib extension, not core Time

module ClaudeMemory
  # Records activity events for debugging and observability.
  # Events are stored in the activity_events table and surfaced
  # via the dashboard and `memory.activity` MCP tool.
  #
  # All methods are best-effort: failures are logged and swallowed so that
  # observability never takes down the operation being observed.
  module ActivityLog
    module_function

    # Record an activity event in the given store.
    #
    # @param store [Store::SQLiteStore] database to write to
    # @param event_type [String] e.g. "hook_ingest", "hook_context", "recall"
    # @param status [String] "success", "skipped", or "error"
    # @param session_id [String, nil] Claude session ID
    # @param duration_ms [Integer, nil] operation duration in milliseconds
    # @param details [Hash, nil] event-specific metadata (JSON-serialized into detail_json)
    # @return [Object, nil] the insert result, or nil when recording failed
    def record(store, event_type:, status:, session_id: nil, duration_ms: nil, details: nil)
      store.activity_events.insert(
        event_type: event_type,
        session_id: session_id,
        status: status,
        duration_ms: duration_ms,
        detail_json: details&.to_json,
        occurred_at: Time.now.utc.iso8601
      )
    rescue => e
      # Swallow the error: an activity-log failure must not fail the caller.
      ClaudeMemory.logger.warn("activity_log",
        message: "Failed to record event",
        event_type: event_type,
        status: status,
        error_class: e.class.name,
        error: e.message,
        backtrace: e.backtrace&.first(3))
      nil
    end

    # Query recent activity events, newest first.
    #
    # @param store [Store::SQLiteStore] database to read from
    # @param limit [Integer] max events to return
    # @param event_type [String, nil] filter by type
    # @param since [String, nil] ISO 8601 lower bound (inclusive)
    # @return [Array<Hash>] event records; :detail_json is replaced by a
    #   parsed :details hash (symbolized keys) or nil. Empty array on error.
    def recent(store, limit: 50, event_type: nil, since: nil)
      dataset = store.activity_events.order(Sequel.desc(:occurred_at)).limit(limit)
      dataset = dataset.where(event_type: event_type) if event_type
      dataset = dataset.where { occurred_at >= since } if since

      dataset.all.map do |row|
        row[:details] = row[:detail_json] ? JSON.parse(row[:detail_json], symbolize_names: true) : nil
        row.delete(:detail_json)
        row
      end
    rescue => e
      ClaudeMemory.logger.warn("activity_log", message: "Failed to query events", error: e.message)
      []
    end

    # Summarize activity counts grouped by event_type and status.
    #
    # @param store [Store::SQLiteStore]
    # @param since [String, nil] ISO 8601 lower bound (inclusive)
    # @return [Hash] e.g. {"hook_ingest" => {success: 5, error: 1}, ...};
    #   empty hash on error
    def summary(store, since: nil)
      dataset = store.activity_events
      dataset = dataset.where { occurred_at >= since } if since

      rows = dataset
        .group_and_count(:event_type, :status)
        .all

      result = {}
      rows.each do |row|
        result[row[:event_type]] ||= {}
        result[row[:event_type]][row[:status].to_sym] = row[:count]
      end
      result
    rescue => e
      ClaudeMemory.logger.warn("activity_log", message: "Failed to summarize events", error: e.message)
      {}
    end
  end
end
@@ -0,0 +1,210 @@
1
# frozen_string_literal: true

require "json"
require "digest"
require "optparse" # OptionParser is used in #call but was not required here
require "set"      # Enumerable#to_set is used below; only autoloaded on Ruby >= 3.2
require "time"     # Time#iso8601 is provided by the "time" stdlib extension

module ClaudeMemory
  module Commands
    # Aggregates predicate/entity/schema usage across many ClaudeMemory databases
    # into a privacy-safe JSON report. Used for informed vocabulary curation
    # across machines without exposing content, names, or paths.
    #
    # What's emitted: schema versions, fact counts by predicate × status,
    # entity type counts, novel predicates (outside the curated vocabulary),
    # and synonym candidates (novel predicates overlapping known ones).
    #
    # What's *never* emitted: object_literal, entity names, project paths,
    # provenance quotes, raw session IDs.
    class CensusCommand < BaseCommand
      DB_FILENAME = "memory.sqlite3"
      TOP_PREDICATES_PER_DB = 5
      SYNONYM_OVERLAP_THRESHOLD = 0.4
      DEFAULT_ROOT = "~/src"

      # Entry point: parse options, discover databases, emit the JSON report.
      #
      # @param args [Array<String>] CLI arguments
      # @return [Integer] exit code (0 on success or nothing found, 1 on bad options)
      def call(args)
        opts = parse_options(args, {root: DEFAULT_ROOT, output: nil, pretty: false, include_global: true}) do |o|
          OptionParser.new do |parser|
            parser.banner = "Usage: claude-memory census [options]"
            parser.on("--root DIR", "Directory to scan (default: #{DEFAULT_ROOT})") { |v| o[:root] = v }
            parser.on("--output FILE", "Write JSON to file instead of stdout") { |v| o[:output] = v }
            parser.on("--pretty", "Pretty-print JSON output") { o[:pretty] = true }
            parser.on("--no-global", "Skip the global database (~/.claude/memory.sqlite3)") { o[:include_global] = false }
          end
        end
        return 1 if opts.nil?

        root = File.expand_path(opts[:root])
        paths = discover_databases(root)
        paths << global_db_path if opts[:include_global] && File.exist?(global_db_path) && !paths.include?(global_db_path)

        if paths.empty?
          stderr.puts "No ClaudeMemory databases found under #{root}"
          return 0
        end

        report = build_report(paths)
        json = opts[:pretty] ? JSON.pretty_generate(report) : JSON.generate(report)

        if opts[:output]
          File.write(opts[:output], json)
          stderr.puts "Census: scanned #{paths.size} database(s); wrote #{opts[:output]}"
        else
          stdout.puts json
        end

        0
      end

      private

      # Memoized so #call's up-to-three lookups construct Configuration once.
      def global_db_path
        @global_db_path ||= ClaudeMemory::Configuration.new.global_db_path
      end

      # Find project databases under root by globbing for .claude/memory.sqlite3.
      # @return [Array<String>] sorted absolute paths; [] when root is missing
      def discover_databases(root)
        return [] unless Dir.exist?(root)
        Dir.glob(File.join(root, "**", ".claude", DB_FILENAME)).sort
      end

      # Aggregate per-database summaries into the final report hash.
      def build_report(paths)
        known = ClaudeMemory::Resolve::PredicatePolicy.known_predicates.to_set

        report = {
          version: ClaudeMemory::VERSION,
          generated_at: Time.now.utc.iso8601,
          database_count: paths.size,
          schema_versions: Hash.new(0),
          totals: {facts: Hash.new(0), entities: 0, content_items: 0},
          predicates: {},
          entity_types: Hash.new(0),
          novel_predicates: [],
          synonym_candidates: [],
          databases: []
        }

        # Per-key default via block (not Hash.new(obj)) so entries are independent.
        predicates = Hash.new { |h, k| h[k] = {total: 0, by_status: Hash.new(0), db_count: 0} }

        paths.each do |path|
          summary = scan_database(path)
          next unless summary # unreadable databases are skipped with a warning

          report[:schema_versions][summary[:schema_version].to_s] += 1 if summary[:schema_version]
          report[:totals][:entities] += summary[:entity_count]
          report[:totals][:content_items] += summary[:content_count]

          summary[:facts_by_status].each { |status, count| report[:totals][:facts][status] += count }

          summary[:entity_types].each { |type, count| report[:entity_types][type.to_s] += count }

          summary[:predicates].each do |predicate, statuses|
            entry = predicates[predicate]
            entry[:db_count] += 1
            statuses.each do |status, count|
              entry[:total] += count
              entry[:by_status][status] += count
            end
          end

          report[:databases] << anonymize_db(path, summary)
        end

        report[:predicates] = predicates.each_with_object({}) do |(predicate, entry), acc|
          acc[predicate] = {
            total: entry[:total],
            by_status: entry[:by_status],
            db_count: entry[:db_count],
            known: known.include?(predicate)
          }
        end

        report[:novel_predicates] = predicates.keys.reject { |p| known.include?(p) }.sort
        report[:synonym_candidates] = synonym_candidates(report[:novel_predicates], known)

        report
      end

      # Open one database read path and summarize its contents.
      # Returns nil (after warning on stderr) when the database is unreadable.
      def scan_database(path)
        db = Sequel.connect("extralite://#{path}")

        schema_version = begin
          db[:meta].where(key: "schema_version").get(:value)&.to_i
        rescue Sequel::DatabaseError
          nil # older databases may lack the meta table
        end

        facts_by_status = db[:facts].group_and_count(:status).all.each_with_object(Hash.new(0)) do |row, acc|
          acc[row[:status].to_s] += row[:count].to_i
        end

        predicates = db[:facts].select(:predicate, :status).group_and_count(:predicate, :status).all
          .each_with_object(Hash.new { |h, k| h[k] = Hash.new(0) }) do |row, acc|
            acc[row[:predicate].to_s][row[:status].to_s] += row[:count].to_i
          end

        entity_types = db[:entities].group_and_count(:type).all.each_with_object(Hash.new(0)) do |row, acc|
          acc[row[:type].to_s] += row[:count].to_i
        end

        entity_count = db[:entities].count
        content_count = db[:content_items].count

        {
          schema_version: schema_version,
          facts_by_status: facts_by_status,
          predicates: predicates,
          entity_types: entity_types,
          entity_count: entity_count,
          content_count: content_count
        }
      rescue Sequel::DatabaseError, Extralite::Error => e
        stderr.puts "Skipping #{path}: #{e.message}"
        nil
      ensure
        db&.disconnect
      end

      # Privacy-safe per-database entry: the path is replaced by a short
      # SHA-256 digest so reports never leak filesystem layout.
      def anonymize_db(path, summary)
        top = summary[:predicates]
          .map { |predicate, statuses| [predicate, statuses.values.sum] }
          .sort_by { |(_, count)| -count }
          .first(TOP_PREDICATES_PER_DB)
          .to_h

        {
          id: Digest::SHA256.hexdigest(path)[0, 12],
          schema_version: summary[:schema_version],
          facts: summary[:facts_by_status],
          entities: summary[:entity_count],
          content_items: summary[:content_count],
          top_predicates: top
        }
      end

      # For each novel predicate, pick the known predicate with the highest
      # token-set Jaccard overlap; report it when overlap meets the threshold.
      def synonym_candidates(novels, known)
        novels.each_with_object([]) do |novel, acc|
          novel_tokens = tokenize(novel)
          next if novel_tokens.empty?

          best = known.map do |canonical|
            {canonical: canonical, overlap: jaccard(novel_tokens, tokenize(canonical))}
          end.max_by { |candidate| candidate[:overlap] }

          next unless best && best[:overlap] >= SYNONYM_OVERLAP_THRESHOLD

          acc << {novel: novel, closest_known: best[:canonical], overlap: best[:overlap].round(2)}
        end
      end

      # Split a predicate on underscores/whitespace/hyphens into a lowercase token set.
      def tokenize(predicate)
        predicate.to_s.downcase.split(/[_\s-]+/).reject(&:empty?).to_set
      end

      # Jaccard similarity of two token sets. With both sets non-empty the
      # union is always positive, so no extra zero-division guard is needed.
      def jaccard(a, b)
        return 0.0 if a.empty? || b.empty?
        (a & b).size.to_f / (a | b).size
      end
    end
  end
end
@@ -80,6 +80,9 @@ module ClaudeMemory
80
80
  completion)
81
81
  _arguments '--shell[Shell type]:shell:(bash zsh)'
82
82
  ;;
83
+ dashboard)
84
+ _arguments '--port[Server port]:port:' '--no-open[Skip browser open]'
85
+ ;;
83
86
  install-skill)
84
87
  local -a skills
85
88
  skills=(#{skill_names.map { |s| "'#{s}'" }.join(" ")})
@@ -0,0 +1,54 @@
1
# frozen_string_literal: true

require "optparse"

module ClaudeMemory
  module Commands
    # Starts a local web dashboard for inspection and trust signals. Default
    # port 3377; reads global + project databases, no writes from page loads.
    # Panels: feed (Moments), Trust sidebar (utilization, fingerprint,
    # feedback ratio, needs-review), Knowledge (predicate-grouped facts),
    # Conflicts (deduped at display layer), Reuse, Health, Timeline,
    # Activity drill-down, Query tester. Each panel is backed by a dedicated
    # `Dashboard::*` collaborator class.
    #
    # See `docs/dashboard.md` for the user-facing guide.
    class DashboardCommand < BaseCommand
      # @param args [Array<String>] CLI arguments (--port, --no-open)
      # @return [Integer] exit code (1 on bad options or no databases)
      def call(args)
        opts = parse_options(args, {port: Dashboard::Server::DEFAULT_PORT, no_open: false}) do |o|
          OptionParser.new do |parser|
            parser.banner = "Usage: claude-memory dashboard [options]"
            parser.on("--port PORT", Integer, "Server port (default: #{Dashboard::Server::DEFAULT_PORT})") { |v| o[:port] = v }
            parser.on("--no-open", "Don't auto-open browser") { o[:no_open] = true }
          end
        end
        return 1 if opts.nil?

        manager = Store::StoreManager.new

        unless manager.global_exists? || manager.project_exists?
          stderr.puts "No memory databases found. Run 'claude-memory init' first."
          manager.close
          return 1
        end

        # Open each existing store exactly once. The previous version called
        # ensure_both! and then ensure_global!/ensure_project! again when both
        # databases existed, repeating the same setup work.
        if manager.global_exists? && manager.project_exists?
          manager.ensure_both!
        elsif manager.global_exists?
          manager.ensure_global!
        else
          manager.ensure_project!
        end

        stdout.puts "Starting ClaudeMemory dashboard on http://localhost:#{opts[:port]}"
        stdout.puts "Press Ctrl+C to stop."

        server = Dashboard::Server.new(
          manager: manager,
          port: opts[:port],
          open_browser: !opts[:no_open]
        )

        begin
          server.start # blocks until the server stops (e.g. Ctrl+C)
        ensure
          # Close stores even when start exits via Interrupt; the previous
          # version skipped close when the exception propagated out of start.
          manager.close
        end
        0
      end
    end
  end
end
@@ -0,0 +1,55 @@
1
# frozen_string_literal: true

require "optparse"

module ClaudeMemory
  module Commands
    # One-shot repair for historical conflict-row duplication. A resolver bug
    # used to create a fresh disputed fact plus conflict row every time the
    # same contradicting value was re-extracted (the Resolver#apply_conflict
    # dedupe fix landed 2026-04-24 stops new duplicates); this command mops
    # up the rows that accumulated before the fix.
    class DedupeConflictsCommand < BaseCommand
      # @param args [Array<String>] CLI arguments (--scope, --dry-run)
      # @return [Integer] exit code (1 on bad options, 0 otherwise)
      def call(args)
        defaults = {scope: "project", dry_run: false}
        opts = parse_options(args, defaults) do |collected|
          OptionParser.new do |parser|
            parser.banner = "Usage: claude-memory dedupe-conflicts [options]"
            parser.on("--scope SCOPE", %w[project global], "Database scope (default: project)") { |v| collected[:scope] = v }
            parser.on("--dry-run", "Show what would be resolved without writing") { collected[:dry_run] = true }
          end
        end
        return 1 if opts.nil?

        manager = ClaudeMemory::Store::StoreManager.new
        store = manager.store_for_scope(opts[:scope])

        begin
          outcome = Sweep::Maintenance.new(store).dedupe_open_conflicts(dry_run: opts[:dry_run])
        ensure
          manager.close
        end

        print_result(opts, outcome)
        0
      end

      private

      # Render the dedupe outcome: a summary header plus, when present,
      # one line per merge decision.
      def print_result(opts, result)
        header = opts[:dry_run] ? "DRY RUN" : "DEDUPE"
        stdout.puts "#{header}: scope=#{opts[:scope]}"
        stdout.puts "=" * 50
        stdout.puts "Conflicts inspected: #{result[:inspected]}"
        stdout.puts "Duplicates resolved: #{result[:resolved]}"

        decisions = result[:decisions]
        return if decisions.empty?

        stdout.puts
        stdout.puts "Decisions:"
        decisions.each do |decision|
          stdout.puts " conflict ##{decision[:conflict_id]} -> merged into ##{decision[:keeper_id]} (rejects fact ##{decision[:duplicate_fact_id]})"
        end
      end
    end
  end
end