claude_memory 0.9.1 → 0.10.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.claude/memory.sqlite3 +0 -0
- data/.claude/skills/dashboard/SKILL.md +42 -0
- data/.claude-plugin/marketplace.json +1 -1
- data/.claude-plugin/plugin.json +1 -1
- data/CHANGELOG.md +86 -0
- data/CLAUDE.md +21 -5
- data/README.md +32 -2
- data/db/migrations/015_add_activity_events.rb +26 -0
- data/db/migrations/016_add_moment_feedback.rb +22 -0
- data/db/migrations/017_add_last_recalled_at.rb +15 -0
- data/docs/1_0_punchlist.md +190 -0
- data/docs/EXAMPLES.md +41 -2
- data/docs/GETTING_STARTED.md +31 -4
- data/docs/architecture.md +22 -7
- data/docs/audit-queries.md +131 -0
- data/docs/dashboard.md +172 -0
- data/docs/improvements.md +465 -9
- data/docs/influence/cq.md +187 -0
- data/docs/plugin.md +13 -6
- data/docs/quality_review.md +489 -172
- data/docs/reflection_memory_as_accumulating_judgment.md +67 -0
- data/lib/claude_memory/activity_log.rb +86 -0
- data/lib/claude_memory/commands/census_command.rb +210 -0
- data/lib/claude_memory/commands/completion_command.rb +3 -0
- data/lib/claude_memory/commands/dashboard_command.rb +54 -0
- data/lib/claude_memory/commands/dedupe_conflicts_command.rb +55 -0
- data/lib/claude_memory/commands/digest_command.rb +181 -0
- data/lib/claude_memory/commands/hook_command.rb +34 -0
- data/lib/claude_memory/commands/reclassify_references_command.rb +56 -0
- data/lib/claude_memory/commands/registry.rb +6 -1
- data/lib/claude_memory/commands/skills/distill-transcripts.md +13 -1
- data/lib/claude_memory/commands/stats_command.rb +38 -1
- data/lib/claude_memory/commands/sweep_command.rb +2 -0
- data/lib/claude_memory/configuration.rb +16 -0
- data/lib/claude_memory/core/relative_time.rb +9 -0
- data/lib/claude_memory/dashboard/api.rb +610 -0
- data/lib/claude_memory/dashboard/conflicts.rb +279 -0
- data/lib/claude_memory/dashboard/efficacy.rb +127 -0
- data/lib/claude_memory/dashboard/fact_presenter.rb +109 -0
- data/lib/claude_memory/dashboard/health.rb +175 -0
- data/lib/claude_memory/dashboard/index.html +2707 -0
- data/lib/claude_memory/dashboard/knowledge.rb +136 -0
- data/lib/claude_memory/dashboard/moments.rb +244 -0
- data/lib/claude_memory/dashboard/reuse.rb +97 -0
- data/lib/claude_memory/dashboard/scoped_fact_resolver.rb +95 -0
- data/lib/claude_memory/dashboard/server.rb +211 -0
- data/lib/claude_memory/dashboard/timeline.rb +68 -0
- data/lib/claude_memory/dashboard/trust.rb +285 -0
- data/lib/claude_memory/distill/reference_material_detector.rb +78 -0
- data/lib/claude_memory/hook/auto_memory_mirror.rb +112 -0
- data/lib/claude_memory/hook/context_injector.rb +97 -3
- data/lib/claude_memory/hook/handler.rb +50 -3
- data/lib/claude_memory/mcp/handlers/management_handlers.rb +8 -0
- data/lib/claude_memory/mcp/query_guide.rb +11 -0
- data/lib/claude_memory/mcp/text_summary.rb +29 -0
- data/lib/claude_memory/mcp/tool_definitions.rb +13 -0
- data/lib/claude_memory/mcp/tools.rb +148 -0
- data/lib/claude_memory/publish.rb +13 -21
- data/lib/claude_memory/recall/stale_detector.rb +67 -0
- data/lib/claude_memory/resolve/predicate_policy.rb +2 -0
- data/lib/claude_memory/resolve/resolver.rb +41 -11
- data/lib/claude_memory/store/llm_cache.rb +68 -0
- data/lib/claude_memory/store/metrics_aggregator.rb +96 -0
- data/lib/claude_memory/store/schema_manager.rb +1 -1
- data/lib/claude_memory/store/sqlite_store.rb +47 -143
- data/lib/claude_memory/store/store_manager.rb +29 -0
- data/lib/claude_memory/sweep/maintenance.rb +216 -0
- data/lib/claude_memory/sweep/recall_timestamp_refresher.rb +83 -0
- data/lib/claude_memory/sweep/sweeper.rb +2 -0
- data/lib/claude_memory/version.rb +1 -1
- data/lib/claude_memory.rb +22 -0
- metadata +49 -1
|
@@ -0,0 +1,279 @@
|
|
|
1
|
+
# frozen_string_literal: true

module ClaudeMemory
  module Dashboard
    # Dashboard API resource for fact conflicts. Owns list/detail/reject
    # across both stores (global + project) and keeps them disjoint — a
    # conflict row in one store only ever points at facts in that same store.
    #
    # Listing deduplicates rows for display by
    # (source, predicate, normalized(object_a, object_b), status). Each group
    # carries a `group_size` so the UI can render "sqlite vs postgres (×11)"
    # instead of eleven rows that resolve identically. `counts` reports the
    # deduplicated numbers; `raw_counts` keeps the underlying row totals for
    # the Advanced drawer.
    class Conflicts
      DEFAULT_LIMIT = 50

      # @param manager [Object] store manager exposing #store_if_exists(scope)
      def initialize(manager)
        @manager = manager
      end

      # @param params [Hash] "scope" (project|global|all), "status"
      #   (open|resolved|all), "limit", "offset"
      # @return [Hash] paging metadata, per-scope counts, serialized groups
      def list(params = {})
        scope = params["scope"] || "project"
        status_filter = params["status"] || "open"
        limit = (params["limit"] || DEFAULT_LIMIT).to_i
        offset = (params["offset"] || 0).to_i

        rows = stores_for(scope).flat_map do |source, store|
          dataset = store.conflicts
          dataset = dataset.where(status: status_filter) unless status_filter == "all"
          dataset.all.map { |r| r.merge(source: source, store: store) }
        end

        groups = group_rows(rows)
        # Newest-detected group first.
        groups.sort_by! { |g| -Core::RelativeTime.to_epoch(g[:representative][:detected_at]) }

        {
          total: groups.size,
          limit: limit,
          offset: offset,
          scope: scope,
          status: status_filter,
          counts: counts_across_scopes,
          raw_counts: raw_counts_across_scopes,
          conflicts: Array(groups[offset, limit]).map { |g| serialize_group(g) }
        }
      end

      # Distinct open-conflict counts per scope (post-dedupe), plus :total.
      # Used by Trust#needs_review so the sidebar backlog reflects distinct
      # pairs rather than duplicated rows from pre-dedupe history.
      def distinct_open_counts
        tallies = {project: 0, global: 0}
        %w[project global].each do |scope_name|
          store = @manager.store_if_exists(scope_name)
          next unless store
          open_rows = store.conflicts.where(status: "open").all
            .map { |r| r.merge(source: scope_name, store: store) }
          tallies[scope_name.to_sym] = group_rows(open_rows).size
        end
        tallies.merge(total: tallies.values.sum)
      end

      # @param id [Integer, String] conflict row id
      # @param scope [String] "project" or "global" (required — conflicts are scope-local)
      # @return [Hash] conflict metadata plus both facts with provenance, or {error:}
      def detail(id, scope)
        return {error: "Invalid scope"} unless %w[global project].include?(scope)
        store = @manager.store_if_exists(scope)
        return {error: "#{scope} store not available"} unless store

        row = store.conflicts.where(id: id.to_i).first
        return {error: "Conflict #{id} not found"} unless row

        presenter = FactPresenter.new(store)
        {
          conflict: {
            id: row[:id],
            status: row[:status],
            detected_at: row[:detected_at],
            detected_ago: Core::RelativeTime.format(row[:detected_at]),
            notes: row[:notes],
            source: scope
          },
          fact_a: presenter.with_provenance(store.facts.where(id: row[:fact_a_id]).first),
          fact_b: presenter.with_provenance(store.facts.where(id: row[:fact_b_id]).first)
        }
      end

      # Rejects one side of a conflict by rejecting its fact. SQLiteStore#reject_fact
      # flips the fact to "rejected" and cascade-resolves associated conflicts in
      # a single transaction, so duplicate rows collapse automatically.
      def reject(id, side:, reason: nil, scope: "project")
        return {error: "Invalid side (must be 'a' or 'b')"} unless %w[a b].include?(side)
        return {error: "Invalid scope"} unless %w[global project].include?(scope)
        store = @manager.store_if_exists(scope)
        return {error: "#{scope} store not available"} unless store

        row = store.conflicts.where(id: id.to_i).first
        return {error: "Conflict #{id} not found"} unless row

        target_fact_id = side == "a" ? row[:fact_a_id] : row[:fact_b_id]
        outcome = store.reject_fact(target_fact_id, reason: reason)

        {
          success: true,
          conflict_id: id,
          rejected_fact_id: target_fact_id,
          side: side,
          scope: scope,
          conflicts_resolved: outcome[:conflicts_resolved]
        }
      end

      # Bulk-reject every disputed fact that's in open conflict against a
      # single "keeper" fact. Resolves the distiller-hallucination pattern
      # where one correct fact (e.g. uses_database=sqlite) accumulates many
      # contradicting candidates (postgresql, mysql, redis, ...). For each
      # open conflict with keeper_fact_id on either side, the fact on the
      # OTHER side is rejected; SQLiteStore#reject_fact cascade-resolves the
      # conflict inside its own transaction.
      #
      # @return [Hash] {rejected_fact_ids:, conflicts_resolved:}
      def reject_similar(keeper_fact_id, reason: nil, scope: "project")
        return {error: "Invalid scope"} unless %w[global project].include?(scope)
        store = @manager.store_if_exists(scope)
        return {error: "#{scope} store not available"} unless store

        keeper_id = keeper_fact_id.to_i
        open_rows = store.conflicts
          .where(status: "open")
          .where(Sequel.|({fact_a_id: keeper_id}, {fact_b_id: keeper_id}))
          .all

        return {success: true, keeper_fact_id: keeper_id, rejected_fact_ids: [], conflicts_resolved: 0} if open_rows.empty?

        rejected_ids = []
        resolved_total = 0
        open_rows.each do |row|
          loser_id = row[:fact_a_id] == keeper_id ? row[:fact_b_id] : row[:fact_a_id]
          next if rejected_ids.include?(loser_id)
          outcome = store.reject_fact(loser_id, reason: reason)
          rejected_ids << loser_id
          resolved_total += outcome[:conflicts_resolved] || 0
        end

        {
          success: true,
          keeper_fact_id: keeper_id,
          rejected_fact_ids: rejected_ids,
          conflicts_resolved: resolved_total,
          scope: scope
        }
      end

      private

      # Resolve the store(s) a scope parameter refers to. Returns a hash of
      # scope-name => store with missing stores dropped; "all"/unknown scopes
      # fall through to both.
      def stores_for(scope)
        names =
          case scope
          when "project" then %w[project]
          when "global" then %w[global]
          else %w[project global]
          end
        names
          .each_with_object({}) { |name, acc| acc[name] = @manager.store_if_exists(name) }
          .compact
      end

      # Deduplicated open/resolved/total counts per scope, for the sidebar.
      def counts_across_scopes
        scoped = {project: {open: 0, resolved: 0, total: 0},
                  global: {open: 0, resolved: 0, total: 0}}
        [:project, :global].each do |source|
          store = @manager.store_if_exists(source.to_s)
          next unless store
          %w[open resolved].each do |status|
            status_rows = store.conflicts.where(status: status).all
              .map { |r| r.merge(source: source.to_s, store: store) }
            distinct = group_rows(status_rows).size
            scoped[source][status.to_sym] = distinct
            scoped[source][:total] += distinct
          end
        end
        scoped
      end

      # Raw (pre-dedupe) row counts per scope, for the Advanced drawer.
      def raw_counts_across_scopes
        scoped = {project: {open: 0, resolved: 0, total: 0},
                  global: {open: 0, resolved: 0, total: 0}}
        [:project, :global].each do |source|
          store = @manager.store_if_exists(source.to_s)
          next unless store
          store.conflicts.group_and_count(:status).all.each do |r|
            status_key = r[:status].to_sym
            scoped[source][status_key] = r[:count] if scoped[source].key?(status_key)
            scoped[source][:total] += r[:count]
          end
        end
        scoped
      end

      # Group conflict rows by (source, predicate, normalized(objects), status)
      # so pre-dedupe historical duplicates collapse into one display row.
      # Returns [{representative:, members:, facts:}...] — `representative` is
      # the most recently detected row in the group; `members` is all raw rows;
      # `facts` maps fact_id => facts-table row for each group's sides
      # (batched to avoid N+1).
      def group_rows(rows)
        return [] if rows.empty?

        facts_by_source = load_facts_for_rows(rows)

        buckets = rows.each_with_object({}) do |row, acc|
          store_facts = facts_by_source[row[:source]] || {}
          fact_a = store_facts[row[:fact_a_id]]
          fact_b = store_facts[row[:fact_b_id]]
          bucket = (acc[grouping_key(row, fact_a, fact_b)] ||= {members: [], facts: {}})
          bucket[:members] << row
          bucket[:facts][row[:fact_a_id]] ||= fact_a if fact_a
          bucket[:facts][row[:fact_b_id]] ||= fact_b if fact_b
        end

        buckets.values.map do |bucket|
          newest_first = bucket[:members].sort_by { |r| -Core::RelativeTime.to_epoch(r[:detected_at]) }
          {representative: newest_first.first, members: bucket[:members], facts: bucket[:facts]}
        end
      end

      # Batch-load every fact referenced by the rows, one query per store.
      # Returns source-name => {fact_id => fact row}.
      def load_facts_for_rows(rows)
        rows
          .group_by { |r| [r[:source], r[:store]] }
          .each_with_object({}) do |((source, store), grouped), acc|
            ids = grouped.flat_map { |r| [r[:fact_a_id], r[:fact_b_id]] }.compact.uniq
            acc[source] = ids.empty? ? {} : store.facts.where(id: ids).as_hash(:id)
          end
      end

      # Display-dedupe key: scope, status, predicate, and the sorted pair of
      # normalized object texts (order-insensitive so A-vs-B == B-vs-A).
      def grouping_key(row, fact_a, fact_b)
        predicate = fact_a&.dig(:predicate) || fact_b&.dig(:predicate) || "?"
        normalized_pair = [normalize_object(fact_a), normalize_object(fact_b)].sort
        [row[:source], row[:status], predicate, *normalized_pair]
      end

      # Case/whitespace-insensitive object text for grouping; "" when the
      # fact is missing or has no literal.
      def normalize_object(fact)
        return "" unless fact
        (fact[:object_literal] || "").to_s.downcase.strip.gsub(/\s+/, " ")
      end

      # Shape one dedupe group into the API row the list endpoint emits.
      # Falls back to per-id store lookups if the batched fact map lacks a side.
      def serialize_group(group)
        representative = group[:representative]
        store = representative[:store]
        presenter = FactPresenter.new(store)
        fact_a = group[:facts][representative[:fact_a_id]] || store.facts.where(id: representative[:fact_a_id]).first
        fact_b = group[:facts][representative[:fact_b_id]] || store.facts.where(id: representative[:fact_b_id]).first

        {
          id: representative[:id],
          fact_a_id: representative[:fact_a_id],
          fact_b_id: representative[:fact_b_id],
          fact_a_preview: presenter.preview(fact_a),
          fact_b_preview: presenter.preview(fact_b),
          status: representative[:status],
          detected_at: representative[:detected_at],
          detected_ago: Core::RelativeTime.format(representative[:detected_at]),
          notes: representative[:notes],
          source: representative[:source],
          group_size: group[:members].size,
          group_member_ids: group[:members].map { |m| m[:id] }
        }
      end
    end
  end
end
|
|
@@ -0,0 +1,127 @@
|
|
|
1
|
+
# frozen_string_literal: true

module ClaudeMemory
  module Dashboard
    module Efficacy
      # Pure report calculator for recall activity events. Takes a list of
      # already-loaded events and produces the shaped metrics the dashboard
      # renders. No I/O, no database access — keeping compute separate from
      # loading makes the aggregation fast to test and portable across
      # event sources.
      #
      # Expected event shape (matches {ClaudeMemory::ActivityLog.recent} output):
      #
      #   {
      #     id:, event_type:, status:, duration_ms:, session_id:,
      #     occurred_at:, details: {tool:, query:, result_count:,
      #       results_by_scope: {"project" => N, "global" => M}, ...}
      #   }
      module Reporter
        RECALL_TRACE_LIMIT = 50
        MEMORY_GAPS_LIMIT = 10

        module_function

        # @param events [Array<Hash>] recall activity events (post-filter)
        # @param timeframe [Hash] {since:, session_id:} echoed into the response
        # @return [Hash] the efficacy payload
        def report(events, timeframe: {})
          counts = events.map { |e| e.dig(:details, :result_count) || 0 }
          durations = events.map { |e| e[:duration_ms] }.compact
          # A "hit" is a successful recall that returned at least one result;
          # a successful recall with zero results is an "empty" (a memory gap).
          hits = events.count { |e| e[:status] == "success" && (e.dig(:details, :result_count) || 0) > 0 }
          misses = events.count { |e| e[:status] == "success" && (e.dig(:details, :result_count) || 0) == 0 }

          {
            timeframe: {since: timeframe[:since], session_id: timeframe[:session_id]},
            recall_events: events.size,
            successful_recalls: hits,
            empty_recalls: misses,
            hit_rate: percentage(hits, events.size),
            total_results_served: counts.sum,
            median_results_per_query: median(counts),
            median_latency_ms: median(durations),
            tool_mix: tool_mix(events),
            source_contribution: source_contribution(events),
            memory_gaps: memory_gaps(events),
            recall_trace: recall_trace(events)
          }
        end

        # Percentage with zero-safe denominator, rounded to 1 decimal.
        def percentage(part, whole)
          return 0 if whole.to_i.zero?
          (part.to_f / whole * 100).round(1)
        end

        # Sorted median — 0 for empty input, midpoint average for even counts.
        def median(values)
          return 0 if values.empty?
          sorted = values.sort
          mid = sorted.size / 2
          return sorted[mid] if sorted.size.odd?
          ((sorted[mid - 1] + sorted[mid]) / 2.0).round(1)
        end

        # Per-tool call counts and hit rates, busiest tool first.
        def tool_mix(events)
          events
            .group_by { |e| e.dig(:details, :tool) || "(unknown)" }
            .map { |tool, group|
              tool_hits = group.count { |r| (r.dig(:details, :result_count) || 0) > 0 }
              {
                tool: tool,
                count: group.size,
                hits: tool_hits,
                hit_rate: percentage(tool_hits, group.size)
              }
            }
            .sort_by { |row| -row[:count] }
        end

        # Aggregate {results_by_scope} across events. Reveals where returned
        # facts actually came from — the one question only efficacy can answer.
        def source_contribution(events)
          totals = Hash.new(0)
          events.each do |event|
            by_scope = event.dig(:details, :results_by_scope)
            next unless by_scope.is_a?(Hash)
            by_scope.each { |scope, n| totals[scope.to_s] += n.to_i }
          end
          return [] if totals.empty?
          totals.map { |scope, count| {scope: scope, count: count} }.sort_by { |r| -r[:count] }
        end

        # Queries that came back empty — the first few, so the UI can surface
        # what memory doesn't know yet.
        def memory_gaps(events)
          events
            .select { |e| (e.dig(:details, :result_count) || 0).zero? && e.dig(:details, :query) }
            .first(MEMORY_GAPS_LIMIT)
            .map { |event|
              {
                tool: event.dig(:details, :tool),
                query: event.dig(:details, :query),
                occurred_at: event[:occurred_at],
                occurred_ago: Core::RelativeTime.format(event[:occurred_at])
              }
            }
        end

        # Flat per-event trace of the most recent recalls, capped at
        # RECALL_TRACE_LIMIT rows.
        def recall_trace(events)
          events.first(RECALL_TRACE_LIMIT).map { |event|
            {
              id: event[:id],
              tool: event.dig(:details, :tool),
              query: event.dig(:details, :query),
              result_count: event.dig(:details, :result_count) || 0,
              duration_ms: event[:duration_ms],
              session_id: event[:session_id],
              status: event[:status],
              occurred_at: event[:occurred_at],
              occurred_ago: Core::RelativeTime.format(event[:occurred_at])
            }
          }
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
# frozen_string_literal: true

module ClaudeMemory
  module Dashboard
    # Shapes a facts-table row into the hashes the dashboard API emits.
    # Callers opt in to the shape they need:
    #
    # - {#summary} — full fact with confidence, scope, created_at, created_ago
    # - {#preview} — predicate + truncated object for list rows
    # - {#with_provenance} — summary + provenance chain (quote, session_id, occurred_at)
    # - {#list_summary} — batches entity lookups across many rows to avoid N+1
    #
    # All methods resolve subject/object entities from the store given at
    # construction time; callers pass raw facts-table rows (hashes) directly.
    class FactPresenter
      OBJECT_PREVIEW_CHARS = 120

      # @param store [Object] store exposing #entities, #provenance, #content_items datasets
      def initialize(store)
        @store = store
      end

      # @param row [Hash, nil] a facts-table row
      # @return [Hash, nil] nil when row is nil
      def summary(row)
        return nil unless row
        resolved = load_entities([row[:subject_entity_id], row[:object_entity_id]])
        serialize(row, resolved)
      end

      # @param row [Hash, nil]
      # @return [Hash, nil] object text truncated to {OBJECT_PREVIEW_CHARS}
      def preview(row)
        return nil unless row
        resolved = load_entities([row[:subject_entity_id], row[:object_entity_id]])
        subject_entity = resolved[row[:subject_entity_id]]
        object_entity = resolved[row[:object_entity_id]]
        object_text = row[:object_literal] || object_entity&.dig(:canonical_name) || "unknown"
        needs_truncation = object_text.to_s.length > OBJECT_PREVIEW_CHARS

        {
          id: row[:id],
          docid: row[:docid],
          subject: subject_entity&.dig(:canonical_name) || "unknown",
          predicate: row[:predicate],
          object: needs_truncation ? "#{object_text[0, OBJECT_PREVIEW_CHARS]}…" : object_text,
          scope: row[:scope],
          status: row[:status]
        }
      end

      # @param row [Hash, nil]
      # @return [Hash, nil] summary plus :provenance array with session/date context
      def with_provenance(row)
        return nil unless row
        summary(row).merge(provenance: load_provenance(row[:id]))
      end

      # @param rows [Array<Hash>] facts-table rows
      # @return [Array<Hash>] summaries with batched entity resolution
      def list_summary(rows)
        entity_ids = rows.flat_map { |r| [r[:subject_entity_id], r[:object_entity_id]] }.compact.uniq
        resolved = entity_ids.empty? ? {} : @store.entities.where(id: entity_ids).as_hash(:id)
        rows.map { |r| serialize(r, resolved) }
      end

      private

      # Fetch entity rows keyed by id; tolerates nil/duplicate ids and skips
      # the query entirely when nothing needs resolving.
      def load_entities(ids)
        wanted = ids.compact.uniq
        return {} if wanted.empty?
        @store.entities.where(id: wanted).as_hash(:id)
      end

      # Provenance rows for a fact, enriched with the content item's
      # session/date context (batched content-item lookup).
      def load_provenance(fact_id)
        prov_rows = @store.provenance.where(fact_id: fact_id).all
        content_ids = prov_rows.map { |p| p[:content_item_id] }.compact.uniq
        content_items = content_ids.empty? ? {} : @store.content_items.where(id: content_ids).as_hash(:id)

        prov_rows.map { |prov|
          item = prov[:content_item_id] ? content_items[prov[:content_item_id]] : nil
          {
            quote: prov[:quote],
            strength: prov[:strength],
            content_item_id: prov[:content_item_id],
            session_id: item&.dig(:session_id),
            occurred_at: item&.dig(:occurred_at)
          }
        }
      end

      # Full (untruncated) summary shape shared by #summary and #list_summary.
      def serialize(row, entities)
        subject_entity = entities[row[:subject_entity_id]]
        object_entity = entities[row[:object_entity_id]]
        {
          id: row[:id],
          docid: row[:docid],
          subject: subject_entity&.dig(:canonical_name) || "unknown",
          predicate: row[:predicate],
          object: row[:object_literal] || object_entity&.dig(:canonical_name) || "unknown",
          status: row[:status],
          confidence: row[:confidence],
          scope: row[:scope],
          created_at: row[:created_at],
          created_ago: Core::RelativeTime.format(row[:created_at]),
          valid_from: row[:valid_from]
        }
      end
    end
  end
end
|
|
@@ -0,0 +1,175 @@
|
|
|
1
|
+
# frozen_string_literal: true

require "json"

module ClaudeMemory
  module Dashboard
    # Aggregates the dashboard "/health" report from four checks: per-database
    # schema/fact health (global + project), claude-code hooks installation,
    # and the sqlite-vec vector index. Each check returns a {name, status,
    # message, fix?} hash; the report's overall status escalates to the
    # worst individual status (error > warning > healthy).
    #
    # Pulled out of Dashboard::API so the wiring lives next to the data
    # rather than next to the HTTP routing.
    class Health
      HOOKS_SETTINGS_PATHS = [".claude/settings.json", ".claude/settings.local.json"].freeze
      VEC_LOW_COVERAGE_PCT = 10
      VEC_WARN_COVERAGE_PCT = 50

      # @param manager [Object] store manager exposing db paths and store lookup
      def initialize(manager)
        @manager = manager
      end

      # @return [Hash] {status:, checks:, version:} for the /health endpoint
      def report
        checks = [
          db_check("global", @manager.global_db_path),
          db_check("project", @manager.project_db_path),
          hooks_check,
          vec_check
        ]
        {status: aggregate_status(checks), checks: checks, version: ClaudeMemory::VERSION}
      end

      private

      # Worst status wins: any error => "error", else any warning => "warning".
      def aggregate_status(checks)
        return "error" if checks.any? { |c| c[:status] == "error" }
        return "warning" if checks.any? { |c| c[:status] == "warning" }
        "healthy"
      end

      # One database check: warning when the file is missing, healthy with
      # schema version + active-fact count otherwise; any raised error is
      # reported (not re-raised) with a recovery hint.
      def db_check(label, path)
        unless File.exist?(path)
          return {
            name: "#{label}_database",
            status: "warning",
            message: "Not initialized",
            fix: "Run `claude-memory init` to create the #{label} database at #{path}."
          }
        end

        store = @manager.store_for_scope(label)
        schema_version = store.schema_version
        {
          name: "#{label}_database",
          status: "healthy",
          message: "Schema v#{schema_version}, #{store.facts.where(status: "active").count} active facts"
        }
      rescue => e
        {
          name: "#{label}_database",
          status: "error",
          message: e.message,
          fix: "Inspect the error above. Common causes: corrupt schema, file permissions, or a stale lock. Try `claude-memory recover --scope #{label}`, or remove the file at #{path} and re-run `claude-memory init`."
        }
      end

      # Compares configured claude-memory hooks against the expected set:
      # none => error, partial => warning, all => healthy. Settings-read
      # failures are reported as an error check rather than raised.
      def hooks_check
        configured = collect_configured_hook_types
        expected = Commands::Checks::HooksCheck::EXPECTED_HOOKS
        missing = expected - configured

        if configured.empty?
          return {
            name: "hooks",
            status: "error",
            message: "No claude-memory hooks found in #{HOOKS_SETTINGS_PATHS.join(" or ")}",
            fix: "Run `claude-memory init` to install the standard hook set (#{expected.join(", ")})."
          }
        end

        return {name: "hooks", status: "healthy", message: "All #{expected.size} hooks configured"} if missing.empty?

        {
          name: "hooks",
          status: "warning",
          message: "#{configured.size}/#{expected.size} hooks configured",
          fix: "Missing hook(s): #{missing.join(", ")}. Run `claude-memory init` to install the standard set, or add them manually under `hooks.<EventName>[].hooks[]` in .claude/settings.json."
        }
      rescue => e
        {
          name: "hooks",
          status: "error",
          message: e.message,
          fix: "Failed to read hook settings. Verify .claude/settings.json is valid JSON, then re-run `claude-memory doctor`."
        }
      end

      # Walks the two-level hook structure Claude Code uses:
      #   hooks.<EventName>[] -> matcher hash -> .hooks[] -> { type:, command: }
      # and collects event types whose command mentions claude-memory.
      def collect_configured_hook_types
        found = []
        HOOKS_SETTINGS_PATHS.each do |relpath|
          full_path = File.join(Dir.pwd, relpath)
          next unless File.exist?(full_path)

          settings = JSON.parse(File.read(full_path))
          (settings["hooks"] || {}).each do |event_type, matchers|
            next unless matchers.is_a?(Array)
            references_us = matchers.any? do |matcher|
              next false unless matcher.is_a?(Hash) && matcher["hooks"].is_a?(Array)
              matcher["hooks"].any? { |h| h.is_a?(Hash) && h["command"]&.include?("claude-memory") }
            end
            found << event_type if references_us
          end
        end
        found.uniq
      end

      # Vector-index check: warns when no database or sqlite-vec is missing,
      # otherwise grades embedding coverage. Errors degrade to a warning with
      # a rebuild hint (semantic recall is optional; lexical recall survives).
      def vec_check
        store = @manager.default_store(prefer: :project)
        unless store
          return {
            name: "vectors",
            status: "warning",
            message: "No database",
            fix: "Initialize a database first with `claude-memory init`."
          }
        end

        vec = store.vector_index
        return vec_unavailable_check unless vec.available?

        vec_coverage_check(vec)
      rescue => e
        {
          name: "vectors",
          status: "warning",
          message: e.message,
          fix: "Vector index threw an error. Try `claude-memory index --vec --rebuild` to rebuild from facts."
        }
      end

      # Canned check for when the sqlite-vec extension failed to load.
      def vec_unavailable_check
        {
          name: "vectors",
          status: "warning",
          message: "sqlite-vec not available",
          fix: "The sqlite-vec extension didn't load. Run `bundle install` to install the gem (>= 0.1.9). Semantic recall will be disabled until this is fixed; lexical recall still works."
        }
      end

      # Grades embedding coverage and attaches a rebuild hint unless healthy.
      def vec_coverage_check(vec)
        coverage = vec.coverage_stats
        indexed = coverage[:vec_indexed] || 0
        total = coverage[:with_embedding] || 0
        pct = coverage[:coverage_pct] || 0
        message = total > 0 ? "#{indexed}/#{total} facts indexed (#{pct}%)" : "0 facts have embeddings yet"

        status = vec_status_for(total, pct)
        result = {name: "vectors", status: status, message: message}
        result[:fix] = "Vector coverage is low — run `claude-memory index --vec --rebuild` to regenerate embeddings and reindex all active facts. Semantic recall accuracy degrades as this drops." unless status == "healthy"
        result
      end

      # Coverage thresholds: no embeddings at all is healthy (nothing to
      # index yet); below 10% is an error; below 50% a warning.
      def vec_status_for(total, pct)
        return "healthy" if total.zero?
        return "error" if pct < VEC_LOW_COVERAGE_PCT
        return "warning" if pct < VEC_WARN_COVERAGE_PCT
        "healthy"
      end
    end
  end
end
|