@exaudeus/memory-mcp 1.6.0 → 1.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/config.d.ts CHANGED
@@ -1,4 +1,5 @@
1
- import type { MemoryConfig, BehaviorConfig } from './types.js';
1
+ import type { MemoryConfig, BehaviorConfig, EmbedderConfig } from './types.js';
2
+ import type { Embedder } from './embedder.js';
2
3
  /** How the config was loaded — discriminated union so configFilePath
3
4
  * only exists when source is 'file' (illegal states unrepresentable) */
4
5
  export type ConfigOrigin = {
@@ -15,6 +16,8 @@ export interface LoadedConfig {
15
16
  readonly origin: ConfigOrigin;
16
17
  /** Resolved behavior config — present when a "behavior" block was found in memory-config.json */
17
18
  readonly behavior?: BehaviorConfig;
19
+ /** Resolved embedder — shared across all lobes. Constructed from config or auto-detected. */
20
+ readonly embedder?: Embedder;
18
21
  }
19
22
  interface MemoryConfigFileBehavior {
20
23
  staleDaysStandard?: number;
@@ -23,10 +26,26 @@ interface MemoryConfigFileBehavior {
23
26
  maxDedupSuggestions?: number;
24
27
  maxConflictPairs?: number;
25
28
  }
29
+ interface MemoryConfigFileEmbedder {
30
+ provider?: string;
31
+ model?: string;
32
+ baseUrl?: string;
33
+ timeoutMs?: number;
34
+ dimensions?: number;
35
+ }
26
36
  /** Parse and validate a behavior config block, falling back to defaults for each field.
27
37
  * Warns to stderr for unknown keys (likely typos) and out-of-range values.
28
38
  * Exported for testing — validates and clamps all fields. */
29
39
  export declare function parseBehaviorConfig(raw?: MemoryConfigFileBehavior): BehaviorConfig;
40
+ /** Parse and validate an embedder config block.
41
+ * Returns undefined when block is absent (auto-detect mode).
42
+ * Exported for testing. */
43
+ export declare function parseEmbedderConfig(raw?: MemoryConfigFileEmbedder): EmbedderConfig | undefined;
44
+ /** Create an Embedder from config.
45
+ * - provider "none" → null (keyword-only)
46
+ * - provider "ollama" → LazyEmbedder wrapping OllamaEmbedder with config params
47
+ * - No config (auto-detect) → LazyEmbedder wrapping default OllamaEmbedder */
48
+ export declare function createEmbedderFromConfig(config?: EmbedderConfig): Embedder | undefined;
30
49
  /** Load lobe configs with priority: memory-config.json -> env vars -> single-repo default */
31
50
  export declare function getLobeConfigs(): LoadedConfig;
32
51
  export {};
package/dist/config.js CHANGED
@@ -7,6 +7,7 @@ import { execFileSync } from 'child_process';
7
7
  import path from 'path';
8
8
  import os from 'os';
9
9
  import { DEFAULT_STORAGE_BUDGET_BYTES } from './types.js';
10
+ import { OllamaEmbedder, LazyEmbedder } from './embedder.js';
10
11
  import { DEFAULT_STALE_DAYS_STANDARD, DEFAULT_STALE_DAYS_PREFERENCES, DEFAULT_MAX_STALE_IN_BRIEFING, DEFAULT_MAX_DEDUP_SUGGESTIONS, DEFAULT_MAX_CONFLICT_PAIRS, } from './thresholds.js';
11
12
  /** Validate and clamp a numeric threshold to a given range.
12
13
  * Returns the default if the value is missing, NaN, or out of range. */
@@ -46,6 +47,61 @@ export function parseBehaviorConfig(raw) {
46
47
  maxConflictPairs: clampThreshold(raw.maxConflictPairs, DEFAULT_MAX_CONFLICT_PAIRS, 1, 5),
47
48
  };
48
49
  }
50
+ /** Known embedder config keys — used to warn on typos/unknown fields. */
51
+ const KNOWN_EMBEDDER_KEYS = new Set([
52
+ 'provider', 'model', 'baseUrl', 'timeoutMs', 'dimensions',
53
+ ]);
54
+ const VALID_PROVIDERS = new Set(['ollama', 'none']);
55
+ /** Parse and validate an embedder config block.
56
+ * Returns undefined when block is absent (auto-detect mode).
57
+ * Exported for testing. */
58
+ export function parseEmbedderConfig(raw) {
59
+ if (!raw)
60
+ return undefined;
61
+ // Warn on unrecognized keys
62
+ for (const key of Object.keys(raw)) {
63
+ if (!KNOWN_EMBEDDER_KEYS.has(key)) {
64
+ process.stderr.write(`[memory-mcp] Unknown embedder config key "${key}" — ignored. ` +
65
+ `Valid keys: ${Array.from(KNOWN_EMBEDDER_KEYS).join(', ')}\n`);
66
+ }
67
+ }
68
+ // Validate provider — default to 'ollama' if present but not set
69
+ const provider = raw.provider && VALID_PROVIDERS.has(raw.provider)
70
+ ? raw.provider
71
+ : 'ollama';
72
+ if (raw.provider && !VALID_PROVIDERS.has(raw.provider)) {
73
+ process.stderr.write(`[memory-mcp] Unknown embedder provider "${raw.provider}" — using "ollama". Valid: ${Array.from(VALID_PROVIDERS).join(', ')}\n`);
74
+ }
75
+ return {
76
+ provider,
77
+ model: raw.model,
78
+ baseUrl: raw.baseUrl,
79
+ timeoutMs: raw.timeoutMs !== undefined
80
+ ? clampThreshold(raw.timeoutMs, 5000, 500, 30000)
81
+ : undefined,
82
+ dimensions: raw.dimensions !== undefined
83
+ ? clampThreshold(raw.dimensions, 384, 64, 4096)
84
+ : undefined,
85
+ };
86
+ }
87
+ /** Create an Embedder from config.
88
+ * - provider "none" → null (keyword-only)
89
+ * - provider "ollama" → LazyEmbedder wrapping OllamaEmbedder with config params
90
+ * - No config (auto-detect) → LazyEmbedder wrapping default OllamaEmbedder */
91
+ export function createEmbedderFromConfig(config) {
92
+ // Explicit opt-out
93
+ if (config?.provider === 'none')
94
+ return undefined;
95
+ // Explicit or default Ollama config
96
+ const candidate = new OllamaEmbedder({
97
+ model: config?.model,
98
+ baseUrl: config?.baseUrl,
99
+ timeoutMs: config?.timeoutMs,
100
+ dimensions: config?.dimensions,
101
+ });
102
+ // Both explicit "ollama" and auto-detect use LazyEmbedder for fast startup
103
+ return new LazyEmbedder(candidate);
104
+ }
49
105
  function resolveRoot(root) {
50
106
  return root
51
107
  .replace(/^\$HOME\b/, process.env.HOME ?? '')
@@ -84,7 +140,7 @@ function resolveMemoryPath(repoRoot, workspaceName, explicitMemoryDir) {
84
140
  /** If no lobe has alwaysInclude: true AND the legacy global store directory has actual entries,
85
141
  * auto-create a "global" lobe pointing to it. Protects existing users who haven't updated their config.
86
142
  * Only fires when the dir contains .md files — an empty dir doesn't trigger creation. */
87
- function ensureAlwaysIncludeLobe(configs, behavior) {
143
+ function ensureAlwaysIncludeLobe(configs, behavior, embedder) {
88
144
  const hasAlwaysInclude = Array.from(configs.values()).some(c => c.alwaysInclude);
89
145
  if (hasAlwaysInclude)
90
146
  return;
@@ -114,6 +170,7 @@ function ensureAlwaysIncludeLobe(configs, behavior) {
114
170
  storageBudgetBytes: DEFAULT_STORAGE_BUDGET_BYTES,
115
171
  alwaysInclude: true,
116
172
  behavior,
173
+ embedder,
117
174
  });
118
175
  process.stderr.write(`[memory-mcp] Auto-created "global" lobe (alwaysInclude) from existing ${globalPath}\n`);
119
176
  }
@@ -131,6 +188,8 @@ export function getLobeConfigs() {
131
188
  else {
132
189
  // Parse global behavior config once — applies to all lobes
133
190
  const behavior = parseBehaviorConfig(external.behavior);
191
+ const embedderConfig = parseEmbedderConfig(external.embedder);
192
+ const embedder = createEmbedderFromConfig(embedderConfig);
134
193
  for (const [name, config] of Object.entries(external.lobes)) {
135
194
  if (!config.root) {
136
195
  process.stderr.write(`[memory-mcp] Skipping lobe "${name}": missing "root" field\n`);
@@ -143,14 +202,15 @@ export function getLobeConfigs() {
143
202
  storageBudgetBytes: (config.budgetMB ?? 2) * 1024 * 1024,
144
203
  alwaysInclude: config.alwaysInclude ?? false,
145
204
  behavior,
205
+ embedder,
146
206
  });
147
207
  }
148
208
  if (configs.size > 0) {
149
209
  // Reuse the already-parsed behavior config for the alwaysInclude fallback
150
210
  const resolvedBehavior = external.behavior ? behavior : undefined;
151
- ensureAlwaysIncludeLobe(configs, resolvedBehavior);
211
+ ensureAlwaysIncludeLobe(configs, resolvedBehavior, embedder);
152
212
  process.stderr.write(`[memory-mcp] Loaded ${configs.size} lobe(s) from memory-config.json\n`);
153
- return { configs, origin: { source: 'file', path: configPath }, behavior: resolvedBehavior };
213
+ return { configs, origin: { source: 'file', path: configPath }, behavior: resolvedBehavior, embedder };
154
214
  }
155
215
  }
156
216
  }
@@ -162,6 +222,8 @@ export function getLobeConfigs() {
162
222
  process.stderr.write(`[memory-mcp] Failed to parse memory-config.json: ${message}\n`);
163
223
  }
164
224
  }
225
+ // Auto-detect embedder for env var and default modes (no config file)
226
+ const autoEmbedder = createEmbedderFromConfig(undefined);
165
227
  // 2. Try env var multi-repo mode
166
228
  const workspacesJson = process.env.MEMORY_MCP_WORKSPACES;
167
229
  if (workspacesJson) {
@@ -176,12 +238,13 @@ export function getLobeConfigs() {
176
238
  memoryPath: resolveMemoryPath(repoRoot, name, explicitDir),
177
239
  storageBudgetBytes: storageBudget,
178
240
  alwaysInclude: false,
241
+ embedder: autoEmbedder,
179
242
  });
180
243
  }
181
244
  if (configs.size > 0) {
182
- ensureAlwaysIncludeLobe(configs);
245
+ ensureAlwaysIncludeLobe(configs, undefined, autoEmbedder);
183
246
  process.stderr.write(`[memory-mcp] Loaded ${configs.size} lobe(s) from MEMORY_MCP_WORKSPACES env var\n`);
184
- return { configs, origin: { source: 'env' } };
247
+ return { configs, origin: { source: 'env' }, embedder: autoEmbedder };
185
248
  }
186
249
  }
187
250
  catch (e) {
@@ -197,8 +260,9 @@ export function getLobeConfigs() {
197
260
  memoryPath: resolveMemoryPath(repoRoot, 'default', explicitDir),
198
261
  storageBudgetBytes: storageBudget,
199
262
  alwaysInclude: false,
263
+ embedder: autoEmbedder,
200
264
  });
201
265
  // No ensureAlwaysIncludeLobe here — single-repo default users have everything in one lobe
202
266
  process.stderr.write(`[memory-mcp] Using single-lobe default mode (cwd: ${repoRoot})\n`);
203
- return { configs, origin: { source: 'default' } };
267
+ return { configs, origin: { source: 'default' }, embedder: autoEmbedder };
204
268
  }
@@ -60,6 +60,28 @@ export declare class FakeEmbedder implements Embedder {
60
60
  constructor(dimensions?: number);
61
61
  embed(text: string, _signal?: AbortSignal): Promise<EmbedResult>;
62
62
  }
63
+ /** Lazy auto-detecting embedder — probes on first use, caches the result.
64
+ * Re-probes on failure after a TTL window so the system recovers if
65
+ * Ollama starts after MCP startup.
66
+ *
67
+ * Implements the same Embedder interface — the store never knows it's lazy.
68
+ * The probe uses the candidate's own timeout (5s for cold starts).
69
+ * The caller's signal is only forwarded to the actual embed call, not the probe. */
70
+ export declare class LazyEmbedder implements Embedder {
71
+ readonly dimensions: number;
72
+ private inner;
73
+ private lastProbeTime;
74
+ private hasLoggedUnavailable;
75
+ private readonly candidate;
76
+ private readonly reprobeIntervalMs;
77
+ private readonly now;
78
+ constructor(candidate: Embedder, opts?: {
79
+ readonly reprobeIntervalMs?: number;
80
+ /** Injectable clock for testing — default Date.now */
81
+ readonly now?: () => number;
82
+ });
83
+ embed(text: string, signal?: AbortSignal): Promise<EmbedResult>;
84
+ }
63
85
  /** Batch embed texts sequentially. Pure composition over Embedder.embed().
64
86
  * Sequential because local Ollama benefits from serialized requests (single GPU/CPU).
65
87
  * Not on the interface — interface segregation. */
package/dist/embedder.js CHANGED
@@ -130,6 +130,56 @@ function trigramHash(trigram, buckets) {
130
130
  }
131
131
  return ((hash % buckets) + buckets) % buckets;
132
132
  }
133
+ // ─── LazyEmbedder ─────────────────────────────────────────────────────────
134
+ /** Default reprobe interval — how long to wait before retrying after a failed probe.
135
+ * 2 minutes balances responsiveness (recovery after Ollama starts) with
136
+ * avoiding excessive probes when Ollama isn't installed. */
137
+ const DEFAULT_REPROBE_INTERVAL_MS = 2 * 60 * 1000;
138
+ /** Lazy auto-detecting embedder — probes on first use, caches the result.
139
+ * Re-probes on failure after a TTL window so the system recovers if
140
+ * Ollama starts after MCP startup.
141
+ *
142
+ * Implements the same Embedder interface — the store never knows it's lazy.
143
+ * The probe uses the candidate's own timeout (5s for cold starts).
144
+ * The caller's signal is only forwarded to the actual embed call, not the probe. */
145
+ export class LazyEmbedder {
146
+ constructor(candidate, opts) {
147
+ this.inner = null;
148
+ this.lastProbeTime = -Infinity;
149
+ this.hasLoggedUnavailable = false;
150
+ this.candidate = candidate;
151
+ this.dimensions = candidate.dimensions;
152
+ this.reprobeIntervalMs = opts?.reprobeIntervalMs ?? DEFAULT_REPROBE_INTERVAL_MS;
153
+ this.now = opts?.now ?? Date.now;
154
+ }
155
+ async embed(text, signal) {
156
+ const now = this.now();
157
+ const shouldProbe = !this.inner && (now - this.lastProbeTime >= this.reprobeIntervalMs);
158
+ if (shouldProbe) {
159
+ this.lastProbeTime = now;
160
+ // Probe without caller's signal — use candidate's default timeout (5s)
161
+ // so cold model loads aren't aborted by a tight query-time timeout
162
+ const probe = await this.candidate.embed('probe');
163
+ if (probe.ok) {
164
+ this.inner = this.candidate;
165
+ if (this.hasLoggedUnavailable) {
166
+ // Recovery after previous failure — notify
167
+ process.stderr.write('[memory-mcp] Embedding provider recovered — semantic search active\n');
168
+ this.hasLoggedUnavailable = false;
169
+ }
170
+ }
171
+ else if (!this.hasLoggedUnavailable) {
172
+ // Only log first failure — avoid noisy repeated warnings
173
+ process.stderr.write(`[memory-mcp] Embedding provider not available — using keyword-only search (will retry in ${Math.round(this.reprobeIntervalMs / 1000)}s)\n`);
174
+ this.hasLoggedUnavailable = true;
175
+ }
176
+ }
177
+ if (!this.inner) {
178
+ return { ok: false, failure: { kind: 'provider-unavailable', reason: 'auto-detect: provider not available' } };
179
+ }
180
+ return this.inner.embed(text, signal);
181
+ }
182
+ }
133
183
  // ─── Batch utility ────────────────────────────────────────────────────────
134
184
  /** Batch embed texts sequentially. Pure composition over Embedder.embed().
135
185
  * Sequential because local Ollama benefits from serialized requests (single GPU/CPU).
@@ -1,6 +1,11 @@
1
1
  import type { MemoryStats, StaleEntry, ConflictPair, BehaviorConfig } from './types.js';
2
2
  import { type FilterGroup } from './text-analyzer.js';
3
3
  import type { MarkdownMemoryStore } from './store.js';
4
+ /** Format the search mode indicator for context/recall responses.
5
+ * Pure function — no I/O, no state.
6
+ *
7
+ * Shows whether semantic search is active and vector coverage. */
8
+ export declare function formatSearchMode(embedderAvailable: boolean, vectorCount: number, totalCount: number): string;
4
9
  /** Format the stale entries section for briefing/context responses */
5
10
  export declare function formatStaleSection(staleDetails: readonly StaleEntry[]): string;
6
11
  /** Format the conflict detection warning for query/context responses */
@@ -4,6 +4,22 @@
4
4
  // and returns a formatted string for the tool response.
5
5
  import { DEFAULT_STALE_DAYS_STANDARD, DEFAULT_STALE_DAYS_PREFERENCES, DEFAULT_MAX_STALE_IN_BRIEFING, DEFAULT_MAX_DEDUP_SUGGESTIONS, DEFAULT_MAX_CONFLICT_PAIRS, MAX_FOOTER_TAGS, WARN_SEPARATOR, } from './thresholds.js';
6
6
  import { analyzeFilterGroups } from './text-analyzer.js';
7
+ /** Format the search mode indicator for context/recall responses.
8
+ * Pure function — no I/O, no state.
9
+ *
10
+ * Shows whether semantic search is active and vector coverage. */
11
+ export function formatSearchMode(embedderAvailable, vectorCount, totalCount) {
12
+ if (!embedderAvailable) {
13
+ return '*Search: keyword-only (install Ollama for semantic search)*';
14
+ }
15
+ if (vectorCount === 0 && totalCount > 0) {
16
+ return `*Search: semantic + keyword (0/${totalCount} entries vectorized — run memory_reembed)*`;
17
+ }
18
+ if (totalCount === 0) {
19
+ return '*Search: semantic + keyword (no entries yet)*';
20
+ }
21
+ return `*Search: semantic + keyword (${vectorCount}/${totalCount} entries vectorized)*`;
22
+ }
7
23
  /** Format the stale entries section for briefing/context responses */
8
24
  export function formatStaleSection(staleDetails) {
9
25
  const lines = [
@@ -61,11 +77,12 @@ export function formatStats(lobe, result) {
61
77
  .join('\n')
62
78
  : ' (none)';
63
79
  const corruptLine = result.corruptFiles > 0 ? `\n**Corrupt files:** ${result.corruptFiles}` : '';
80
+ const vectorLine = `\n**Vectors:** ${result.vectorCount}/${result.totalEntries} entries vectorized`;
64
81
  return [
65
82
  `## [${lobe}] Memory Stats`,
66
83
  ``,
67
84
  `**Memory location:** ${result.memoryPath}`,
68
- `**Total entries:** ${result.totalEntries}${corruptLine}`,
85
+ `**Total entries:** ${result.totalEntries}${corruptLine}${vectorLine}`,
69
86
  `**Storage:** ${result.storageSize} / ${Math.round(result.storageBudgetBytes / 1024 / 1024)}MB budget`,
70
87
  ``,
71
88
  `### By Topic`,
package/dist/index.js CHANGED
@@ -14,7 +14,7 @@ import { getLobeConfigs } from './config.js';
14
14
  import { ConfigManager } from './config-manager.js';
15
15
  import { normalizeArgs } from './normalize.js';
16
16
  import { buildCrashReport, writeCrashReport, writeCrashReportSync, readLatestCrash, readCrashHistory, clearLatestCrash, formatCrashReport, formatCrashSummary, markServerStarted, } from './crash-journal.js';
17
- import { formatStaleSection, formatConflictWarning, formatStats, formatBehaviorConfigSection, mergeTagFrequencies, buildQueryFooter, buildBriefingTagPrimerSections } from './formatters.js';
17
+ import { formatStaleSection, formatConflictWarning, formatStats, formatBehaviorConfigSection, mergeTagFrequencies, buildQueryFooter, buildBriefingTagPrimerSections, formatSearchMode } from './formatters.js';
18
18
  import { parseFilter } from './text-analyzer.js';
19
19
  import { VOCABULARY_ECHO_LIMIT, WARN_SEPARATOR } from './thresholds.js';
20
20
  import { matchRootsToLobeNames, buildLobeResolution } from './lobe-resolution.js';
@@ -398,6 +398,8 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
398
398
  // memory_diagnose is intentionally hidden from the tool list — it clutters
399
399
  // agent tool discovery and should only be called when directed by error messages
400
400
  // or crash reports. The handler still works if called directly.
401
+ // memory_reembed is hidden — utility for generating/regenerating embeddings.
402
+ // Surfaced via hint in memory_context when >50% of entries lack vectors.
401
403
  ] };
402
404
  });
403
405
  // --- Tool handlers ---
@@ -909,6 +911,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
909
911
  // --- Search mode: context provided → keyword search across all topics ---
910
912
  const max = maxResults ?? 10;
911
913
  const threshold = minMatch ?? 0.2;
914
+ // Resolve which lobes to search — follows the degradation ladder via resolveLobesForRead().
912
915
  const allLobeResults = [];
913
916
  const ctxEntryLobeMap = new Map(); // entry id → lobe name
914
917
  let label;
@@ -973,10 +976,14 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
973
976
  const noResultHint = ctxGlobalOnlyHint
974
977
  ? `\n\n> ${ctxGlobalOnlyHint}`
975
978
  : '\n\nThis is fine — proceed without prior context. As you learn things worth remembering, store them with memory_store.';
979
+ // Mode indicator on no-results path — helps diagnose why nothing was found
980
+ const modeHint = primaryStore
981
+ ? `\n${formatSearchMode(primaryStore.hasEmbedder, primaryStore.vectorCount, primaryStore.entryCount)}`
982
+ : '';
976
983
  return {
977
984
  content: [{
978
985
  type: 'text',
979
- text: `[${label}] No relevant knowledge found for: "${context}"${noResultHint}\n\n---\n${ctxFooter}`,
986
+ text: `[${label}] No relevant knowledge found for: "${context}"${noResultHint}${modeHint}\n\n---\n${ctxFooter}`,
980
987
  }],
981
988
  };
982
989
  }
@@ -1019,6 +1026,10 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
1019
1026
  sections.push(formatConflictWarning(ctxConflicts));
1020
1027
  }
1021
1028
  }
1029
+ // Search mode indicator — lightweight getters, no extra disk reload
1030
+ if (primaryStore) {
1031
+ sections.push(formatSearchMode(primaryStore.hasEmbedder, primaryStore.vectorCount, primaryStore.entryCount));
1032
+ }
1022
1033
  // Collect all matched keywords and topics for the dedup hint
1023
1034
  const allMatchedKeywords = new Set();
1024
1035
  const matchedTopics = new Set();
@@ -1070,6 +1081,28 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
1070
1081
  }
1071
1082
  return { content: [{ type: 'text', text: sections.join('\n\n---\n\n') }] };
1072
1083
  }
1084
+ case 'memory_reembed': {
1085
+ const { lobe: rawLobe } = z.object({
1086
+ lobe: z.string().optional(),
1087
+ }).parse(args ?? {});
1088
+ const lobeName = rawLobe ?? lobeNames[0];
1089
+ const ctx = resolveToolContext(lobeName);
1090
+ if (!ctx.ok)
1091
+ return contextError(ctx);
1092
+ const result = await ctx.store.reEmbed();
1093
+ if (result.error) {
1094
+ return { content: [{ type: 'text', text: `[${ctx.label}] Re-embed failed: ${result.error}` }] };
1095
+ }
1096
+ const parts = [
1097
+ `[${ctx.label}] Re-embedded ${result.embedded} entries`,
1098
+ `(${result.skipped} skipped, ${result.failed} failed).`,
1099
+ ];
1100
+ // Hint if many entries were vectorized
1101
+ if (result.embedded > 0) {
1102
+ parts.push('\nSemantic search is now active for these entries.');
1103
+ }
1104
+ return { content: [{ type: 'text', text: parts.join(' ') }] };
1105
+ }
1073
1106
  case 'memory_bootstrap': {
1074
1107
  const { lobe: rawLobe, root, budgetMB } = z.object({
1075
1108
  lobe: z.string().optional(),
@@ -0,0 +1,57 @@
1
+ import type { MemoryEntry, EmbeddingVector } from './types.js';
2
+ /** Which ranking signal produced this result. */
3
+ export type RankSource = 'keyword' | 'semantic' | 'merged';
4
+ /** A scored search result. Shared return type for all ranking functions. */
5
+ export interface ScoredEntry {
6
+ readonly entry: MemoryEntry;
7
+ readonly score: number;
8
+ readonly matchedKeywords: readonly string[];
9
+ readonly source: RankSource;
10
+ /** Raw cosine similarity before boost multiplication.
11
+ * Present for semantic and merged results; absent for keyword-only.
12
+ * Used for debug logging (threshold calibration) and display. */
13
+ readonly semanticSimilarity?: number;
14
+ }
15
+ /** Shared context for ranking functions — pure data, no callbacks.
16
+ * Groups the parameters that keywordRank and semanticRank need,
17
+ * keeping function signatures tight.
18
+ *
19
+ * freshEntryIds is precomputed by the store — the ranking function checks
20
+ * set membership rather than calling into the store's private staleness logic.
21
+ * This keeps the function provably pure.
22
+ *
23
+ * defaultModuleBoost is the fallback boost for modules/* topics not in topicBoost.
24
+ * Injected here so ranking.ts needs zero direct threshold imports. */
25
+ export interface RankContext {
26
+ readonly currentBranch: string;
27
+ readonly branchFilter: string | undefined;
28
+ readonly topicBoost: Readonly<Record<string, number>>;
29
+ readonly freshEntryIds: ReadonlySet<string>;
30
+ readonly defaultModuleBoost: number;
31
+ }
32
+ /** Rank entries by keyword overlap with context keywords.
33
+ * Pure extraction of the ranking logic from contextSearch — identical scoring.
34
+ *
35
+ * Filter + rank in one pass for efficiency (~200 entries, not worth two iterations).
36
+ * Branch filtering for recent-work is applied here because it's a pre-condition
37
+ * for ranking, not a separate pipeline stage.
38
+ *
39
+ * Does NOT include the "always include user entries" policy — that's an
40
+ * orchestration concern that stays in contextSearch. */
41
+ export declare function keywordRank(entries: readonly MemoryEntry[], contextKeywords: ReadonlySet<string>, minMatch: number, ctx: RankContext): readonly ScoredEntry[];
42
+ /** Rank entries by cosine similarity between query embedding and stored vectors.
43
+ * Pure function — no I/O, no side effects.
44
+ *
45
+ * Entries without vectors are silently skipped — they participate via keyword ranking only.
46
+ * Branch filtering applied (recent-work scoped to current branch).
47
+ *
48
+ * @param minSimilarity Minimum cosine similarity to include. Caller provides the
49
+ * threshold (SEMANTIC_MIN_SIMILARITY for production, 0 for debug mode to see all scores). */
50
+ export declare function semanticRank(entries: readonly MemoryEntry[], vectors: ReadonlyMap<string, EmbeddingVector>, queryVector: EmbeddingVector, minSimilarity: number, ctx: RankContext): readonly ScoredEntry[];
51
+ /** Merge keyword and semantic ranking results using max-score strategy.
52
+ * For each entry: final score = max(keywordScore, semanticScore).
53
+ * Entries in both lists get source: 'merged', preserving matchedKeywords from
54
+ * keyword result and semanticSimilarity from semantic result.
55
+ *
56
+ * No weighted fusion, no magic constants. Deterministic — same inputs, same output. */
57
+ export declare function mergeRankings(keywordResults: readonly ScoredEntry[], semanticResults: readonly ScoredEntry[]): readonly ScoredEntry[];
@@ -0,0 +1,140 @@
1
+ // Domain ranking functions — scoring MemoryEntries using text analysis primitives.
2
+ // Pure functions: no I/O, no side effects, deterministic.
3
+ //
4
+ // Separated from text-analyzer.ts (which works on strings/sets, not domain types)
5
+ // and from store.ts (which handles orchestration and persistence).
6
+ // This module is the ranking pipeline seam for keyword, semantic, and merged ranking.
7
+ import { REFERENCE_BOOST_MULTIPLIER, TAG_MATCH_BOOST, } from './thresholds.js';
8
+ import { extractKeywords, stem, cosineSimilarity } from './text-analyzer.js';
9
+ // ─── Shared helpers ────────────────────────────────────────────────────────
10
+ /** Resolve topic boost for an entry. Falls back to defaultModuleBoost for
11
+ * modules/* topics, or 1.0 for unknown topics. */
12
+ function getTopicBoost(topic, ctx) {
13
+ return ctx.topicBoost[topic] ?? (topic.startsWith('modules/') ? ctx.defaultModuleBoost : 1.0);
14
+ }
15
+ /** Check whether a recent-work entry should be filtered out by branch. */
16
+ function isBranchFiltered(entry, ctx) {
17
+ return entry.topic === 'recent-work'
18
+ && ctx.branchFilter !== '*'
19
+ && !!entry.branch
20
+ && entry.branch !== ctx.currentBranch;
21
+ }
22
+ // ─── Keyword ranking ───────────────────────────────────────────────────────
23
+ /** Rank entries by keyword overlap with context keywords.
24
+ * Pure extraction of the ranking logic from contextSearch — identical scoring.
25
+ *
26
+ * Filter + rank in one pass for efficiency (~200 entries, not worth two iterations).
27
+ * Branch filtering for recent-work is applied here because it's a pre-condition
28
+ * for ranking, not a separate pipeline stage.
29
+ *
30
+ * Does NOT include the "always include user entries" policy — that's an
31
+ * orchestration concern that stays in contextSearch. */
32
+ export function keywordRank(entries, contextKeywords, minMatch, ctx) {
33
+ const results = [];
34
+ for (const entry of entries) {
35
+ if (isBranchFiltered(entry, ctx))
36
+ continue;
37
+ // Include tag values as keywords so tagged entries surface in context search
38
+ const tagKeywordPart = entry.tags ? ` ${entry.tags.join(' ')}` : '';
39
+ const entryKeywords = extractKeywords(`${entry.title} ${entry.content}${tagKeywordPart}`);
40
+ const matchedKeywords = [];
41
+ for (const kw of contextKeywords) {
42
+ if (entryKeywords.has(kw))
43
+ matchedKeywords.push(kw);
44
+ }
45
+ if (matchedKeywords.length === 0)
46
+ continue;
47
+ // Enforce minimum match threshold
48
+ const matchRatio = matchedKeywords.length / contextKeywords.size;
49
+ if (matchRatio < minMatch)
50
+ continue;
51
+ // Score = keyword match ratio × confidence × topic boost × freshness × reference boost × tag boost
52
+ const boost = getTopicBoost(entry.topic, ctx);
53
+ const freshnessMultiplier = ctx.freshEntryIds.has(entry.id) ? 1.0 : 0.7;
54
+ // Reference boost: exact class/file name match in references gets a 1.3x multiplier
55
+ const referenceBoost = entry.references?.some(ref => {
56
+ const basename = ref.split('/').pop()?.replace(/\.\w+$/, '') ?? ref;
57
+ return contextKeywords.has(stem(basename.toLowerCase()));
58
+ }) ? REFERENCE_BOOST_MULTIPLIER : 1.0;
59
+ // Tag boost: if any tag exactly matches a context keyword, boost the entry
60
+ const tagBoost = entry.tags?.some(tag => contextKeywords.has(tag))
61
+ ? TAG_MATCH_BOOST : 1.0;
62
+ const score = matchRatio * entry.confidence * boost * freshnessMultiplier * referenceBoost * tagBoost;
63
+ results.push({ entry, score, matchedKeywords, source: 'keyword' });
64
+ }
65
+ return results.sort((a, b) => b.score - a.score);
66
+ }
67
+ // ─── Semantic ranking ──────────────────────────────────────────────────────
68
+ /** Rank entries by cosine similarity between query embedding and stored vectors.
69
+ * Pure function — no I/O, no side effects.
70
+ *
71
+ * Entries without vectors are silently skipped — they participate via keyword ranking only.
72
+ * Branch filtering applied (recent-work scoped to current branch).
73
+ *
74
+ * @param minSimilarity Minimum cosine similarity to include. Caller provides the
75
+ * threshold (SEMANTIC_MIN_SIMILARITY for production, 0 for debug mode to see all scores). */
76
+ export function semanticRank(entries, vectors, queryVector, minSimilarity, ctx) {
77
+ const results = [];
78
+ for (const entry of entries) {
79
+ if (isBranchFiltered(entry, ctx))
80
+ continue;
81
+ const entryVector = vectors.get(entry.id);
82
+ if (!entryVector)
83
+ continue;
84
+ const similarity = cosineSimilarity(queryVector, entryVector);
85
+ if (similarity < minSimilarity)
86
+ continue;
87
+ // Score = cosine similarity × confidence × topic boost × freshness
88
+ // No reference/tag boost — those are keyword-domain signals captured by keywordRank
89
+ const boost = getTopicBoost(entry.topic, ctx);
90
+ const freshnessMultiplier = ctx.freshEntryIds.has(entry.id) ? 1.0 : 0.7;
91
+ const score = similarity * entry.confidence * boost * freshnessMultiplier;
92
+ results.push({
93
+ entry,
94
+ score,
95
+ matchedKeywords: [],
96
+ source: 'semantic',
97
+ semanticSimilarity: similarity,
98
+ });
99
+ }
100
+ return results.sort((a, b) => b.score - a.score);
101
+ }
102
+ // ─── Merge ─────────────────────────────────────────────────────────────────
103
+ /** Merge keyword and semantic ranking results using max-score strategy.
104
+ * For each entry: final score = max(keywordScore, semanticScore).
105
+ * Entries in both lists get source: 'merged', preserving matchedKeywords from
106
+ * keyword result and semanticSimilarity from semantic result.
107
+ *
108
+ * No weighted fusion, no magic constants. Deterministic — same inputs, same output. */
109
+ export function mergeRankings(keywordResults, semanticResults) {
110
+ // Index keyword results by entry ID for O(1) lookup during merge
111
+ const keywordById = new Map();
112
+ for (const r of keywordResults) {
113
+ keywordById.set(r.entry.id, r);
114
+ }
115
+ const merged = new Map();
116
+ // Process semantic results — check for keyword counterpart
117
+ for (const sem of semanticResults) {
118
+ const kw = keywordById.get(sem.entry.id);
119
+ if (kw) {
120
+ // Entry in both lists — use max score, merge signals
121
+ merged.set(sem.entry.id, {
122
+ entry: sem.entry,
123
+ score: Math.max(sem.score, kw.score),
124
+ matchedKeywords: kw.matchedKeywords, // from keyword (semantic has none)
125
+ source: 'merged',
126
+ semanticSimilarity: sem.semanticSimilarity, // from semantic
127
+ });
128
+ keywordById.delete(sem.entry.id); // consumed
129
+ }
130
+ else {
131
+ // Semantic-only
132
+ merged.set(sem.entry.id, sem);
133
+ }
134
+ }
135
+ // Remaining keyword-only results
136
+ for (const kw of keywordById.values()) {
137
+ merged.set(kw.entry.id, kw);
138
+ }
139
+ return Array.from(merged.values()).sort((a, b) => b.score - a.score);
140
+ }
package/dist/store.d.ts CHANGED
@@ -1,4 +1,5 @@
1
- import type { MemoryEntry, TopicScope, TrustLevel, DetailLevel, DurabilityDecision, QueryResult, StoreResult, CorrectResult, MemoryStats, BriefingResult, ConflictPair, MemoryConfig } from './types.js';
1
+ import type { MemoryEntry, TopicScope, TrustLevel, DetailLevel, DurabilityDecision, QueryResult, StoreResult, CorrectResult, MemoryStats, ReEmbedResult, BriefingResult, ConflictPair, MemoryConfig } from './types.js';
2
+ import type { ScoredEntry } from './ranking.js';
2
3
  export declare class MarkdownMemoryStore {
3
4
  private readonly config;
4
5
  private readonly memoryPath;
@@ -12,6 +13,14 @@ export declare class MarkdownMemoryStore {
12
13
  /** Resolved behavior thresholds — user config merged over defaults.
13
14
  * Centralizes threshold resolution so every caller gets the same value. */
14
15
  private get behavior();
16
+ /** Whether an embedder is configured — for mode indicator display. Read-only. */
17
+ get hasEmbedder(): boolean;
18
+ /** Count of vectorized entries — lightweight, no disk reload.
19
+ * For mode indicator display. Use stats() for full diagnostics. */
20
+ get vectorCount(): number;
21
+ /** Count of total entries — lightweight, no disk reload.
22
+ * For mode indicator display. Use stats() for full diagnostics. */
23
+ get entryCount(): number;
15
24
  /** Initialize the store: create memory dir and load existing entries */
16
25
  init(): Promise<void>;
17
26
  /** Store a new knowledge entry */
@@ -25,17 +34,23 @@ export declare class MarkdownMemoryStore {
25
34
  hasEntry(id: string): Promise<boolean>;
26
35
  /** Correct an existing entry */
27
36
  correct(id: string, correction: string, action: 'append' | 'replace' | 'delete'): Promise<CorrectResult>;
37
+ /** Re-embed all entries that don't have vectors.
38
+ * Idempotent: entries already in the vectors map are skipped.
39
+ * Early-exit: if the first embed fails, returns immediately (avoids burning through
40
+ * all entries just to discover the embedder is unavailable). */
41
+ reEmbed(): Promise<ReEmbedResult>;
28
42
  /** Get memory health statistics */
29
43
  stats(): Promise<MemoryStats>;
30
44
  /** Bootstrap: scan repo structure and seed initial knowledge */
31
45
  bootstrap(): Promise<StoreResult[]>;
32
- /** Search across all topics using keyword matching with topic-based boosting.
46
+ /** Search across all topics using keyword + semantic ranking with topic-based boosting.
47
+ * Orchestrator: reload, extract keywords, embed query, rank, merge, apply policies.
48
+ *
49
+ * Graceful degradation: when embedder is null or embed fails, semantic results
50
+ * are empty and merge produces keyword-only results — identical to pre-embedding behavior.
51
+ *
33
52
  * @param minMatch Minimum ratio of context keywords that must match (0-1, default 0.2) */
34
- contextSearch(context: string, maxResults?: number, branchFilter?: string, minMatch?: number): Promise<Array<{
35
- entry: MemoryEntry;
36
- score: number;
37
- matchedKeywords: string[];
38
- }>>;
53
+ contextSearch(context: string, maxResults?: number, branchFilter?: string, minMatch?: number): Promise<readonly ScoredEntry[]>;
39
54
  /** Generate a collision-resistant ID: {prefix}-{8 random hex chars} */
40
55
  private generateId;
41
56
  /** Compute relative file path for an entry within the memory directory */
@@ -93,6 +108,13 @@ export declare class MarkdownMemoryStore {
93
108
  /** Find entries in the same topic with significant overlap (dedup detection).
94
109
  * Uses hybrid jaccard+containment similarity. */
95
110
  private findRelatedEntries;
111
+ /** Find semantic duplicates by cosine similarity against stored vectors.
112
+ * Same-topic only. Returns entries above DEDUP_SEMANTIC_THRESHOLD.
113
+ * Returns empty when no embedder or no vectors available. */
114
+ private findSemanticDuplicates;
115
+ /** Merge keyword-based and semantic-based dedup results, dedup by ID,
116
+ * keeping the keyword-based entry when both detect the same duplicate. */
117
+ private mergeRelatedEntries;
96
118
  /** Tag frequency across all entries — for vocabulary echo in store responses.
97
119
  * Returns tags sorted by frequency (descending). O(N) over entries. */
98
120
  getTagFrequency(): ReadonlyMap<string, number>;
package/dist/store.js CHANGED
@@ -7,9 +7,10 @@ import crypto from 'crypto';
7
7
  import { execFile } from 'child_process';
8
8
  import { promisify } from 'util';
9
9
  import { DEFAULT_CONFIDENCE, realClock, parseTopicScope, parseTrustLevel, parseTags, asEmbeddingVector } from './types.js';
10
- import { DEDUP_SIMILARITY_THRESHOLD, CONFLICT_SIMILARITY_THRESHOLD_SAME_TOPIC, CONFLICT_SIMILARITY_THRESHOLD_CROSS_TOPIC, CONFLICT_MIN_CONTENT_CHARS, OPPOSITION_PAIRS, PREFERENCE_SURFACE_THRESHOLD, REFERENCE_BOOST_MULTIPLIER, TOPIC_BOOST, MODULE_TOPIC_BOOST, USER_ALWAYS_INCLUDE_SCORE_FRACTION, DEFAULT_STALE_DAYS_STANDARD, DEFAULT_STALE_DAYS_PREFERENCES, DEFAULT_MAX_STALE_IN_BRIEFING, DEFAULT_MAX_DEDUP_SUGGESTIONS, DEFAULT_MAX_CONFLICT_PAIRS, DEFAULT_MAX_PREFERENCE_SUGGESTIONS, TAG_MATCH_BOOST, } from './thresholds.js';
10
+ import { DEDUP_SIMILARITY_THRESHOLD, DEDUP_SEMANTIC_THRESHOLD, SEMANTIC_MIN_SIMILARITY, CONFLICT_SIMILARITY_THRESHOLD_SAME_TOPIC, CONFLICT_SIMILARITY_THRESHOLD_CROSS_TOPIC, CONFLICT_MIN_CONTENT_CHARS, OPPOSITION_PAIRS, PREFERENCE_SURFACE_THRESHOLD, TOPIC_BOOST, MODULE_TOPIC_BOOST, USER_ALWAYS_INCLUDE_SCORE_FRACTION, QUERY_EMBED_TIMEOUT_MS, DEFAULT_STALE_DAYS_STANDARD, DEFAULT_STALE_DAYS_PREFERENCES, DEFAULT_MAX_STALE_IN_BRIEFING, DEFAULT_MAX_DEDUP_SUGGESTIONS, DEFAULT_MAX_CONFLICT_PAIRS, DEFAULT_MAX_PREFERENCE_SUGGESTIONS, } from './thresholds.js';
11
11
  import { realGitService } from './git-service.js';
12
- import { extractKeywords, stem, similarity, matchesFilter, computeRelevanceScore, } from './text-analyzer.js';
12
+ import { extractKeywords, similarity, cosineSimilarity, matchesFilter, computeRelevanceScore, } from './text-analyzer.js';
13
+ import { keywordRank, semanticRank, mergeRankings } from './ranking.js';
13
14
  import { detectEphemeralSignals, formatEphemeralWarning, getEphemeralSeverity } from './ephemeral.js';
14
15
  // Used only by bootstrap() for git log — not part of the GitService boundary
15
16
  // because bootstrap is a one-shot utility, not a recurring operation
@@ -37,6 +38,20 @@ export class MarkdownMemoryStore {
37
38
  maxConflictPairs: b.maxConflictPairs ?? DEFAULT_MAX_CONFLICT_PAIRS,
38
39
  };
39
40
  }
41
+ /** Whether an embedder is configured — for mode indicator display. Read-only. */
42
+ get hasEmbedder() {
43
+ return this.embedder !== null;
44
+ }
45
+ /** Count of vectorized entries — lightweight, no disk reload.
46
+ * For mode indicator display. Use stats() for full diagnostics. */
47
+ get vectorCount() {
48
+ return this.vectors.size;
49
+ }
50
+ /** Count of total entries — lightweight, no disk reload.
51
+ * For mode indicator display. Use stats() for full diagnostics. */
52
+ get entryCount() {
53
+ return this.entries.size;
54
+ }
40
55
  /** Initialize the store: create memory dir and load existing entries */
41
56
  async init() {
42
57
  await fs.mkdir(this.memoryPath, { recursive: true });
@@ -101,8 +116,22 @@ export class MarkdownMemoryStore {
101
116
  this.entries.set(id, entry);
102
117
  const file = this.entryToRelativePath(entry);
103
118
  await this.persistEntry(entry);
104
- // Dedup: find related entries in the same topic (excluding the one just stored and any overwritten)
105
- const relatedEntries = this.findRelatedEntries(entry, existing?.id);
119
+ // Embed and persist vector — awaited so the .vec file exists when store() returns
120
+ if (this.embedder) {
121
+ const embedText = `${title}\n\n${content}`;
122
+ const embedResult = await this.embedder.embed(embedText);
123
+ if (embedResult.ok) {
124
+ await this.persistVector(file, embedResult.vector);
125
+ this.vectors.set(entry.id, embedResult.vector);
126
+ }
127
+ else {
128
+ process.stderr.write(`[memory-mcp] Embedding failed for ${entry.id}: ${embedResult.failure.kind}\n`);
129
+ }
130
+ }
131
+ // Dedup: merge keyword-based and semantic-based duplicate detection
132
+ const keywordDupes = this.findRelatedEntries(entry, existing?.id);
133
+ const semanticDupes = this.findSemanticDuplicates(entry.id, topic);
134
+ const relatedEntries = this.mergeRelatedEntries(keywordDupes, semanticDupes);
106
135
  // Surface relevant preferences if storing a non-preference entry
107
136
  const relevantPreferences = (topic !== 'preferences' && topic !== 'user')
108
137
  ? this.findRelevantPreferences(entry)
@@ -306,8 +335,59 @@ export class MarkdownMemoryStore {
306
335
  };
307
336
  this.entries.set(id, updated);
308
337
  await this.persistEntry(updated);
338
+ // Re-embed: content changed, old vector is stale
339
+ if (this.embedder) {
340
+ const embedText = `${updated.title}\n\n${updated.content}`;
341
+ const embedResult = await this.embedder.embed(embedText);
342
+ if (embedResult.ok) {
343
+ const updatedFile = this.entryToRelativePath(updated);
344
+ await this.persistVector(updatedFile, embedResult.vector);
345
+ this.vectors.set(updated.id, embedResult.vector);
346
+ }
347
+ else {
348
+ process.stderr.write(`[memory-mcp] Re-embedding failed for ${updated.id}: ${embedResult.failure.kind}\n`);
349
+ }
350
+ }
309
351
  return { corrected: true, id, action, newConfidence: 1.0, trust: 'user' };
310
352
  }
353
+ /** Re-embed all entries that don't have vectors.
354
+ * Idempotent: entries already in the vectors map are skipped.
355
+ * Early-exit: if the first embed fails, returns immediately (avoids burning through
356
+ * all entries just to discover the embedder is unavailable). */
357
+ async reEmbed() {
358
+ if (!this.embedder) {
359
+ return { embedded: 0, skipped: 0, failed: 0, error: 'No embedder configured' };
360
+ }
361
+ await this.reloadFromDisk();
362
+ // Probe: try embedding a short text to check availability before iterating
363
+ const probe = await this.embedder.embed('probe');
364
+ if (!probe.ok) {
365
+ return { embedded: 0, skipped: 0, failed: 0, error: `Embedder unavailable: ${probe.failure.kind}` };
366
+ }
367
+ let embedded = 0;
368
+ let skipped = 0;
369
+ let failed = 0;
370
+ for (const entry of this.entries.values()) {
371
+ // Entry already has a vector in the map — skip (dimensions are not re-validated here)
372
+ if (this.vectors.has(entry.id)) {
373
+ skipped++;
374
+ continue;
375
+ }
376
+ // Embed the entry
377
+ const embedText = `${entry.title}\n\n${entry.content}`;
378
+ const result = await this.embedder.embed(embedText);
379
+ if (result.ok) {
380
+ const file = this.entryToRelativePath(entry);
381
+ await this.persistVector(file, result.vector);
382
+ this.vectors.set(entry.id, result.vector);
383
+ embedded++;
384
+ }
385
+ else {
386
+ failed++;
387
+ }
388
+ }
389
+ return { embedded, skipped, failed };
390
+ }
311
391
  /** Get memory health statistics */
312
392
  async stats() {
313
393
  await this.reloadFromDisk();
@@ -340,6 +420,7 @@ export class MarkdownMemoryStore {
340
420
  return {
341
421
  totalEntries: allEntries.length,
342
422
  corruptFiles: this.corruptFileCount,
423
+ vectorCount: this.vectors.size,
343
424
  byTopic, byTrust, byFreshness, byTag,
344
425
  storageSize: this.formatBytes(storageSize ?? 0),
345
426
  storageBudgetBytes: this.config.storageBudgetBytes,
@@ -417,59 +498,78 @@ export class MarkdownMemoryStore {
417
498
  return results;
418
499
  }
419
500
  // --- Contextual search (memory_context) ---
420
- /** Search across all topics using keyword matching with topic-based boosting.
501
+ /** Search across all topics using keyword + semantic ranking with topic-based boosting.
502
+ * Orchestrator: reload, extract keywords, embed query, rank, merge, apply policies.
503
+ *
504
+ * Graceful degradation: when embedder is null or embed fails, semantic results
505
+ * are empty and merge produces keyword-only results — identical to pre-embedding behavior.
506
+ *
421
507
  * @param minMatch Minimum ratio of context keywords that must match (0-1, default 0.2) */
422
508
  async contextSearch(context, maxResults = 10, branchFilter, minMatch = 0.2) {
423
509
  // Reload from disk to pick up changes from other processes
424
510
  await this.reloadFromDisk();
425
511
  const contextKeywords = extractKeywords(context);
426
- if (contextKeywords.size === 0)
512
+ // Only bail on zero keywords when there's no embedder to fall back on.
513
+ // Stopword-heavy queries produce zero keywords but can yield semantic results.
514
+ if (contextKeywords.size === 0 && !this.embedder)
427
515
  return [];
428
516
  const currentBranch = branchFilter || await this.getCurrentBranch();
429
- // Topic boost factors — higher = more likely to surface
430
- const topicBoost = TOPIC_BOOST;
431
- const results = [];
432
- for (const entry of this.entries.values()) {
433
- // Filter recent-work by branch (unless branchFilter is "*")
434
- if (entry.topic === 'recent-work' && branchFilter !== '*' && entry.branch && entry.branch !== currentBranch) {
435
- continue;
436
- }
437
- // Include tag values as keywords so tagged entries surface in context search
438
- const tagKeywordPart = entry.tags ? ` ${entry.tags.join(' ')}` : '';
439
- const entryKeywords = extractKeywords(`${entry.title} ${entry.content}${tagKeywordPart}`);
440
- const matchedKeywords = [];
441
- for (const kw of contextKeywords) {
442
- if (entryKeywords.has(kw))
443
- matchedKeywords.push(kw);
517
+ const allEntries = Array.from(this.entries.values());
518
+ // Precompute freshness set — keeps ranking functions provably pure (no callbacks)
519
+ const freshEntryIds = new Set();
520
+ for (const entry of allEntries) {
521
+ if (this.isFresh(entry))
522
+ freshEntryIds.add(entry.id);
523
+ }
524
+ const ctx = {
525
+ currentBranch,
526
+ branchFilter,
527
+ topicBoost: TOPIC_BOOST,
528
+ freshEntryIds,
529
+ defaultModuleBoost: MODULE_TOPIC_BOOST,
530
+ };
531
+ // Keyword ranking (may be empty for stopword-heavy queries)
532
+ const keywordResults = contextKeywords.size > 0
533
+ ? keywordRank(allEntries, contextKeywords, minMatch, ctx)
534
+ : [];
535
+ // Semantic ranking (only if embedder available and query embeds successfully)
536
+ const debug = process.env.MEMORY_MCP_DEBUG === '1';
537
+ let semanticResults = [];
538
+ if (this.embedder) {
539
+ const querySignal = AbortSignal.timeout(QUERY_EMBED_TIMEOUT_MS);
540
+ const queryResult = await this.embedder.embed(context, querySignal);
541
+ if (queryResult.ok) {
542
+ // In debug mode, get ALL scores (threshold=0) for calibration logging
543
+ const rawSemanticResults = semanticRank(allEntries, this.vectors, queryResult.vector, debug ? 0 : SEMANTIC_MIN_SIMILARITY, ctx);
544
+ if (debug) {
545
+ for (const r of rawSemanticResults) {
546
+ const included = (r.semanticSimilarity ?? 0) >= SEMANTIC_MIN_SIMILARITY;
547
+ process.stderr.write(`[memory-mcp:debug] semantic ${(r.semanticSimilarity ?? 0).toFixed(3)} ${r.entry.id} "${r.entry.title}"${included ? '' : ' ← below threshold'}\n`);
548
+ }
549
+ // Filter to threshold after logging all scores
550
+ semanticResults = rawSemanticResults.filter(r => (r.semanticSimilarity ?? 0) >= SEMANTIC_MIN_SIMILARITY);
551
+ }
552
+ else {
553
+ semanticResults = rawSemanticResults;
554
+ }
444
555
  }
445
- if (matchedKeywords.length === 0)
446
- continue;
447
- // Enforce minimum match threshold
448
- const matchRatio = matchedKeywords.length / contextKeywords.size;
449
- if (matchRatio < minMatch)
450
- continue;
451
- // Score = keyword match ratio x confidence x topic boost x reference boost
452
- const boost = topicBoost[entry.topic] ?? (entry.topic.startsWith('modules/') ? MODULE_TOPIC_BOOST : 1.0);
453
- const freshnessMultiplier = this.isFresh(entry) ? 1.0 : 0.7;
454
- // Reference boost: exact class/file name match in references gets a 1.3x multiplier.
455
- // Extracts the basename (without extension) from each reference path and stems it,
456
- // then checks for overlap with the context keywords.
457
- const referenceBoost = entry.references?.some(ref => {
458
- const basename = ref.split('/').pop()?.replace(/\.\w+$/, '') ?? ref;
459
- return contextKeywords.has(stem(basename.toLowerCase()));
460
- }) ? REFERENCE_BOOST_MULTIPLIER : 1.0;
461
- // Tag boost: if any tag exactly matches a context keyword, boost the entry
462
- const tagBoost = entry.tags?.some(tag => contextKeywords.has(tag))
463
- ? TAG_MATCH_BOOST : 1.0;
464
- const score = matchRatio * entry.confidence * boost * freshnessMultiplier * referenceBoost * tagBoost;
465
- results.push({ entry, score, matchedKeywords });
556
+ // If embed fails: semanticResults stays empty, keyword results used alone
466
557
  }
467
- // Always include user entries even if no keyword match (they're always relevant)
558
+ // Merge keyword + semantic results using max-score strategy
559
+ const merged = mergeRankings(keywordResults, semanticResults);
560
+ // Policy: always include user entries even if no keyword/semantic match
561
+ const results = [...merged];
468
562
  for (const entry of this.entries.values()) {
469
563
  if (entry.topic === 'user' && !results.find(r => r.entry.id === entry.id)) {
470
- results.push({ entry, score: entry.confidence * USER_ALWAYS_INCLUDE_SCORE_FRACTION, matchedKeywords: [] });
564
+ results.push({
565
+ entry,
566
+ score: entry.confidence * USER_ALWAYS_INCLUDE_SCORE_FRACTION,
567
+ matchedKeywords: [],
568
+ source: 'keyword',
569
+ });
471
570
  }
472
571
  }
572
+ // Re-sort after policy injections to maintain score-descending invariant
473
573
  return results
474
574
  .sort((a, b) => b.score - a.score)
475
575
  .slice(0, maxResults);
@@ -914,6 +1014,51 @@ export class MarkdownMemoryStore {
914
1014
  trust: r.entry.trust,
915
1015
  }));
916
1016
  }
1017
+ /** Find semantic duplicates by cosine similarity against stored vectors.
1018
+ * Same-topic only. Returns entries above DEDUP_SEMANTIC_THRESHOLD.
1019
+ * Returns empty when no embedder or no vectors available. */
1020
+ findSemanticDuplicates(excludeId, topic) {
1021
+ const newVector = this.vectors.get(excludeId);
1022
+ if (!newVector)
1023
+ return [];
1024
+ const related = [];
1025
+ for (const entry of this.entries.values()) {
1026
+ if (entry.id === excludeId)
1027
+ continue;
1028
+ if (entry.topic !== topic)
1029
+ continue;
1030
+ const entryVector = this.vectors.get(entry.id);
1031
+ if (!entryVector)
1032
+ continue;
1033
+ const sim = cosineSimilarity(newVector, entryVector);
1034
+ if (sim > DEDUP_SEMANTIC_THRESHOLD) {
1035
+ related.push({ entry, similarity: sim });
1036
+ }
1037
+ }
1038
+ return related
1039
+ .sort((a, b) => b.similarity - a.similarity)
1040
+ .slice(0, this.behavior.maxDedupSuggestions)
1041
+ .map(r => ({
1042
+ id: r.entry.id,
1043
+ title: r.entry.title,
1044
+ content: r.entry.content,
1045
+ confidence: r.entry.confidence,
1046
+ trust: r.entry.trust,
1047
+ }));
1048
+ }
1049
+ /** Merge keyword-based and semantic-based dedup results, dedup by ID,
1050
+ * keeping the keyword-based entry when both detect the same duplicate. */
1051
+ mergeRelatedEntries(keywordDupes, semanticDupes) {
1052
+ const byId = new Map();
1053
+ for (const r of keywordDupes)
1054
+ byId.set(r.id, r);
1055
+ for (const r of semanticDupes) {
1056
+ if (!byId.has(r.id))
1057
+ byId.set(r.id, r);
1058
+ // If already present from keyword dedup, the keyword result is kept — both signal duplication
1059
+ }
1060
+ return Array.from(byId.values()).slice(0, this.behavior.maxDedupSuggestions);
1061
+ }
917
1062
  /** Tag frequency across all entries — for vocabulary echo in store responses.
918
1063
  * Returns tags sorted by frequency (descending). O(N) over entries. */
919
1064
  getTagFrequency() {
@@ -14,6 +14,26 @@ export declare const CONFLICT_MIN_CONTENT_CHARS = 50;
14
14
  /** Opposition keyword pairs for enhanced conflict detection.
15
15
  * When entries overlap AND use opposing terms, boost the conflict signal. */
16
16
  export declare const OPPOSITION_PAIRS: ReadonlyArray<readonly [string, string]>;
17
+ /** Minimum cosine similarity for semantic search results.
18
+ * Below this, entries are noise — embedding models produce non-zero similarity
19
+ * even for unrelated text.
20
+ *
21
+ * CALIBRATION NOTE: 0.45 is a strict starting point. With nomic-embed-text,
22
+ * unrelated text pairs routinely score 0.2-0.4 because the model produces
23
+ * non-orthogonal embeddings for any English text. Starting strict and loosening
24
+ * with data is safer than starting loose.
25
+ *
26
+ * Use MEMORY_MCP_DEBUG=1 env var to see raw cosine scores for calibration. */
27
+ export declare const SEMANTIC_MIN_SIMILARITY = 0.45;
28
+ /** Minimum cosine similarity for semantic dedup at store time.
29
+ * Higher than SEMANTIC_MIN_SIMILARITY because flagging false duplicates is more
30
+ * disruptive than missing real ones. Two entries must be quite similar to be
31
+ * flagged as potential duplicates. */
32
+ export declare const DEDUP_SEMANTIC_THRESHOLD = 0.8;
33
+ /** Query-time embed timeout — tighter than store-time (5s) for responsiveness.
34
+ * Model-warm latency is ~10ms; 2s covers machine-under-load with margin.
35
+ * Cold starts handled by LazyEmbedder's probe (which uses the full 5s). */
36
+ export declare const QUERY_EMBED_TIMEOUT_MS = 2000;
17
37
  /** Score multiplier when a reference path basename matches the context keywords. */
18
38
  export declare const REFERENCE_BOOST_MULTIPLIER = 1.3;
19
39
  /** Per-topic scoring boost factors for contextSearch().
@@ -40,6 +40,26 @@ export const OPPOSITION_PAIRS = [
40
40
  ['throw', 'return'], // exceptions vs Result types
41
41
  ['imperative', 'declarative'],
42
42
  ];
43
+ /** Minimum cosine similarity for semantic search results.
44
+ * Below this, entries are noise — embedding models produce non-zero similarity
45
+ * even for unrelated text.
46
+ *
47
+ * CALIBRATION NOTE: 0.45 is a strict starting point. With nomic-embed-text,
48
+ * unrelated text pairs routinely score 0.2-0.4 because the model produces
49
+ * non-orthogonal embeddings for any English text. Starting strict and loosening
50
+ * with data is safer than starting loose.
51
+ *
52
+ * Use MEMORY_MCP_DEBUG=1 env var to see raw cosine scores for calibration. */
53
+ export const SEMANTIC_MIN_SIMILARITY = 0.45;
54
+ /** Minimum cosine similarity for semantic dedup at store time.
55
+ * Higher than SEMANTIC_MIN_SIMILARITY because flagging false duplicates is more
56
+ * disruptive than missing real ones. Two entries must be quite similar to be
57
+ * flagged as potential duplicates. */
58
+ export const DEDUP_SEMANTIC_THRESHOLD = 0.80;
59
+ /** Query-time embed timeout — tighter than store-time (5s) for responsiveness.
60
+ * Model-warm latency is ~10ms; 2s covers machine-under-load with margin.
61
+ * Cold starts handled by LazyEmbedder's probe (which uses the full 5s). */
62
+ export const QUERY_EMBED_TIMEOUT_MS = 2000;
43
63
  /** Score multiplier when a reference path basename matches the context keywords. */
44
64
  export const REFERENCE_BOOST_MULTIPLIER = 1.30;
45
65
  /** Per-topic scoring boost factors for contextSearch().
package/dist/types.d.ts CHANGED
@@ -156,6 +156,7 @@ export type CorrectResult = {
156
156
  export interface MemoryStats {
157
157
  readonly totalEntries: number;
158
158
  readonly corruptFiles: number;
159
+ readonly vectorCount: number;
159
160
  readonly byTopic: Record<string, number>;
160
161
  readonly byTrust: Record<TrustLevel, number>;
161
162
  readonly byFreshness: {
@@ -222,6 +223,24 @@ export interface BehaviorConfig {
222
223
  /** Maximum conflict pairs shown per query/context response. Default: 2. Range: 1–5. */
223
224
  readonly maxConflictPairs?: number;
224
225
  }
226
+ /** Supported embedding providers — closed union for exhaustive handling. */
227
+ export type EmbedderProvider = 'ollama' | 'none';
228
+ /** Embedding configuration from memory-config.json "embedder" block.
229
+ * All fields optional except provider — defaults are sensible for nomic-embed-text on localhost. */
230
+ export interface EmbedderConfig {
231
+ readonly provider: EmbedderProvider;
232
+ readonly model?: string;
233
+ readonly baseUrl?: string;
234
+ readonly timeoutMs?: number;
235
+ readonly dimensions?: number;
236
+ }
237
+ /** Result of a re-embed operation */
238
+ export interface ReEmbedResult {
239
+ readonly embedded: number;
240
+ readonly skipped: number;
241
+ readonly failed: number;
242
+ readonly error?: string;
243
+ }
225
244
  /** Configuration for the memory MCP */
226
245
  export interface MemoryConfig {
227
246
  readonly repoRoot: string;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@exaudeus/memory-mcp",
3
- "version": "1.6.0",
3
+ "version": "1.8.0",
4
4
  "description": "Codebase memory MCP server - persistent, evolving knowledge for AI coding agents",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",