@exaudeus/memory-mcp 1.0.1 → 1.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,4 +1,6 @@
1
1
  import type { MemoryStats, StaleEntry, ConflictPair, BehaviorConfig } from './types.js';
2
+ import { type FilterGroup } from './text-analyzer.js';
3
+ import type { MarkdownMemoryStore } from './store.js';
2
4
  /** Format the stale entries section for briefing/context responses */
3
5
  export declare function formatStaleSection(staleDetails: readonly StaleEntry[]): string;
4
6
  /** Format the conflict detection warning for query/context responses */
@@ -8,3 +10,16 @@ export declare function formatStats(lobe: string, result: MemoryStats): string;
8
10
  /** Format the active behavior config section for diagnostics.
9
11
  * Shows effective values and marks overrides vs defaults clearly. */
10
12
  export declare function formatBehaviorConfigSection(behavior?: BehaviorConfig): string;
13
+ /** Merge tag frequencies from multiple stores — pure function over a collection */
14
+ export declare function mergeTagFrequencies(stores: Iterable<MarkdownMemoryStore>): ReadonlyMap<string, number>;
15
+ /** Build query footer — pure function, same inputs → same output.
16
+ * Accepts parsed FilterGroup[] to avoid reparsing. */
17
+ export declare function buildQueryFooter(opts: {
18
+ readonly filterGroups: readonly FilterGroup[];
19
+ readonly rawFilter: string | undefined;
20
+ readonly tagFreq: ReadonlyMap<string, number>;
21
+ readonly resultCount: number;
22
+ readonly scope: string;
23
+ }): string;
24
+ /** Build tag primer section for session briefing — pure function */
25
+ export declare function buildTagPrimerSection(tagFreq: ReadonlyMap<string, number>): string;
@@ -2,7 +2,8 @@
2
2
  //
3
3
  // Pure functions — no side effects, no state. Each takes structured data
4
4
  // and returns a formatted string for the tool response.
5
- import { DEFAULT_STALE_DAYS_STANDARD, DEFAULT_STALE_DAYS_PREFERENCES, DEFAULT_MAX_STALE_IN_BRIEFING, DEFAULT_MAX_DEDUP_SUGGESTIONS, DEFAULT_MAX_CONFLICT_PAIRS, } from './thresholds.js';
5
+ import { DEFAULT_STALE_DAYS_STANDARD, DEFAULT_STALE_DAYS_PREFERENCES, DEFAULT_MAX_STALE_IN_BRIEFING, DEFAULT_MAX_DEDUP_SUGGESTIONS, DEFAULT_MAX_CONFLICT_PAIRS, MAX_FOOTER_TAGS, } from './thresholds.js';
6
+ import { analyzeFilterGroups } from './text-analyzer.js';
6
7
  /** Format the stale entries section for briefing/context responses */
7
8
  export function formatStaleSection(staleDetails) {
8
9
  const lines = [
@@ -43,6 +44,12 @@ export function formatStats(lobe, result) {
43
44
  const trustLines = Object.entries(result.byTrust)
44
45
  .map(([trust, count]) => ` - ${trust}: ${count}`)
45
46
  .join('\n');
47
+ const tagLines = Object.entries(result.byTag).length > 0
48
+ ? Object.entries(result.byTag)
49
+ .sort((a, b) => b[1] - a[1])
50
+ .map(([tag, count]) => ` - ${tag}: ${count}`)
51
+ .join('\n')
52
+ : ' (none)';
46
53
  const corruptLine = result.corruptFiles > 0 ? `\n**Corrupt files:** ${result.corruptFiles}` : '';
47
54
  return [
48
55
  `## [${lobe}] Memory Stats`,
@@ -57,6 +64,9 @@ export function formatStats(lobe, result) {
57
64
  `### By Trust Level`,
58
65
  trustLines,
59
66
  ``,
67
+ `### By Tag`,
68
+ tagLines,
69
+ ``,
60
70
  `### Freshness`,
61
71
  ` - Fresh: ${result.byFreshness.fresh}`,
62
72
  ` - Stale: ${result.byFreshness.stale}`,
@@ -90,3 +100,82 @@ export function formatBehaviorConfigSection(behavior) {
90
100
  }
91
101
  return lines.join('\n');
92
102
  }
103
+ /** Merge tag frequencies from multiple stores — pure function over a collection */
104
+ export function mergeTagFrequencies(stores) {
105
+ const merged = new Map();
106
+ for (const store of stores) {
107
+ const freq = store.getTagFrequency();
108
+ for (const [tag, count] of freq) {
109
+ merged.set(tag, (merged.get(tag) ?? 0) + count);
110
+ }
111
+ }
112
+ return merged;
113
+ }
114
+ /** Build query footer — pure function, same inputs → same output.
115
+ * Accepts parsed FilterGroup[] to avoid reparsing. */
116
+ export function buildQueryFooter(opts) {
117
+ const { filterGroups, rawFilter, tagFreq, resultCount, scope } = opts;
118
+ const mode = analyzeFilterGroups(filterGroups);
119
+ const lines = [];
120
+ // 1. Query mode explanation
121
+ switch (mode.kind) {
122
+ case 'no-filter':
123
+ lines.push(`Showing all entries in scope "${scope}"`);
124
+ break;
125
+ case 'keyword-only':
126
+ lines.push(`Searched keywords: ${mode.terms.join(', ')} (stemmed)`);
127
+ break;
128
+ case 'tag-only':
129
+ lines.push(`Filtered by tags: ${mode.tags.map(t => `#${t}`).join(', ')} (exact match)`);
130
+ break;
131
+ case 'complex':
132
+ const features = [];
133
+ if (mode.hasTags)
134
+ features.push('#tags');
135
+ if (mode.hasExact)
136
+ features.push('=exact');
137
+ if (mode.hasNot)
138
+ features.push('-NOT');
139
+ if (mode.hasOr)
140
+ features.push('|OR');
141
+ lines.push(`Complex filter: ${features.join(', ')}`);
142
+ break;
143
+ }
144
+ // 2. Available tags (always shown, capped for readability)
145
+ if (tagFreq.size > 0) {
146
+ const topTags = [...tagFreq.entries()]
147
+ .sort((a, b) => b[1] - a[1])
148
+ .slice(0, MAX_FOOTER_TAGS)
149
+ .map(([tag, count]) => `${tag}(${count})`)
150
+ .join(', ');
151
+ const remainder = tagFreq.size > MAX_FOOTER_TAGS ? ` + ${tagFreq.size - MAX_FOOTER_TAGS} more` : '';
152
+ lines.push(`Available tags: ${topTags}${remainder}`);
153
+ }
154
+ // 3. Zero-results suggestion (adaptive) — only when using keywords and tags exist
155
+ if (resultCount === 0 && mode.kind === 'keyword-only' && tagFreq.size > 0) {
156
+ const topTag = [...tagFreq.entries()].sort((a, b) => b[1] - a[1])[0][0];
157
+ lines.push(`→ No keyword matches. Try: filter: "#${topTag}" for exact category.`);
158
+ }
159
+ // 4. Syntax reference — show on failure or complex queries (not on simple successful queries)
160
+ if (resultCount === 0 || mode.kind === 'complex') {
161
+ lines.push(`Syntax: #tag | =exact | -NOT | word (stemmed) | A B (AND) | A|B (OR)`);
162
+ }
163
+ return lines.join('\n');
164
+ }
165
+ /** Build tag primer section for session briefing — pure function */
166
+ export function buildTagPrimerSection(tagFreq) {
167
+ if (tagFreq.size === 0)
168
+ return '';
169
+ const allTags = [...tagFreq.entries()]
170
+ .sort((a, b) => b[1] - a[1])
171
+ .map(([tag, count]) => `${tag}(${count})`)
172
+ .join(', ');
173
+ return [
174
+ `### Tag Vocabulary (${tagFreq.size} tags)`,
175
+ allTags,
176
+ ``,
177
+ `Filter by tags: memory_query(filter: "#auth") — exact match`,
178
+ `Combine: memory_query(filter: "#auth middleware") — tag + keyword`,
179
+ `Multiple: memory_query(filter: "#auth|#security") — OR logic`,
180
+ ].join('\n');
181
+ }
package/dist/index.js CHANGED
@@ -16,9 +16,9 @@ import { getLobeConfigs } from './config.js';
16
16
  import { ConfigManager } from './config-manager.js';
17
17
  import { normalizeArgs } from './normalize.js';
18
18
  import { buildCrashReport, writeCrashReport, writeCrashReportSync, readLatestCrash, readCrashHistory, clearLatestCrash, formatCrashReport, formatCrashSummary, markServerStarted, } from './crash-journal.js';
19
- import { formatStaleSection, formatConflictWarning, formatStats, formatBehaviorConfigSection } from './formatters.js';
20
- import { extractKeywords } from './text-analyzer.js';
21
- import { CROSS_LOBE_WEAK_SCORE_PENALTY, CROSS_LOBE_MIN_MATCH_RATIO } from './thresholds.js';
19
+ import { formatStaleSection, formatConflictWarning, formatStats, formatBehaviorConfigSection, mergeTagFrequencies, buildQueryFooter, buildTagPrimerSection } from './formatters.js';
20
+ import { extractKeywords, parseFilter } from './text-analyzer.js';
21
+ import { CROSS_LOBE_WEAK_SCORE_PENALTY, CROSS_LOBE_MIN_MATCH_RATIO, VOCABULARY_ECHO_LIMIT } from './thresholds.js';
22
22
  let serverMode = { kind: 'running' };
23
23
  const lobeHealth = new Map();
24
24
  const serverStartTime = Date.now();
@@ -183,7 +183,7 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
183
183
  // and memory_stats. The handler still works if called directly.
184
184
  {
185
185
  name: 'memory_store',
186
- description: 'Store knowledge. "user" and "preferences" are global (no lobe needed). Example: memory_store(topic: "gotchas", title: "Build cache", content: "Must clean build after Tuist changes")',
186
+ description: 'Store knowledge. "user" and "preferences" are global (no lobe needed). Use tags for exact-match categorization. Add a shared tag (e.g., "test-entry") for bulk operations. Example: memory_store(topic: "gotchas", title: "Build cache", content: "Must clean build after Tuist changes", tags: ["build", "ios"])',
187
187
  inputSchema: {
188
188
  type: 'object',
189
189
  properties: {
@@ -222,13 +222,19 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
222
222
  description: 'user (from human) > agent-confirmed > agent-inferred',
223
223
  default: 'agent-inferred',
224
224
  },
225
+ tags: {
226
+ type: 'array',
227
+ items: { type: 'string' },
228
+ description: 'Category labels for exact-match retrieval (lowercase slugs). Query with filter: "#tag". Example: ["auth", "critical-path", "mite-combat"]',
229
+ default: [],
230
+ },
225
231
  },
226
232
  required: ['topic', 'title', 'content'],
227
233
  },
228
234
  },
229
235
  {
230
236
  name: 'memory_query',
231
- description: 'Search stored knowledge. Searches all lobes when lobe is omitted. Example: memory_query(scope: "*", filter: "reducer sealed", detail: "full"). Use scope "*" to search everything. Use detail "full" for complete content.',
237
+ description: 'Search stored knowledge. Searches all lobes when lobe is omitted. Filter supports: keywords (stemmed), #tag (exact tag match), =term (exact keyword, no stemming), -term (NOT). Example: memory_query(scope: "*", filter: "#auth reducer", detail: "full")',
232
238
  inputSchema: {
233
239
  type: 'object',
234
240
  properties: {
@@ -245,7 +251,7 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
245
251
  },
246
252
  filter: {
247
253
  type: 'string',
248
- description: 'Search terms. "A B" = AND, "A|B" = OR, "-A" = NOT. Example: "reducer sealed -deprecated"',
254
+ description: 'Search terms. "A B" = AND, "A|B" = OR, "-A" = NOT, "#tag" = exact tag, "=term" = exact keyword (no stemming). Example: "#auth reducer -deprecated"',
249
255
  },
250
256
  branch: {
251
257
  type: 'string',
@@ -387,7 +393,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
387
393
  };
388
394
  }
389
395
  case 'memory_store': {
390
- const { lobe: rawLobe, topic: rawTopic, title, content, sources, references, trust: rawTrust } = z.object({
396
+ const { lobe: rawLobe, topic: rawTopic, title, content, sources, references, trust: rawTrust, tags: rawTags } = z.object({
391
397
  lobe: z.string().optional(),
392
398
  topic: z.string(),
393
399
  title: z.string().min(1),
@@ -395,6 +401,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
395
401
  sources: z.array(z.string()).default([]),
396
402
  references: z.array(z.string()).default([]),
397
403
  trust: z.enum(['user', 'agent-confirmed', 'agent-inferred']).default('agent-inferred'),
404
+ tags: z.array(z.string()).default([]),
398
405
  }).parse(args);
399
406
  // Validate topic at boundary
400
407
  const topic = parseTopicScope(rawTopic);
@@ -419,7 +426,7 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
419
426
  return contextError(ctx);
420
427
  const result = await ctx.store.store(topic, title, content, sources,
421
428
  // User/preferences default to 'user' trust unless explicitly set otherwise
422
- isGlobal && trust === 'agent-inferred' ? 'user' : trust, references);
429
+ isGlobal && trust === 'agent-inferred' ? 'user' : trust, references, rawTags);
423
430
  if (!result.stored) {
424
431
  return {
425
432
  content: [{ type: 'text', text: `[${ctx.label}] Failed to store: ${result.warning}` }],
@@ -464,6 +471,20 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
464
471
  lines.push('');
465
472
  lines.push('Review the stored entry against these preferences for potential conflicts.');
466
473
  }
474
+ // Vocabulary echo: show existing tags to drive convergence
475
+ if (hintCount < 2) {
476
+ const tagFreq = ctx.store.getTagFrequency();
477
+ if (tagFreq.size > 0) {
478
+ hintCount++;
479
+ const topTags = [...tagFreq.entries()]
480
+ .sort((a, b) => b[1] - a[1])
481
+ .slice(0, VOCABULARY_ECHO_LIMIT)
482
+ .map(([tag, count]) => `${tag}(${count})`).join(', ');
483
+ const truncated = tagFreq.size > VOCABULARY_ECHO_LIMIT ? ` (top ${VOCABULARY_ECHO_LIMIT} shown)` : '';
484
+ lines.push('');
485
+ lines.push(`Existing tags: ${topTags}${truncated}. Reuse for consistency. Query with filter: "#tag".`);
486
+ }
487
+ }
467
488
  return { content: [{ type: 'text', text: lines.join('\n') }] };
468
489
  }
469
490
  case 'memory_query': {
@@ -537,14 +558,35 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
537
558
  return true;
538
559
  })
539
560
  .sort((a, b) => b.relevanceScore - a.relevanceScore);
561
+ // Build stores collection for tag frequency aggregation
562
+ const searchedStores = [];
563
+ if (isGlobalQuery) {
564
+ searchedStores.push(globalStore);
565
+ }
566
+ else if (rawLobe) {
567
+ const store = configManager.getStore(rawLobe);
568
+ if (store)
569
+ searchedStores.push(store);
570
+ }
571
+ else {
572
+ // All lobes + global when doing wildcard search
573
+ for (const lobeName of configManager.getLobeNames()) {
574
+ const store = configManager.getStore(lobeName);
575
+ if (store)
576
+ searchedStores.push(store);
577
+ }
578
+ if (scope === '*')
579
+ searchedStores.push(globalStore);
580
+ }
581
+ const tagFreq = mergeTagFrequencies(searchedStores);
582
+ // Parse filter once for both filtering (already done) and footer display
583
+ const filterGroups = filter ? parseFilter(filter) : [];
540
584
  if (allEntries.length === 0) {
541
- const scopeHint = scope !== '*'
542
- ? ` Try scope: "*" to search all topics, or use filter: "${filter ?? scope}" to search by keyword.`
543
- : '';
585
+ const footer = buildQueryFooter({ filterGroups, rawFilter: filter, tagFreq, resultCount: 0, scope });
544
586
  return {
545
587
  content: [{
546
588
  type: 'text',
547
- text: `[${label}] No entries found for scope "${scope}"${filter ? ` with filter "${filter}"` : ''}.${scopeHint}`,
589
+ text: `[${label}] No entries found for scope "${scope}"${filter ? ` with filter "${filter}"` : ''}.\n\n---\n${footer}`,
548
590
  }],
549
591
  };
550
592
  }
@@ -563,14 +605,21 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
563
605
  `Fresh: ${e.fresh}`,
564
606
  e.sources?.length ? `Sources: ${e.sources.join(', ')}` : null,
565
607
  e.references?.length ? `References: ${e.references.join(', ')}` : null,
608
+ e.tags?.length ? `Tags: ${e.tags.join(', ')}` : null,
566
609
  `Created: ${e.created}`,
567
610
  `Last accessed: ${e.lastAccessed}`,
568
611
  e.gitSha ? `Git SHA: ${e.gitSha}` : null,
569
612
  ].filter(Boolean).join('\n');
570
613
  return `### ${e.title}\n${meta}\n\n${e.content}`;
571
614
  }
572
- if (detail === 'standard' && e.references?.length) {
573
- return `### ${e.title}\n*${e.id}${lobeTag} | confidence: ${e.confidence}${freshIndicator}*\nReferences: ${e.references.join(', ')}\n\n${e.summary}`;
615
+ if (detail === 'standard') {
616
+ const metaParts = [];
617
+ if (e.references?.length)
618
+ metaParts.push(`References: ${e.references.join(', ')}`);
619
+ if (e.tags?.length)
620
+ metaParts.push(`Tags: ${e.tags.join(', ')}`);
621
+ const metaLine = metaParts.length > 0 ? `\n${metaParts.join('\n')}\n` : '\n';
622
+ return `### ${e.title}\n*${e.id}${lobeTag} | confidence: ${e.confidence}${freshIndicator}*${metaLine}\n${e.summary}`;
574
623
  }
575
624
  return `### ${e.title}\n*${e.id}${lobeTag} | confidence: ${e.confidence}${freshIndicator}*\n\n${e.summary}`;
576
625
  });
@@ -584,25 +633,9 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
584
633
  text += '\n\n' + formatConflictWarning(conflicts);
585
634
  }
586
635
  }
587
- // Hints: teach the agent about capabilities it may not know about
588
- const hints = [];
589
- // Nudge: when searching all lobes, remind the agent to specify one for targeted results
590
- const allQueryLobeNames = configManager.getLobeNames();
591
- if (!rawLobe && !isGlobalQuery && allQueryLobeNames.length > 1) {
592
- hints.push(`Searched all lobes (${allQueryLobeNames.join(', ')}). Specify lobe: "<name>" for targeted results.`);
593
- }
594
- if (detail !== 'full') {
595
- hints.push('Use detail: "full" to see complete entry content.');
596
- }
597
- if (filter && !filter.includes(' ') && !filter.includes('|') && !filter.includes('-')) {
598
- hints.push('Tip: combine terms with spaces (AND), | (OR), -term (NOT). Example: "reducer sealed -deprecated"');
599
- }
600
- if (!filter) {
601
- hints.push('Tip: add filter: "keyword" to search within results.');
602
- }
603
- if (hints.length > 0) {
604
- text += `\n\n---\n*${hints.join(' ')}*`;
605
- }
636
+ // Build footer with query mode, tag vocabulary, and syntax reference
637
+ const footer = buildQueryFooter({ filterGroups, rawFilter: filter, tagFreq, resultCount: allEntries.length, scope });
638
+ text += `\n\n---\n${footer}`;
606
639
  return { content: [{ type: 'text', text }] };
607
640
  }
608
641
  case 'memory_correct': {
@@ -723,6 +756,18 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
723
756
  if (sections.length === 0) {
724
757
  sections.push('No knowledge stored yet. As you work, store observations with memory_store. Try memory_bootstrap to seed initial knowledge from the repo.');
725
758
  }
759
+ // Tag primer: show tag vocabulary if tags exist across any lobe
760
+ const briefingStores = [globalStore];
761
+ for (const lobeName of allBriefingLobeNames) {
762
+ const store = configManager.getStore(lobeName);
763
+ if (store)
764
+ briefingStores.push(store);
765
+ }
766
+ const briefingTagFreq = mergeTagFrequencies(briefingStores);
767
+ const tagPrimer = buildTagPrimerSection(briefingTagFreq);
768
+ if (tagPrimer) {
769
+ sections.push(tagPrimer);
770
+ }
726
771
  const briefingHints = [];
727
772
  briefingHints.push(`${totalEntries} entries${totalStale > 0 ? ` (${totalStale} stale)` : ''} across ${allBriefingLobeNames.length} ${allBriefingLobeNames.length === 1 ? 'lobe' : 'lobes'}.`);
728
773
  briefingHints.push('Use memory_context(context: "what you are about to do") for task-specific knowledge.');
@@ -797,11 +842,29 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
797
842
  return true;
798
843
  })
799
844
  .slice(0, max);
845
+ // Build stores collection for tag frequency aggregation
846
+ const ctxSearchedStores = [globalStore];
847
+ if (rawLobe) {
848
+ const store = configManager.getStore(rawLobe);
849
+ if (store)
850
+ ctxSearchedStores.push(store);
851
+ }
852
+ else {
853
+ for (const lobeName of configManager.getLobeNames()) {
854
+ const store = configManager.getStore(lobeName);
855
+ if (store)
856
+ ctxSearchedStores.push(store);
857
+ }
858
+ }
859
+ const ctxTagFreq = mergeTagFrequencies(ctxSearchedStores);
860
+ // Parse filter for footer (context search has no filter, pass empty)
861
+ const ctxFilterGroups = [];
800
862
  if (results.length === 0) {
863
+ const ctxFooter = buildQueryFooter({ filterGroups: ctxFilterGroups, rawFilter: undefined, tagFreq: ctxTagFreq, resultCount: 0, scope: 'context search' });
801
864
  return {
802
865
  content: [{
803
866
  type: 'text',
804
- text: `[${label}] No relevant knowledge found for: "${context}"\n\nThis is fine — proceed without prior context. As you learn things worth remembering, store them with memory_store.\nTry memory_query(scope: "*") to browse all entries.`,
867
+ text: `[${label}] No relevant knowledge found for: "${context}"\n\nThis is fine — proceed without prior context. As you learn things worth remembering, store them with memory_store.\n\n---\n${ctxFooter}`,
805
868
  }],
806
869
  };
807
870
  }
@@ -831,7 +894,8 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
831
894
  const marker = topic === 'gotchas' ? '[!] ' : topic === 'preferences' ? '[pref] ' : '';
832
895
  const keywords = r.matchedKeywords.length > 0 ? ` (matched: ${r.matchedKeywords.join(', ')})` : '';
833
896
  const lobeLabel = isCtxMultiLobe ? ` [${ctxEntryLobeMap.get(r.entry.id) ?? '?'}]` : '';
834
- sections.push(`- **${marker}${r.entry.title}**${lobeLabel}: ${r.entry.content}${keywords}`);
897
+ const tagsSuffix = r.entry.tags?.length ? ` [tags: ${r.entry.tags.join(', ')}]` : '';
898
+ sections.push(`- **${marker}${r.entry.title}**${lobeLabel}: ${r.entry.content}${keywords}${tagsSuffix}`);
835
899
  }
836
900
  sections.push('');
837
901
  }
@@ -850,33 +914,15 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
850
914
  allMatchedKeywords.add(kw);
851
915
  matchedTopics.add(r.entry.topic);
852
916
  }
853
- // Hints
854
- const ctxHints = [];
855
- // Nudge: when searching all lobes, infer the most relevant lobe from context keywords
856
- // matching lobe names (e.g. "minion-miner" in context → suggest lobe "minion-miner"),
857
- // falling back to the first lobe when no name overlap is found.
858
- const allCtxLobeNames = configManager.getLobeNames();
859
- if (!rawLobe && allCtxLobeNames.length > 1) {
860
- const contextKws = extractKeywords(context);
861
- const inferredLobe = allCtxLobeNames.find(name => [...extractKeywords(name)].some(kw => contextKws.has(kw))) ?? allCtxLobeNames[0];
862
- ctxHints.push(`Searched all lobes. For faster, targeted results use lobe: "${inferredLobe}" (available: ${allCtxLobeNames.join(', ')}).`);
863
- }
864
- if (results.length >= max) {
865
- ctxHints.push(`Showing top ${max} results. Increase maxResults or raise minMatch to refine.`);
866
- }
867
- if (threshold <= 0.2 && results.length > 5) {
868
- ctxHints.push('Too many results? Use minMatch: 0.4 for stricter matching.');
869
- }
870
- // Session dedup hint — tell the agent not to re-call for these keywords
871
917
  if (allMatchedKeywords.size > 0) {
872
918
  const kwList = Array.from(allMatchedKeywords).sort().join(', ');
873
919
  const topicList = Array.from(matchedTopics).sort().join(', ');
874
- ctxHints.push(`Context loaded for: ${kwList} (${topicList}). ` +
875
- `This knowledge is now in your conversation — no need to call memory_context again for these terms this session.`);
876
- }
877
- if (ctxHints.length > 0) {
878
- sections.push(`---\n*${ctxHints.join(' ')}*`);
920
+ sections.push(`---\n*Context loaded for: ${kwList} (${topicList}). ` +
921
+ `This knowledge is now in your conversation — no need to call memory_context again for these terms this session.*`);
879
922
  }
923
+ // Build footer (context search has no filter — it's natural language keyword matching)
924
+ const ctxFooter = buildQueryFooter({ filterGroups: ctxFilterGroups, rawFilter: undefined, tagFreq: ctxTagFreq, resultCount: results.length, scope: 'context search' });
925
+ sections.push(`---\n${ctxFooter}`);
880
926
  return { content: [{ type: 'text', text: sections.join('\n') }] };
881
927
  }
882
928
  case 'memory_stats': {
package/dist/normalize.js CHANGED
@@ -19,6 +19,10 @@ const PARAM_ALIASES = {
19
19
  // memory_context aliases
20
20
  description: 'context',
21
21
  task: 'context',
22
+ // tag aliases
23
+ tag: 'tags',
24
+ labels: 'tags',
25
+ categories: 'tags',
22
26
  // lobe aliases
23
27
  workspace: 'lobe',
24
28
  repo: 'lobe',
package/dist/store.d.ts CHANGED
@@ -13,7 +13,7 @@ export declare class MarkdownMemoryStore {
13
13
  /** Initialize the store: create memory dir and load existing entries */
14
14
  init(): Promise<void>;
15
15
  /** Store a new knowledge entry */
16
- store(topic: TopicScope, title: string, content: string, sources?: string[], trust?: TrustLevel, references?: string[]): Promise<StoreResult>;
16
+ store(topic: TopicScope, title: string, content: string, sources?: string[], trust?: TrustLevel, references?: string[], rawTags?: string[]): Promise<StoreResult>;
17
17
  /** Query knowledge by scope and detail level */
18
18
  query(scope: string, detail?: DetailLevel, filter?: string, branchFilter?: string): Promise<QueryResult>;
19
19
  /** Generate a session-start briefing */
@@ -68,6 +68,9 @@ export declare class MarkdownMemoryStore {
68
68
  /** Find entries in the same topic with significant overlap (dedup detection).
69
69
  * Uses hybrid jaccard+containment similarity. */
70
70
  private findRelatedEntries;
71
+ /** Tag frequency across all entries — for vocabulary echo in store responses.
72
+ * Returns tags sorted by frequency (descending). O(N) over entries. */
73
+ getTagFrequency(): ReadonlyMap<string, number>;
71
74
  /** Fetch raw MemoryEntry objects by ID for conflict detection.
72
75
  * Must be called after query() (which calls reloadFromDisk) to ensure entries are current. */
73
76
  getEntriesByIds(ids: readonly string[]): MemoryEntry[];
package/dist/store.js CHANGED
@@ -6,8 +6,8 @@ import path from 'path';
6
6
  import crypto from 'crypto';
7
7
  import { execFile } from 'child_process';
8
8
  import { promisify } from 'util';
9
- import { DEFAULT_CONFIDENCE, realClock, parseTopicScope, parseTrustLevel } from './types.js';
10
- import { DEDUP_SIMILARITY_THRESHOLD, CONFLICT_SIMILARITY_THRESHOLD_SAME_TOPIC, CONFLICT_SIMILARITY_THRESHOLD_CROSS_TOPIC, CONFLICT_MIN_CONTENT_CHARS, OPPOSITION_PAIRS, PREFERENCE_SURFACE_THRESHOLD, REFERENCE_BOOST_MULTIPLIER, TOPIC_BOOST, MODULE_TOPIC_BOOST, USER_ALWAYS_INCLUDE_SCORE_FRACTION, DEFAULT_STALE_DAYS_STANDARD, DEFAULT_STALE_DAYS_PREFERENCES, DEFAULT_MAX_STALE_IN_BRIEFING, DEFAULT_MAX_DEDUP_SUGGESTIONS, DEFAULT_MAX_CONFLICT_PAIRS, DEFAULT_MAX_PREFERENCE_SUGGESTIONS, } from './thresholds.js';
9
+ import { DEFAULT_CONFIDENCE, realClock, parseTopicScope, parseTrustLevel, parseTags } from './types.js';
10
+ import { DEDUP_SIMILARITY_THRESHOLD, CONFLICT_SIMILARITY_THRESHOLD_SAME_TOPIC, CONFLICT_SIMILARITY_THRESHOLD_CROSS_TOPIC, CONFLICT_MIN_CONTENT_CHARS, OPPOSITION_PAIRS, PREFERENCE_SURFACE_THRESHOLD, REFERENCE_BOOST_MULTIPLIER, TOPIC_BOOST, MODULE_TOPIC_BOOST, USER_ALWAYS_INCLUDE_SCORE_FRACTION, DEFAULT_STALE_DAYS_STANDARD, DEFAULT_STALE_DAYS_PREFERENCES, DEFAULT_MAX_STALE_IN_BRIEFING, DEFAULT_MAX_DEDUP_SUGGESTIONS, DEFAULT_MAX_CONFLICT_PAIRS, DEFAULT_MAX_PREFERENCE_SUGGESTIONS, TAG_MATCH_BOOST, } from './thresholds.js';
11
11
  import { realGitService } from './git-service.js';
12
12
  import { extractKeywords, stem, similarity, matchesFilter, computeRelevanceScore, } from './text-analyzer.js';
13
13
  import { detectEphemeralSignals, formatEphemeralWarning } from './ephemeral.js';
@@ -41,7 +41,7 @@ export class MarkdownMemoryStore {
41
41
  await this.reloadFromDisk();
42
42
  }
43
43
  /** Store a new knowledge entry */
44
- async store(topic, title, content, sources = [], trust = 'agent-inferred', references = []) {
44
+ async store(topic, title, content, sources = [], trust = 'agent-inferred', references = [], rawTags = []) {
45
45
  // Check storage budget — null means we can't measure, allow the write
46
46
  const currentSize = await this.getStorageSize();
47
47
  if (currentSize !== null && currentSize >= this.config.storageBudgetBytes) {
@@ -56,10 +56,13 @@ export class MarkdownMemoryStore {
56
56
  const gitSha = await this.getGitSha(sources);
57
57
  // Auto-detect branch for recent-work entries
58
58
  const branch = topic === 'recent-work' ? await this.getCurrentBranch() : undefined;
59
+ // Validate tags at boundary — invalid ones silently dropped
60
+ const tags = parseTags(rawTags);
59
61
  const entry = {
60
62
  id, topic, title, content, confidence, trust,
61
63
  sources,
62
64
  references: references.length > 0 ? references : undefined,
65
+ tags: tags.length > 0 ? tags : undefined,
63
66
  created: now, lastAccessed: now, gitSha, branch,
64
67
  };
65
68
  // Check for existing entry with same title in same topic (and same branch for recent-work)
@@ -113,12 +116,12 @@ export class MarkdownMemoryStore {
113
116
  return false;
114
117
  }
115
118
  }
116
- // Optional keyword filter with AND/OR/NOT syntax
119
+ // Optional keyword filter with AND/OR/NOT syntax + #tag and =exact support
117
120
  if (filter) {
118
121
  const titleKeywords = extractKeywords(entry.title);
119
122
  const contentKeywords = extractKeywords(entry.content);
120
123
  const allKeywords = new Set([...titleKeywords, ...contentKeywords]);
121
- return matchesFilter(allKeywords, filter);
124
+ return matchesFilter(allKeywords, filter, entry.tags);
122
125
  }
123
126
  return true;
124
127
  });
@@ -126,7 +129,7 @@ export class MarkdownMemoryStore {
126
129
  if (filter) {
127
130
  const scores = new Map();
128
131
  for (const entry of matching) {
129
- scores.set(entry.id, computeRelevanceScore(extractKeywords(entry.title), extractKeywords(entry.content), entry.confidence, filter));
132
+ scores.set(entry.id, computeRelevanceScore(extractKeywords(entry.title), extractKeywords(entry.content), entry.confidence, filter, entry.tags));
130
133
  }
131
134
  matching.sort((a, b) => {
132
135
  const scoreDiff = (scores.get(b.id) ?? 0) - (scores.get(a.id) ?? 0);
@@ -155,7 +158,7 @@ export class MarkdownMemoryStore {
155
158
  const entries = matching.map(entry => ({
156
159
  ...this.formatEntry(entry, detail),
157
160
  relevanceScore: filter
158
- ? computeRelevanceScore(extractKeywords(entry.title), extractKeywords(entry.content), entry.confidence, filter)
161
+ ? computeRelevanceScore(extractKeywords(entry.title), extractKeywords(entry.content), entry.confidence, filter, entry.tags)
159
162
  : entry.confidence,
160
163
  }));
161
164
  return { scope, detail, entries, totalEntries: matching.length };
@@ -288,6 +291,7 @@ export class MarkdownMemoryStore {
288
291
  const byTopic = {};
289
292
  const byTrust = { 'user': 0, 'agent-confirmed': 0, 'agent-inferred': 0 };
290
293
  const byFreshness = { fresh: 0, stale: 0, unknown: 0 };
294
+ const byTag = {};
291
295
  for (const entry of allEntries) {
292
296
  byTopic[entry.topic] = (byTopic[entry.topic] ?? 0) + 1;
293
297
  byTrust[entry.trust]++;
@@ -300,12 +304,18 @@ export class MarkdownMemoryStore {
300
304
  else {
301
305
  byFreshness.stale++;
302
306
  }
307
+ // Aggregate tag frequencies
308
+ if (entry.tags) {
309
+ for (const tag of entry.tags) {
310
+ byTag[tag] = (byTag[tag] ?? 0) + 1;
311
+ }
312
+ }
303
313
  }
304
314
  const dates = allEntries.map(e => e.created).sort();
305
315
  return {
306
316
  totalEntries: allEntries.length,
307
317
  corruptFiles: this.corruptFileCount,
308
- byTopic, byTrust, byFreshness,
318
+ byTopic, byTrust, byFreshness, byTag,
309
319
  storageSize: this.formatBytes(storageSize ?? 0),
310
320
  storageBudgetBytes: this.config.storageBudgetBytes,
311
321
  memoryPath: this.memoryPath,
@@ -399,7 +409,9 @@ export class MarkdownMemoryStore {
399
409
  if (entry.topic === 'recent-work' && branchFilter !== '*' && entry.branch && entry.branch !== currentBranch) {
400
410
  continue;
401
411
  }
402
- const entryKeywords = extractKeywords(`${entry.title} ${entry.content}`);
412
+ // Include tag values as keywords so tagged entries surface in context search
413
+ const tagKeywordPart = entry.tags ? ` ${entry.tags.join(' ')}` : '';
414
+ const entryKeywords = extractKeywords(`${entry.title} ${entry.content}${tagKeywordPart}`);
403
415
  const matchedKeywords = [];
404
416
  for (const kw of contextKeywords) {
405
417
  if (entryKeywords.has(kw))
@@ -421,7 +433,10 @@ export class MarkdownMemoryStore {
421
433
  const basename = ref.split('/').pop()?.replace(/\.\w+$/, '') ?? ref;
422
434
  return contextKeywords.has(stem(basename.toLowerCase()));
423
435
  }) ? REFERENCE_BOOST_MULTIPLIER : 1.0;
424
- const score = matchRatio * entry.confidence * boost * freshnessMultiplier * referenceBoost;
436
+ // Tag boost: if any tag exactly matches a context keyword, boost the entry
437
+ const tagBoost = entry.tags?.some(tag => contextKeywords.has(tag))
438
+ ? TAG_MATCH_BOOST : 1.0;
439
+ const score = matchRatio * entry.confidence * boost * freshnessMultiplier * referenceBoost * tagBoost;
425
440
  results.push({ entry, score, matchedKeywords });
426
441
  }
427
442
  // Always include user entries even if no keyword match (they're always relevant)
@@ -485,6 +500,9 @@ export class MarkdownMemoryStore {
485
500
  if (entry.references && entry.references.length > 0) {
486
501
  meta.push(`- **references**: ${entry.references.join(', ')}`);
487
502
  }
503
+ if (entry.tags && entry.tags.length > 0) {
504
+ meta.push(`- **tags**: ${entry.tags.join(', ')}`);
505
+ }
488
506
  if (entry.gitSha) {
489
507
  meta.push(`- **gitSha**: ${entry.gitSha}`);
490
508
  }
@@ -603,6 +621,10 @@ export class MarkdownMemoryStore {
603
621
  const references = metadata['references']
604
622
  ? metadata['references'].split(',').map(s => s.trim()).filter(s => s.length > 0)
605
623
  : undefined;
624
+ // Parse tags at boundary — invalid tags silently dropped
625
+ const tags = metadata['tags']
626
+ ? parseTags(metadata['tags'].split(','))
627
+ : undefined;
606
628
  return {
607
629
  id: metadata['id'],
608
630
  topic,
@@ -612,6 +634,7 @@ export class MarkdownMemoryStore {
612
634
  trust,
613
635
  sources: metadata['source'] ? metadata['source'].split(',').map(s => s.trim()) : [],
614
636
  references: references && references.length > 0 ? references : undefined,
637
+ tags: tags && tags.length > 0 ? tags : undefined,
615
638
  created: metadata['created'] ?? now,
616
639
  lastAccessed: metadata['lastAccessed'] ?? now,
617
640
  gitSha: metadata['gitSha'],
@@ -634,8 +657,9 @@ export class MarkdownMemoryStore {
634
657
  if (detail === 'standard') {
635
658
  return {
636
659
  ...base,
637
- // Surface references in standard detail — compact but useful for navigation
660
+ // Surface references and tags in standard detail
638
661
  references: entry.references,
662
+ tags: entry.tags,
639
663
  };
640
664
  }
641
665
  if (detail === 'full') {
@@ -645,6 +669,7 @@ export class MarkdownMemoryStore {
645
669
  trust: entry.trust,
646
670
  sources: entry.sources,
647
671
  references: entry.references,
672
+ tags: entry.tags,
648
673
  created: entry.created,
649
674
  lastAccessed: entry.lastAccessed,
650
675
  gitSha: entry.gitSha,
@@ -737,6 +762,19 @@ export class MarkdownMemoryStore {
737
762
  trust: r.entry.trust,
738
763
  }));
739
764
  }
765
+ /** Tag frequency across all entries — for vocabulary echo in store responses.
766
+ * Returns tags sorted by frequency (descending). O(N) over entries. */
767
+ getTagFrequency() {
768
+ const freq = new Map();
769
+ for (const entry of this.entries.values()) {
770
+ if (!entry.tags)
771
+ continue;
772
+ for (const tag of entry.tags) {
773
+ freq.set(tag, (freq.get(tag) ?? 0) + 1);
774
+ }
775
+ }
776
+ return freq;
777
+ }
740
778
  /** Fetch raw MemoryEntry objects by ID for conflict detection.
741
779
  * Must be called after query() (which calls reloadFromDisk) to ensure entries are current. */
742
780
  getEntriesByIds(ids) {
@@ -1,8 +1,29 @@
1
- /** Parsed filter group: a set of required terms and excluded terms */
1
+ /** Parsed filter group: required, excluded, exact-match, and tag terms */
2
2
  export interface FilterGroup {
3
3
  readonly must: Set<string>;
4
4
  readonly mustNot: Set<string>;
5
+ readonly mustExact: Set<string>;
6
+ readonly mustTags: Set<string>;
5
7
  }
8
+ /** Query mode summary — describes what a parsed filter actually searches */
9
+ export type QueryMode = {
10
+ readonly kind: 'no-filter';
11
+ } | {
12
+ readonly kind: 'keyword-only';
13
+ readonly terms: readonly string[];
14
+ } | {
15
+ readonly kind: 'tag-only';
16
+ readonly tags: readonly string[];
17
+ } | {
18
+ readonly kind: 'complex';
19
+ readonly hasTags: boolean;
20
+ readonly hasExact: boolean;
21
+ readonly hasNot: boolean;
22
+ readonly hasOr: boolean;
23
+ };
24
+ /** Analyze parsed filter groups into QueryMode for display — pure function.
25
+ * Accepts already-parsed FilterGroup[] to avoid reparsing. */
26
+ export declare function analyzeFilterGroups(groups: readonly FilterGroup[]): QueryMode;
6
27
  /** Naive stem: strip common English suffixes to improve keyword matching.
7
28
  * "reducers" -> "reducer", "sealed" stays, "implementations" -> "implement" */
8
29
  export declare function stem(word: string): string;
@@ -25,8 +46,10 @@ export declare function similarity(titleA: string, contentA: string, titleB: str
25
46
  * ] */
26
47
  export declare function parseFilter(filter: string): FilterGroup[];
27
48
  /** Check if a set of keywords matches a filter string using stemmed AND/OR/NOT logic.
28
- * Entry matches if ANY OR-group is satisfied (all must-terms present, no mustNot-terms present). */
29
- export declare function matchesFilter(allKeywords: Set<string>, filter: string): boolean;
49
+ * Entry matches if ANY OR-group is satisfied (all must-terms present, no mustNot-terms present).
50
+ * Supports =exact (no stemming) and #tag (exact match against entry tags). */
51
+ export declare function matchesFilter(allKeywords: Set<string>, filter: string, tags?: readonly string[]): boolean;
30
52
  /** Compute relevance score for an entry against a filter.
31
- * Title matches get 2x weight over content-only matches. */
32
- export declare function computeRelevanceScore(titleKeywords: Set<string>, contentKeywords: Set<string>, confidence: number, filter: string): number;
53
+ * Title matches get 2x weight over content-only matches.
54
+ * Tag and exact matches count as full-weight hits (same as title). */
55
+ export declare function computeRelevanceScore(titleKeywords: Set<string>, contentKeywords: Set<string>, confidence: number, filter: string, tags?: readonly string[]): number;
@@ -20,6 +20,42 @@ const STOPWORDS = new Set([
20
20
  'he', 'him', 'his', 'she', 'her', 'they', 'them', 'their', 'about',
21
21
  'up', 'out', 'then', 'also', 'use', 'used', 'using',
22
22
  ]);
23
/** Analyze parsed filter groups into QueryMode for display — pure function.
 * Accepts already-parsed FilterGroup[] to avoid reparsing. */
export function analyzeFilterGroups(groups) {
    if (groups.length === 0)
        return { kind: 'no-filter' };
    // Collect every feature across all OR groups into unioned sets.
    const keywords = new Set();
    const negated = new Set();
    const exact = new Set();
    const tagTerms = new Set();
    for (const group of groups) {
        group.must.forEach(term => keywords.add(term));
        group.mustNot.forEach(term => negated.add(term));
        group.mustExact.forEach(term => exact.add(term));
        group.mustTags.forEach(term => tagTerms.add(term));
    }
    const hasOr = groups.length > 1;
    // "Extras" are the features that disqualify the two simple modes.
    const extras = exact.size > 0 || negated.size > 0 || hasOr;
    // Pure tag-only: tags present and nothing else.
    if (!extras && tagTerms.size > 0 && keywords.size === 0)
        return { kind: 'tag-only', tags: [...tagTerms] };
    // Pure keyword-only: plain keywords present and nothing else.
    if (!extras && keywords.size > 0 && tagTerms.size === 0)
        return { kind: 'keyword-only', terms: [...keywords] };
    // Any mix of features is reported as complex with its feature flags.
    return {
        kind: 'complex',
        hasTags: tagTerms.size > 0,
        hasExact: exact.size > 0,
        hasNot: negated.size > 0,
        hasOr,
    };
}
23
59
  /** Naive stem: strip common English suffixes to improve keyword matching.
24
60
  * "reducers" -> "reducer", "sealed" stays, "implementations" -> "implement" */
25
61
  export function stem(word) {
@@ -126,6 +162,8 @@ export function parseFilter(filter) {
126
162
  const terms = group.split(/\s+/).filter(t => t.length > 0);
127
163
  const must = new Set();
128
164
  const mustNot = new Set();
165
+ const mustExact = new Set();
166
+ const mustTags = new Set();
129
167
  for (const term of terms) {
130
168
  if (term.startsWith('-') && term.length > 1) {
131
169
  // Negation: stem the compound as-is, WITHOUT hyphen expansion.
@@ -135,6 +173,14 @@ export function parseFilter(filter) {
135
173
  if (raw.length > 2)
136
174
  mustNot.add(stem(raw));
137
175
  }
176
+ else if (term.startsWith('#') && term.length > 1) {
177
+ // Tag filter: exact match against entry tags, no stemming
178
+ mustTags.add(term.slice(1).toLowerCase());
179
+ }
180
+ else if (term.startsWith('=') && term.length > 1) {
181
+ // Exact keyword match: bypasses stemming
182
+ mustExact.add(term.slice(1).toLowerCase());
183
+ }
138
184
  else {
139
185
  // Positive terms: full expansion (hyphens split into parts)
140
186
  for (const kw of extractKeywords(term)) {
@@ -142,16 +188,18 @@ export function parseFilter(filter) {
142
188
  }
143
189
  }
144
190
  }
145
- return { must, mustNot };
191
+ return { must, mustNot, mustExact, mustTags };
146
192
  });
147
193
  }
148
194
/** Check if a set of keywords matches a filter string using stemmed AND/OR/NOT logic.
 * Entry matches if ANY OR-group is satisfied (all must-terms present, no mustNot-terms present).
 * Supports =exact (no stemming) and #tag (exact match against entry tags). */
export function matchesFilter(allKeywords, filter, tags) {
    const groups = parseFilter(filter);
    // No parseable groups means "no filter" — everything matches.
    if (groups.length === 0)
        return true;
    const entryTags = new Set(tags ?? []);
    const groupMatches = (group) => {
        const { must, mustNot, mustExact, mustTags } = group;
        // A group with no terms at all can never match (degenerate parse).
        if (must.size + mustNot.size + mustExact.size + mustTags.size === 0)
            return false;
        // Excluded terms: any hit disqualifies the group.
        for (const term of mustNot) {
            if (allKeywords.has(term))
                return false;
        }
        // Required stemmed terms: every one must be present.
        for (const term of must) {
            if (!allKeywords.has(term))
                return false;
        }
        // Required exact (unstemmed) terms: every one must be present.
        for (const term of mustExact) {
            if (!allKeywords.has(term))
                return false;
        }
        // Required tags: exact match against the entry's tag set.
        for (const tag of mustTags) {
            if (!entryTags.has(tag))
                return false;
        }
        return true;
    };
    // OR semantics across groups: one satisfied group is enough.
    return groups.some(groupMatches);
}
166
222
  /** Compute relevance score for an entry against a filter.
167
- * Title matches get 2x weight over content-only matches. */
168
- export function computeRelevanceScore(titleKeywords, contentKeywords, confidence, filter) {
223
+ * Title matches get 2x weight over content-only matches.
224
+ * Tag and exact matches count as full-weight hits (same as title). */
225
+ export function computeRelevanceScore(titleKeywords, contentKeywords, confidence, filter, tags) {
169
226
  const groups = parseFilter(filter);
170
227
  if (groups.length === 0)
171
228
  return 0;
229
+ const entryTags = new Set(tags ?? []);
172
230
  let bestScore = 0;
173
- for (const { must } of groups) {
174
- if (must.size === 0)
231
+ for (const { must, mustExact, mustTags } of groups) {
232
+ const totalTerms = must.size + mustExact.size + mustTags.size;
233
+ if (totalTerms === 0)
175
234
  continue;
176
235
  let score = 0;
177
236
  for (const term of must) {
@@ -182,7 +241,20 @@ export function computeRelevanceScore(titleKeywords, contentKeywords, confidence
182
241
  score += 1.0; // content-only match
183
242
  }
184
243
  }
185
- const normalized = score / must.size;
244
+ for (const term of mustExact) {
245
+ if (titleKeywords.has(term)) {
246
+ score += 2.0;
247
+ }
248
+ else if (contentKeywords.has(term)) {
249
+ score += 1.0;
250
+ }
251
+ }
252
+ // Tag matches count as high-value (same as title hits)
253
+ for (const tag of mustTags) {
254
+ if (entryTags.has(tag))
255
+ score += 2.0;
256
+ }
257
+ const normalized = score / totalTerms;
186
258
  if (normalized > bestScore)
187
259
  bestScore = normalized;
188
260
  }
@@ -45,3 +45,9 @@ export declare const DEFAULT_MAX_DEDUP_SUGGESTIONS = 3;
45
45
  export declare const DEFAULT_MAX_CONFLICT_PAIRS = 2;
46
46
  /** Maximum related preferences surfaced when storing a non-preference entry. */
47
47
  export declare const DEFAULT_MAX_PREFERENCE_SUGGESTIONS = 3;
48
+ /** Score multiplier when an entry's tags match context keywords in contextSearch(). */
49
+ export declare const TAG_MATCH_BOOST = 1.5;
50
+ /** Maximum tags shown in vocabulary echo after a store operation. */
51
+ export declare const VOCABULARY_ECHO_LIMIT = 8;
52
+ /** Maximum tags shown in query/context footer. */
53
+ export declare const MAX_FOOTER_TAGS = 12;
@@ -81,3 +81,9 @@ export const DEFAULT_MAX_DEDUP_SUGGESTIONS = 3;
81
81
  export const DEFAULT_MAX_CONFLICT_PAIRS = 2;
82
82
  /** Maximum related preferences surfaced when storing a non-preference entry. */
83
83
  export const DEFAULT_MAX_PREFERENCE_SUGGESTIONS = 3;
84
+ /** Score multiplier when an entry's tags match context keywords in contextSearch(). */
85
+ export const TAG_MATCH_BOOST = 1.5;
86
+ /** Maximum tags shown in vocabulary echo after a store operation. */
87
+ export const VOCABULARY_ECHO_LIMIT = 8;
88
+ /** Maximum tags shown in query/context footer. */
89
+ export const MAX_FOOTER_TAGS = 12;
package/dist/types.d.ts CHANGED
@@ -4,6 +4,17 @@ export type TrustLevel = 'user' | 'agent-confirmed' | 'agent-inferred';
4
4
  export declare function parseTrustLevel(raw: string): TrustLevel | null;
5
5
  /** Predefined topic scopes for organizing knowledge */
6
6
  export type TopicScope = 'user' | 'preferences' | 'architecture' | 'conventions' | 'gotchas' | 'recent-work' | `modules/${string}`;
7
+ /** Validated tag: lowercase alphanumeric slug (letters, digits, hyphens).
8
+ * Branded type prevents accidentally passing raw strings where validated tags are expected. */
9
+ export type Tag = string & {
10
+ readonly __brand: 'Tag';
11
+ };
12
+ /** Parse a raw string into a Tag, returning null for invalid input.
13
+ * Normalizes to lowercase. Rejects empty, too-long, or non-slug strings. */
14
+ export declare function parseTag(raw: string): Tag | null;
15
+ /** Parse an array of raw strings into Tags, silently dropping invalid/duplicate ones.
16
+ * Caps at MAX_TAGS_PER_ENTRY to prevent sprawl. */
17
+ export declare function parseTags(raw: readonly string[]): readonly Tag[];
7
18
  /** Parse a raw string into a TopicScope, returning null for invalid input */
8
19
  export declare function parseTopicScope(raw: string): TopicScope | null;
9
20
  /** Injectable clock for deterministic time in tests */
@@ -23,6 +34,7 @@ export interface MemoryEntry {
23
34
  readonly trust: TrustLevel;
24
35
  readonly sources: readonly string[];
25
36
  readonly references?: readonly string[];
37
+ readonly tags?: readonly Tag[];
26
38
  readonly created: string;
27
39
  readonly lastAccessed: string;
28
40
  readonly gitSha?: string;
@@ -57,6 +69,7 @@ export interface QueryEntry {
57
69
  readonly relevanceScore: number;
58
70
  readonly fresh: boolean;
59
71
  readonly references?: readonly string[];
72
+ readonly tags?: readonly Tag[];
60
73
  readonly content?: string;
61
74
  readonly trust?: TrustLevel;
62
75
  readonly sources?: readonly string[];
@@ -113,6 +126,7 @@ export interface MemoryStats {
113
126
  stale: number;
114
127
  unknown: number;
115
128
  };
129
+ readonly byTag: Record<string, number>;
116
130
  readonly storageSize: string;
117
131
  readonly storageBudgetBytes: number;
118
132
  readonly memoryPath: string;
package/dist/types.js CHANGED
@@ -10,6 +10,38 @@ export function parseTrustLevel(raw) {
10
10
  return TRUST_LEVELS.includes(raw) ? raw : null;
11
11
  }
12
12
  const FIXED_TOPICS = ['user', 'preferences', 'architecture', 'conventions', 'gotchas', 'recent-work'];
13
const TAG_PATTERN = /^[a-z0-9][a-z0-9-]*$/;
const MAX_TAG_LENGTH = 50;
const MAX_TAGS_PER_ENTRY = 10;
/** Parse a raw string into a Tag, returning null for invalid input.
 * Normalizes to lowercase. Rejects empty, too-long, or non-slug strings. */
export function parseTag(raw) {
    const slug = raw
        .trim()
        .toLowerCase()
        .replace(/[^a-z0-9-]/g, '-') // anything outside the slug alphabet becomes a dash
        .replace(/-+/g, '-') // squash runs of dashes into one
        .replace(/^-|-$/g, ''); // strip dashes at either end
    // Valid tags are 2..MAX_TAG_LENGTH chars and match the slug pattern.
    const lengthOk = slug.length >= 2 && slug.length <= MAX_TAG_LENGTH;
    return lengthOk && TAG_PATTERN.test(slug) ? slug : null;
}
/** Parse an array of raw strings into Tags, silently dropping invalid/duplicate ones.
 * Caps at MAX_TAGS_PER_ENTRY to prevent sprawl. */
export function parseTags(raw) {
    // A Set gives both de-duplication and first-occurrence ordering.
    const unique = new Set();
    for (const candidate of raw) {
        const tag = parseTag(candidate);
        if (tag !== null)
            unique.add(tag);
        // Stop as soon as the cap is reached, even mid-input.
        if (unique.size >= MAX_TAGS_PER_ENTRY)
            break;
    }
    return [...unique];
}
13
45
  /** Parse a raw string into a TopicScope, returning null for invalid input */
14
46
  export function parseTopicScope(raw) {
15
47
  if (FIXED_TOPICS.includes(raw))
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@exaudeus/memory-mcp",
3
- "version": "1.0.1",
3
+ "version": "1.1.0",
4
4
  "description": "Codebase memory MCP server - persistent, evolving knowledge for AI coding agents",
5
5
  "type": "module",
6
6
  "main": "dist/index.js",