@telvok/librarian-mcp 1.2.0 → 1.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,15 @@
1
+ import type { ParseResult } from './types.js';
2
+ /**
3
+ * Parse a Cursor Memory Bank folder (.cursor-memory/).
4
+ *
5
+ * Cursor Memory Bank typically contains:
6
+ * - activeContext.md - Current working context
7
+ * - progress.md - Progress log
8
+ * - projectBrief.md - Project overview
9
+ * - systemPatterns.md - System patterns
10
+ * - decisionLog.md - Decision history
11
+ * - techStack.md - Technology stack info
12
+ *
13
+ * Can also contain JSON files and subdirectories.
14
+ */
15
+ export declare function parseCursorMemory(dirPath: string): Promise<ParseResult>;
@@ -0,0 +1,168 @@
1
+ import * as fs from 'fs/promises';
2
+ import * as path from 'path';
3
+ import { glob } from 'glob';
4
+ import matter from 'gray-matter';
5
+ // ============================================================================
6
+ // Cursor Memory Bank Parser
7
+ // ============================================================================
8
/**
 * Parse a Cursor Memory Bank folder (.cursor-memory/).
 *
 * The folder typically holds markdown notes such as activeContext.md,
 * progress.md, projectBrief.md, systemPatterns.md, decisionLog.md and
 * techStack.md, and may also contain JSON files and subdirectories.
 *
 * @param {string} dirPath - Path to the memory-bank directory.
 * @returns {Promise<{entries: object[], skipped: number, errors: string[]}>}
 */
export async function parseCursorMemory(dirPath) {
    const entries = [];
    const errors = [];
    let skipped = 0;
    try {
        const stats = await fs.stat(dirPath);
        if (!stats.isDirectory()) {
            errors.push(`${dirPath} is not a directory`);
            return { entries, skipped, errors };
        }
        // --- Markdown notes --------------------------------------------
        for (const filePath of await glob(path.join(dirPath, '**/*.md'), { nodir: true })) {
            try {
                const raw = await fs.readFile(filePath, 'utf-8');
                const parsed = matter(raw);
                const body = parsed.content.trim();
                if (!body) {
                    // Nothing to import from an empty note.
                    skipped++;
                    continue;
                }
                const baseName = path.basename(filePath, '.md');
                entries.push({
                    // Frontmatter wins; otherwise derive from the filename.
                    title: parsed.data.title || formatFilename(baseName),
                    content: body,
                    context: parsed.data.context || inferContext(baseName),
                    intent: parsed.data.intent,
                    source: 'cursor',
                    originalPath: filePath,
                });
            }
            catch (fileError) {
                errors.push(`${filePath}: ${fileError instanceof Error ? fileError.message : String(fileError)}`);
                skipped++;
            }
        }
        // --- JSON files ------------------------------------------------
        for (const filePath of await glob(path.join(dirPath, '**/*.json'), { nodir: true })) {
            try {
                const data = JSON.parse(await fs.readFile(filePath, 'utf-8'));
                // A file may hold a single memory object or an array of them.
                const candidates = Array.isArray(data) ? data : [data];
                for (const item of candidates) {
                    if (typeof item !== 'object' || item === null) {
                        continue;
                    }
                    const entry = extractFromJSON(item, filePath);
                    if (entry) {
                        entries.push(entry);
                    }
                    else {
                        skipped++;
                    }
                }
            }
            catch (fileError) {
                errors.push(`${filePath}: ${fileError instanceof Error ? fileError.message : String(fileError)}`);
                skipped++;
            }
        }
    }
    catch (dirError) {
        errors.push(`Failed to access path: ${dirError instanceof Error ? dirError.message : String(dirError)}`);
    }
    return { entries, skipped, errors };
}
104
+ // ============================================================================
105
+ // Helper Functions
106
+ // ============================================================================
107
/**
 * Convert a filename into a human-readable title.
 * Handles camelCase, snake_case and kebab-case:
 *   activeContext -> Active Context
 *   system_patterns -> System Patterns
 */
function formatFilename(filename) {
    const spaced = filename
        .replace(/([a-z])([A-Z])/g, '$1 $2') // split camelCase boundaries
        .replace(/[_-]/g, ' ');              // unify snake/kebab separators
    return spaced.replace(/\b\w/g, (ch) => ch.toUpperCase());
}
123
/**
 * Infer a context label from common Cursor Memory Bank filename patterns.
 * Falls back to 'cursor-memory' when no pattern matches.
 */
function inferContext(filename) {
    const lower = filename.toLowerCase();
    // Checked in priority order; first match wins.
    const rules = [
        [['context'], 'context'],
        [['progress'], 'progress'],
        [['brief', 'overview'], 'project'],
        [['pattern'], 'patterns'],
        [['decision'], 'decisions'],
        [['stack', 'tech'], 'technology'],
    ];
    for (const [needles, label] of rules) {
        if (needles.some((needle) => lower.includes(needle))) {
            return label;
        }
    }
    return 'cursor-memory';
}
142
/**
 * Extract a ParsedEntry from a plain JSON object.
 *
 * Mirrors the key heuristics used by parseJSON: well-known title keys,
 * well-known content keys (including a knowledge-graph style
 * `observations` string array), and optional context metadata.
 * Returns null when no usable content is found so callers can count
 * the item as skipped.
 *
 * @param {object} obj - Decoded JSON object to mine for an entry.
 * @param {string} filePath - Origin file; basename is the title fallback.
 * @returns {object|null} ParsedEntry-shaped object, or null.
 */
function extractFromJSON(obj, filePath) {
    // Try various common keys for title, falling back to the filename.
    const title = obj.title ||
        obj.name ||
        obj.key ||
        path.basename(filePath, '.json');
    // Try various common keys for content.
    let content = obj.content ||
        obj.description ||
        obj.text ||
        obj.value ||
        obj.memory;
    // Knowledge-graph style records keep content as an observations array
    // (same fallback parseJSON applies; previously missed here).
    if (!content && Array.isArray(obj.observations) && obj.observations.length > 0) {
        content = obj.observations.join('\n\n');
    }
    if (!content) {
        return null;
    }
    return {
        // Coerce to strings for consistency with parseJSON's entries.
        title: String(title),
        content: String(content),
        context: obj.context || obj.category || obj.type,
        intent: obj.intent,
        source: 'cursor',
        originalPath: filePath,
    };
}
@@ -0,0 +1,6 @@
1
+ export type { ParsedEntry, ParseResult } from './types.js';
2
+ export { parseJSONL } from './jsonl.js';
3
+ export { parseMarkdown } from './markdown.js';
4
+ export { parseCursorMemory } from './cursor.js';
5
+ export { parseJSON } from './json.js';
6
+ export { parseSQLite } from './sqlite.js';
@@ -0,0 +1,5 @@
1
+ export { parseJSONL } from './jsonl.js';
2
+ export { parseMarkdown } from './markdown.js';
3
+ export { parseCursorMemory } from './cursor.js';
4
+ export { parseJSON } from './json.js';
5
+ export { parseSQLite } from './sqlite.js';
@@ -0,0 +1,11 @@
1
+ import type { ParseResult } from './types.js';
2
+ /**
3
+ * Parse a JSON file containing an array of memory entries.
4
+ *
5
+ * Supports various common structures:
6
+ * - Array of objects with title/content
7
+ * - Array of objects with name/description
8
+ * - Object with entries array
9
+ * - Object with memories array
10
+ */
11
+ export declare function parseJSON(filePath: string): Promise<ParseResult>;
@@ -0,0 +1,95 @@
1
+ import * as fs from 'fs/promises';
2
/**
 * Parse a JSON file containing memory entries.
 *
 * Accepts either a bare array of entry objects, an object wrapping the
 * array under entries/memories/items/data, or a single entry object.
 *
 * @param {string} filePath - Path to the .json file.
 * @returns {Promise<{entries: object[], skipped: number, errors: string[]}>}
 */
export async function parseJSON(filePath) {
    const entries = [];
    const errors = [];
    let skipped = 0;
    try {
        const raw = await fs.readFile(filePath, 'utf-8');
        const parsed = JSON.parse(raw);
        // Normalize the accepted shapes into one flat item list.
        let items = [];
        if (Array.isArray(parsed)) {
            items = parsed;
        }
        else if (typeof parsed === 'object' && parsed !== null) {
            const wrapped = [parsed.entries, parsed.memories, parsed.items, parsed.data]
                .find((candidate) => Array.isArray(candidate));
            // No wrapper array found: treat the object itself as one entry.
            items = wrapped !== undefined ? wrapped : [parsed];
        }
        items.forEach((item, i) => {
            if (typeof item !== 'object' || item === null) {
                skipped += 1;
                return;
            }
            // Title: common keys, then a positional fallback.
            const title = item.title || item.name || item.key || `Entry ${i + 1}`;
            // Content: common keys, then an observations array.
            let body = item.content ||
                item.text ||
                item.description ||
                item.value ||
                item.memory ||
                item.observation;
            if (!body && item.observations && Array.isArray(item.observations)) {
                body = item.observations.join('\n\n');
            }
            if (!body) {
                skipped += 1;
                return;
            }
            // Context: explicit fields first, then joined tags.
            const context = item.context ||
                item.category ||
                item.type ||
                (item.tags && Array.isArray(item.tags) ? item.tags.join(', ') : undefined);
            entries.push({
                title: String(title),
                content: String(body),
                context,
                intent: item.intent ? String(item.intent) : undefined,
                reasoning: item.reasoning ? String(item.reasoning) : undefined,
                example: item.example ? String(item.example) : undefined,
                source: 'json',
            });
        });
    }
    catch (error) {
        errors.push(`Failed to parse JSON: ${error instanceof Error ? error.message : String(error)}`);
    }
    return { entries, skipped, errors };
}
@@ -0,0 +1,9 @@
1
+ import type { ParseResult } from './types.js';
2
+ /**
3
+ * Parse a JSONL file (Anthropic MCP Memory / mcp-knowledge-graph format).
4
+ *
5
+ * Input format:
6
+ * {"type":"entity","name":"Stripe Webhooks","entityType":"concept","observations":["Need idempotency checks"]}
7
+ * {"type":"relation","from":"Stripe Webhooks","to":"Payment Processing","relationType":"part_of"}
8
+ */
9
+ export declare function parseJSONL(filePath: string): Promise<ParseResult>;
@@ -0,0 +1,53 @@
1
+ import * as fs from 'fs/promises';
2
/**
 * Parse a JSONL knowledge-graph export (Anthropic MCP Memory /
 * mcp-knowledge-graph). Each line is a standalone JSON record, e.g.:
 *   {"type":"entity","name":"Stripe Webhooks","entityType":"concept","observations":["Need idempotency checks"]}
 *   {"type":"relation","from":"Stripe Webhooks","to":"Payment Processing","relationType":"part_of"}
 * Entity records become entries; relations, `_aim` safety markers and
 * empty entities are counted as skipped.
 */
export async function parseJSONL(filePath) {
    const entries = [];
    const errors = [];
    let skipped = 0;
    try {
        const raw = await fs.readFile(filePath, 'utf-8');
        const lines = raw.trim().split('\n').filter((line) => line.trim());
        lines.forEach((line, i) => {
            try {
                const record = JSON.parse(line);
                // Safety markers and relations carry no importable content.
                if (record.type === '_aim' || record.type === 'relation') {
                    skipped++;
                    return;
                }
                // Only named entities are importable.
                if (record.type !== 'entity' || !record.name) {
                    skipped++;
                    return;
                }
                // An entity with no observations has empty content.
                if (!record.observations || record.observations.length === 0) {
                    skipped++;
                    return;
                }
                entries.push({
                    title: record.name,
                    content: record.observations.join('\n\n'),
                    context: record.entityType || undefined,
                    source: 'jsonl',
                });
            }
            catch (parseError) {
                errors.push(`Line ${i + 1}: Invalid JSON - ${parseError instanceof Error ? parseError.message : String(parseError)}`);
                skipped++;
            }
        });
    }
    catch (fileError) {
        errors.push(`Failed to read file: ${fileError instanceof Error ? fileError.message : String(fileError)}`);
    }
    return { entries, skipped, errors };
}
@@ -0,0 +1,15 @@
1
+ import type { ParseResult } from './types.js';
2
+ /**
3
+ * Parse a folder of markdown files (Basic Memory MCP / Obsidian / any .md).
4
+ *
5
+ * Input format:
6
+ * ---
7
+ * title: API Rate Limits
8
+ * tags: [api, performance]
9
+ * ---
10
+ *
11
+ * # API Rate Limits
12
+ *
13
+ * Always implement exponential backoff...
14
+ */
15
+ export declare function parseMarkdown(dirPath: string): Promise<ParseResult>;
@@ -0,0 +1,77 @@
1
+ import * as fs from 'fs/promises';
2
+ import * as path from 'path';
3
+ import { glob } from 'glob';
4
+ import matter from 'gray-matter';
5
/**
 * Parse markdown memory notes (Basic Memory MCP / Obsidian / any .md).
 *
 * Accepts either a directory (recursively globbed for *.md) or a single
 * markdown file. Title resolution order: frontmatter `title`, first H1
 * heading, then the filename with dashes replaced by spaces.
 *
 * @param {string} dirPath - Directory or single-file path.
 * @returns {Promise<{entries: object[], skipped: number, errors: string[]}>}
 */
export async function parseMarkdown(dirPath) {
    const entries = [];
    const errors = [];
    let skipped = 0;
    try {
        const stats = await fs.stat(dirPath);
        const files = stats.isDirectory()
            ? await glob(path.join(dirPath, '**/*.md'), { nodir: true })
            : [dirPath];
        for (const filePath of files) {
            try {
                const raw = await fs.readFile(filePath, 'utf-8');
                const { data: frontmatter, content } = matter(raw);
                const body = content.trim();
                if (!body) {
                    // Empty notes carry nothing worth importing.
                    skipped++;
                    continue;
                }
                // Title: frontmatter, then first H1, then filename.
                let title = frontmatter.title;
                if (!title) {
                    const h1 = body.match(/^#\s+(.+)$/m);
                    title = h1
                        ? h1[1].trim()
                        : path.basename(filePath, '.md').replace(/-/g, ' ');
                }
                // Context: explicit field, otherwise joined tags.
                const context = frontmatter.context ||
                    (frontmatter.tags && Array.isArray(frontmatter.tags)
                        ? frontmatter.tags.join(', ')
                        : undefined);
                entries.push({
                    title,
                    content: body,
                    context,
                    intent: frontmatter.intent,
                    source: 'markdown',
                    originalPath: filePath,
                });
            }
            catch (fileError) {
                errors.push(`${filePath}: ${fileError instanceof Error ? fileError.message : String(fileError)}`);
                skipped++;
            }
        }
    }
    catch (dirError) {
        errors.push(`Failed to access path: ${dirError instanceof Error ? dirError.message : String(dirError)}`);
    }
    return { entries, skipped, errors };
}
@@ -0,0 +1,8 @@
1
+ import type { ParseResult } from './types.js';
2
+ /**
3
+ * Parse a SQLite database file for memory entries.
4
+ *
5
+ * Auto-detects table and column names from common patterns.
6
+ * Supports mcp-memory-service, SQLite-vec, and custom schemas.
7
+ */
8
+ export declare function parseSQLite(filePath: string): Promise<ParseResult>;
@@ -0,0 +1,123 @@
1
+ import Database from 'better-sqlite3';
2
+ // ============================================================================
3
+ // SQLite Parser - mcp-memory-service, SQLite-vec, custom databases
4
+ // ============================================================================
5
+ // Common table names for memory storage
6
+ const TABLE_CANDIDATES = ['memories', 'entries', 'knowledge', 'notes', 'items', 'documents'];
7
+ // Common column names for different fields
8
+ const TITLE_CANDIDATES = ['title', 'name', 'key', 'id', 'subject', 'heading'];
9
+ const CONTENT_CANDIDATES = ['content', 'text', 'value', 'memory', 'observation', 'body', 'description', 'note'];
10
+ const CONTEXT_CANDIDATES = ['context', 'category', 'type', 'tags', 'topic', 'area'];
11
/**
 * Parse a SQLite database file for memory entries.
 *
 * Auto-detects the storage table and its columns from common naming
 * patterns (mcp-memory-service, SQLite-vec, custom schemas), then maps
 * every row with non-empty textual content to an entry. The database
 * is opened read-only and always closed before returning.
 */
export async function parseSQLite(filePath) {
    const entries = [];
    const errors = [];
    let skipped = 0;
    let db = null;
    try {
        db = new Database(filePath, { readonly: true });
        // Locate the first known memory table.
        const tableRows = db
            .prepare("SELECT name FROM sqlite_master WHERE type='table'")
            .all();
        const tableNames = tableRows.map((t) => t.name.toLowerCase());
        const targetTable = TABLE_CANDIDATES.find((candidate) => tableNames.includes(candidate));
        if (!targetTable) {
            errors.push(`No memory table found. Available tables: ${tableNames.join(', ')}. ` +
                `Expected one of: ${TABLE_CANDIDATES.join(', ')}`);
            return { entries, skipped, errors };
        }
        // Map known column-name patterns onto the table's actual columns.
        const columnRows = db.prepare(`PRAGMA table_info(${targetTable})`).all();
        const columnNames = columnRows.map((c) => c.name.toLowerCase());
        const pick = (candidates) => candidates.find((candidate) => columnNames.includes(candidate));
        // Content column is mandatory; title/context are optional.
        const contentColumn = pick(CONTENT_CANDIDATES);
        if (!contentColumn) {
            errors.push(`No content column found in table "${targetTable}". ` +
                `Available columns: ${columnNames.join(', ')}. ` +
                `Expected one of: ${CONTENT_CANDIDATES.join(', ')}`);
            return { entries, skipped, errors };
        }
        const titleColumn = pick(TITLE_CANDIDATES);
        const contextColumn = pick(CONTEXT_CANDIDATES);
        const selected = [contentColumn, titleColumn, contextColumn].filter(Boolean);
        const rows = db.prepare(`SELECT ${selected.join(', ')} FROM ${targetTable}`).all();
        for (const row of rows) {
            const content = row[contentColumn];
            // Only non-empty string content is importable.
            if (!content || typeof content !== 'string' || !content.trim()) {
                skipped++;
                continue;
            }
            // Title: stored value, or a one-line preview of the content.
            let title;
            if (titleColumn && row[titleColumn]) {
                title = String(row[titleColumn]);
            }
            else {
                title = content.slice(0, 60).replace(/\s+/g, ' ').trim();
                if (content.length > 60) {
                    title += '...';
                }
            }
            // Context: accept strings directly or join array values.
            let context;
            if (contextColumn && row[contextColumn]) {
                const contextValue = row[contextColumn];
                if (typeof contextValue === 'string') {
                    context = contextValue;
                }
                else if (Array.isArray(contextValue)) {
                    context = contextValue.join(', ');
                }
            }
            entries.push({
                title,
                content: content.trim(),
                context,
                source: 'sqlite',
                originalPath: `${filePath}:${targetTable}`,
            });
        }
    }
    catch (error) {
        if (error instanceof Error) {
            if (error.message.includes('SQLITE_CANTOPEN')) {
                errors.push(`Cannot open database file: ${filePath}`);
            }
            else if (error.message.includes('file is not a database')) {
                errors.push(`File is not a valid SQLite database: ${filePath}`);
            }
            else {
                errors.push(`SQLite error: ${error.message}`);
            }
        }
        else {
            errors.push(`Unknown error: ${String(error)}`);
        }
    }
    finally {
        if (db) {
            try {
                db.close();
            }
            catch {
                // Ignore close errors
            }
        }
    }
    return { entries, skipped, errors };
}
@@ -0,0 +1,21 @@
1
+ /**
2
+ * A parsed memory entry ready to be converted to Librarian format.
3
+ */
4
+ export interface ParsedEntry {
5
+ title: string;
6
+ content: string;
7
+ context?: string;
8
+ intent?: string;
9
+ reasoning?: string;
10
+ example?: string;
11
+ source: 'jsonl' | 'markdown' | 'cursor' | 'json' | 'sqlite';
12
+ originalPath?: string;
13
+ }
14
+ /**
15
+ * Result of a parse operation.
16
+ */
17
+ export interface ParseResult {
18
+ entries: ParsedEntry[];
19
+ skipped: number;
20
+ errors: string[];
21
+ }
@@ -0,0 +1,4 @@
1
+ // ============================================================================
2
+ // Shared Types for Memory Parsers
3
+ // ============================================================================
4
+ export {};
@@ -9,8 +9,13 @@ export declare function getLibraryPath(): string;
9
9
  export declare function getLocalPath(libraryPath: string): string;
10
10
  /**
11
11
  * Get the imported entries path.
12
+ * @deprecated Use getPackagesPath for marketplace content
12
13
  */
13
14
  export declare function getImportedPath(libraryPath: string): string;
15
+ /**
16
+ * Get the packages path (marketplace content from others).
17
+ */
18
+ export declare function getPackagesPath(libraryPath: string): string;
14
19
  /**
15
20
  * Get the archived entries path.
16
21
  */
@@ -17,10 +17,17 @@ export function getLocalPath(libraryPath) {
17
17
  }
18
18
  /**
19
19
  * Get the imported entries path.
20
+ * @deprecated Use getPackagesPath for marketplace content
20
21
  */
21
22
  export function getImportedPath(libraryPath) {
22
23
  return path.join(libraryPath, 'imported');
23
24
  }
25
+ /**
26
+ * Get the packages path (marketplace content from others).
27
+ */
28
+ export function getPackagesPath(libraryPath) {
29
+ return path.join(libraryPath, 'packages');
30
+ }
24
31
  /**
25
32
  * Get the archived entries path.
26
33
  */
package/dist/server.js CHANGED
@@ -6,6 +6,8 @@ import { briefTool } from './tools/brief.js';
6
6
  import { recordTool } from './tools/record.js';
7
7
  import { adoptTool } from './tools/adopt.js';
8
8
  import { markHitTool } from './tools/mark-hit.js';
9
+ import { importMemoriesTool } from './tools/import-memories.js';
10
+ import { rebuildIndexTool } from './tools/rebuild-index.js';
9
11
  const server = new Server({
10
12
  name: 'librarian',
11
13
  version: '1.0.0',
@@ -38,6 +40,16 @@ server.setRequestHandler(ListToolsRequestSchema, async () => {
38
40
  description: markHitTool.description,
39
41
  inputSchema: markHitTool.inputSchema,
40
42
  },
43
+ {
44
+ name: importMemoriesTool.name,
45
+ description: importMemoriesTool.description,
46
+ inputSchema: importMemoriesTool.inputSchema,
47
+ },
48
+ {
49
+ name: rebuildIndexTool.name,
50
+ description: rebuildIndexTool.description,
51
+ inputSchema: rebuildIndexTool.inputSchema,
52
+ },
41
53
  ],
42
54
  };
43
55
  });
@@ -59,6 +71,12 @@ server.setRequestHandler(CallToolRequestSchema, async (request) => {
59
71
  case 'mark_hit':
60
72
  result = await markHitTool.handler(args);
61
73
  break;
74
+ case 'import_memories':
75
+ result = await importMemoriesTool.handler(args);
76
+ break;
77
+ case 'rebuild_index':
78
+ result = await rebuildIndexTool.handler(args);
79
+ break;
62
80
  default:
63
81
  throw new Error(`Unknown tool: ${name}`);
64
82
  }
@@ -2,7 +2,7 @@ import * as fs from 'fs/promises';
2
2
  import * as path from 'path';
3
3
  import matter from 'gray-matter';
4
4
  import { glob } from 'glob';
5
- import { getLibraryPath, getLocalPath, getImportedPath } from '../library/storage.js';
5
+ import { getLibraryPath, getLocalPath, getImportedPath, getPackagesPath } from '../library/storage.js';
6
6
  import { loadIndex, semanticSearch, isIndexStale } from '../library/vector-index.js';
7
7
  // ============================================================================
8
8
  // Tool Definition
@@ -39,6 +39,7 @@ Examples:
39
39
  const libraryPath = getLibraryPath();
40
40
  const localPath = getLocalPath(libraryPath);
41
41
  const importedPath = getImportedPath(libraryPath);
42
+ const packagesPath = getPackagesPath(libraryPath);
42
43
  let allEntries = [];
43
44
  let useSemanticSearch = false;
44
45
  let semanticMatches = [];
@@ -96,7 +97,7 @@ Examples:
96
97
  catch {
97
98
  // No local files yet
98
99
  }
99
- // Read imported entries
100
+ // Read imported entries (legacy - deprecated)
100
101
  try {
101
102
  const importedFiles = await glob(path.join(importedPath, '**/*.md'), { nodir: true });
102
103
  for (const filePath of importedFiles) {
@@ -109,6 +110,19 @@ Examples:
109
110
  catch {
110
111
  // No imported files
111
112
  }
113
+ // Read packages entries (marketplace content)
114
+ try {
115
+ const packagesFiles = await glob(path.join(packagesPath, '**/*.md'), { nodir: true });
116
+ for (const filePath of packagesFiles) {
117
+ const entry = await readEntry(filePath, libraryPath);
118
+ if (entry) {
119
+ allEntries.push(entry);
120
+ }
121
+ }
122
+ }
123
+ catch {
124
+ // No packages files
125
+ }
112
126
  // If no entries at all
113
127
  if (allEntries.length === 0) {
114
128
  return {
@@ -0,0 +1,32 @@
1
+ export interface ImportResult {
2
+ success: boolean;
3
+ imported: number;
4
+ skipped: number;
5
+ errors: string[];
6
+ outputPath: string;
7
+ message: string;
8
+ }
9
+ export declare const importMemoriesTool: {
10
+ name: string;
11
+ description: string;
12
+ inputSchema: {
13
+ type: "object";
14
+ properties: {
15
+ format: {
16
+ type: string;
17
+ enum: string[];
18
+ description: string;
19
+ };
20
+ path: {
21
+ type: string;
22
+ description: string;
23
+ };
24
+ source_name: {
25
+ type: string;
26
+ description: string;
27
+ };
28
+ };
29
+ required: string[];
30
+ };
31
+ handler(args: unknown): Promise<ImportResult>;
32
+ };
@@ -0,0 +1,229 @@
1
+ import * as fs from 'fs/promises';
2
+ import * as path from 'path';
3
+ import { getLibraryPath, getLocalPath } from '../library/storage.js';
4
+ import { loadIndex, saveIndex, addToIndex } from '../library/vector-index.js';
5
+ import { parseJSONL, parseMarkdown, parseCursorMemory, parseJSON, parseSQLite } from '../library/parsers/index.js';
6
+ // ============================================================================
7
+ // Tool Definition
8
+ // ============================================================================
9
/**
 * MCP tool: import memories from other AI tools into Librarian.
 * Parses the source (per `format`), writes entries as markdown under
 * .librarian/local/[source-name]/, and indexes them for semantic search.
 */
export const importMemoriesTool = {
    name: 'import_memories',
    description: `Import memories from other AI tools into Librarian.

Supported formats:
- jsonl: Anthropic MCP Memory, mcp-knowledge-graph (.jsonl files)
- markdown: Basic Memory, Obsidian, any .md files
- cursor: Cursor Memory Bank (.cursor-memory/)
- json: Simple memory servers, knowledge stores (.json files)
- sqlite: mcp-memory-service, SQLite-vec (.db, .sqlite files)

Imports go to .librarian/local/[source-name]/ and are automatically indexed for semantic search.

Examples:
- import_memories({ format: "jsonl", path: "~/.aim/memory.jsonl", source_name: "anthropic-memory" })
- import_memories({ format: "markdown", path: "~/basic-memory/", source_name: "basic-memory" })
- import_memories({ format: "cursor", path: ".cursor-memory/", source_name: "cursor-memory" })
- import_memories({ format: "json", path: "~/memories.json", source_name: "json-memory" })
- import_memories({ format: "sqlite", path: "~/memory.db", source_name: "sqlite-memory" })`,
    inputSchema: {
        type: 'object',
        properties: {
            format: {
                type: 'string',
                enum: ['jsonl', 'markdown', 'cursor', 'json', 'sqlite'],
                description: 'Format of the source memories',
            },
            path: {
                type: 'string',
                description: 'Path to memory file or folder',
            },
            source_name: {
                type: 'string',
                description: 'Name for the import folder (e.g., "anthropic-memory"). Auto-generated if not provided.',
            },
        },
        required: ['format', 'path'],
    },
    async handler(args) {
        const { format, path: inputPath, source_name } = args;
        if (!format || !inputPath) {
            throw new Error('format and path are required');
        }
        // Expand a leading "~" to the home directory. Only expand when
        // "~" is the whole path or is followed by a separator — a plain
        // /^~/ would also mangle "~user/..." style paths. USERPROFILE
        // covers Windows, where HOME is usually unset.
        const home = process.env.HOME || process.env.USERPROFILE || '';
        const expandedPath = inputPath.replace(/^~(?=$|[\\/])/, home);
        // Generate source name if not provided
        const sourceName = source_name || generateSourceName(format, expandedPath);
        // Setup output directory
        const libraryPath = getLibraryPath();
        const localPath = getLocalPath(libraryPath);
        const outputPath = path.join(localPath, sourceName);
        // Check if output directory already exists
        try {
            await fs.access(outputPath);
            // Directory exists - we'll add to it but warn about potential duplicates
        }
        catch {
            // Directory doesn't exist - create it
            await fs.mkdir(outputPath, { recursive: true });
        }
        // Parse the source based on format
        let parseResult;
        switch (format) {
            case 'jsonl':
                parseResult = await parseJSONL(expandedPath);
                break;
            case 'markdown':
                parseResult = await parseMarkdown(expandedPath);
                break;
            case 'cursor':
                parseResult = await parseCursorMemory(expandedPath);
                break;
            case 'json':
                parseResult = await parseJSON(expandedPath);
                break;
            case 'sqlite':
                parseResult = await parseSQLite(expandedPath);
                break;
            default:
                throw new Error(`Unknown format: ${format}`);
        }
        // Convert and save entries, indexing each as we go
        const index = await loadIndex();
        let imported = 0;
        const errors = [...parseResult.errors];
        for (const entry of parseResult.entries) {
            try {
                const relativePath = await saveEntry(entry, outputPath, libraryPath);
                // Index every textual field so semantic search sees the
                // whole entry, not just its body.
                const fullContent = [
                    entry.title,
                    entry.intent || '',
                    entry.content,
                    entry.reasoning || '',
                    entry.example || '',
                    entry.context || '',
                ].filter(Boolean).join('\n\n');
                await addToIndex(index, relativePath, entry.title, fullContent);
                imported++;
            }
            catch (saveError) {
                errors.push(`Failed to save "${entry.title}": ${saveError instanceof Error ? saveError.message : String(saveError)}`);
            }
        }
        // Save the updated index
        await saveIndex(index);
        const message = imported > 0
            ? `Imported ${imported} entries from ${format} format into ${sourceName}/`
            : `No entries imported. ${parseResult.skipped} skipped, ${errors.length} errors.`;
        return {
            success: imported > 0,
            imported,
            skipped: parseResult.skipped,
            errors,
            outputPath: path.relative(libraryPath, outputPath),
            message,
        };
    },
};
128
+ // ============================================================================
129
+ // Helper Functions
130
+ // ============================================================================
131
/**
 * Derive a default source-folder name from the import format and path.
 */
function generateSourceName(format, inputPath) {
    const basename = path.basename(inputPath, path.extname(inputPath));
    // Sanitized fallback used by the file-based formats.
    const slugged = basename.replace(/[^a-z0-9-]/gi, '-').toLowerCase();
    switch (format) {
        case 'jsonl':
            return basename.includes('memory') ? 'imported-memory' : `imported-${basename}`;
        case 'cursor':
            return 'cursor-memory';
        case 'markdown':
            return slugged || 'imported-markdown';
        case 'json':
            return slugged || 'imported-json';
        case 'sqlite':
            return slugged || 'imported-sqlite';
        default:
            return `imported-${format}`;
    }
}
154
/**
 * Persist a parsed entry as a markdown file with YAML frontmatter.
 * Filename collisions are resolved with an incrementing `-N` suffix.
 *
 * @returns {Promise<string>} Saved file path relative to the library root.
 */
async function saveEntry(entry, outputPath, libraryPath) {
    const slug = slugify(entry.title);
    const created = new Date().toISOString();
    // Find a free filename: slug.md, slug-1.md, slug-2.md, ...
    let filePath = path.join(outputPath, `${slug}.md`);
    for (let counter = 1; await fileExists(filePath); counter++) {
        filePath = path.join(outputPath, `${slug}-${counter}.md`);
    }
    // YAML frontmatter (optional fields only when present).
    const frontmatter = ['---'];
    if (entry.intent) {
        frontmatter.push(`intent: "${escapeYaml(entry.intent)}"`);
    }
    if (entry.context) {
        frontmatter.push(`context: "${escapeYaml(entry.context)}"`);
    }
    frontmatter.push(`created: "${created}"`);
    frontmatter.push(`updated: "${created}"`);
    frontmatter.push(`source: "${entry.source}"`);
    if (entry.originalPath) {
        frontmatter.push(`original_path: "${escapeYaml(entry.originalPath)}"`);
    }
    frontmatter.push('hits: 0');
    frontmatter.push('last_hit: null');
    frontmatter.push('---');
    // Markdown body: H1 title, content, then optional sections.
    const body = [`# ${entry.title}`, '', entry.content];
    if (entry.reasoning) {
        body.push('', '## Reasoning', '', entry.reasoning);
    }
    if (entry.example) {
        body.push('', '## Example', '', '```', entry.example, '```');
    }
    const fileContent = frontmatter.join('\n') + '\n\n' + body.join('\n') + '\n';
    await fs.writeFile(filePath, fileContent, 'utf-8');
    return path.relative(libraryPath, filePath);
}
211
/**
 * Build a filesystem-safe slug from a title: lowercase, runs of
 * non-alphanumerics collapsed to single dashes, edge dashes trimmed,
 * capped at 50 characters.
 */
function slugify(text) {
    const dashed = text.toLowerCase().replace(/[^a-z0-9]+/g, '-');
    return dashed.replace(/^-+|-+$/g, '').slice(0, 50);
}
218
/**
 * Escape a string for embedding in a double-quoted YAML scalar.
 *
 * Backslashes are escaped before quotes — otherwise a value containing
 * a backslash (e.g. a Windows path) would produce invalid or silently
 * corrupted YAML. Newlines (including CRLF) are flattened to single
 * spaces since frontmatter values are single-line.
 */
function escapeYaml(text) {
    return text
        .replace(/\\/g, '\\\\')
        .replace(/"/g, '\\"')
        .replace(/\r\n|[\r\n]/g, ' ');
}
221
/**
 * Check whether a path exists and is accessible to this process.
 * Resolves to false instead of throwing on any access error.
 */
async function fileExists(filePath) {
    return fs.access(filePath).then(() => true, () => false);
}
@@ -2,3 +2,5 @@ export { briefTool } from './brief.js';
2
2
  export { recordTool } from './record.js';
3
3
  export { adoptTool } from './adopt.js';
4
4
  export { markHitTool } from './mark-hit.js';
5
+ export { importMemoriesTool } from './import-memories.js';
6
+ export { rebuildIndexTool } from './rebuild-index.js';
@@ -2,3 +2,5 @@ export { briefTool } from './brief.js';
2
2
  export { recordTool } from './record.js';
3
3
  export { adoptTool } from './adopt.js';
4
4
  export { markHitTool } from './mark-hit.js';
5
+ export { importMemoriesTool } from './import-memories.js';
6
+ export { rebuildIndexTool } from './rebuild-index.js';
@@ -0,0 +1,23 @@
1
/**
 * Result returned by the `rebuild_index` tool handler.
 */
export interface RebuildResult {
    /** True when at least one entry was indexed, or when there were no files to index at all. */
    success: boolean;
    /** Number of entries successfully embedded and added to the index. */
    indexed: number;
    /** Number of files skipped — empty bodies, or files that failed to read/parse. */
    skipped: number;
    /** Per-file failure messages, formatted as "relative/path: reason". */
    errors: string[];
    /** Index statistics (shape produced by getIndexStats). */
    stats: {
        /** Number of entries in the index — presumably one per indexed file; confirm against getIndexStats. */
        entryCount: number;
        /** Number of embedded chunks across all entries. */
        chunkCount: number;
        /** Identifier of the embedding model backing the index. */
        modelId: string;
        /** Rebuild timestamp — presumably set when the index is saved; confirm against saveIndex. */
        rebuilt: string;
    };
    /** Human-readable one-line summary of the rebuild outcome. */
    message: string;
}
/**
 * MCP tool that rebuilds the semantic search index from every `.md`
 * entry in the library. Takes no input arguments.
 */
export declare const rebuildIndexTool: {
    name: string;
    description: string;
    inputSchema: {
        type: "object";
        properties: {};
        required: never[];
    };
    handler(_args: unknown): Promise<RebuildResult>;
};
@@ -0,0 +1,105 @@
1
+ import * as fs from 'fs/promises';
2
+ import * as path from 'path';
3
+ import { glob } from 'glob';
4
+ import matter from 'gray-matter';
5
+ import { getLibraryPath, getLocalPath, getImportedPath, getPackagesPath } from '../library/storage.js';
6
+ import { saveIndex, addToIndex, getIndexStats } from '../library/vector-index.js';
7
+ // ============================================================================
8
+ // Tool Definition
9
+ // ============================================================================
10
/**
 * MCP tool: rebuild the semantic search index from scratch.
 *
 * Walks the local, imported, and packages directories of the library,
 * reads every markdown entry, and embeds each one into a freshly created
 * index, which is then saved. Returns counts, per-file errors, index
 * stats, and a human-readable summary.
 */
export const rebuildIndexTool = {
    name: 'rebuild_index',
    description: `Rebuild the semantic search index for all library entries.

Use this after:
- Upgrading to v1.2.0 (existing entries need embeddings)
- Importing memories from other tools
- If semantic search seems broken

Reads all .md entries and generates embeddings. May take a minute on first run.`,
    inputSchema: {
        type: 'object',
        properties: {},
        required: [],
    },
    async handler(_args) {
        const libraryPath = getLibraryPath();
        // The three roots that may contain entry files.
        const searchRoots = [
            getLocalPath(libraryPath),
            getImportedPath(libraryPath),
            getPackagesPath(libraryPath),
        ];
        // Start from an empty index; rebuilt/modelId are presumably filled
        // in downstream (by addToIndex/saveIndex) — confirm in vector-index.
        const freshIndex = {
            version: 1,
            rebuilt: '',
            modelId: '',
            entries: [],
        };
        let indexedCount = 0;
        let skippedCount = 0;
        const failures = [];
        // Gather every markdown entry under each root; a missing root
        // simply contributes nothing.
        const markdownFiles = [];
        for (const root of searchRoots) {
            try {
                const matches = await glob(path.join(root, '**/*.md'), { nodir: true });
                markdownFiles.push(...matches);
            }
            catch {
                // Directory doesn't exist — nothing to collect here.
            }
        }
        for (const entryFile of markdownFiles) {
            const relativePath = path.relative(libraryPath, entryFile);
            try {
                const raw = await fs.readFile(entryFile, 'utf-8');
                const parsed = matter(raw);
                const body = parsed.content;
                // An entry with no body has nothing to embed.
                if (!body.trim()) {
                    skippedCount++;
                    continue;
                }
                // Title preference: frontmatter `title`, then the first
                // markdown heading, then the filename with dashes spaced out.
                let title = parsed.data.title;
                if (!title) {
                    const heading = body.match(/^#\s+(.+)$/m);
                    title = heading
                        ? heading[1].trim()
                        : path.basename(entryFile, '.md').replace(/-/g, ' ');
                }
                // Embed title + intent + body + context as one document.
                const embeddingText = [
                    title,
                    parsed.data.intent || '',
                    body.trim(),
                    parsed.data.context || '',
                ].filter(Boolean).join('\n\n');
                await addToIndex(freshIndex, relativePath, title, embeddingText);
                indexedCount++;
            }
            catch (error) {
                failures.push(`${relativePath}: ${error instanceof Error ? error.message : String(error)}`);
                skippedCount++;
            }
        }
        await saveIndex(freshIndex);
        const stats = getIndexStats(freshIndex);
        return {
            // An empty library is not a failure; indexing nothing out of
            // existing files is.
            success: indexedCount > 0 || markdownFiles.length === 0,
            indexed: indexedCount,
            skipped: skippedCount,
            errors: failures,
            stats,
            message: indexedCount > 0
                ? `Rebuilt index with ${indexedCount} entries (${stats.chunkCount} chunks). ${skippedCount} skipped.`
                : 'No entries found to index.',
        };
    },
};
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@telvok/librarian-mcp",
3
- "version": "1.2.0",
3
+ "version": "1.4.0",
4
4
  "description": "Knowledge capture MCP server - remember what you learn with AI",
5
5
  "type": "module",
6
6
  "main": "dist/server.js",
@@ -33,12 +33,14 @@
33
33
  "dependencies": {
34
34
  "@huggingface/transformers": "^3.0.0",
35
35
  "@modelcontextprotocol/sdk": "^1.0.0",
36
+ "better-sqlite3": "^11.0.0",
36
37
  "glob": "^11.0.0",
37
38
  "gray-matter": "^4.0.3",
38
39
  "uuid": "^11.0.0",
39
40
  "zod": "^3.24.0"
40
41
  },
41
42
  "devDependencies": {
43
+ "@types/better-sqlite3": "^7.6.0",
42
44
  "@types/node": "^22.0.0",
43
45
  "@types/uuid": "^10.0.0",
44
46
  "typescript": "^5.7.0"