@telvok/librarian-mcp 1.5.4 → 2.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/library/errors.d.ts +48 -0
- package/dist/library/errors.js +80 -0
- package/dist/library/schemas.d.ts +6 -6
- package/dist/library/sensitive-scanner.d.ts +20 -0
- package/dist/library/sensitive-scanner.js +56 -0
- package/dist/library/storage.d.ts +2 -2
- package/dist/library/storage.js +2 -2
- package/dist/library 2/embeddings.d.ts +21 -0
- package/dist/library 2/embeddings.js +86 -0
- package/dist/library 2/manager.d.ts +42 -0
- package/dist/library 2/manager.js +218 -0
- package/dist/library 2/parsers/cursor.d.ts +15 -0
- package/dist/library 2/parsers/cursor.js +168 -0
- package/dist/library 2/parsers/index.d.ts +6 -0
- package/dist/library 2/parsers/index.js +5 -0
- package/dist/library 2/parsers/json.d.ts +11 -0
- package/dist/library 2/parsers/json.js +95 -0
- package/dist/library 2/parsers/jsonl.d.ts +14 -0
- package/dist/library 2/parsers/jsonl.js +85 -0
- package/dist/library 2/parsers/markdown.d.ts +15 -0
- package/dist/library 2/parsers/markdown.js +77 -0
- package/dist/library 2/parsers/sqlite.d.ts +8 -0
- package/dist/library 2/parsers/sqlite.js +123 -0
- package/dist/library 2/parsers/types.d.ts +21 -0
- package/dist/library 2/parsers/types.js +4 -0
- package/dist/library 2/query.d.ts +26 -0
- package/dist/library 2/query.js +104 -0
- package/dist/library 2/schemas.d.ts +324 -0
- package/dist/library 2/schemas.js +79 -0
- package/dist/library 2/storage.d.ts +22 -0
- package/dist/library 2/storage.js +36 -0
- package/dist/library 2/vector-index.d.ts +55 -0
- package/dist/library 2/vector-index.js +160 -0
- package/dist/server 2.js +199 -0
- package/dist/server.d 2.ts +2 -0
- package/dist/server.js +104 -54
- package/dist/tools/adopt.d.ts +1 -0
- package/dist/tools/adopt.js +37 -10
- package/dist/tools/audit.d.ts +27 -0
- package/dist/tools/audit.js +126 -0
- package/dist/tools/auth.d.ts +69 -0
- package/dist/tools/auth.js +379 -0
- package/dist/tools/bounty-claim.d.ts +28 -0
- package/dist/tools/bounty-claim.js +92 -0
- package/dist/tools/bounty-create.d.ts +47 -0
- package/dist/tools/bounty-create.js +118 -0
- package/dist/tools/bounty-list.d.ts +50 -0
- package/dist/tools/bounty-list.js +116 -0
- package/dist/tools/bounty-submit.d.ts +34 -0
- package/dist/tools/bounty-submit.js +94 -0
- package/dist/tools/brief.d.ts +94 -0
- package/dist/tools/brief.js +234 -15
- package/dist/tools/delete.d.ts +87 -0
- package/dist/tools/delete.js +266 -0
- package/dist/tools/feedback.d.ts +27 -0
- package/dist/tools/feedback.js +98 -0
- package/dist/tools/help.d.ts +22 -0
- package/dist/tools/help.js +482 -0
- package/dist/tools/import-memories.d.ts +1 -0
- package/dist/tools/import-memories.js +18 -13
- package/dist/tools/index.d.ts +11 -0
- package/dist/tools/index.js +12 -0
- package/dist/tools/library-buy.d.ts +31 -0
- package/dist/tools/library-buy.js +104 -0
- package/dist/tools/library-download.d.ts +27 -0
- package/dist/tools/library-download.js +177 -0
- package/dist/tools/library-publish.d.ts +117 -0
- package/dist/tools/library-publish.js +447 -0
- package/dist/tools/library-search.d.ts +110 -0
- package/dist/tools/library-search.js +132 -0
- package/dist/tools/mark-hit.d.ts +1 -0
- package/dist/tools/mark-hit.js +83 -5
- package/dist/tools/my-books.d.ts +51 -0
- package/dist/tools/my-books.js +115 -0
- package/dist/tools/my-bounties.d.ts +43 -0
- package/dist/tools/my-bounties.js +126 -0
- package/dist/tools/rate-book.d.ts +40 -0
- package/dist/tools/rate-book.js +147 -0
- package/dist/tools/rebuild-index.d.ts +1 -0
- package/dist/tools/rebuild-index.js +40 -8
- package/dist/tools/record.d.ts +18 -0
- package/dist/tools/record.js +30 -26
- package/dist/tools/seller-analytics.d.ts +53 -0
- package/dist/tools/seller-analytics.js +180 -0
- package/dist/tools/sync.d.ts +55 -0
- package/dist/tools/sync.js +304 -0
- package/dist/tools/unsubscribe.d.ts +48 -0
- package/dist/tools/unsubscribe.js +120 -0
- package/dist/tools 2/adopt.d.ts +24 -0
- package/dist/tools 2/adopt.js +154 -0
- package/dist/tools 2/auth.d.ts +35 -0
- package/dist/tools 2/auth.js +229 -0
- package/dist/tools 2/brief.d.ts +56 -0
- package/dist/tools 2/brief.js +414 -0
- package/dist/tools 2/help.d.ts +21 -0
- package/dist/tools 2/help.js +267 -0
- package/dist/tools 2/import-memories.d.ts +32 -0
- package/dist/tools 2/import-memories.js +231 -0
- package/dist/tools 2/index.d.ts +12 -0
- package/dist/tools 2/index.js +12 -0
- package/dist/tools 2/mark-hit.d.ts +20 -0
- package/dist/tools 2/mark-hit.js +71 -0
- package/dist/tools 2/marketplace-buy.d.ts +30 -0
- package/dist/tools 2/marketplace-buy.js +97 -0
- package/dist/tools 2/marketplace-download.d.ts +26 -0
- package/dist/tools 2/marketplace-download.js +160 -0
- package/dist/tools 2/marketplace-publish.d.ts +111 -0
- package/dist/tools 2/marketplace-publish.js +377 -0
- package/dist/tools 2/marketplace-search.d.ts +57 -0
- package/dist/tools 2/marketplace-search.js +96 -0
- package/dist/tools 2/my-books.d.ts +50 -0
- package/dist/tools 2/my-books.js +107 -0
- package/dist/tools 2/rate-book.d.ts +39 -0
- package/dist/tools 2/rate-book.js +139 -0
- package/dist/tools 2/rebuild-index.d.ts +23 -0
- package/dist/tools 2/rebuild-index.js +107 -0
- package/dist/tools 2/record.d.ts +40 -0
- package/dist/tools 2/record.js +205 -0
- package/dist/tools 2/seller-analytics.d.ts +35 -0
- package/dist/tools 2/seller-analytics.js +102 -0
- package/dist/tools 2/sync.d.ts +54 -0
- package/dist/tools 2/sync.js +298 -0
- package/package.json +1 -1
@@ -0,0 +1,168 @@
+import * as fs from 'fs/promises';
+import * as path from 'path';
+import { glob } from 'glob';
+import matter from 'gray-matter';
+// ============================================================================
+// Cursor Memory Bank Parser
+// ============================================================================
+/**
+ * Parse a Cursor Memory Bank folder (.cursor-memory/).
+ *
+ * Cursor Memory Bank typically contains:
+ * - activeContext.md - Current working context
+ * - progress.md - Progress log
+ * - projectBrief.md - Project overview
+ * - systemPatterns.md - System patterns
+ * - decisionLog.md - Decision history
+ * - techStack.md - Technology stack info
+ *
+ * Can also contain JSON files and subdirectories.
+ */
+export async function parseCursorMemory(dirPath) {
+    const entries = [];
+    const errors = [];
+    let skipped = 0;
+    try {
+        const stats = await fs.stat(dirPath);
+        if (!stats.isDirectory()) {
+            errors.push(`${dirPath} is not a directory`);
+            return { entries, skipped, errors };
+        }
+        // Parse markdown files
+        const mdFiles = await glob(path.join(dirPath, '**/*.md'), { nodir: true });
+        for (const filePath of mdFiles) {
+            try {
+                const content = await fs.readFile(filePath, 'utf-8');
+                const { data, content: body } = matter(content);
+                const trimmedBody = body.trim();
+                if (!trimmedBody) {
+                    skipped++;
+                    continue;
+                }
+                // Extract title from filename (convert camelCase/snake_case to Title Case)
+                const filename = path.basename(filePath, '.md');
+                const title = data.title || formatFilename(filename);
+                // Determine context based on filename
+                const context = data.context || inferContext(filename);
+                entries.push({
+                    title,
+                    content: trimmedBody,
+                    context,
+                    intent: data.intent,
+                    source: 'cursor',
+                    originalPath: filePath,
+                });
+            }
+            catch (fileError) {
+                errors.push(`${filePath}: ${fileError instanceof Error ? fileError.message : String(fileError)}`);
+                skipped++;
+            }
+        }
+        // Parse JSON files
+        const jsonFiles = await glob(path.join(dirPath, '**/*.json'), { nodir: true });
+        for (const filePath of jsonFiles) {
+            try {
+                const content = await fs.readFile(filePath, 'utf-8');
+                const data = JSON.parse(content);
+                // Handle different JSON structures
+                if (Array.isArray(data)) {
+                    // Array of memory items
+                    for (const item of data) {
+                        if (typeof item === 'object' && item !== null) {
+                            const entry = extractFromJSON(item, filePath);
+                            if (entry) {
+                                entries.push(entry);
+                            }
+                            else {
+                                skipped++;
+                            }
+                        }
+                    }
+                }
+                else if (typeof data === 'object' && data !== null) {
+                    // Single memory object
+                    const entry = extractFromJSON(data, filePath);
+                    if (entry) {
+                        entries.push(entry);
+                    }
+                    else {
+                        skipped++;
+                    }
+                }
+            }
+            catch (fileError) {
+                errors.push(`${filePath}: ${fileError instanceof Error ? fileError.message : String(fileError)}`);
+                skipped++;
+            }
+        }
+    }
+    catch (dirError) {
+        errors.push(`Failed to access path: ${dirError instanceof Error ? dirError.message : String(dirError)}`);
+    }
+    return { entries, skipped, errors };
+}
+// ============================================================================
+// Helper Functions
+// ============================================================================
+/**
+ * Convert filename to readable title.
+ * activeContext -> Active Context
+ * system_patterns -> System Patterns
+ */
+function formatFilename(filename) {
+    return filename
+        // Handle camelCase
+        .replace(/([a-z])([A-Z])/g, '$1 $2')
+        // Handle snake_case
+        .replace(/_/g, ' ')
+        // Handle kebab-case
+        .replace(/-/g, ' ')
+        // Capitalize first letter of each word
+        .replace(/\b\w/g, c => c.toUpperCase());
+}
+/**
+ * Infer context from Cursor Memory Bank filename patterns.
+ */
+function inferContext(filename) {
+    const lower = filename.toLowerCase();
+    if (lower.includes('context'))
+        return 'context';
+    if (lower.includes('progress'))
+        return 'progress';
+    if (lower.includes('brief') || lower.includes('overview'))
+        return 'project';
+    if (lower.includes('pattern'))
+        return 'patterns';
+    if (lower.includes('decision'))
+        return 'decisions';
+    if (lower.includes('stack') || lower.includes('tech'))
+        return 'technology';
+    return 'cursor-memory';
+}
+/**
+ * Extract a ParsedEntry from a JSON object.
+ */
+function extractFromJSON(obj, filePath) {
+    // Try various common keys for title
+    const title = obj.title ||
+        obj.name ||
+        obj.key ||
+        path.basename(filePath, '.json');
+    // Try various common keys for content
+    const content = obj.content ||
+        obj.description ||
+        obj.text ||
+        obj.value ||
+        obj.memory;
+    if (!content) {
+        return null;
+    }
+    return {
+        title,
+        content,
+        context: obj.context || obj.category || obj.type,
+        intent: obj.intent,
+        source: 'cursor',
+        originalPath: filePath,
+    };
+}
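
For orientation, a minimal consumer of the Cursor Memory Bank parser added above could look like the following sketch (TypeScript). The relative import path and the logging are illustrative assumptions based on this diff's file layout, not part of the package's documented API.

    import { parseCursorMemory } from './parsers/cursor.js'; // path assumed from the layout above

    async function importCursorMemories(dir: string): Promise<void> {
        // Per-file problems do not throw; they are collected in `errors` and counted in `skipped`.
        const { entries, skipped, errors } = await parseCursorMemory(dir);
        console.log(`parsed ${entries.length} entries, skipped ${skipped}`);
        errors.forEach(err => console.warn(err));
        for (const entry of entries) {
            // source is always 'cursor'; context comes from frontmatter/JSON fields or is inferred from the filename.
            console.log(`${entry.title} [${entry.context ?? 'uncategorized'}]`);
        }
    }

    importCursorMemories('.cursor-memory').catch(console.error);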

@@ -0,0 +1,6 @@
+export type { ParsedEntry, ParseResult } from './types.js';
+export { parseJSONL } from './jsonl.js';
+export { parseMarkdown } from './markdown.js';
+export { parseCursorMemory } from './cursor.js';
+export { parseJSON } from './json.js';
+export { parseSQLite } from './sqlite.js';

@@ -0,0 +1,11 @@
+import type { ParseResult } from './types.js';
+/**
+ * Parse a JSON file containing an array of memory entries.
+ *
+ * Supports various common structures:
+ * - Array of objects with title/content
+ * - Array of objects with name/description
+ * - Object with entries array
+ * - Object with memories array
+ */
+export declare function parseJSON(filePath: string): Promise<ParseResult>;

@@ -0,0 +1,95 @@
+import * as fs from 'fs/promises';
+/**
+ * Parse a JSON file containing an array of memory entries.
+ *
+ * Supports various common structures:
+ * - Array of objects with title/content
+ * - Array of objects with name/description
+ * - Object with entries array
+ * - Object with memories array
+ */
+export async function parseJSON(filePath) {
+    const entries = [];
+    const errors = [];
+    let skipped = 0;
+    try {
+        const content = await fs.readFile(filePath, 'utf-8');
+        const data = JSON.parse(content);
+        // Handle different JSON structures
+        let items = [];
+        if (Array.isArray(data)) {
+            // Direct array of entries
+            items = data;
+        }
+        else if (typeof data === 'object' && data !== null) {
+            // Object with entries/memories/items array
+            if (Array.isArray(data.entries)) {
+                items = data.entries;
+            }
+            else if (Array.isArray(data.memories)) {
+                items = data.memories;
+            }
+            else if (Array.isArray(data.items)) {
+                items = data.items;
+            }
+            else if (Array.isArray(data.data)) {
+                items = data.data;
+            }
+            else {
+                // Single object - treat as one entry
+                items = [data];
+            }
+        }
+        for (let i = 0; i < items.length; i++) {
+            const item = items[i];
+            if (typeof item !== 'object' || item === null) {
+                skipped++;
+                continue;
+            }
+            // Extract title
+            const title = item.title || item.name || item.key || `Entry ${i + 1}`;
+            // Extract content
+            let entryContent = item.content ||
+                item.text ||
+                item.description ||
+                item.value ||
+                item.memory ||
+                item.observation;
+            // Handle observations array
+            if (!entryContent && item.observations && Array.isArray(item.observations)) {
+                entryContent = item.observations.join('\n\n');
+            }
+            if (!entryContent) {
+                skipped++;
+                continue;
+            }
+            // Extract context
+            let context;
+            if (item.context) {
+                context = item.context;
+            }
+            else if (item.category) {
+                context = item.category;
+            }
+            else if (item.type) {
+                context = item.type;
+            }
+            else if (item.tags && Array.isArray(item.tags)) {
+                context = item.tags.join(', ');
+            }
+            entries.push({
+                title: String(title),
+                content: String(entryContent),
+                context,
+                intent: item.intent ? String(item.intent) : undefined,
+                reasoning: item.reasoning ? String(item.reasoning) : undefined,
+                example: item.example ? String(item.example) : undefined,
+                source: 'json',
+            });
+        }
+    }
+    catch (error) {
+        errors.push(`Failed to parse JSON: ${error instanceof Error ? error.message : String(error)}`);
+    }
+    return { entries, skipped, errors };
+}
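
To make the accepted input shapes concrete, the sketch below builds one of the wrapper structures the parser looks for and reads it back. The file name and field values are invented for illustration.

    import { writeFile } from 'fs/promises';
    import { parseJSON } from './parsers/json.js'; // path assumed from the layout above

    // An object wrapping a "memories" array - one of the structures handled above.
    const fixture = {
        memories: [
            { title: 'API Rate Limits', content: 'Back off exponentially on 429s.', tags: ['api'] },
            { name: 'DB Pooling', description: 'Keep the pool at ten connections.', category: 'infra' },
            'plain strings are not objects, so this line is counted as skipped',
        ],
    };

    await writeFile('fixture.json', JSON.stringify(fixture));
    const { entries, skipped } = await parseJSON('fixture.json');
    // 2 entries, 1 skipped; titles come from `title`/`name`, context from joined `tags` or `category`.
    console.log(entries.length, skipped);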

@@ -0,0 +1,14 @@
+import type { ParseResult } from './types.js';
+/**
+ * Parse a JSONL file. Supports multiple formats:
+ *
+ * mcp-knowledge-graph:
+ * {"type":"entity","name":"Topic","observations":["fact 1","fact 2"]}
+ *
+ * Generic memory formats:
+ * {"title":"Topic","content":"..."}
+ * {"content":"...","timestamp":"..."}
+ * {"text":"...","metadata":{}}
+ * {"memory":"...","created_at":"..."}
+ */
+export declare function parseJSONL(filePath: string): Promise<ParseResult>;

@@ -0,0 +1,85 @@
+import * as fs from 'fs/promises';
+/**
+ * Parse a JSONL file. Supports multiple formats:
+ *
+ * mcp-knowledge-graph:
+ * {"type":"entity","name":"Topic","observations":["fact 1","fact 2"]}
+ *
+ * Generic memory formats:
+ * {"title":"Topic","content":"..."}
+ * {"content":"...","timestamp":"..."}
+ * {"text":"...","metadata":{}}
+ * {"memory":"...","created_at":"..."}
+ */
+export async function parseJSONL(filePath) {
+    const entries = [];
+    const errors = [];
+    let skipped = 0;
+    try {
+        const content = await fs.readFile(filePath, 'utf-8');
+        const lines = content.trim().split('\n').filter(line => line.trim());
+        for (let i = 0; i < lines.length; i++) {
+            const line = lines[i];
+            try {
+                const item = JSON.parse(line);
+                // Skip internal markers and relations
+                if (item.type === '_aim' || item.type === 'relation') {
+                    skipped++;
+                    continue;
+                }
+                // Extract title (try multiple fields)
+                const title = item.name || item.title || item.key || `Entry ${i + 1}`;
+                // Extract content (try multiple fields)
+                let entryContent;
+                // Check observations array first (mcp-knowledge-graph)
+                if (item.observations && Array.isArray(item.observations) && item.observations.length > 0) {
+                    entryContent = item.observations.join('\n\n');
+                }
+                // Fall back to common content fields
+                if (!entryContent) {
+                    entryContent = item.content || item.text || item.description ||
+                        item.value || item.memory || item.observation || item.body;
+                }
+                // Skip if no content found
+                if (!entryContent) {
+                    skipped++;
+                    continue;
+                }
+                // Extract context
+                let context;
+                if (item.entityType) {
+                    context = item.entityType;
+                }
+                else if (item.context) {
+                    context = item.context;
+                }
+                else if (item.category) {
+                    context = item.category;
+                }
+                else if (item.type && item.type !== 'entity' && item.type !== 'memory') {
+                    context = item.type;
+                }
+                else if (item.tags && Array.isArray(item.tags)) {
+                    context = item.tags.join(', ');
+                }
+                entries.push({
+                    title: String(title),
+                    content: String(entryContent),
+                    context,
+                    intent: item.intent ? String(item.intent) : undefined,
+                    reasoning: item.reasoning ? String(item.reasoning) : undefined,
+                    example: item.example ? String(item.example) : undefined,
+                    source: 'jsonl',
+                });
+            }
+            catch (parseError) {
+                errors.push(`Line ${i + 1}: Invalid JSON - ${parseError instanceof Error ? parseError.message : String(parseError)}`);
+                skipped++;
+            }
+        }
+    }
+    catch (fileError) {
+        errors.push(`Failed to read file: ${fileError instanceof Error ? fileError.message : String(fileError)}`);
+    }
+    return { entries, skipped, errors };
+}
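
A small fixture also illustrates the JSONL shapes handled above and the lines that are skipped on purpose; the file name is hypothetical.

    import { writeFile } from 'fs/promises';
    import { parseJSONL } from './parsers/jsonl.js'; // path assumed from the layout above

    const lines = [
        // mcp-knowledge-graph entity: observations are joined, entityType becomes the context.
        '{"type":"entity","name":"Deploy Checklist","entityType":"process","observations":["Run migrations first","Then restart workers"]}',
        // Relation lines (and "_aim" markers) are skipped by design.
        '{"type":"relation","from":"Deploy Checklist","to":"CI"}',
        // Generic memory line.
        '{"title":"Timeouts","content":"The HTTP client timeout is 30s."}',
    ].join('\n');

    await writeFile('memory.jsonl', lines);
    const { entries, skipped, errors } = await parseJSONL('memory.jsonl');
    console.log(entries.length, skipped, errors.length); // 2 1 0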

@@ -0,0 +1,15 @@
+import type { ParseResult } from './types.js';
+/**
+ * Parse a folder of markdown files (Basic Memory MCP / Obsidian / any .md).
+ *
+ * Input format:
+ * ---
+ * title: API Rate Limits
+ * tags: [api, performance]
+ * ---
+ *
+ * # API Rate Limits
+ *
+ * Always implement exponential backoff...
+ */
+export declare function parseMarkdown(dirPath: string): Promise<ParseResult>;

@@ -0,0 +1,77 @@
+import * as fs from 'fs/promises';
+import * as path from 'path';
+import { glob } from 'glob';
+import matter from 'gray-matter';
+/**
+ * Parse a folder of markdown files (Basic Memory MCP / Obsidian / any .md).
+ *
+ * Input format:
+ * ---
+ * title: API Rate Limits
+ * tags: [api, performance]
+ * ---
+ *
+ * # API Rate Limits
+ *
+ * Always implement exponential backoff...
+ */
+export async function parseMarkdown(dirPath) {
+    const entries = [];
+    const errors = [];
+    let skipped = 0;
+    try {
+        // Handle both single file and directory
+        const stats = await fs.stat(dirPath);
+        const files = stats.isDirectory()
+            ? await glob(path.join(dirPath, '**/*.md'), { nodir: true })
+            : [dirPath];
+        for (const filePath of files) {
+            try {
+                const content = await fs.readFile(filePath, 'utf-8');
+                const { data, content: body } = matter(content);
+                const frontmatter = data;
+                // Skip empty files
+                const trimmedBody = body.trim();
+                if (!trimmedBody) {
+                    skipped++;
+                    continue;
+                }
+                // Extract title from frontmatter, H1, or filename
+                let title = frontmatter.title;
+                if (!title) {
+                    const headingMatch = trimmedBody.match(/^#\s+(.+)$/m);
+                    if (headingMatch) {
+                        title = headingMatch[1].trim();
+                    }
+                    else {
+                        title = path.basename(filePath, '.md').replace(/-/g, ' ');
+                    }
+                }
+                // Extract context from tags or context field
+                let context;
+                if (frontmatter.context) {
+                    context = frontmatter.context;
+                }
+                else if (frontmatter.tags && Array.isArray(frontmatter.tags)) {
+                    context = frontmatter.tags.join(', ');
+                }
+                entries.push({
+                    title,
+                    content: trimmedBody,
+                    context,
+                    intent: frontmatter.intent,
+                    source: 'markdown',
+                    originalPath: filePath,
+                });
+            }
+            catch (fileError) {
+                errors.push(`${filePath}: ${fileError instanceof Error ? fileError.message : String(fileError)}`);
+                skipped++;
+            }
+        }
+    }
+    catch (dirError) {
+        errors.push(`Failed to access path: ${dirError instanceof Error ? dirError.message : String(dirError)}`);
+    }
+    return { entries, skipped, errors };
+}
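
As a usage sketch (paths invented): the same entry point accepts either a directory, which is globbed recursively for markdown files, or a single .md file.

    import { parseMarkdown } from './parsers/markdown.js'; // path assumed from the layout above

    const fromDir = await parseMarkdown('./notes');                      // walks **/*.md under ./notes
    const fromFile = await parseMarkdown('./notes/api-rate-limits.md');  // parses one file as-is

    // The title falls back from frontmatter `title` to the first H1 heading to the filename.
    console.log(fromDir.entries.length, fromFile.entries[0]?.title);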

@@ -0,0 +1,8 @@
+import type { ParseResult } from './types.js';
+/**
+ * Parse a SQLite database file for memory entries.
+ *
+ * Auto-detects table and column names from common patterns.
+ * Supports mcp-memory-service, SQLite-vec, and custom schemas.
+ */
+export declare function parseSQLite(filePath: string): Promise<ParseResult>;

@@ -0,0 +1,123 @@
+import Database from 'better-sqlite3';
+// ============================================================================
+// SQLite Parser - mcp-memory-service, SQLite-vec, custom databases
+// ============================================================================
+// Common table names for memory storage
+const TABLE_CANDIDATES = ['memories', 'entries', 'knowledge', 'notes', 'items', 'documents'];
+// Common column names for different fields
+const TITLE_CANDIDATES = ['title', 'name', 'key', 'id', 'subject', 'heading'];
+const CONTENT_CANDIDATES = ['content', 'text', 'value', 'memory', 'observation', 'body', 'description', 'note'];
+const CONTEXT_CANDIDATES = ['context', 'category', 'type', 'tags', 'topic', 'area'];
+/**
+ * Parse a SQLite database file for memory entries.
+ *
+ * Auto-detects table and column names from common patterns.
+ * Supports mcp-memory-service, SQLite-vec, and custom schemas.
+ */
+export async function parseSQLite(filePath) {
+    const entries = [];
+    const errors = [];
+    let skipped = 0;
+    let db = null;
+    try {
+        db = new Database(filePath, { readonly: true });
+        // Find a suitable table
+        const tables = db
+            .prepare("SELECT name FROM sqlite_master WHERE type='table'")
+            .all();
+        const tableNames = tables.map((t) => t.name.toLowerCase());
+        const targetTable = TABLE_CANDIDATES.find((candidate) => tableNames.includes(candidate));
+        if (!targetTable) {
+            errors.push(`No memory table found. Available tables: ${tableNames.join(', ')}. ` +
+                `Expected one of: ${TABLE_CANDIDATES.join(', ')}`);
+            return { entries, skipped, errors };
+        }
+        // Get column info for the target table
+        const columns = db.prepare(`PRAGMA table_info(${targetTable})`).all();
+        const columnNames = columns.map((c) => c.name.toLowerCase());
+        // Find content column (required)
+        const contentColumn = CONTENT_CANDIDATES.find((candidate) => columnNames.includes(candidate));
+        if (!contentColumn) {
+            errors.push(`No content column found in table "${targetTable}". ` +
+                `Available columns: ${columnNames.join(', ')}. ` +
+                `Expected one of: ${CONTENT_CANDIDATES.join(', ')}`);
+            return { entries, skipped, errors };
+        }
+        // Find optional columns
+        const titleColumn = TITLE_CANDIDATES.find((candidate) => columnNames.includes(candidate));
+        const contextColumn = CONTEXT_CANDIDATES.find((candidate) => columnNames.includes(candidate));
+        // Build and execute query
+        const selectColumns = [
+            contentColumn,
+            titleColumn,
+            contextColumn,
+        ].filter(Boolean);
+        const query = `SELECT ${selectColumns.join(', ')} FROM ${targetTable}`;
+        const rows = db.prepare(query).all();
+        for (let i = 0; i < rows.length; i++) {
+            const row = rows[i];
+            // Extract content
+            const content = row[contentColumn];
+            if (!content || typeof content !== 'string' || !content.trim()) {
+                skipped++;
+                continue;
+            }
+            // Extract title
+            let title;
+            if (titleColumn && row[titleColumn]) {
+                title = String(row[titleColumn]);
+            }
+            else {
+                // Generate title from content
+                title = content.slice(0, 60).replace(/\s+/g, ' ').trim();
+                if (content.length > 60)
+                    title += '...';
+            }
+            // Extract context
+            let context;
+            if (contextColumn && row[contextColumn]) {
+                const contextValue = row[contextColumn];
+                if (typeof contextValue === 'string') {
+                    context = contextValue;
+                }
+                else if (Array.isArray(contextValue)) {
+                    context = contextValue.join(', ');
+                }
+            }
+            entries.push({
+                title,
+                content: content.trim(),
+                context,
+                source: 'sqlite',
+                originalPath: `${filePath}:${targetTable}`,
+            });
+        }
+    }
+    catch (error) {
+        if (error instanceof Error) {
+            if (error.message.includes('SQLITE_CANTOPEN')) {
+                errors.push(`Cannot open database file: ${filePath}`);
+            }
+            else if (error.message.includes('file is not a database')) {
+                errors.push(`File is not a valid SQLite database: ${filePath}`);
+            }
+            else {
+                errors.push(`SQLite error: ${error.message}`);
+            }
+        }
+        else {
+            errors.push(`Unknown error: ${String(error)}`);
+        }
+    }
+    finally {
+        if (db) {
+            try {
+                db.close();
+            }
+            catch {
+                // Ignore close errors
+            }
+        }
+    }
+    return { entries, skipped, errors };
+}
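
The auto-detection can be exercised against a throwaway database. Everything below (file name, schema, row) is invented to match one of the recognized patterns, not a schema shipped with the package.

    import Database from 'better-sqlite3';
    import { parseSQLite } from './parsers/sqlite.js'; // path assumed from the layout above

    // "memories" is in TABLE_CANDIDATES; "content", "title" and "tags" are recognized columns.
    const db = new Database('memories.db');
    db.exec('CREATE TABLE memories (id INTEGER PRIMARY KEY, title TEXT, content TEXT, tags TEXT)');
    db.prepare('INSERT INTO memories (title, content, tags) VALUES (?, ?, ?)')
        .run('Retry Policy', 'Retry idempotent requests at most three times.', 'http, resilience');
    db.close();

    const { entries, errors } = await parseSQLite('memories.db');
    // The title column is used directly and the tags column is picked up as the context.
    console.log(entries[0]?.title, entries[0]?.context, errors.length);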

@@ -0,0 +1,21 @@
+/**
+ * A parsed memory entry ready to be converted to Librarian format.
+ */
+export interface ParsedEntry {
+    title: string;
+    content: string;
+    context?: string;
+    intent?: string;
+    reasoning?: string;
+    example?: string;
+    source: 'jsonl' | 'markdown' | 'cursor' | 'json' | 'sqlite';
+    originalPath?: string;
+}
+/**
+ * Result of a parse operation.
+ */
+export interface ParseResult {
+    entries: ParsedEntry[];
+    skipped: number;
+    errors: string[];
+}
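
For reference, an object satisfying the ParsedEntry contract declared above might look like this; the values are illustrative only.

    import type { ParsedEntry } from './parsers/types.js'; // path assumed from the layout above

    export const example: ParsedEntry = {
        title: 'API Rate Limits',
        content: 'Always implement exponential backoff on 429 responses.',
        context: 'api, performance',
        source: 'markdown', // one of 'jsonl' | 'markdown' | 'cursor' | 'json' | 'sqlite'
        originalPath: './notes/api-rate-limits.md',
        // intent, reasoning and example are optional and omitted here.
    };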

@@ -0,0 +1,26 @@
+import type { LibraryEntry } from './schemas.js';
+/**
+ * Score how well an entry matches a search term.
+ */
+export declare function scoreMatch(entry: LibraryEntry, searchTerm: string): number;
+/**
+ * Detect potential conflicts between entries.
+ * Returns true if entries on the same topic give contradictory advice.
+ */
+export declare function detectConflict(entries: LibraryEntry[]): boolean;
+/**
+ * Generate a conflict summary.
+ */
+export declare function generateConflictSummary(entries: LibraryEntry[]): string;
+/**
+ * Filter entries by topic.
+ */
+export declare function filterByTopic(entries: LibraryEntry[], topic: string): LibraryEntry[];
+/**
+ * Sort entries by relevance to search term.
+ */
+export declare function sortByRelevance(entries: LibraryEntry[], searchTerm: string): LibraryEntry[];
+/**
+ * Group entries by source.
+ */
+export declare function groupBySource(entries: LibraryEntry[]): Record<string, LibraryEntry[]>;
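
Finally, a sketch of how the query helpers declared above might compose. loadEntries is a hypothetical stand-in for however a LibraryEntry[] is obtained, since that part of the API is outside this hunk.

    import { filterByTopic, sortByRelevance, detectConflict, generateConflictSummary } from './query.js';
    import type { LibraryEntry } from './schemas.js';

    // Hypothetical source of entries - not part of the declarations shown above.
    declare function loadEntries(): Promise<LibraryEntry[]>;

    const entries = await loadEntries();
    const hits = sortByRelevance(filterByTopic(entries, 'caching'), 'redis ttl');
    if (detectConflict(hits)) {
        // Entries on the same topic disagree; surface the generated summary.
        console.warn(generateConflictSummary(hits));
    }
    console.log(hits.slice(0, 3));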