@getlore/cli 0.5.2 → 0.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +12 -4
- package/README.md +66 -5
- package/dist/cli/commands/sync.js +4 -1
- package/dist/core/git.js +36 -4
- package/dist/core/vector-store.d.ts +13 -0
- package/dist/core/vector-store.js +28 -3
- package/dist/mcp/handlers/research-agent.d.ts +2 -1
- package/dist/mcp/handlers/research-agent.js +37 -7
- package/dist/mcp/handlers/research.d.ts +19 -0
- package/dist/mcp/handlers/research.js +144 -3
- package/dist/mcp/handlers/sync.d.ts +2 -0
- package/dist/mcp/handlers/sync.js +70 -3
- package/dist/mcp/server.js +28 -5
- package/dist/mcp/tools.js +16 -2
- package/dist/sync/process.d.ts +8 -0
- package/dist/sync/process.js +77 -17
- package/dist/sync/processors.d.ts +7 -0
- package/dist/sync/processors.js +95 -1
- package/dist/tui/browse-handlers.js +71 -32
- package/dist/tui/browse-render.js +28 -12
- package/dist/tui/browse-types.d.ts +1 -0
- package/package.json +3 -2
package/LICENSE
CHANGED
@@ -1,8 +1,16 @@
-
+MIT License
 
-
-
-
+Copyright (c) 2026 Mishkin Faustini
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
 
 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,

package/README.md
CHANGED
@@ -1,6 +1,8 @@
 # Lore
 
-> The lore behind your projects.
+> The lore behind your projects. — [getlore.ai](https://getlore.ai) · [npm](https://www.npmjs.com/package/@getlore/cli)
+
+Every project accumulates lore — the decisions, conversations, research, and context that explain why things are the way they are. Most of it gets lost between chat threads and forgotten docs. Lore keeps it searchable and citable.
 
 A research knowledge repository with **semantic search** and **citations**. Unlike memory systems that store processed facts, Lore preserves your original sources and lets you cite exactly what was said, by whom, and when.
 
@@ -31,14 +33,23 @@ lore search "user pain points"
 
 ## MCP Configuration
 
-
+**One-click install:**
+
+[](https://cursor.com/en-US/install-mcp?name=lore&config=eyJjb21tYW5kIjoibnB4IiwiYXJncyI6WyIteSIsIkBnZXRsb3JlL2NsaSIsIm1jcCJdfQ%3D%3D)
+[](https://insiders.vscode.dev/redirect/mcp/install?name=lore&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%40getlore%2Fcli%22%2C%22mcp%22%5D%7D)
+[](https://insiders.vscode.dev/redirect/mcp/install?name=lore&config=%7B%22command%22%3A%22npx%22%2C%22args%22%3A%5B%22-y%22%2C%22%40getlore%2Fcli%22%2C%22mcp%22%5D%7D&quality=insiders)
+[](goose://extension?cmd=npx&arg=-y&arg=%40getlore%2Fcli&arg=mcp&timeout=300&id=lore-mcp&name=Lore&description=Research%20knowledge%20repository%20with%20semantic%20search%20and%20citations)
+
+After installing, run `npx @getlore/cli setup` to configure API keys and sign in.
+
+**Manual config** — add to your MCP client config (`.mcp.json`, `.cursor/mcp.json`, etc.):
 
 ```json
 {
   "mcpServers": {
     "lore": {
-      "command": "
-      "args": ["mcp"]
+      "command": "npx",
+      "args": ["-y", "@getlore/cli", "mcp"]
     }
   }
 }
@@ -75,6 +86,56 @@ If the MCP host doesn't inherit your shell environment (e.g. Claude Desktop), ad
 
 Same content on different machines produces the same hash — no duplicate processing.
 
+## Agent Platform Install
+
+Lore works with any agent that supports MCP. Use `lore skills install` or install directly from your platform's registry.
+
+### Claude Code
+
+```bash
+# From plugin directory (once approved)
+/plugin install lore
+
+# Or install directly from GitHub
+/plugin install https://github.com/getlore-ai/lore/tree/main/plugins/claude-code
+
+# Or via Lore CLI
+lore skills install claude-code
+```
+
+### Gemini CLI
+
+```bash
+# From Extensions Gallery
+gemini extensions install lore
+
+# Or install directly from GitHub
+gemini extensions install https://github.com/getlore-ai/lore --path plugins/gemini
+
+# Or via Lore CLI
+lore skills install gemini
+```
+
+### Codex CLI
+
+```bash
+# Add MCP server
+codex mcp add lore -- npx -y @getlore/cli mcp
+
+# Install skill
+lore skills install codex
+```
+
+### OpenClaw
+
+```bash
+# From ClawHub
+clawhub install lore
+
+# Or via Lore CLI
+lore skills install openclaw
+```
+
 ## License
 
-
+MIT

package/dist/cli/commands/sync.js
CHANGED

@@ -205,7 +205,7 @@ export function registerSyncCommand(program, defaultDataDir) {
 console.log(` ... and ${result.processing.titles.length - 10} more`);
 }
 if (result.processing.errors > 0) {
-console.log(`
+console.log(` ⚠ ${result.processing.errors} file(s) failed to process (check logs above)`);
 }
 }
 if (result.sources_found > 0 || result.sources_indexed > 0) {
@@ -214,6 +214,9 @@ export function registerSyncCommand(program, defaultDataDir) {
 console.log(` Newly indexed: ${result.sources_indexed}`);
 console.log(` Already indexed: ${result.already_indexed}`);
 }
+if (result.reconciled > 0) {
+console.log(`\nReconciled ${result.reconciled} source(s) missing local content`);
+}
 if (result.git_pushed) {
 console.log('\n✓ Pushed changes to git');
 }

package/dist/core/git.js
CHANGED
@@ -51,11 +51,43 @@ export async function gitPull(dir) {
 if (!(await hasRemote(dir))) {
 return { success: false, error: 'No remote configured' };
 }
-// Stash any local changes
-
+// Stash any local changes before pulling
+let didStash = false;
+if (await hasChanges(dir)) {
+try {
+const { stdout: stashOut } = await execAsync('git stash', { cwd: dir });
+didStash = !stashOut.includes('No local changes');
+}
+catch (stashErr) {
+console.error(`[git] Stash failed: ${stashErr}`);
+}
+}
 // Pull with rebase
-
-
+let pullOutput;
+try {
+const { stdout } = await execAsync('git pull --rebase', { cwd: dir });
+pullOutput = stdout;
+}
+catch (pullErr) {
+// Restore stashed changes before returning error
+if (didStash) {
+await execAsync('git stash pop', { cwd: dir }).catch((popErr) => {
+console.error(`[git] Stash pop failed after pull error: ${popErr}`);
+});
+}
+throw pullErr;
+}
+// Restore stashed changes after successful pull
+if (didStash) {
+try {
+await execAsync('git stash pop', { cwd: dir });
+}
+catch (popErr) {
+console.error(`[git] Stash pop failed (possible conflict): ${popErr}`);
+// Don't fail the pull — stashed content is still in `git stash list`
+}
+}
+const pulled = !pullOutput.includes('Already up to date');
 return {
 success: true,
 message: pulled ? 'Pulled new changes' : 'Already up to date'

package/dist/core/vector-store.d.ts
CHANGED

@@ -79,6 +79,7 @@ export declare function getAllSources(_dbPath: string, options?: {
 project?: string;
 source_type?: SourceType;
 limit?: number;
+sort_by?: 'indexed_at' | 'created_at';
 }): Promise<Array<{
 id: string;
 title: string;
@@ -86,8 +87,19 @@ export declare function getAllSources(_dbPath: string, options?: {
 content_type: ContentType;
 projects: string[];
 created_at: string;
+indexed_at: string;
 summary: string;
 }>>;
+/**
+ * Get all sources that have a source_path set.
+ * Used by reconciliation to ensure local content.md files exist.
+ */
+export declare function getSourcesWithPaths(_dbPath: string): Promise<Array<{
+id: string;
+title: string;
+summary: string;
+source_path: string;
+}>>;
 export declare function getSourceById(_dbPath: string, sourceId: string): Promise<{
 id: string;
 title: string;
@@ -101,6 +113,7 @@ export declare function getSourceById(_dbPath: string, sourceId: string): Promis
 quotes: Quote[];
 source_url?: string;
 source_name?: string;
+source_path?: string;
 } | null>;
 export declare function deleteSource(_dbPath: string, sourceId: string): Promise<{
 deleted: boolean;

package/dist/core/vector-store.js
CHANGED

@@ -321,12 +321,12 @@ export async function searchSources(_dbPath, queryVector, options = {}) {
 // Retrieval Operations
 // ============================================================================
 export async function getAllSources(_dbPath, options = {}) {
-const { project, source_type, limit } = options;
+const { project, source_type, limit, sort_by = 'indexed_at' } = options;
 const client = await getSupabase();
 let query = client
 .from('sources')
-.select('id, title, source_type, content_type, projects, created_at, summary')
-.order(
+.select('id, title, source_type, content_type, projects, created_at, indexed_at, summary')
+.order(sort_by, { ascending: false });
 if (source_type) {
 query = query.eq('source_type', source_type);
 }
@@ -348,9 +348,33 @@ export async function getAllSources(_dbPath, options = {}) {
 content_type: row.content_type,
 projects: row.projects,
 created_at: row.created_at,
+indexed_at: row.indexed_at || row.created_at,
 summary: row.summary,
 }));
 }
+/**
+ * Get all sources that have a source_path set.
+ * Used by reconciliation to ensure local content.md files exist.
+ */
+export async function getSourcesWithPaths(_dbPath) {
+const client = await getSupabase();
+const { data, error } = await client
+.from('sources')
+.select('id, title, summary, source_path')
+.not('source_path', 'is', null);
+if (error) {
+console.error('Error getting sources with paths:', error);
+return [];
+}
+return (data || [])
+.filter((row) => row.source_path)
+.map((row) => ({
+id: row.id,
+title: row.title,
+summary: row.summary || '',
+source_path: row.source_path,
+}));
+}
 export async function getSourceById(_dbPath, sourceId) {
 const client = await getSupabase();
 const { data, error } = await client
@@ -375,6 +399,7 @@ export async function getSourceById(_dbPath, sourceId) {
 quotes: data.quotes_json || [],
 source_url: data.source_url || undefined,
 source_name: data.source_name || undefined,
+source_path: data.source_path || undefined,
 };
 }
 export async function deleteSource(_dbPath, sourceId) {

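A minimal usage sketch of the new `sort_by` option and `getSourcesWithPaths` helper, based only on the declarations shown above; the deep-import path, the `dbPath` value, and the reconciliation check are illustrative assumptions, not code from the package:

```typescript
// Sketch only: the dist/ deep-import path is an assumption; the package's
// "exports" map may not expose it.
import { getAllSources, getSourcesWithPaths } from '@getlore/cli/dist/core/vector-store.js';
import { existsSync } from 'node:fs';

// Hypothetical value; the _dbPath parameter name suggests it is unused by the Supabase-backed store.
const dbPath = '/tmp/lore.db';

// List the 20 most recently indexed sources (new sort_by option, defaults to 'indexed_at').
const recent = await getAllSources(dbPath, { limit: 20, sort_by: 'indexed_at' });
console.log(recent.map((s) => `${s.indexed_at}  ${s.title}`).join('\n'));

// Reconciliation-style check: which sources are missing their local content file?
const withPaths = await getSourcesWithPaths(dbPath);
const missing = withPaths.filter((s) => !existsSync(s.source_path));
console.log(`${missing.length} source(s) missing local content`);
```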
package/dist/mcp/handlers/research-agent.d.ts
CHANGED

@@ -8,6 +8,7 @@
 * 4. Synthesizes findings into a comprehensive research package
 */
 import type { ResearchPackage } from '../../core/types.js';
+import type { ProgressCallback } from './research.js';
 interface ResearchAgentArgs {
 task: string;
 project?: string;
@@ -17,5 +18,5 @@ interface ResearchAgentArgs {
 /**
 * Run the agentic research
 */
-export declare function runResearchAgent(dbPath: string, dataDir: string, args: ResearchAgentArgs): Promise<ResearchPackage>;
+export declare function runResearchAgent(dbPath: string, dataDir: string, args: ResearchAgentArgs, onProgress?: ProgressCallback): Promise<ResearchPackage>;
 export {};

package/dist/mcp/handlers/research-agent.js
CHANGED

@@ -232,7 +232,7 @@ Now begin your research. Use the tools iteratively until you have comprehensive
 /**
 * Run the agentic research
 */
-export async function runResearchAgent(dbPath, dataDir, args) {
+export async function runResearchAgent(dbPath, dataDir, args, onProgress) {
 const { task, project, include_sources = true } = args;
 // Load archived projects to filter (extract just the project names)
 const archivedProjectsData = await loadArchivedProjects(dataDir);
@@ -245,6 +245,8 @@ export async function runResearchAgent(dbPath, dataDir, args) {
 let lastAssistantMessage = '';
 try {
 // Run the agent
+let turnCount = 0;
+await onProgress?.(5, undefined, 'Starting research agent...');
 for await (const message of query({
 prompt: `Research task: ${task}${project ? ` (project: ${project})` : ''}`,
 options: {
@@ -261,8 +263,9 @@ export async function runResearchAgent(dbPath, dataDir, args) {
 permissionMode: 'acceptEdits', // Auto-approve tool calls
 },
 })) {
-// Capture assistant messages
+// Capture assistant messages and extract tool call details
 if (message.type === 'assistant') {
+turnCount++;
 const msg = message;
 if (msg.message?.content) {
 const content = msg.message.content;
@@ -270,9 +273,30 @@ export async function runResearchAgent(dbPath, dataDir, args) {
 lastAssistantMessage = content;
 }
 else if (Array.isArray(content)) {
-
-
-
+// Extract tool_use blocks to report what the agent is doing
+for (const block of content) {
+if (block.type === 'tool_use') {
+const input = block.input;
+const toolShort = block.name.replace('mcp__lore-tools__', '');
+if (toolShort === 'search' && input.query) {
+await onProgress?.(0, undefined, `Searching: "${input.query}"`);
+}
+else if (toolShort === 'get_source' && input.source_id) {
+await onProgress?.(0, undefined, `Reading source: ${input.source_id}`);
+}
+else if (toolShort === 'list_sources') {
+const filter = input.project ? ` (project: ${input.project})` : '';
+await onProgress?.(0, undefined, `Listing sources${filter}`);
+}
+}
+else if (block.type === 'text' && block.text) {
+lastAssistantMessage = block.text;
+// Send a brief snippet of agent reasoning
+const snippet = block.text.substring(0, 120).replace(/\n/g, ' ');
+if (snippet.length > 10) {
+await onProgress?.(0, undefined, `Agent thinking: ${snippet}...`);
+}
+}
 }
 }
 }
@@ -282,16 +306,22 @@ export async function runResearchAgent(dbPath, dataDir, args) {
 const msg = message;
 if (msg.subtype === 'success' && msg.result) {
 lastAssistantMessage = msg.result;
+await onProgress?.(0, undefined, `Research complete (${msg.num_turns} turns)`);
 console.error(`[research-agent] Completed in ${msg.num_turns} turns`);
 }
 else if (msg.subtype?.startsWith('error')) {
 console.error(`[research-agent] Error: ${msg.subtype}`, msg.errors);
 }
 }
-// Log tool
+// Log tool results via the summary message
 if (message.type === 'tool_use_summary') {
 const msg = message;
-
+if (msg.summary) {
+// The summary often contains "Found X results" or similar
+const summarySnippet = msg.summary.substring(0, 150).replace(/\n/g, ' ');
+await onProgress?.(0, undefined, `Result: ${summarySnippet}`);
+}
+console.error(`[research-agent] Tool complete (turn ${turnCount})`);
 }
 }
 // Parse the final result from the agent's output

package/dist/mcp/handlers/research.d.ts
CHANGED

@@ -6,17 +6,36 @@
 * 2. SIMPLE (fallback): Single-pass search + GPT-4o-mini synthesis
 *
 * Set LORE_RESEARCH_MODE=simple to use the fallback mode.
+ *
+ * MCP integration: Research runs asynchronously. The `research` tool returns
+ * immediately with a job_id. Use `research_status` to poll for results.
 */
 import type { ResearchPackage } from '../../core/types.js';
+/**
+ * Start research asynchronously and return a job ID immediately.
+ */
+export declare function startResearchJob(dbPath: string, dataDir: string, args: ResearchArgs, options?: {
+hookContext?: {
+mode: 'mcp' | 'cli';
+};
+onProgress?: ProgressCallback;
+}): {
+job_id: string;
+status: string;
+message: string;
+};
+export declare function getResearchJobStatus(jobId: string): Promise<Record<string, unknown>>;
 interface ResearchArgs {
 task: string;
 project?: string;
 content_type?: string;
 include_sources?: boolean;
 }
+export type ProgressCallback = (progress: number, total?: number, message?: string) => Promise<void>;
 export declare function handleResearch(dbPath: string, dataDir: string, args: ResearchArgs, options?: {
 hookContext?: {
 mode: 'mcp' | 'cli';
 };
+onProgress?: ProgressCallback;
 }): Promise<ResearchPackage>;
 export {};

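These declarations describe the new asynchronous research flow end to end: `startResearchJob` returns a `job_id` immediately, and `getResearchJobStatus` long-polls for the result. A hedged sketch of a caller driving that loop follows; only the two signatures come from the declarations above, while the import path, the `dbPath`/`dataDir` values, and the polling cadence are assumptions:

```typescript
// Sketch under assumptions: the deep-import path and the paths below are illustrative.
import { startResearchJob, getResearchJobStatus } from '@getlore/cli/dist/mcp/handlers/research.js';

const dbPath = '/tmp/lore.db';      // hypothetical
const dataDir = '/tmp/lore-data';   // hypothetical

const { job_id, message } = startResearchJob(dbPath, dataDir, {
  task: 'Why did we choose Supabase for the vector store?',
});
console.log(message);

// getResearchJobStatus long-polls for up to ~20s per call, so a plain loop suffices.
let status = await getResearchJobStatus(job_id);
while (status.status === 'running') {
  console.log(`still running: ${status.elapsed_seconds}s elapsed`);
  status = await getResearchJobStatus(job_id);
}

if (status.status === 'complete') {
  console.log(status.result); // the ResearchPackage
} else {
  console.error(status.error ?? 'research job not found');
}
```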
package/dist/mcp/handlers/research.js
CHANGED

@@ -6,13 +6,145 @@
 * 2. SIMPLE (fallback): Single-pass search + GPT-4o-mini synthesis
 *
 * Set LORE_RESEARCH_MODE=simple to use the fallback mode.
+ *
+ * MCP integration: Research runs asynchronously. The `research` tool returns
+ * immediately with a job_id. Use `research_status` to poll for results.
 */
 import OpenAI from 'openai';
+import { randomUUID } from 'crypto';
 import { searchSources } from '../../core/vector-store.js';
 import { generateEmbedding } from '../../core/embedder.js';
 import { loadArchivedProjects } from './archive-project.js';
 import { runResearchAgent } from './research-agent.js';
 import { getExtensionRegistry } from '../../extensions/registry.js';
+const jobStore = new Map();
+// Clean up old jobs after 10 minutes
+const JOB_TTL_MS = 10 * 60 * 1000;
+function cleanOldJobs() {
+const now = Date.now();
+for (const [id, job] of jobStore) {
+const startTime = new Date(job.startedAt).getTime();
+if (now - startTime > JOB_TTL_MS) {
+jobStore.delete(id);
+}
+}
+}
+/**
+ * Start research asynchronously and return a job ID immediately.
+ */
+export function startResearchJob(dbPath, dataDir, args, options = {}) {
+cleanOldJobs();
+const jobId = randomUUID();
+const now = new Date().toISOString();
+const job = {
+id: jobId,
+task: args.task,
+project: args.project,
+status: 'running',
+startedAt: now,
+lastActivityAt: now,
+activity: ['Starting research...'],
+};
+jobStore.set(jobId, job);
+// Fire and forget — runs in the background
+handleResearch(dbPath, dataDir, args, {
+...options,
+onProgress: async (_p, _t, message) => {
+const j = jobStore.get(jobId);
+if (j && message) {
+j.activity.push(message);
+j.lastActivityAt = new Date().toISOString();
+}
+},
+})
+.then((result) => {
+const j = jobStore.get(jobId);
+if (j) {
+j.status = 'complete';
+j.completedAt = new Date().toISOString();
+j.result = result;
+j.activity.push('Research complete');
+}
+})
+.catch((err) => {
+const j = jobStore.get(jobId);
+if (j) {
+j.status = 'error';
+j.completedAt = new Date().toISOString();
+j.error = err instanceof Error ? err.message : String(err);
+j.activity.push(`Failed: ${j.error}`);
+}
+})
+.catch((err) => {
+// Final safety net for errors in the handlers above
+console.error(`[research] Critical error in job ${jobId}:`, err);
+});
+return {
+job_id: jobId,
+status: 'running',
+message: `Research started for: "${args.task}". Poll research_status with job_id "${jobId}" every 15-20 seconds. This typically takes 2-8 minutes — do not abandon early.`,
+};
+}
+/**
+ * Check status of a research job.
+ * Long-polls for up to POLL_WAIT_MS, returning early if the job completes.
+ */
+const POLL_WAIT_MS = 20_000;
+const POLL_INTERVAL_MS = 1_000;
+export async function getResearchJobStatus(jobId) {
+let job = jobStore.get(jobId);
+if (!job) {
+return { status: 'not_found', job_id: jobId };
+}
+// If already done, return immediately
+if (job.status !== 'running') {
+return formatJobResponse(job);
+}
+// Long-poll: wait up to POLL_WAIT_MS for completion, checking every second
+const deadline = Date.now() + POLL_WAIT_MS;
+while (Date.now() < deadline) {
+await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL_MS));
+// Re-fetch to avoid stale reference if job was cleaned up
+job = jobStore.get(jobId);
+if (!job) {
+return { status: 'not_found', job_id: jobId };
+}
+if (job.status !== 'running') {
+return formatJobResponse(job);
+}
+}
+return formatJobResponse(job);
+}
+function formatJobResponse(job) {
+const elapsed = Math.round((Date.now() - new Date(job.startedAt).getTime()) / 1000);
+if (job.status === 'complete') {
+return {
+status: 'complete',
+job_id: job.id,
+task: job.task,
+elapsed_seconds: elapsed,
+result: job.result,
+};
+}
+if (job.status === 'error') {
+return {
+status: 'error',
+job_id: job.id,
+task: job.task,
+elapsed_seconds: elapsed,
+error: job.error,
+};
+}
+return {
+status: 'running',
+job_id: job.id,
+task: job.task,
+elapsed_seconds: elapsed,
+total_steps: job.activity.length,
+activity: job.activity,
+message: `Research is still running (${elapsed}s elapsed, ${job.activity.length} steps completed). This is normal — deep research takes 2-8 minutes. Keep polling.`,
+};
+}
 // Lazy initialization for OpenAI (only used in simple mode)
 let openaiClient = null;
 function getOpenAI() {
@@ -109,12 +241,15 @@ Respond with only the JSON object.`;
 }
 export async function handleResearch(dbPath, dataDir, args, options = {}) {
 const { task, project, include_sources = true } = args;
+const { onProgress } = options;
 // Check if we should use agentic mode (default) or simple mode (fallback)
 const useAgenticMode = process.env.LORE_RESEARCH_MODE !== 'simple';
 if (useAgenticMode) {
 console.error('[research] Using agentic mode (Claude Agent SDK)');
+await onProgress?.(0, undefined, 'Starting agentic research...');
 try {
-const result = await runResearchAgent(dbPath, dataDir, args);
+const result = await runResearchAgent(dbPath, dataDir, args, onProgress);
+await onProgress?.(100, 100, 'Research complete');
 await runResearchCompletedHook(result, {
 mode: options.hookContext?.mode || 'mcp',
 dataDir,
@@ -124,11 +259,14 @@ export async function handleResearch(dbPath, dataDir, args, options = {}) {
 }
 catch (error) {
 console.error('[research] Agentic mode failed, falling back to simple mode:', error);
+await onProgress?.(0, undefined, 'Agentic mode failed, falling back to simple mode...');
 // Fall through to simple mode
 }
 }
 console.error('[research] Using simple mode (single-pass synthesis)');
-
+await onProgress?.(0, undefined, 'Starting simple research...');
+const result = await handleResearchSimple(dbPath, dataDir, args, onProgress);
+await onProgress?.(100, 100, 'Research complete');
 await runResearchCompletedHook(result, {
 mode: options.hookContext?.mode || 'mcp',
 dataDir,
@@ -140,7 +278,7 @@ export async function handleResearch(dbPath, dataDir, args, options = {}) {
 * Simple research mode - single pass search + synthesis
 * This is the fallback when agentic mode fails or is disabled
 */
-async function handleResearchSimple(dbPath, dataDir, args) {
+async function handleResearchSimple(dbPath, dataDir, args, onProgress) {
 const { task, project, include_sources = true } = args;
 // Use sensible defaults for simple mode
 const sourceLimit = 10;
@@ -149,7 +287,9 @@ async function handleResearchSimple(dbPath, dataDir, args) {
 const archivedProjects = await loadArchivedProjects(dataDir);
 const archivedNames = new Set(archivedProjects.map((p) => p.project.toLowerCase()));
 // Step 1: Search for relevant sources (fetch extra to account for archived filtering)
+await onProgress?.(10, 100, 'Generating embeddings...');
 const queryVector = await generateEmbedding(task);
+await onProgress?.(30, 100, 'Searching sources...');
 const rawSources = await searchSources(dbPath, queryVector, {
 limit: sourceLimit * 2,
 project,
@@ -172,6 +312,7 @@ async function handleResearchSimple(dbPath, dataDir, args) {
 }
 }
 // Step 3: Synthesize findings with LLM (conflict-aware)
+await onProgress?.(60, 100, 'Synthesizing findings...');
 // Note: Decisions are now extracted at query time by the agentic research mode
 const synthesis = await synthesizeFindings(task, sources.map((s) => ({
 id: s.id,

package/dist/mcp/handlers/sync.d.ts
CHANGED

@@ -38,10 +38,12 @@ interface SyncResult {
 errors: number;
 titles: string[];
 };
+reconciled: number;
 }
 export declare function handleSync(dbPath: string, dataDir: string, args: SyncArgs, options?: {
 hookContext?: {
 mode: 'mcp' | 'cli';
 };
+onProgress?: (progress: number, total?: number, message?: string) => Promise<void>;
 }): Promise<SyncResult>;
 export {};
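The updated `handleSync` declaration above can be driven the same way: stream progress through `onProgress` and read the new `reconciled` count off the result. A hedged sketch; the import path and argument values are assumptions, and the `SyncArgs` shape is not shown in this diff, so an empty object stands in for it:

```typescript
// Sketch only: deep-import path and argument values are illustrative assumptions.
import { handleSync } from '@getlore/cli/dist/mcp/handlers/sync.js';

const result = await handleSync('/tmp/lore.db', '/tmp/lore-data', {}, {
  // SyncArgs fields are not shown in this diff; an empty object is assumed to be valid.
  hookContext: { mode: 'cli' },
  onProgress: async (progress, total, message) => {
    if (message) console.error(`[sync] ${message}`);
  },
});

if (result.reconciled > 0) {
  console.log(`Reconciled ${result.reconciled} source(s) missing local content`);
}
```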