@echoes-io/mcp-server 1.3.0 → 1.3.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +2 -1
- package/lib/tools/rag-index.d.ts +3 -0
- package/lib/tools/rag-index.js +48 -5
- package/lib/tools/timeline-sync.js +42 -11
- package/package.json +1 -1
package/README.md
CHANGED
@@ -97,8 +97,9 @@ All tools operate on the timeline specified by the `ECHOES_TIMELINE` environment
 
 ### RAG (Semantic Search)
 - **`rag-index`** - Index chapters into vector database for semantic search
-  - Input: optional: `arc`, `episode` (to index specific content)
+  - Input: `contentPath` (path to content directory, required for full content indexing), optional: `arc`, `episode` (to index specific content)
   - Output: Number of chapters indexed
+  - Note: Requires `contentPath` to read and index actual chapter content. Without it, only metadata is indexed.
 
 - **`rag-search`** - Semantic search across timeline content
   - Input: `query`, optional: `arc`, `pov`, `maxResults`
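
To illustrate the documented inputs, here is a hedged sketch of an MCP `tools/call` payload for `rag-index`; the `contentPath` value and arc name are placeholders, not values from the package.

// Illustrative MCP tools/call payload for the updated rag-index tool.
// The contentPath and arc values below are hypothetical placeholders.
const ragIndexRequest = {
  method: 'tools/call',
  params: {
    name: 'rag-index',
    arguments: {
      contentPath: './content', // required to index actual chapter text
      arc: 'example-arc',       // optional: limit indexing to one arc
      episode: 1,               // optional: limit indexing to one episode
    },
  },
};

console.log(JSON.stringify(ragIndexRequest, null, 2));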
package/lib/tools/rag-index.d.ts
CHANGED
@@ -2,12 +2,15 @@ import type { RAGSystem } from '@echoes-io/rag';
 import type { Tracker } from '@echoes-io/tracker';
 import { z } from 'zod';
 export declare const ragIndexSchema: z.ZodObject<{
+    contentPath: z.ZodOptional<z.ZodString>;
     arc: z.ZodOptional<z.ZodString>;
     episode: z.ZodOptional<z.ZodNumber>;
 }, "strip", z.ZodTypeAny, {
+    contentPath?: string | undefined;
     arc?: string | undefined;
     episode?: number | undefined;
 }, {
+    contentPath?: string | undefined;
     arc?: string | undefined;
     episode?: number | undefined;
 }>;
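
As a quick check of the widened schema shape, a minimal zod sketch (re-declared locally for illustration; it mirrors the declaration above) shows that all three fields remain optional:

import { z } from 'zod';

// Local re-declaration mirroring rag-index.d.ts, for illustration only.
const ragIndexSchema = z.object({
  contentPath: z.string().optional(),
  arc: z.string().optional(),
  episode: z.number().optional(),
});

// Both calls validate, since every field is optional.
console.log(ragIndexSchema.parse({}));
console.log(ragIndexSchema.parse({ contentPath: './content', arc: 'example-arc' }));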
package/lib/tools/rag-index.js
CHANGED
@@ -1,6 +1,10 @@
+import { readdirSync, readFileSync } from 'node:fs';
+import { join } from 'node:path';
+import { parseMarkdown } from '@echoes-io/utils';
 import { z } from 'zod';
 import { getTimeline } from '../utils.js';
 export const ragIndexSchema = z.object({
+    contentPath: z.string().optional().describe('Path to content directory (required for indexing)'),
     arc: z.string().optional().describe('Index specific arc only'),
     episode: z.number().optional().describe('Index specific episode only (requires arc)'),
 });
@@ -30,11 +34,50 @@ export async function ragIndex(args, tracker, rag) {
         }
     }
     // Convert to embedding format and add to RAG
-    const embeddingChapters = chapters
-
-
-
-
+    const embeddingChapters = chapters
+        .map((ch) => {
+        // If contentPath is provided, read actual file content
+        if (args.contentPath) {
+            try {
+                // Reconstruct file path from chapter metadata
+                const episodeDir = `ep${String(ch.episodeNumber).padStart(2, '0')}`;
+                const chapterFile = `ep${String(ch.episodeNumber).padStart(2, '0')}-ch${String(ch.number).padStart(3, '0')}-${ch.pov}`;
+                // Try to find the file (we don't know the exact title part)
+                const arcPath = join(args.contentPath, ch.arcName);
+                const episodePath = readdirSync(arcPath, { withFileTypes: true })
+                    .filter((e) => e.isDirectory() && e.name.startsWith(episodeDir))
+                    .map((e) => join(arcPath, e.name))[0];
+                if (!episodePath) {
+                    console.error(`Episode directory not found for ${ch.arcName}/ep${ch.episodeNumber}`);
+                    return null;
+                }
+                const chapterFiles = readdirSync(episodePath).filter((f) => f.startsWith(chapterFile) && f.endsWith('.md'));
+                if (chapterFiles.length === 0) {
+                    console.error(`Chapter file not found for ${ch.arcName}/ep${ch.episodeNumber}/ch${ch.number}`);
+                    return null;
+                }
+                const filePath = join(episodePath, chapterFiles[0]);
+                const fileContent = readFileSync(filePath, 'utf-8');
+                const { content } = parseMarkdown(fileContent);
+                return {
+                    id: `${ch.timelineName}-${ch.arcName}-${ch.episodeNumber}-${ch.number}`,
+                    metadata: ch,
+                    content,
+                };
+            }
+            catch (error) {
+                console.error(`Error reading chapter ${ch.arcName}/ep${ch.episodeNumber}/ch${ch.number}:`, error);
+                return null;
+            }
+        }
+        // Fallback: no content (for tests or when contentPath not provided)
+        return {
+            id: `${ch.timelineName}-${ch.arcName}-${ch.episodeNumber}-${ch.number}`,
+            metadata: ch,
+            content: '',
+        };
+    })
+        .filter((ch) => ch !== null);
     await rag.addChapters(embeddingChapters);
     return {
         content: [
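
The new indexing path rebuilds each chapter's file name from tracker metadata (`epNN-chNNN-<pov>*.md`) and scans the arc directory for a match. A simplified, standalone sketch of that lookup is shown below; the metadata fields and the `content/<arc>/epNN.../` layout are assumptions taken from the diff above, and the RAG wiring is omitted.

import { readdirSync } from 'node:fs';
import { join } from 'node:path';

// Simplified sketch of the chapter-file lookup added in rag-index.js.
// The metadata shape and the <arc>/epNN-.../epNN-chNNN-<pov>-<title>.md
// layout are assumptions based on the diff above.
interface ChapterMeta {
  arcName: string;
  episodeNumber: number;
  number: number;
  pov: string;
}

function findChapterFile(contentPath: string, ch: ChapterMeta): string | null {
  const episodeDir = `ep${String(ch.episodeNumber).padStart(2, '0')}`;
  const chapterPrefix = `${episodeDir}-ch${String(ch.number).padStart(3, '0')}-${ch.pov}`;
  const arcPath = join(contentPath, ch.arcName);
  // Episode directories only need to start with "epNN"; the suffix may carry a title.
  const episodePath = readdirSync(arcPath, { withFileTypes: true })
    .filter((e) => e.isDirectory() && e.name.startsWith(episodeDir))
    .map((e) => join(arcPath, e.name))[0];
  if (!episodePath) {
    return null;
  }
  // The exact title part of the file name is unknown, so match on the prefix.
  const match = readdirSync(episodePath).find((f) => f.startsWith(chapterPrefix) && f.endsWith('.md'));
  return match ? join(episodePath, match) : null;
}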
package/lib/tools/timeline-sync.js
CHANGED
@@ -1,4 +1,4 @@
-import { existsSync, readdirSync } from 'node:fs';
+import { existsSync, readdirSync, readFileSync } from 'node:fs';
 import { extname, join } from 'node:path';
 import { getTextStats, parseMarkdown } from '@echoes-io/utils';
 import { z } from 'zod';
@@ -43,22 +43,29 @@ export async function timelineSync(args, tracker) {
             const episodePath = join(arcPath, ep.name);
             let episode = await tracker.getEpisode(timeline, arcName, ep.number);
             if (!episode) {
-
-
-
-
-
-
-
-
-
+                try {
+                    episode = await tracker.createEpisode({
+                        timelineName: timeline,
+                        arcName: arcName,
+                        number: ep.number,
+                        slug: ep.name,
+                        title: ep.name,
+                        description: `Episode ${ep.number}`,
+                    });
+                    added++;
+                }
+                catch (error) {
+                    console.error(`Error creating episode ${arcName}/ep${ep.number}:`, error instanceof Error ? error.message : error);
+                    errors++;
+                    continue; // Skip chapters if episode creation failed
+                }
             }
             const chapters = readdirSync(episodePath)
                 .filter((file) => extname(file) === '.md')
                 .map((file) => {
                 try {
                     const filePath = join(episodePath, file);
-                    const content =
+                    const content = readFileSync(filePath, 'utf-8');
                     const { metadata, content: markdownContent } = parseMarkdown(content);
                     const stats = getTextStats(markdownContent);
                     return {
@@ -73,6 +80,30 @@ export async function timelineSync(args, tracker) {
                 }
             })
                 .filter((ch) => ch !== null);
+            // Collect unique part numbers
+            const partNumbers = new Set(chapters.map((ch) => ch?.metadata.part || 1));
+            // Create parts if they don't exist
+            for (const partNum of partNumbers) {
+                try {
+                    const existingPart = await tracker.getPart(timeline, arcName, ep.number, partNum);
+                    if (!existingPart) {
+                        await tracker.createPart({
+                            timelineName: timeline,
+                            arcName: arcName,
+                            episodeNumber: ep.number,
+                            number: partNum,
+                            slug: `part-${partNum}`,
+                            title: `Part ${partNum}`,
+                            description: `Part ${partNum} of Episode ${ep.number}`,
+                        });
+                        added++;
+                    }
+                }
+                catch (error) {
+                    console.error(`Error creating part ${arcName}/ep${ep.number}/part${partNum}:`, error instanceof Error ? error.message : error);
+                    errors++;
+                }
+            }
             for (const chapterData of chapters) {
                 if (!chapterData)
                     continue;
package/package.json
CHANGED