@devista/docs-mcp 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +99 -0
- package/bin/docs-mcp.js +40 -0
- package/package.json +36 -0
- package/src/config.js +47 -0
- package/src/freshness.js +29 -0
- package/src/indexer.js +121 -0
- package/src/server.js +89 -0
- package/src/tools/get-page.js +19 -0
- package/src/tools/list-sections.js +23 -0
- package/src/tools/search-docs.js +53 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2026 Devista Consulting
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
# @devista/docs-mcp
|
|
2
|
+
|
|
3
|
+
A generic MCP server that provides full-text search over Markdown/MDX documentation folders. Point it at any docs directory and AI agents (Claude Code, etc.) can search, browse, and read your documentation.
|
|
4
|
+
|
|
5
|
+
## Quick Start
|
|
6
|
+
|
|
7
|
+
Add to your project's `.mcp.json`:
|
|
8
|
+
|
|
9
|
+
```json
|
|
10
|
+
{
|
|
11
|
+
"mcpServers": {
|
|
12
|
+
"docs": {
|
|
13
|
+
"command": "npx",
|
|
14
|
+
"args": ["-y", "@devista/docs-mcp", "--docs", "./docs"]
|
|
15
|
+
}
|
|
16
|
+
}
|
|
17
|
+
}
|
|
18
|
+
```
|
|
19
|
+
|
|
20
|
+
That's it. The server will index your docs on first start and auto-rebuild when files change.
|
|
21
|
+
|
|
22
|
+
## Configuration
|
|
23
|
+
|
|
24
|
+
### CLI Args
|
|
25
|
+
|
|
26
|
+
| Arg | Description |
|
|
27
|
+
|-----|-------------|
|
|
28
|
+
| `--docs <path>` | Path to documentation folder (required) |
|
|
29
|
+
| `--name <name>` | Server name shown in MCP clients (default: `docs-mcp`) |
|
|
30
|
+
|
|
31
|
+
### Config File
|
|
32
|
+
|
|
33
|
+
Create `.docs-mcp.json` in your project root for persistent settings:
|
|
34
|
+
|
|
35
|
+
```json
|
|
36
|
+
{
|
|
37
|
+
"docs": "./src/content/docs",
|
|
38
|
+
"include": ["**/*.md", "**/*.mdx"],
|
|
39
|
+
"exclude": ["**/drafts/**"],
|
|
40
|
+
"name": "my-project-docs"
|
|
41
|
+
}
|
|
42
|
+
```
|
|
43
|
+
|
|
44
|
+
CLI args override config file values. When using a config file, your `.mcp.json` simplifies to:
|
|
45
|
+
|
|
46
|
+
```json
|
|
47
|
+
{
|
|
48
|
+
"mcpServers": {
|
|
49
|
+
"docs": {
|
|
50
|
+
"command": "npx",
|
|
51
|
+
"args": ["-y", "@devista/docs-mcp"]
|
|
52
|
+
}
|
|
53
|
+
}
|
|
54
|
+
}
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
### Defaults
|
|
58
|
+
|
|
59
|
+
| Setting | Default |
|
|
60
|
+
|---------|---------|
|
|
61
|
+
| `include` | `["**/*.md", "**/*.mdx"]` |
|
|
62
|
+
| `exclude` | `["**/node_modules/**"]` |
|
|
63
|
+
| `name` | `docs-mcp` |
|
|
64
|
+
|
|
65
|
+
## Tools
|
|
66
|
+
|
|
67
|
+
### `search_docs`
|
|
68
|
+
|
|
69
|
+
Full-text search across all documentation pages.
|
|
70
|
+
|
|
71
|
+
- **query** (string, required) — The search query
|
|
72
|
+
- **limit** (number, optional, default: 5) — Max results
|
|
73
|
+
|
|
74
|
+
### `get_page`
|
|
75
|
+
|
|
76
|
+
Retrieve the full content of a specific page.
|
|
77
|
+
|
|
78
|
+
- **path** (string, required) — Page path relative to docs root (e.g. `backend/payments`)
|
|
79
|
+
|
|
80
|
+
### `list_sections`
|
|
81
|
+
|
|
82
|
+
List all documentation sections and their pages. No parameters.
|
|
83
|
+
|
|
84
|
+
## How It Works
|
|
85
|
+
|
|
86
|
+
1. On startup, the server checks if the search index needs rebuilding (compares file modification times)
|
|
87
|
+
2. If stale or missing, it parses all Markdown/MDX files, strips MDX syntax, and builds a [Flexsearch](https://github.com/nextapps-de/flexsearch) index
|
|
88
|
+
3. The index is stored in `.docs-mcp/` in your project root (add to `.gitignore`)
|
|
89
|
+
4. Three MCP tools are exposed over stdio transport
|
|
90
|
+
|
|
91
|
+
## Add to .gitignore
|
|
92
|
+
|
|
93
|
+
```
|
|
94
|
+
.docs-mcp/
|
|
95
|
+
```
|
|
96
|
+
|
|
97
|
+
## License
|
|
98
|
+
|
|
99
|
+
MIT
|
package/bin/docs-mcp.js
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
|
|
3
|
+
import { promises as fs } from "fs";
|
|
4
|
+
import { parseCliArgs, loadConfigFile, resolveConfig } from "../src/config.js";
|
|
5
|
+
import { startServer } from "../src/server.js";
|
|
6
|
+
|
|
7
|
+
/**
 * CLI entry point for the docs-mcp server.
 *
 * Startup sequence:
 *   1. Parse CLI flags (--docs, --name) from process.argv.
 *   2. Load optional .docs-mcp.json from the current working directory.
 *   3. Merge both into a resolved config (CLI wins over file, file over defaults).
 *   4. Validate that the configured docs path is an existing directory.
 *   5. Hand off to startServer(), which indexes docs and serves MCP over stdio.
 *
 * All failures exit with status 1 after printing to stderr (stdout must stay
 * clean for the MCP stdio transport).
 */
async function main() {
  const cwd = process.cwd();
  const cliArgs = parseCliArgs(process.argv);
  const fileConfig = await loadConfigFile(cwd);

  let config;
  try {
    config = resolveConfig(cliArgs, fileConfig, cwd);
  } catch (err) {
    // resolveConfig throws when no docs path was supplied via CLI or config file.
    console.error(`Error: ${err.message}`);
    process.exit(1);
  }

  // Validate docs path exists
  try {
    const stat = await fs.stat(config.docs);
    if (!stat.isDirectory()) {
      console.error(`Error: docs path '${config.docs}' is not a directory`);
      process.exit(1);
    }
  } catch {
    // fs.stat rejects when the path does not exist (or is unreadable).
    console.error(
      `Error: docs path '${config.docs}' does not exist or is not a directory`
    );
    process.exit(1);
  }

  await startServer(config);
}

// Top-level guard: any unexpected startup failure (index build, transport
// connect, config-file parse error) is reported to stderr with a non-zero exit.
main().catch((err) => {
  console.error("docs-mcp failed to start:", err);
  process.exit(1);
});
|
package/package.json
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@devista/docs-mcp",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "Generic MCP server for full-text search over Markdown/MDX documentation",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"bin": {
|
|
7
|
+
"docs-mcp": "./bin/docs-mcp.js"
|
|
8
|
+
},
|
|
9
|
+
"files": [
|
|
10
|
+
"bin/",
|
|
11
|
+
"src/"
|
|
12
|
+
],
|
|
13
|
+
"scripts": {
|
|
14
|
+
"test": "vitest run",
|
|
15
|
+
"test:watch": "vitest"
|
|
16
|
+
},
|
|
17
|
+
"publishConfig": {
|
|
18
|
+
"access": "public"
|
|
19
|
+
},
|
|
20
|
+
"repository": {
|
|
21
|
+
"type": "git",
|
|
22
|
+
"url": "git+https://github.com/devista-consulting/docs-mcp.git"
|
|
23
|
+
},
|
|
24
|
+
"keywords": ["mcp", "documentation", "search", "markdown", "ai", "claude"],
|
|
25
|
+
"license": "MIT",
|
|
26
|
+
"dependencies": {
|
|
27
|
+
"@modelcontextprotocol/sdk": "^1.29.0",
|
|
28
|
+
"fast-glob": "^3.3.0",
|
|
29
|
+
"flexsearch": "^0.8.0",
|
|
30
|
+
"gray-matter": "^4.0.0",
|
|
31
|
+
"zod": "^3.25.0"
|
|
32
|
+
},
|
|
33
|
+
"devDependencies": {
|
|
34
|
+
"vitest": "^4.0.0"
|
|
35
|
+
}
|
|
36
|
+
}
|
package/src/config.js
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
1
|
+
import { promises as fs } from "fs";
|
|
2
|
+
import path from "path";
|
|
3
|
+
|
|
4
|
+
// Fallback settings used by resolveConfig() when neither CLI args nor the
// .docs-mcp.json config file supply a value. `docs` has no default: it is
// required and resolveConfig() throws when it is missing.
const DEFAULTS = {
  include: ["**/*.md", "**/*.mdx"],
  exclude: ["**/node_modules/**"],
  name: "docs-mcp",
};
|
|
9
|
+
|
|
10
|
+
/**
 * Extract the supported CLI flags from a raw argv array.
 *
 * Recognizes `--docs <path>` and `--name <name>`; a flag with no following
 * value is ignored. Unknown arguments are skipped silently.
 *
 * @param {string[]} argv - Full process.argv (first two entries are the
 *   node binary and script path, so scanning starts at index 2).
 * @returns {{docs?: string, name?: string}} Only the flags actually present.
 */
export function parseCliArgs(argv) {
  const parsed = {};
  let cursor = 2;
  while (cursor < argv.length) {
    const flag = argv[cursor];
    const value = argv[cursor + 1];
    if ((flag === "--docs" || flag === "--name") && value) {
      // Strip the leading "--" to get the option key, consume flag + value.
      parsed[flag.slice(2)] = value;
      cursor += 2;
    } else {
      cursor += 1;
    }
  }
  return parsed;
}
|
|
21
|
+
|
|
22
|
+
/**
 * Load optional per-project settings from `.docs-mcp.json` in `cwd`.
 *
 * A missing config file is normal and yields `{}`. Fix over the previous
 * version: the bare `catch` used to swallow EVERY error, so a malformed
 * `.docs-mcp.json` was silently ignored and defaults applied with no
 * diagnostic. Now only ENOENT is treated as "no config"; unreadable files
 * propagate and invalid JSON throws with the offending path.
 *
 * @param {string} cwd - Directory to look for `.docs-mcp.json` in.
 * @returns {Promise<object>} Parsed config object, or `{}` when absent.
 * @throws {Error} When the file exists but is unreadable or not valid JSON.
 */
export async function loadConfigFile(cwd) {
  const configPath = path.join(cwd, ".docs-mcp.json");
  let raw;
  try {
    raw = await fs.readFile(configPath, "utf8");
  } catch (err) {
    if (err.code === "ENOENT") {
      return {};
    }
    // Permission errors etc. should surface, not silently fall back to defaults.
    throw err;
  }
  try {
    return JSON.parse(raw);
  } catch (err) {
    throw new Error(`Invalid JSON in ${configPath}: ${err.message}`, {
      cause: err,
    });
  }
}
|
|
31
|
+
|
|
32
|
+
/**
 * Merge CLI flags, file config, and built-in defaults into the final config.
 *
 * Precedence per setting: CLI arg > .docs-mcp.json > DEFAULTS. The docs path
 * is mandatory and resolved to an absolute path against `cwd`; the index
 * data directory is always `<cwd>/.docs-mcp`.
 *
 * @param {{docs?: string, name?: string}} cliArgs - Output of parseCliArgs().
 * @param {object} fileConfig - Output of loadConfigFile() (may be empty).
 * @param {string} cwd - Project root used to resolve relative paths.
 * @returns {{docs: string, include: string[], exclude: string[], name: string, dataDir: string}}
 * @throws {Error} When no docs path was provided by either source.
 */
export function resolveConfig(cliArgs, fileConfig, cwd) {
  const pick = (key) => cliArgs[key] || fileConfig[key] || DEFAULTS[key];

  const docs = cliArgs.docs || fileConfig.docs;
  if (!docs) {
    throw new Error(
      "--docs path is required. Provide it via CLI arg or .docs-mcp.json"
    );
  }

  return {
    docs: path.resolve(cwd, docs),
    include: pick("include"),
    exclude: pick("exclude"),
    name: pick("name"),
    dataDir: path.join(cwd, ".docs-mcp"),
  };
}
|
package/src/freshness.js
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
import { promises as fs } from "fs";
|
|
2
|
+
import path from "path";
|
|
3
|
+
import fg from "fast-glob";
|
|
4
|
+
|
|
5
|
+
/**
 * Decide whether the persisted search index must be rebuilt.
 *
 * The index is considered stale when any matching doc file has a newer
 * mtime than `documents.json`, or when the index (or any stat'd file)
 * cannot be read at all — in which case we err on the side of rebuilding.
 *
 * @param {{dataDir: string, docs: string, include: string[], exclude: string[]}} config
 * @returns {Promise<boolean>} true when a rebuild is needed.
 */
export async function isIndexStale(config) {
  const indexPath = path.join(config.dataDir, "documents.json");

  try {
    const { mtimeMs: indexMtime } = await fs.stat(indexPath);

    const docFiles = await fg(config.include, {
      cwd: config.docs,
      ignore: config.exclude,
      absolute: true,
    });

    for (const docFile of docFiles) {
      const { mtimeMs } = await fs.stat(docFile);
      // Any source file newer than the index means the index is out of date.
      if (mtimeMs > indexMtime) {
        return true;
      }
    }

    return false;
  } catch {
    // Missing index file (first run) or a file vanished mid-scan: rebuild.
    return true;
  }
}
|
package/src/indexer.js
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
1
|
+
import matter from "gray-matter";
|
|
2
|
+
import { promises as fs } from "fs";
|
|
3
|
+
import path from "path";
|
|
4
|
+
import { Document } from "flexsearch";
|
|
5
|
+
import fg from "fast-glob";
|
|
6
|
+
|
|
7
|
+
/**
 * Parse one raw Markdown/MDX file into an indexable document record.
 *
 * Frontmatter is split off with gray-matter; the body has MDX syntax
 * (imports, JSX component tags) stripped before indexing. The section is
 * the first path segment of `relativePath`, or "" for root-level pages.
 *
 * @param {string} rawContent - Full file contents including frontmatter.
 * @param {string} relativePath - Extension-less path relative to the docs root.
 * @returns {{path: string, title: string, description: string, body: string, section: string}}
 */
export function parseDocFile(rawContent, relativePath) {
  const parsed = matter(rawContent);

  const slashAt = relativePath.indexOf("/");
  const section = slashAt === -1 ? "" : relativePath.slice(0, slashAt);

  return {
    path: relativePath,
    title: parsed.data.title || "",
    description: parsed.data.description || "",
    body: stripMdxSyntax(parsed.content).trim(),
    section,
  };
}
|
|
23
|
+
|
|
24
|
+
/**
 * Remove MDX-specific syntax so only prose is indexed.
 *
 * Drops top-of-file `import` lines, opening/self-closing JSX component tags
 * (capitalized tag names), closing JSX tags, and then collapses any runs of
 * three-or-more newlines left behind into a single blank line.
 *
 * @param {string} content - Markdown/MDX body (frontmatter already removed).
 * @returns {string} Plain-Markdown text.
 */
function stripMdxSyntax(content) {
  let text = content;
  text = text.replace(/^import\s+.*$/gm, "");
  text = text.replace(/<[A-Z][A-Za-z]*(?:\s[^>]*)?\/?>/g, "");
  text = text.replace(/<\/[A-Z][A-Za-z]*>/g, "");
  return text.replace(/\n{3,}/g, "\n\n");
}
|
|
31
|
+
|
|
32
|
+
/**
 * Read and parse every documentation file under `docsDir`.
 *
 * Files are globbed with `include`/`exclude` patterns, sorted so the index
 * is deterministic regardless of filesystem traversal order, and parsed via
 * parseDocFile(). Unreadable files are skipped with a stderr warning rather
 * than failing the whole build.
 *
 * Fix: the extension-stripping regex was `/\.(mdx?|md)$/` — the `|md`
 * branch is unreachable because `mdx?` already matches both `.md` and
 * `.mdx`; simplified to `/\.mdx?$/` (identical behavior).
 *
 * @param {string} docsDir - Absolute path to the docs root.
 * @param {string[]} include - Glob patterns to include.
 * @param {string[]} exclude - Glob patterns to ignore.
 * @returns {Promise<Array<object>>} Parsed docs, each with a `lastModified`
 *   ISO timestamp taken from the file's mtime.
 */
export async function collectDocFiles(docsDir, include, exclude) {
  const files = await fg(include, {
    cwd: docsDir,
    ignore: exclude,
    absolute: false,
  });

  const docs = [];
  for (const file of files.sort()) {
    const fullPath = path.join(docsDir, file);
    try {
      const rawContent = await fs.readFile(fullPath, "utf8");
      const stat = await fs.stat(fullPath);
      // Document IDs are the relative path without the .md/.mdx extension.
      const pathWithoutExt = file.replace(/\.mdx?$/, "");
      const doc = parseDocFile(rawContent, pathWithoutExt);
      docs.push({ ...doc, lastModified: stat.mtime.toISOString() });
    } catch (err) {
      // Best-effort: one bad file should not abort indexing the rest.
      console.error(`Warning: skipping ${file}: ${err.message}`);
    }
  }

  return docs;
}
|
|
55
|
+
|
|
56
|
+
/**
 * Construct the FlexSearch Document index used for doc search.
 *
 * Documents are keyed by `path`. Three fields are indexed with descending
 * weight (title > description > body, via `resolution`), and only the small
 * metadata fields are stored inline — full bodies live in documents.json.
 *
 * @returns {Document} An empty, configured FlexSearch Document index.
 */
export function createSearchIndex() {
  const indexedFields = [
    { field: "title", resolution: 9 },
    { field: "description", resolution: 7 },
    { field: "body", resolution: 5 },
  ];

  return new Document({
    tokenize: "forward",
    document: {
      id: "path",
      index: indexedFields,
      store: ["title", "description", "section"],
    },
  });
}
|
|
70
|
+
|
|
71
|
+
/**
 * Build the FlexSearch index plus a plain-JSON document store and persist
 * both under `outputDir`.
 *
 * Two artifacts are written:
 *   - documents.json: full document records keyed by path (the FlexSearch
 *     index itself only stores title/description/section — see
 *     createSearchIndex()).
 *   - search-index/<key>: one file per serialized FlexSearch segment.
 *
 * @param {Array<object>} docs - Parsed documents from collectDocFiles()
 *   (path, title, description, body, section, lastModified).
 * @param {string} outputDir - Directory to write into (created recursively).
 * @returns {Promise<{documentCount: number}>} Count of documents indexed.
 */
export async function buildIndex(docs, outputDir) {
  const index = createSearchIndex();

  const documentStore = {};
  for (const doc of docs) {
    index.add(doc);
    documentStore[doc.path] = {
      title: doc.title,
      description: doc.description,
      body: doc.body,
      section: doc.section,
      lastModified: doc.lastModified,
    };
  }

  await fs.mkdir(outputDir, { recursive: true });
  await fs.writeFile(
    path.join(outputDir, "documents.json"),
    JSON.stringify(documentStore, null, 2),
    "utf8"
  );

  // FlexSearch serializes via a callback invoked once per (key, data) pair;
  // each pair becomes its own file so loadIndex() can re-import by filename.
  const indexDir = path.join(outputDir, "search-index");
  await fs.mkdir(indexDir, { recursive: true });
  await index.export(async (key, data) => {
    // Some segments can be empty; skip them so we don't write junk files.
    if (data !== undefined && data !== null) {
      await fs.writeFile(
        path.join(indexDir, key.toString()),
        typeof data === "string" ? data : JSON.stringify(data),
        "utf8"
      );
    }
  });

  return { documentCount: docs.length };
}
|
|
107
|
+
|
|
108
|
+
/**
 * Load a previously persisted index from `dataDir`.
 *
 * Reads documents.json (the full document store) and re-imports every file
 * under search-index/ into a freshly created FlexSearch Document index,
 * using each filename as the import key — the same key buildIndex()
 * exported under.
 *
 * @param {string} dataDir - The `.docs-mcp` data directory.
 * @returns {Promise<{index: object, documents: object}>}
 * @throws If the artifacts are missing or unreadable; callers run the
 *   staleness check (and rebuild) before calling this.
 */
export async function loadIndex(dataDir) {
  const documentsPath = path.join(dataDir, "documents.json");
  const documents = JSON.parse(await fs.readFile(documentsPath, "utf8"));

  const index = createSearchIndex();
  const indexDir = path.join(dataDir, "search-index");
  const files = await fs.readdir(indexDir);
  for (const file of files) {
    const data = await fs.readFile(path.join(indexDir, file), "utf8");
    // NOTE(review): assumes FlexSearch import is order-independent across
    // keys — readdir() order is not guaranteed. Confirm against flexsearch
    // 0.8 docs if search results ever look incomplete after a reload.
    index.import(file, data);
  }

  return { index, documents };
}
|
package/src/server.js
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
1
|
+
import { McpServer } from "@modelcontextprotocol/sdk/server/mcp.js";
|
|
2
|
+
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
|
|
3
|
+
import { z } from "zod";
|
|
4
|
+
import { isIndexStale } from "./freshness.js";
|
|
5
|
+
import { collectDocFiles, buildIndex, loadIndex } from "./indexer.js";
|
|
6
|
+
import { searchDocs } from "./tools/search-docs.js";
|
|
7
|
+
import { getPage } from "./tools/get-page.js";
|
|
8
|
+
import { listSections } from "./tools/list-sections.js";
|
|
9
|
+
|
|
10
|
+
/**
 * Guarantee an up-to-date search index exists, then load it.
 *
 * Rebuilds (re-collecting and re-indexing all docs) only when the freshness
 * check reports the persisted index stale or missing.
 *
 * @param {object} config - Resolved config (docs, include, exclude, dataDir).
 * @returns {Promise<{index: object, documents: object}>} The loaded index.
 */
async function ensureIndex(config) {
  if (await isIndexStale(config)) {
    // Progress goes to stderr — stdout is reserved for the MCP stdio transport.
    console.error("Index is stale or missing, rebuilding...");
    const docs = await collectDocFiles(
      config.docs,
      config.include,
      config.exclude
    );
    await buildIndex(docs, config.dataDir);
    console.error(`Rebuilt index: ${docs.length} documents`);
  }

  return loadIndex(config.dataDir);
}
|
|
22
|
+
|
|
23
|
+
/**
 * Start the MCP server: ensure/load the index, register the three tools
 * (search_docs, get_page, list_sections), and connect over stdio.
 *
 * Each tool handler returns its payload as a single JSON-encoded text
 * content block, as expected by MCP clients.
 *
 * @param {object} config - Resolved config; `config.name` becomes the
 *   server name advertised to MCP clients.
 * @returns {Promise<void>} Resolves once the stdio transport is connected.
 */
export async function startServer(config) {
  // Index and document store are loaded once at startup and captured by the
  // tool handlers below; they are not refreshed while the server runs.
  const { index, documents } = await ensureIndex(config);

  const server = new McpServer({
    name: config.name,
    version: "1.0.0",
  });

  server.registerTool(
    "search_docs",
    {
      description:
        "Full-text search across documentation. Returns ranked results with excerpts.",
      inputSchema: {
        query: z.string().describe("The search query"),
        limit: z
          .number()
          .optional()
          .default(5)
          .describe("Maximum number of results to return (default: 5)"),
      },
    },
    async ({ query, limit }) => {
      const results = searchDocs(index, documents, query, limit);
      return {
        content: [{ type: "text", text: JSON.stringify(results, null, 2) }],
      };
    }
  );

  server.registerTool(
    "get_page",
    {
      description:
        "Retrieve the full content of a specific documentation page by its path.",
      inputSchema: {
        // Renamed locally to pagePath in the handler to avoid shadowing
        // anything named `path` in module scope.
        path: z
          .string()
          .describe("Page path relative to docs root (e.g. 'backend/payments')"),
      },
    },
    async ({ path: pagePath }) => {
      const result = getPage(documents, pagePath);
      return {
        content: [{ type: "text", text: JSON.stringify(result, null, 2) }],
      };
    }
  );

  server.registerTool(
    "list_sections",
    {
      description:
        "List all documentation sections and their pages.",
      // No parameters for this tool.
      inputSchema: {},
    },
    async () => {
      const result = listSections(documents);
      return {
        content: [{ type: "text", text: JSON.stringify(result, null, 2) }],
      };
    }
  );

  const transport = new StdioServerTransport();
  await server.connect(transport);
}
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
/**
 * Look up one documentation page by its extension-less path.
 *
 * Trailing slashes are tolerated (stripped before lookup). Unknown paths
 * return an `{ error }` object instead of throwing, so the MCP client gets
 * an actionable message.
 *
 * @param {object} documents - Document store keyed by page path.
 * @param {string} pagePath - Page path relative to the docs root.
 * @returns {object} The full page record, or `{ error }` when not found.
 */
export function getPage(documents, pagePath) {
  const normalizedPath = pagePath.replace(/\/+$/, "");
  const doc = documents[normalizedPath];

  if (!doc) {
    return {
      error: `Page '${normalizedPath}' not found. Use list_sections to see available pages.`,
    };
  }

  const { title, description, body, section, lastModified } = doc;
  return {
    path: normalizedPath,
    title,
    description,
    body,
    section,
    lastModified,
  };
}
|
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
/**
 * Group all pages by section and return a sorted table of contents.
 *
 * Root-level pages (no directory prefix) fall under the "" section. Both
 * the section list and each section's pages are sorted lexicographically
 * (locale-aware) by name/path.
 *
 * @param {object} documents - Document store keyed by page path.
 * @returns {{sections: Array<{name: string, pages: Array<{path: string, title: string}>}>, totalPages: number}}
 */
export function listSections(documents) {
  const bySection = new Map();

  for (const [docPath, doc] of Object.entries(documents)) {
    const sectionName = doc.section || "";
    const entry = { path: docPath, title: doc.title };
    const bucket = bySection.get(sectionName);
    if (bucket) {
      bucket.push(entry);
    } else {
      bySection.set(sectionName, [entry]);
    }
  }

  const sections = Array.from(bySection, ([name, pages]) => ({
    name,
    pages: pages.sort((a, b) => a.path.localeCompare(b.path)),
  })).sort((a, b) => a.name.localeCompare(b.name));

  return { sections, totalPages: Object.keys(documents).length };
}
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
// Total excerpt window size in characters (80 leading + 120 trailing context).
const EXCERPT_LENGTH = 200;

/**
 * Produce a short excerpt of `body` centered near the first query-term hit.
 *
 * The earliest (case-insensitive) occurrence of any whitespace-separated
 * query term anchors the window: up to 80 chars of context before it and
 * the rest of the 200-char budget after. Ellipses mark truncated ends.
 * When no term matches, the first 200 chars of the body are returned.
 *
 * @param {string} body - Full plain-text page body.
 * @param {string} query - Raw search query.
 * @returns {string} The excerpt, possibly wrapped in "..." markers.
 */
export function generateExcerpt(body, query) {
  const lowerBody = body.toLowerCase();

  // Earliest position at which any query term occurs, or -1 if none do.
  let firstIndex = -1;
  for (const term of query.toLowerCase().split(/\s+/)) {
    const idx = lowerBody.indexOf(term);
    if (idx === -1) continue;
    if (firstIndex === -1 || idx < firstIndex) {
      firstIndex = idx;
    }
  }

  if (firstIndex === -1) {
    return body.slice(0, EXCERPT_LENGTH).trim();
  }

  const start = Math.max(0, firstIndex - 80);
  const end = Math.min(body.length, firstIndex + EXCERPT_LENGTH - 80);
  const excerpt = body.slice(start, end).trim();

  const prefix = start > 0 ? "..." : "";
  const suffix = end < body.length ? "..." : "";
  return prefix + excerpt + suffix;
}
|
|
25
|
+
|
|
26
|
+
/**
 * Run a full-text query against the FlexSearch index and shape the results.
 *
 * FlexSearch Document search returns one result group per indexed field
 * (title / description / body); a page matching several fields would appear
 * more than once, so hits are de-duplicated by path in field order. Each
 * kept hit is enriched with its title and a query-anchored excerpt from the
 * stored body. At most `limit` results are returned.
 *
 * @param {object} index - FlexSearch Document index from loadIndex().
 * @param {object} documents - Document store keyed by page path.
 * @param {string} query - The search query.
 * @param {number} limit - Maximum number of results.
 * @returns {Array<{path: string, title: string, excerpt: string}>}
 */
export function searchDocs(index, documents, query, limit) {
  const rawResults = index.search(query, { limit, enrich: true });

  const seen = new Set();
  const results = [];

  for (const fieldResult of rawResults) {
    for (const item of fieldResult.result) {
      // With enrich:true items are {id, doc} objects; guard against plain
      // id entries too in case enrichment is unavailable for a field.
      const docPath = typeof item === "object" ? item.id : item;
      if (seen.has(docPath)) continue;
      seen.add(docPath);

      const doc = documents[docPath];
      // Index and documents.json could drift (e.g. partial write); skip
      // hits that have no backing document rather than emitting holes.
      if (!doc) continue;

      results.push({
        path: docPath,
        title: doc.title,
        excerpt: generateExcerpt(doc.body, query),
      });

      if (results.length >= limit) break;
    }
    if (results.length >= limit) break;
  }

  return results;
}
|