nuxt-ai-ready 0.1.5 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29)
  1. package/README.md +2 -2
  2. package/dist/module.d.mts +0 -1
  3. package/dist/module.json +1 -1
  4. package/dist/module.mjs +147 -72
  5. package/dist/runtime/server/mcp/resources/pages-chunks.d.ts +17 -0
  6. package/dist/runtime/server/mcp/resources/pages-chunks.js +22 -0
  7. package/dist/runtime/server/mcp/resources/pages.d.ts +16 -1
  8. package/dist/runtime/server/mcp/resources/pages.js +20 -21
  9. package/dist/runtime/server/mcp/tools/list-pages.d.ts +85 -1
  10. package/dist/runtime/server/mcp/tools/list-pages.js +16 -77
  11. package/dist/runtime/server/mcp/utils.d.ts +3 -0
  12. package/dist/runtime/server/mcp/utils.js +7 -0
  13. package/dist/runtime/server/middleware/mdream.js +3 -48
  14. package/dist/runtime/server/utils.d.ts +7 -0
  15. package/dist/runtime/server/utils.js +50 -0
  16. package/dist/runtime/types.d.ts +46 -15
  17. package/package.json +3 -2
  18. package/dist/runtime/server/mcp/prompts/explain-concept.d.ts +0 -2
  19. package/dist/runtime/server/mcp/prompts/explain-concept.js +0 -63
  20. package/dist/runtime/server/mcp/prompts/find-information.d.ts +0 -2
  21. package/dist/runtime/server/mcp/prompts/find-information.js +0 -58
  22. package/dist/runtime/server/mcp/prompts/search-content.d.ts +0 -2
  23. package/dist/runtime/server/mcp/prompts/search-content.js +0 -59
  24. package/dist/runtime/server/mcp/resources/all-content.d.ts +0 -2
  25. package/dist/runtime/server/mcp/resources/all-content.js +0 -14
  26. package/dist/runtime/server/mcp/tools/get-page.d.ts +0 -2
  27. package/dist/runtime/server/mcp/tools/get-page.js +0 -43
  28. package/dist/runtime/server/utils/db.d.ts +0 -8
  29. package/dist/runtime/server/utils/db.js +0 -48
package/README.md CHANGED
@@ -21,11 +21,11 @@ Nuxt AI Ready converts your indexable pages into clean markdown that AI systems

  ## Features

- - 📄 **llms.txt Generation**: Auto-generates `llms.txt` and `llms-full.txt` at build time
+ - 📄 **LLM Optimized Output**: Generate `llms.txt` as well as `llms.toon` (tabular, token efficient) artifacts
  - 🚀 **On-Demand Markdown**: Any route available as `.md` (e.g., `/about` → `/about.md`)
  - 🤖 **Smart Bot Detection**: Serves markdown to AI crawlers automatically
  - 📡 **Content Signals**: Help AI systems understand how to use your pages
- - 📦 **RAG-Ready Output**: Chunked content for semantic search and AI chat pipelines
+ - 📦 **Bulk Chunk Export**: Exported token optimized chunks ready for RAG and semantic search
  - ⚡ **MCP Integration**: Let AI agents query your site directly

  ## Installation
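As context for the renamed features, a minimal sketch of how the generated artifacts might be consumed from a deployed site; the `https://example.com` origin is a placeholder.

```ts
// Sketch: pulling the generated artifacts from a deployed site (placeholder origin).
const origin = 'https://example.com'

// Human/agent-readable site index (already present in 0.1.x).
const llmsTxt = await fetch(`${origin}/llms.txt`).then(r => r.text())

// New in 0.2.0: page-level metadata and chunk-level content in TOON format.
const pagesToon = await fetch(`${origin}/llms.toon`).then(r => r.text())
const chunksToon = await fetch(`${origin}/llms-full.toon`).then(r => r.text())

console.log(llmsTxt.length, pagesToon.length, chunksToon.length)
```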
package/dist/module.d.mts CHANGED
@@ -22,7 +22,6 @@ interface ModuleHooks {
  }) => void | Promise<void>;
  }
  interface ModulePublicRuntimeConfig {
- bulkRoute: string | false;
  debug: boolean;
  version: string;
  mdreamOptions: ModuleOptions['mdreamOptions'];
package/dist/module.json CHANGED
@@ -4,7 +4,7 @@
  "nuxt": ">=4.0.0"
  },
  "configKey": "aiReady",
- "version": "0.1.5",
+ "version": "0.2.0",
  "builder": {
  "@nuxt/module-builder": "1.0.2",
  "unbuild": "3.6.1"
package/dist/module.mjs CHANGED
@@ -1,29 +1,46 @@
  import { useLogger, useNuxt, defineNuxtModule, createResolver, addTypeTemplate, hasNuxtModule, addServerHandler, addPlugin } from '@nuxt/kit';
  import defu from 'defu';
  import { useSiteConfig, installNuxtSiteConfig, withSiteUrl } from 'nuxt-site-config/kit';
- import { relative as relative$1 } from 'pathe';
+ import { relative } from 'pathe';
  import { readPackageJSON } from 'pkg-types';
  import { createHash } from 'node:crypto';
  import { mkdirSync, createWriteStream } from 'node:fs';
- import { stat } from 'node:fs/promises';
- import { join, dirname, relative } from 'node:path';
+ import { stat, open } from 'node:fs/promises';
+ import { join, dirname } from 'node:path';
+ import { encodeLines } from '@toon-format/toon';
  import { createLlmsTxtStream } from 'mdream/llms-txt';

  const logger = useLogger("nuxt-ai-ready");

  function generateVectorId(route, chunkIdx) {
- const hash = createHash("sha256").update(route).digest("hex").substring(0, 48);
+ const hash = createHash("sha256").update(route).digest("hex").substring(0, 8);
  return `${hash}-${chunkIdx}`;
  }
- function setupPrerenderHandler() {
+ async function updateFirstLine(filePath, newFirstLine) {
+ const fh = await open(filePath, "r+");
+ try {
+ const buffer = Buffer.alloc(1024);
+ await fh.read(buffer, 0, 1024, 0);
+ const content = buffer.toString("utf-8");
+ const firstLineEnd = content.indexOf("\n");
+ const oldFirstLine = content.substring(0, firstLineEnd);
+ const paddedLine = newFirstLine.padEnd(oldFirstLine.length, " ");
+ await fh.write(paddedLine, 0, "utf-8");
+ } finally {
+ await fh.close();
+ }
+ }
+ function setupPrerenderHandler(llmsTxtConfig) {
  const nuxt = useNuxt();
  nuxt.hooks.hook("nitro:init", async (nitro) => {
  let writer = null;
- let bulkStream = null;
- let bulkStreamEntries = 0;
+ let chunksStream = null;
+ let pagesStream = null;
+ let chunksProcessed = 0;
  let pageCount = 0;
  const startTime = Date.now();
- const bulkPath = join(nitro.options.output.publicDir, "content.jsonl");
+ const pagesChunksPath = join(nitro.options.output.publicDir, "llms-full.toon");
+ const pagesPath = join(nitro.options.output.publicDir, "llms.toon");
  nitro.hooks.hook("prerender:generate", async (route) => {
  if (!route.fileName?.endsWith(".md")) {
  return;
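The chunk id scheme above now uses an 8-character SHA-256 prefix of the route (down from 48 characters) plus the chunk index. A standalone sketch of the same scheme; the function name mirrors the diff rather than a public API:

```ts
import { createHash } from 'node:crypto'

// Mirrors generateVectorId above: short route hash + chunk index.
function generateVectorId(route: string, chunkIdx: number): string {
  const hash = createHash('sha256').update(route).digest('hex').substring(0, 8)
  return `${hash}-${chunkIdx}`
}

// A page's chunks share the hash prefix and differ only by suffix,
// e.g. '/about' -> '1f40fc92-0', '1f40fc92-1', ... (hash shown is illustrative).
console.log(generateVectorId('/about', 0))
```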
@@ -39,19 +56,21 @@ function setupPrerenderHandler() {
  description: siteConfig.description,
  origin: siteConfig.url,
  generateFull: true,
- outputDir: nitro.options.output.publicDir
+ outputDir: nitro.options.output.publicDir,
+ sections: llmsTxtConfig.sections,
+ notes: llmsTxtConfig.notes
  });
  writer = stream.getWriter();
- }
- if (!bulkStream) {
- mkdirSync(dirname(bulkPath), { recursive: true });
- bulkStream = createWriteStream(bulkPath);
- logger.info(`Bulk JSONL stream created at ${relative(nuxt.options.rootDir, bulkPath)}`);
+ mkdirSync(dirname(pagesChunksPath), { recursive: true });
+ mkdirSync(dirname(pagesPath), { recursive: true });
+ chunksStream = createWriteStream(pagesChunksPath, "utf-8");
+ chunksStream.write("pageChunks[999999]{id,route,content}:\n");
+ pagesStream = createWriteStream(pagesPath, "utf-8");
+ pagesStream.write("pages[999999]{route,title,description,headings,chunkIds}:\n");
  }
  const { chunks, title, description, headings } = JSON.parse(route.contents || "{}");
  const markdown = chunks.map((c) => c.content).join("\n\n");
  await writer.write({
- filePath: route.fileName,
  url: pageRoute,
  title,
  content: markdown,
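The `pageChunks[999999]{id,route,content}:` and `pages[999999]{...}:` lines written when the streams open are TOON tabular-array headers: an array name, a row count, and the column list, followed by one comma-separated row per record. A rough illustration of the resulting `llms-full.toon`; the values are invented and the exact quoting and indentation follow the TOON spec (https://toonformat.dev), so details may differ:

```ts
// Roughly what the streamed llms-full.toon ends up looking like (illustrative rows).
const exampleToon = [
  'pageChunks[2]{id,route,content}:',
  '  1f40fc92-0,/about,"# About Us"',
  '  1f40fc92-1,/about,"## Our Team"',
].join('\n')
console.log(exampleToon)
```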
@@ -62,32 +81,55 @@ function setupPrerenderHandler() {
  });
  pageCount++;
  logger.debug(`Processing ${chunks.length} chunks for route: ${pageRoute}`);
+ const chunkIds = [];
  for (let idx = 0; idx < chunks.length; idx++) {
  const chunk = chunks[idx];
  if (!chunk)
  continue;
+ const chunkId = generateVectorId(pageRoute, idx);
+ chunkIds.push(chunkId);
  const bulkChunk = {
- id: generateVectorId(pageRoute, idx),
+ id: chunkId,
  route: pageRoute,
- chunkIndex: idx,
- content: chunk.content,
- headers: chunk.metadata?.headers,
- loc: chunk.metadata?.loc,
- title,
- description
+ content: chunk.content
  };
  await nuxt.hooks.callHook("ai-ready:chunk", {
  chunk: bulkChunk,
  route: pageRoute,
  title,
  description,
- headings
+ headings: Object.entries(headings).flatMap(
+ ([tag, texts]) => texts.map((text) => ({ [tag]: text }))
+ )
  });
- bulkStream.write(`${JSON.stringify(bulkChunk)}
+ if (chunksStream) {
+ const lines = Array.from(encodeLines({ pageChunks: [bulkChunk] }));
+ if (lines[1]) {
+ chunksStream.write(`${lines[1]}
  `);
- bulkStreamEntries++;
+ }
+ }
+ chunksProcessed++;
  }
  logger.debug(`Completed ${chunks.length} chunks for ${pageRoute}`);
+ const pageDoc = {
+ route: pageRoute,
+ title,
+ description,
+ // Convert headings object to readable string format (h1:Title|h2:Subtitle,...)
+ headings: headings && Object.keys(headings).length ? Object.entries(headings).flatMap(
+ ([tag, texts]) => texts.map((text) => `${tag}:${text}`)
+ ).join("|") : "",
+ // Join chunkIds array to comma-separated string
+ chunkIds: chunkIds.join(",")
+ };
+ if (pagesStream) {
+ const lines = Array.from(encodeLines({ pages: [pageDoc] }));
+ if (lines[1]) {
+ pagesStream.write(`${lines[1]}
+ `);
+ }
+ }
  route.contents = markdown;
  });
  nitro.hooks.hook("prerender:done", async () => {
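Each chunk and page row is produced by encoding a one-element array with `encodeLines` and keeping only the second line: line 0 is that call's own header, line 1 is the data row, which is appended under the single file-level header written earlier. A small sketch of that per-row usage, following the call shape in the diff:

```ts
import { encodeLines } from '@toon-format/toon'

// Encode a single chunk; the iterator yields the header line first, then one data row.
const lines = Array.from(encodeLines({
  pageChunks: [{ id: '1f40fc92-0', route: '/about', content: '# About' }],
}))

// lines[0] -> 'pageChunks[1]{id,route,content}:' (discarded by the module)
// lines[1] -> the comma-separated row that gets streamed into llms-full.toon
console.log(lines[1])
```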
@@ -95,10 +137,24 @@ function setupPrerenderHandler() {
  return;
  }
  await writer.close();
- if (bulkStream) {
- bulkStream.end();
- logger.info(`Bulk JSONL stream closed with ${bulkStreamEntries} chunks`);
+ if (chunksStream) {
+ await new Promise((resolve, reject) => {
+ chunksStream.on("error", reject);
+ chunksStream.on("finish", resolve);
+ chunksStream.end();
+ });
  }
+ if (pagesStream) {
+ await new Promise((resolve, reject) => {
+ pagesStream.on("error", reject);
+ pagesStream.on("finish", resolve);
+ pagesStream.end();
+ });
+ }
+ await updateFirstLine(pagesChunksPath, `pageChunks[${chunksProcessed}]{id,route,content}:`);
+ await updateFirstLine(pagesPath, `pages[${pageCount}]{route,title,description,headings,chunkIds}:`);
+ logger.info(`Wrote llms-full.toon with ${chunksProcessed} chunks`);
+ logger.info(`Wrote llms.toon with ${pageCount} pages`);
  const llmsTxtPath = join(nitro.options.output.publicDir, "llms.txt");
  const llmsFullTxtPath = join(nitro.options.output.publicDir, "llms-full.txt");
  const files = [
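The `[999999]` placeholder counts exist because the true totals are only known once prerendering finishes; `updateFirstLine` then rewrites the first line in place, space-padding the new header to the old header's width so no byte offsets shift. A self-contained sketch of the same technique; the path and count below are illustrative:

```ts
import { open } from 'node:fs/promises'

// Same approach as updateFirstLine above: overwrite the first line in place,
// space-padded to the old line's width so the rest of the file is untouched.
async function patchFirstLine(filePath: string, newFirstLine: string): Promise<void> {
  const fh = await open(filePath, 'r+')
  try {
    const head = Buffer.alloc(1024)
    await fh.read(head, 0, 1024, 0)
    const oldWidth = head.toString('utf-8').indexOf('\n')
    await fh.write(newFirstLine.padEnd(oldWidth, ' '), 0, 'utf-8')
  } finally {
    await fh.close()
  }
}

// e.g. pageChunks[999999]{id,route,content}:  ->  pageChunks[1532]{id,route,content}:
await patchFirstLine('.output/public/llms-full.toon', 'pageChunks[1532]{id,route,content}:')
```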
@@ -111,26 +167,31 @@ function setupPrerenderHandler() {
  route: "/llms-full.txt",
  fileName: llmsFullTxtPath,
  generateTimeMS: 0
+ },
+ {
+ route: "/llms-full.toon",
+ fileName: pagesChunksPath,
+ generateTimeMS: 0
+ },
+ {
+ route: "/llms.toon",
+ fileName: pagesPath,
+ generateTimeMS: 0
  }
  ];
- if (bulkStream) {
- files.push({
- route: "/content.jsonl",
- fileName: bulkPath,
- generateTimeMS: 0
- });
- }
- const [llmsStats, llmsFullStats, bulkStats] = await Promise.all([
+ const [llmsStats, llmsFullStats, pagesChunksStats, pagesStats] = await Promise.all([
  stat(llmsTxtPath),
  stat(llmsFullTxtPath),
- bulkStream ? stat(bulkPath) : Promise.resolve(null)
+ stat(pagesChunksPath),
+ stat(pagesPath)
  ]);
  nitro._prerenderedRoutes.push(...files);
  const elapsed = Date.now() - startTime;
  const llmsKb = (llmsStats.size / 1024).toFixed(2);
  const llmsFullKb = (llmsFullStats.size / 1024).toFixed(2);
- const bulkKb = bulkStats ? (bulkStats.size / 1024).toFixed(2) : "0";
- logger.info(`Generated llms.txt (${llmsKb}kb), llms-full.txt (${llmsFullKb}kb), and content.jsonl (${bulkKb}kb) from ${pageCount} pages (${bulkStreamEntries} chunks) in ${elapsed}ms`);
+ const pagesChunksKb = (pagesChunksStats.size / 1024).toFixed(2);
+ const pagesKb = (pagesStats.size / 1024).toFixed(2);
+ logger.info(`Generated llms.txt (${llmsKb}kb), llms-full.txt (${llmsFullKb}kb), llms-full.toon (${pagesChunksKb}kb), and llms.toon (${pagesKb}kb) from ${pageCount} pages (${chunksProcessed} chunks) in ${elapsed}ms`);
  });
  });
  }
@@ -159,7 +220,6 @@ const module = defineNuxtModule({
  return {
  enabled: true,
  debug: false,
- bulkRoute: "/content.jsonl",
  mdreamOptions: {
  preset: "minimal"
  },
@@ -200,7 +260,7 @@ const module = defineNuxtModule({
  addTypeTemplate({
  filename: "module/nuxt-ai-ready.d.ts",
  getContents: (data) => {
- const typesPath = relative$1(resolve(data.nuxt.options.rootDir, data.nuxt.options.buildDir, "module"), resolve("runtime/types"));
+ const typesPath = relative(resolve(data.nuxt.options.rootDir, data.nuxt.options.buildDir, "module"), resolve("runtime/types"));
  const nitroTypes = ` interface NitroRuntimeHooks {
  'ai-ready:markdown': (context: import('${typesPath}').MarkdownContext) => void | Promise<void>
  'ai-ready:mdreamConfig': (config: import('mdream').HTMLToMarkdownOptions) => void | Promise<void>
@@ -221,23 +281,27 @@ export {}
  nitro: true
  });
  const defaultLlmsTxtSections = [];
- if (config.bulkRoute !== false) {
- const resolvedBulkRoute = withSiteUrl(config.bulkRoute);
- defaultLlmsTxtSections.push({
- title: "LLM Tools",
- links: [
- {
- title: "Bulk Data",
- href: resolvedBulkRoute,
- description: `\`\`\`bash
- curl "${resolvedBulkRoute}"
- \`\`\`
+ const pagesRoute = withSiteUrl("llms.toon");
+ const pagesChunksRoute = withSiteUrl("llms-full.toon");
+ defaultLlmsTxtSections.push({
+ title: "LLM Resources",
+ links: [
+ {
+ title: "Pages Minimal",
+ href: pagesRoute,
+ description: `Page-level metadata in TOON format (token-efficient JSON encoding, see https://toonformat.dev). Contains: route, title, description, headings, chunkIds. Use with llms-full.toon for complete content. Fields: { route, title, description, headings, chunkIds }.

- Returns JSONL (newline-delimited JSON) with all indexed content.`
- }
- ]
- });
- }
+ <code lang="bash">curl "${pagesRoute}"</code>`
+ },
+ {
+ title: "Page Chunks",
+ href: pagesChunksRoute,
+ description: `Individual content chunks in TOON format for RAG/embeddings. Contains: id, route, content. Fields: { id, route, content }. Join with llms.toon using route to get title/description/headings metadata. Chunk index inferred from id suffix (e.g., "hash-0", "hash-1").
+
+ <code lang="bash">curl "${pagesChunksRoute}"</code>`
+ }
+ ]
+ });
  const hasMCP = hasNuxtModule("@nuxtjs/mcp-toolkit");
  if (hasMCP) {
  nuxt.hook("mcp:definitions:paths", (paths) => {
@@ -245,15 +309,25 @@ Returns JSONL (newline-delimited JSON) with all indexed content.`
  paths.tools = paths.tools || [];
  paths.resources = paths.resources || [];
  paths.prompts = paths.prompts || [];
- paths.tools.push(`${mcpRuntimeDir}/tools`);
- paths.resources.push(`${mcpRuntimeDir}/resources`);
- paths.prompts.push(`${mcpRuntimeDir}/prompts`);
+ const mcpConfig = config.mcp || {};
+ const toolsConfig = mcpConfig.tools ?? {};
+ const resourcesConfig = mcpConfig.resources ?? {};
+ if (toolsConfig.listPages !== false) {
+ paths.tools.push(`${mcpRuntimeDir}/tools/list-pages.ts`);
+ }
+ if (resourcesConfig.pages !== false) {
+ paths.resources.push(`${mcpRuntimeDir}/resources/pages.ts`);
+ }
+ if (resourcesConfig.pagesChunks !== false) {
+ paths.resources.push(`${mcpRuntimeDir}/resources/pages-chunks.ts`);
+ }
  });
  const mcpLink = {
  title: "MCP",
- href: withSiteUrl(nuxt.options.mcp?.route || "/mcp")
+ href: withSiteUrl(nuxt.options.mcp?.route || "/mcp"),
+ description: "Model Context Protocol server endpoint for AI agent integration."
  };
- if (config.bulkRoute !== false && defaultLlmsTxtSections[0]) {
+ if (defaultLlmsTxtSections[0]) {
  defaultLlmsTxtSections[0].links.push(mcpLink);
  } else {
  defaultLlmsTxtSections.push({
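The conditional registration above is driven by the new `mcp` options block (see the `ModuleOptions['mcp']` additions in types.d.ts further down). A hedged nuxt.config.ts sketch of opting out of individual pieces; the `aiReady` config key comes from module.json:

```ts
// nuxt.config.ts (sketch); keys mirror ModuleOptions['mcp'] in runtime/types.d.ts.
export default defineNuxtConfig({
  modules: ['nuxt-ai-ready', '@nuxtjs/mcp-toolkit'],
  aiReady: {
    mcp: {
      tools: { listPages: true },   // register the list_pages tool
      resources: {
        pages: true,                // resource://nuxt-ai-ready/pages
        pagesChunks: false,         // skip the chunk-level resource
      },
    },
  },
})
```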
@@ -269,10 +343,16 @@ Returns JSONL (newline-delimited JSON) with all indexed content.`
  ],
  notes: config.llmsTxt.notes
  } : { sections: defaultLlmsTxtSections };
+ const llmsTxtPayload = {
+ sections: mergedLlmsTxt.sections || [],
+ notes: typeof mergedLlmsTxt.notes === "string" ? [mergedLlmsTxt.notes] : mergedLlmsTxt.notes || []
+ };
+ await nuxt.callHook("ai-ready:llms-txt", llmsTxtPayload);
+ mergedLlmsTxt.sections = llmsTxtPayload.sections;
+ mergedLlmsTxt.notes = llmsTxtPayload.notes.length > 0 ? llmsTxtPayload.notes : void 0;
  nuxt.options.runtimeConfig["nuxt-ai-ready"] = {
  version: version || "0.0.0",
  debug: config.debug || false,
- bulkRoute: config.bulkRoute,
  mdreamOptions: config.mdreamOptions || {},
  markdownCacheHeaders: defu(config.markdownCacheHeaders, {
  maxAge: 3600,
@@ -292,16 +372,11 @@ Returns JSONL (newline-delimited JSON) with all indexed content.`
  }
  const isStatic = nuxt.options.nitro.static || nuxt.options._generate || false;
  if (isStatic || nuxt.options.nitro.prerender?.routes?.length) {
- setupPrerenderHandler();
- }
- if (config.bulkRoute !== false) {
- nuxt.options.nitro.routeRules = nuxt.options.nitro.routeRules || {};
- nuxt.options.nitro.routeRules[config.bulkRoute] = {
- headers: {
- "Content-Type": "application/x-ndjson; charset=utf-8"
- }
- };
+ setupPrerenderHandler(mergedLlmsTxt);
  }
+ nuxt.options.nitro.routeRules = nuxt.options.nitro.routeRules || {};
+ nuxt.options.nitro.routeRules["/llms.toon"] = { headers: { "Content-Type": "text/toon; charset=utf-8" } };
+ nuxt.options.nitro.routeRules["/llms-full.toon"] = { headers: { "Content-Type": "text/toon; charset=utf-8" } };
  }
  });
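This release also introduces a build-time `ai-ready:llms-txt` hook, called just before the runtime config is written above, whose payload carries the merged `sections` and `notes`. A sketch of tapping it from nuxt.config.ts; the extra section content here is invented for illustration:

```ts
// nuxt.config.ts (sketch) of the new ai-ready:llms-txt build hook.
export default defineNuxtConfig({
  modules: ['nuxt-ai-ready'],
  hooks: {
    // @ts-expect-error the hook name may not be present in user-facing hook typings
    'ai-ready:llms-txt': (payload) => {
      // payload mirrors llmsTxtPayload above: { sections: [...], notes: string[] }
      payload.notes.push('Content is regenerated on every deploy.')
      payload.sections.push({
        title: 'Support',
        links: [{ title: 'Contact', href: 'https://example.com/contact', description: 'How to reach the team.' }],
      })
    },
  },
})
```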
package/dist/runtime/server/mcp/resources/pages-chunks.d.ts ADDED
@@ -0,0 +1,17 @@
1
+ declare const _default: {
2
+ uri: string;
3
+ name: string;
4
+ description: string;
5
+ metadata: {
6
+ mimeType: string;
7
+ };
8
+ cache: "1h";
9
+ handler(uri: URL): Promise<{
10
+ contents: {
11
+ uri: string;
12
+ mimeType: string;
13
+ text: string;
14
+ }[];
15
+ }>;
16
+ };
17
+ export default _default;
package/dist/runtime/server/mcp/resources/pages-chunks.js ADDED
@@ -0,0 +1,22 @@
1
+ export default {
2
+ uri: "resource://nuxt-ai-ready/pages-chunks",
3
+ name: "All Page Chunks",
4
+ description: "Chunk-level content (id, route, content) in TOON format for RAG/embeddings. Join with pages resource using id field - match chunk.id with page.chunkIds[] to get title, description, headings. TOON is token-efficient JSON encoding (see https://toonformat.dev)",
5
+ metadata: {
6
+ mimeType: "text/plain"
7
+ },
8
+ cache: "1h",
9
+ async handler(uri) {
10
+ const response = await fetch("/llms-full.toon");
11
+ if (!response.ok)
12
+ throw new Error(`Failed to fetch chunks: ${response.statusText}`);
13
+ const text = await response.text();
14
+ return {
15
+ contents: [{
16
+ uri: uri.toString(),
17
+ mimeType: "text/plain",
18
+ text
19
+ }]
20
+ };
21
+ }
22
+ };
package/dist/runtime/server/mcp/resources/pages.d.ts CHANGED
@@ -1,2 +1,17 @@
1
- declare const _default: any;
1
+ declare const _default: {
2
+ uri: string;
3
+ name: string;
4
+ description: string;
5
+ metadata: {
6
+ mimeType: string;
7
+ };
8
+ cache: "1h";
9
+ handler(uri: URL): Promise<{
10
+ contents: {
11
+ uri: string;
12
+ mimeType: string;
13
+ text: string;
14
+ }[];
15
+ }>;
16
+ };
2
17
  export default _default;
package/dist/runtime/server/mcp/resources/pages.js CHANGED
@@ -1,23 +1,22 @@
1
- import { defineMcpResource, jsonResult } from "#imports";
2
- import { streamBulkDocuments } from "../../utils/db.js";
3
- export default defineMcpResource({
4
- uri: "pages://list",
1
+ export default {
2
+ uri: "resource://nuxt-ai-ready/pages",
5
3
  name: "All Pages",
6
- description: "Complete list of all indexed pages with basic metadata (route, title, description)",
7
- mimeType: "application/json",
8
- handler: async () => {
9
- const pages = [];
10
- for await (const doc of streamBulkDocuments()) {
11
- pages.push({
12
- route: doc.route,
13
- title: doc.title,
14
- description: doc.description,
15
- id: doc.id
16
- });
17
- }
18
- return jsonResult({
19
- total: pages.length,
20
- pages
21
- });
4
+ description: "Page-level metadata (route, title, description, markdown, headings, chunkIds) in TOON format. Each page includes chunkIds[] array to join with pages-chunks resource for chunk-level content. TOON is token-efficient JSON encoding (see https://toonformat.dev)",
5
+ metadata: {
6
+ mimeType: "text/plain"
7
+ },
8
+ cache: "1h",
9
+ async handler(uri) {
10
+ const response = await fetch("/llms.toon");
11
+ if (!response.ok)
12
+ throw new Error(`Failed to fetch pages: ${response.statusText}`);
13
+ const text = await response.text();
14
+ return {
15
+ contents: [{
16
+ uri: uri.toString(),
17
+ mimeType: "text/plain",
18
+ text
19
+ }]
20
+ };
22
21
  }
23
- });
22
+ };
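Both resources simply re-serve the prerendered TOON artifacts, so a consumer can join them client-side: match each page's `chunkIds` (a comma-separated string in the tabular encoding) against chunk `id`s. A sketch, assuming `@toon-format/toon` exposes a `decode()` counterpart to `encodeLines()` (unverified) and a placeholder origin:

```ts
import { decode } from '@toon-format/toon' // assumption: a decode() API exists alongside encodeLines()

interface PageDoc { route: string, title: string, description: string, headings: string, chunkIds: string }
interface PageChunk { id: string, route: string, content: string }

const origin = 'https://example.com' // placeholder
const pages = (decode(await fetch(`${origin}/llms.toon`).then(r => r.text())) as { pages: PageDoc[] }).pages
const chunks = (decode(await fetch(`${origin}/llms-full.toon`).then(r => r.text())) as { pageChunks: PageChunk[] }).pageChunks

// Reassemble each page's content from its chunk ids (serialized as "id0,id1,...").
const byId = new Map(chunks.map(c => [c.id, c]))
for (const page of pages) {
  const body = page.chunkIds.split(',').map(id => byId.get(id)?.content ?? '').join('\n\n')
  console.log(page.route, page.title, body.length)
}
```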
package/dist/runtime/server/mcp/tools/list-pages.d.ts CHANGED
@@ -1,2 +1,86 @@
1
- declare const _default: any;
1
+ import { z } from 'zod';
2
+ /**
3
+ * Lists all pages by fetching and returning TOON-encoded data
4
+ * TOON (Token-Oriented Object Notation) is a compact encoding that minimizes tokens for LLM input
5
+ * See https://toonformat.dev
6
+ */
7
+ declare const _default: {
8
+ name: string;
9
+ description: string;
10
+ inputSchema: {
11
+ mode: z.ZodDefault<z.ZodEnum<{
12
+ minimal: "minimal";
13
+ chunks: "chunks";
14
+ }>>;
15
+ };
16
+ cache: "1h";
17
+ handler({ mode }: import("@modelcontextprotocol/sdk/server/zod-compat.js").ShapeOutput<Readonly<{
18
+ [k: string]: z.core.$ZodType<unknown, unknown, z.core.$ZodTypeInternals<unknown, unknown>>;
19
+ }>>): Promise<{
20
+ [x: string]: unknown;
21
+ content: ({
22
+ type: "text";
23
+ text: string;
24
+ _meta?: {
25
+ [x: string]: unknown;
26
+ } | undefined;
27
+ } | {
28
+ type: "image";
29
+ data: string;
30
+ mimeType: string;
31
+ _meta?: {
32
+ [x: string]: unknown;
33
+ } | undefined;
34
+ } | {
35
+ type: "audio";
36
+ data: string;
37
+ mimeType: string;
38
+ _meta?: {
39
+ [x: string]: unknown;
40
+ } | undefined;
41
+ } | {
42
+ uri: string;
43
+ name: string;
44
+ type: "resource_link";
45
+ description?: string | undefined;
46
+ mimeType?: string | undefined;
47
+ _meta?: {
48
+ [x: string]: unknown;
49
+ } | undefined;
50
+ icons?: {
51
+ src: string;
52
+ mimeType?: string | undefined;
53
+ sizes?: string[] | undefined;
54
+ }[] | undefined;
55
+ title?: string | undefined;
56
+ } | {
57
+ type: "resource";
58
+ resource: {
59
+ uri: string;
60
+ text: string;
61
+ mimeType?: string | undefined;
62
+ _meta?: {
63
+ [x: string]: unknown;
64
+ } | undefined;
65
+ } | {
66
+ uri: string;
67
+ blob: string;
68
+ mimeType?: string | undefined;
69
+ _meta?: {
70
+ [x: string]: unknown;
71
+ } | undefined;
72
+ };
73
+ _meta?: {
74
+ [x: string]: unknown;
75
+ } | undefined;
76
+ })[];
77
+ _meta?: {
78
+ [x: string]: unknown;
79
+ } | undefined;
80
+ structuredContent?: {
81
+ [x: string]: unknown;
82
+ } | undefined;
83
+ isError?: boolean | undefined;
84
+ }>;
85
+ };
2
86
  export default _default;
package/dist/runtime/server/mcp/tools/list-pages.js CHANGED
@@ -1,79 +1,18 @@
1
- import { defineMcpTool, jsonResult } from "#imports";
2
- import { streamBulkDocuments } from "../../utils/db.js";
3
- export default defineMcpTool({
1
+ import { z } from "zod";
2
+ import { toonResult } from "../utils.js";
3
+ const schema = {
4
+ mode: z.enum(["chunks", "minimal"]).default("minimal").describe("Return individual content chunks (chunks) or page-level metadata (minimal)")
5
+ };
6
+ export default {
4
7
  name: "list_pages",
5
- description: `Lists all indexed pages from the site with configurable output fields.
6
-
7
- WHEN TO USE: Use this tool when you need to DISCOVER or SEARCH for pages. Common scenarios:
8
- - "What pages are available?" - browse all pages
9
- - "Find pages about X topic" - search by title/description
10
- - "Show me the site structure" - explore content organization
11
- - "What documentation exists?" - discover available content
12
-
13
- WHEN NOT TO USE: If you already know the exact page route, use get_page directly.
14
-
15
- WORKFLOW: This tool returns page metadata (route, title, description, etc.). After finding relevant pages, use get_page to retrieve full content.
16
-
17
- FIELD OPTIONS: Control which fields to include in the output:
18
- - route: Page URL path (always included)
19
- - title: Page title
20
- - description: Page meta description
21
- - headings: Document structure (h1, h2, h3, etc.)
22
- - markdown: Full markdown content (warning: can be large, avoid unless needed)
23
- - id: Document identifier
24
- - chunkIds: Associated chunk identifiers`,
25
- parameters: {
26
- type: "object",
27
- properties: {
28
- fields: {
29
- type: "array",
30
- description: "Fields to include in output. Defaults to [route, title, description]",
31
- items: {
32
- type: "string",
33
- enum: ["route", "title", "description", "headings", "markdown", "id", "chunkIds"]
34
- },
35
- default: ["route", "title", "description"]
36
- },
37
- search: {
38
- type: "string",
39
- description: "Optional search term to filter pages by title or description"
40
- },
41
- limit: {
42
- type: "number",
43
- description: "Maximum number of pages to return",
44
- minimum: 1,
45
- maximum: 1e3,
46
- default: 100
47
- }
48
- }
49
- },
50
- // @ts-expect-error untyped
51
- handler: async ({ fields = ["route", "title", "description"], search, limit = 100 }) => {
52
- const searchLower = search?.toLowerCase();
53
- const result = [];
54
- let total = 0;
55
- let filtered = 0;
56
- for await (const doc of streamBulkDocuments()) {
57
- total++;
58
- if (searchLower) {
59
- const matches = doc.title?.toLowerCase().includes(searchLower) || doc.description?.toLowerCase().includes(searchLower) || doc.route?.toLowerCase().includes(searchLower);
60
- if (!matches)
61
- continue;
62
- }
63
- filtered++;
64
- if (result.length < limit) {
65
- const projected = { route: doc.route };
66
- fields.forEach((field) => {
67
- if (field !== "route" && field in doc)
68
- projected[field] = doc[field];
69
- });
70
- result.push(projected);
71
- }
72
- }
73
- return jsonResult({
74
- total,
75
- filtered,
76
- pages: result
77
- });
8
+ description: 'Lists all available pages in TOON format (token-efficient). Use "chunks" mode to get individual content chunks, or "minimal" for page-level metadata.',
9
+ inputSchema: schema,
10
+ cache: "1h",
11
+ async handler({ mode }) {
12
+ const response = await fetch(mode === "chunks" ? "/llms-full.toon" : "/llms.toon");
13
+ if (!response.ok)
14
+ throw new Error(`Failed to fetch pages: ${response.statusText}`);
15
+ const toon = await response.text();
16
+ return toonResult(toon);
78
17
  }
79
- });
18
+ };
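The rewritten tool is now a thin proxy over the prerendered artifacts: `mode: 'minimal'` returns `llms.toon`, `mode: 'chunks'` returns `llms-full.toon`, in both cases as a single text content block. A minimal sketch of what a call resolves to; note the relative `fetch('/llms.toon')` only resolves inside the Nitro runtime:

```ts
// Sketch: invoking the tool definition directly (server-side, inside Nitro where
// a relative fetch() targets the running app).
import listPages from './list-pages.js'

const minimal = await listPages.handler({ mode: 'minimal' }) // proxies /llms.toon
// minimal.content -> [{ type: 'text', text: 'pages[N]{route,title,description,headings,chunkIds}:\n...' }]
```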
package/dist/runtime/server/mcp/utils.d.ts ADDED
@@ -0,0 +1,3 @@
+ import type { CallToolResult } from '@modelcontextprotocol/sdk/types.js';
+ export declare function jsonResult(data: any, pretty?: boolean): CallToolResult;
+ export declare function toonResult(toon: string): CallToolResult;
package/dist/runtime/server/mcp/utils.js ADDED
@@ -0,0 +1,7 @@
+ export function jsonResult(data, pretty = true) {
+ const text = pretty ? JSON.stringify(data, null, 2) : JSON.stringify(data);
+ return { content: [{ type: "text", text }] };
+ }
+ export function toonResult(toon) {
+ return { content: [{ type: "text", text: toon }] };
+ }
package/dist/runtime/server/middleware/mdream.js CHANGED
@@ -1,12 +1,11 @@
1
1
  import { withSiteUrl } from "#site-config/server/composables/utils";
2
2
  import { createError, defineEventHandler, getHeader, setHeader } from "h3";
3
- import { htmlToMarkdown, TagIdMap } from "mdream";
3
+ import { htmlToMarkdown } from "mdream";
4
4
  import { extractionPlugin } from "mdream/plugins";
5
5
  import { withMinimalPreset } from "mdream/preset/minimal";
6
- import { htmlToMarkdownSplitChunksStream } from "mdream/splitter";
7
6
  import { useNitroApp, useRuntimeConfig } from "nitropack/runtime";
8
- import { estimateTokenCount } from "tokenx";
9
7
  import { logger } from "../logger.js";
8
+ import { convertHtmlToMarkdownChunks } from "../utils.js";
10
9
  function shouldServeMarkdown(event) {
11
10
  const accept = getHeader(event, "accept") || "";
12
11
  const secFetchDest = getHeader(event, "sec-fetch-dest") || "";
@@ -62,50 +61,6 @@ async function convertHtmlToMarkdown(html, url, config, route, event) {
62
61
  markdown = context.markdown;
63
62
  return { markdown, title, description, headings };
64
63
  }
65
- async function convertHtmlToMarkdownChunks(html, url, config) {
66
- let title = "";
67
- let description = "";
68
- const headings = [];
69
- const extractPlugin = extractionPlugin({
70
- title(el) {
71
- title = el.textContent;
72
- },
73
- 'meta[name="description"]': (el) => {
74
- description = el.attributes.content || "";
75
- },
76
- "h1, h2, h3, h4, h5, h6": (el) => {
77
- const text = el.textContent?.trim();
78
- const level = el.name.toLowerCase();
79
- if (text)
80
- headings.push({ [level]: text });
81
- }
82
- });
83
- let options = {
84
- origin: url,
85
- ...config.mdreamOptions
86
- };
87
- if (config.mdreamOptions?.preset === "minimal") {
88
- options = withMinimalPreset(options);
89
- options.plugins = [extractPlugin, ...options.plugins || []];
90
- } else {
91
- options.plugins = [extractPlugin, ...options.plugins || []];
92
- }
93
- const chunksStream = htmlToMarkdownSplitChunksStream(html, {
94
- ...options,
95
- headersToSplitOn: [TagIdMap.h1, TagIdMap.h2, TagIdMap.h3],
96
- origin: url,
97
- chunkSize: 256,
98
- stripHeaders: false,
99
- lengthFunction(text) {
100
- return estimateTokenCount(text);
101
- }
102
- });
103
- const chunks = [];
104
- for await (const chunk of chunksStream) {
105
- chunks.push(chunk);
106
- }
107
- return { chunks, title, description, headings };
108
- }
109
64
  export default defineEventHandler(async (event) => {
110
65
  let path = event.path;
111
66
  const config = useRuntimeConfig(event)["nuxt-ai-ready"];
@@ -169,7 +124,7 @@ export default defineEventHandler(async (event) => {
169
124
  const result2 = await convertHtmlToMarkdownChunks(
170
125
  html,
171
126
  withSiteUrl(event, path),
172
- config
127
+ config.mdreamOptions
173
128
  );
174
129
  return JSON.stringify(result2);
175
130
  }
package/dist/runtime/server/utils.d.ts ADDED
@@ -0,0 +1,7 @@
1
+ import type { ModulePublicRuntimeConfig } from '../../module.js';
2
+ export declare function convertHtmlToMarkdownChunks(html: string, url: string, mdreamOptions: ModulePublicRuntimeConfig['mdreamOptions']): Promise<{
3
+ chunks: import("mdream").MarkdownChunk[];
4
+ title: string;
5
+ description: string;
6
+ headings: Record<string, string[]>;
7
+ }>;
package/dist/runtime/server/utils.js ADDED
@@ -0,0 +1,50 @@
1
+ import { TagIdMap } from "mdream";
2
+ import { extractionPlugin } from "mdream/plugins";
3
+ import { withMinimalPreset } from "mdream/preset/minimal";
4
+ import { htmlToMarkdownSplitChunksStream } from "mdream/splitter";
5
+ import { estimateTokenCount } from "tokenx";
6
+ export async function convertHtmlToMarkdownChunks(html, url, mdreamOptions) {
7
+ let title = "";
8
+ let description = "";
9
+ const extractPlugin = extractionPlugin({
10
+ title(el) {
11
+ title = el.textContent;
12
+ },
13
+ 'meta[name="description"]': (el) => {
14
+ description = el.attributes.content || "";
15
+ }
16
+ });
17
+ let options = {
18
+ origin: url,
19
+ ...mdreamOptions
20
+ };
21
+ if (mdreamOptions?.preset === "minimal") {
22
+ options = withMinimalPreset(options);
23
+ options.plugins = [extractPlugin, ...options.plugins || []];
24
+ } else {
25
+ options.plugins = [extractPlugin, ...options.plugins || []];
26
+ }
27
+ const chunksStream = htmlToMarkdownSplitChunksStream(html, {
28
+ ...options,
29
+ headersToSplitOn: [TagIdMap.h1, TagIdMap.h2, TagIdMap.h3],
30
+ origin: url,
31
+ chunkSize: 256,
32
+ stripHeaders: false,
33
+ lengthFunction(text) {
34
+ return estimateTokenCount(text);
35
+ }
36
+ });
37
+ const chunks = [];
38
+ for await (const chunk of chunksStream) {
39
+ chunks.push(chunk);
40
+ }
41
+ return { chunks, title, description, headings: chunks.reduce((set, m) => {
42
+ Object.entries(m.metadata?.headers || {}).forEach(([k, v]) => {
43
+ if (!set[k])
44
+ set[k] = [];
45
+ if (v && !set[k].includes(v))
46
+ set[k].push(v);
47
+ });
48
+ return set;
49
+ }, {}) };
50
+ }
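The trailing `reduce` is what turns per-chunk header metadata into the `headings: Record<string, string[]>` the middleware now serializes. An isolated illustration of that fold with invented values:

```ts
// Illustrative: folding per-chunk header metadata into the headings record
// (same shape as the reduce above; duplicate texts per tag are skipped).
const chunkHeaders: Array<Record<string, string>> = [
  { h1: 'Guide', h2: 'Install' },
  { h1: 'Guide', h2: 'Configure' },
]

const headings = chunkHeaders.reduce<Record<string, string[]>>((set, headers) => {
  for (const [tag, text] of Object.entries(headers)) {
    set[tag] ??= []
    if (text && !set[tag].includes(text))
      set[tag].push(text)
  }
  return set
}, {})

console.log(headings) // { h1: ['Guide'], h2: ['Install', 'Configure'] }
```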
package/dist/runtime/types.d.ts CHANGED
@@ -11,11 +11,6 @@ export interface ModuleOptions {
11
11
  * @default false
12
12
  */
13
13
  debug?: boolean;
14
- /**
15
- * Bulk data API (JSONL streaming)
16
- * @default '/_ai-ready/bulk'
17
- */
18
- bulkRoute: string | false;
19
14
  /**
20
15
  * Options to pass to mdream htmlToMarkdown function
21
16
  */
@@ -62,25 +57,61 @@ export interface ModuleOptions {
62
57
  */
63
58
  aiInput?: boolean;
64
59
  };
60
+ /**
61
+ * MCP (Model Context Protocol) configuration
62
+ * Control which tools and resources are exposed via MCP
63
+ * @default All enabled when @nuxtjs/mcp-toolkit is installed
64
+ */
65
+ mcp?: {
66
+ /**
67
+ * Enable/disable specific MCP tools
68
+ * @default All tools enabled
69
+ */
70
+ tools?: {
71
+ /** Get page by route - fetches markdown content for specific page */
72
+ listPages?: boolean;
73
+ };
74
+ /**
75
+ * Enable/disable specific MCP resources
76
+ * @default All resources enabled
77
+ */
78
+ resources?: {
79
+ /** pages://list - all pages without markdown content */
80
+ pages?: boolean;
81
+ /** pages://chunks - individual content chunks from all pages */
82
+ pagesChunks?: boolean;
83
+ };
84
+ };
65
85
  }
66
86
  /**
67
- * Individual chunk entry in bulk.jsonl (one per chunk)
68
- * Consumers can reassemble by route if needed
87
+ * Individual chunk entry in llms-full.toon (one per chunk)
88
+ * Used for RAG, embeddings, and semantic search
89
+ * Optimized for token efficiency - join with llms.toon for title/description
90
+ * Chunk index can be inferred from id suffix (e.g., "hash-0", "hash-1")
91
+ * Tabular TOON format (primitives only)
69
92
  */
70
93
  export interface BulkChunk {
71
94
  id: string;
72
95
  route: string;
73
- chunkIndex: number;
74
96
  content: string;
75
- headers?: Record<string, string>;
76
- loc?: {
77
- lines: {
78
- from: number;
79
- to: number;
80
- };
81
- };
97
+ }
98
+ /**
99
+ * Page-level entry in llms.toon (one per page)
100
+ * Used for page discovery, listing, and metadata queries
101
+ */
102
+ export interface BulkDocument {
103
+ /** Page route/path */
104
+ route: string;
105
+ /** Page title */
82
106
  title: string;
107
+ /** Page description */
83
108
  description: string;
109
+ /** Full markdown content reassembled from chunks */
110
+ markdown: string;
111
+ /** Page headings structure (e.g., [{ "h1": "Title" }, { "h2": "Subtitle" }]) */
112
+ headings: Array<Record<string, string>>;
113
+ /** All chunk IDs for this page (first ID can be used as document ID) */
114
+ chunkIds: string[];
84
115
  }
85
116
  /**
86
117
  * Hook context for markdown processing (Nitro runtime hook)
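During prerendering each `BulkChunk` is also passed through the build-time `ai-ready:chunk` hook (see module.mjs above) together with the page's route, title, description, and headings, which is the intended seam for feeding external indexes. A sketch of consuming it; `indexIntoVectorStore` is a hypothetical helper:

```ts
// nuxt.config.ts (sketch): consuming chunks at build time via ai-ready:chunk.
declare function indexIntoVectorStore(doc: { id: string, text: string, metadata: Record<string, unknown> }): Promise<void> // hypothetical

export default defineNuxtConfig({
  modules: ['nuxt-ai-ready'],
  hooks: {
    // @ts-expect-error the hook name may not be present in user-facing hook typings
    'ai-ready:chunk': async ({ chunk, route, title, description }) => {
      // chunk matches the BulkChunk shape above: { id, route, content }
      await indexIntoVectorStore({ id: chunk.id, text: chunk.content, metadata: { route, title, description } })
    },
  },
})
```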
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "nuxt-ai-ready",
  "type": "module",
- "version": "0.1.5",
+ "version": "0.2.0",
  "description": "Best practice AI & LLM discoverability for Nuxt sites.",
  "author": {
  "name": "Harlan Wilton",
@@ -33,9 +33,10 @@
  ],
  "dependencies": {
  "@nuxt/kit": "4.2.1",
+ "@toon-format/toon": "^2.0.1",
  "consola": "^3.4.2",
  "defu": "^6.1.4",
- "mdream": "^0.15.0",
+ "mdream": "^0.15.1",
  "minimatch": "^10.1.1",
  "nuxt-site-config": "^3.2.11",
  "pathe": "^2.0.3",
package/dist/runtime/server/mcp/prompts/explain-concept.d.ts REMOVED
@@ -1,2 +0,0 @@
1
- declare const _default: any;
2
- export default _default;
package/dist/runtime/server/mcp/prompts/explain-concept.js REMOVED
@@ -1,63 +0,0 @@
1
- import { defineMcpPrompt } from "#imports";
2
- import { streamBulkDocuments } from "../../utils/db.js";
3
- export default defineMcpPrompt({
4
- name: "explain_concept",
5
- description: "Get a detailed explanation of a concept by finding and reading relevant pages",
6
- arguments: [
7
- {
8
- name: "concept",
9
- description: "The concept or feature to explain",
10
- required: true
11
- },
12
- {
13
- name: "level",
14
- description: "Explanation level: beginner, intermediate, or advanced",
15
- required: false
16
- }
17
- ],
18
- // @ts-expect-error untyped
19
- handler: async ({ concept, level = "intermediate" }) => {
20
- const searchLower = concept.toLowerCase();
21
- const seenRoutes = /* @__PURE__ */ new Set();
22
- const relevantPages = [];
23
- for await (const doc of streamBulkDocuments()) {
24
- if (seenRoutes.has(doc.route))
25
- continue;
26
- const matches = doc.title?.toLowerCase().includes(searchLower) || doc.description?.toLowerCase().includes(searchLower) || doc.route?.toLowerCase().includes(searchLower);
27
- if (matches) {
28
- seenRoutes.add(doc.route);
29
- relevantPages.push({
30
- route: doc.route,
31
- title: doc.title,
32
- description: doc.description
33
- });
34
- if (relevantPages.length >= 10)
35
- break;
36
- }
37
- }
38
- return {
39
- messages: [
40
- {
41
- role: "user",
42
- content: {
43
- type: "text",
44
- text: `Please explain "${concept}" at a ${level} level.
45
-
46
- Here are the relevant pages found: ${JSON.stringify(relevantPages, null, 2)}
47
-
48
- Please:
49
- 1. Use get_page to read the most relevant pages (top 2-3)
50
- 2. Synthesize the information to create an explanation that:
51
- - Provides clear definitions
52
- - Includes practical examples from the pages
53
- - Explains use cases
54
- - Mentions related concepts
55
- - References the specific pages used
56
-
57
- Tailor the explanation for a ${level} audience.`
58
- }
59
- }
60
- ]
61
- };
62
- }
63
- });
package/dist/runtime/server/mcp/prompts/find-information.d.ts REMOVED
@@ -1,2 +0,0 @@
1
- declare const _default: any;
2
- export default _default;
package/dist/runtime/server/mcp/prompts/find-information.js REMOVED
@@ -1,58 +0,0 @@
1
- import { defineMcpPrompt } from "#imports";
2
- import { streamBulkDocuments } from "../../utils/db.js";
3
- export default defineMcpPrompt({
4
- name: "find_information",
5
- description: "Find information about a specific topic by searching site pages and retrieving relevant content",
6
- arguments: [
7
- {
8
- name: "topic",
9
- description: "Topic, feature, or question to find information about",
10
- required: true
11
- },
12
- {
13
- name: "detail",
14
- description: "Level of detail needed: summary, detailed, or comprehensive",
15
- required: false
16
- }
17
- ],
18
- // @ts-expect-error untyped
19
- handler: async ({ topic, detail = "detailed" }) => {
20
- const searchLower = topic.toLowerCase();
21
- const seenRoutes = /* @__PURE__ */ new Set();
22
- const relevantPages = [];
23
- for await (const doc of streamBulkDocuments()) {
24
- if (seenRoutes.has(doc.route))
25
- continue;
26
- const matches = doc.title?.toLowerCase().includes(searchLower) || doc.description?.toLowerCase().includes(searchLower) || doc.route?.toLowerCase().includes(searchLower);
27
- if (matches) {
28
- seenRoutes.add(doc.route);
29
- relevantPages.push({
30
- route: doc.route,
31
- title: doc.title,
32
- description: doc.description
33
- });
34
- if (relevantPages.length >= 10)
35
- break;
36
- }
37
- }
38
- return {
39
- messages: [
40
- {
41
- role: "user",
42
- content: {
43
- type: "text",
44
- text: `Help me find information about: "${topic}"
45
-
46
- Here are the relevant pages found: ${JSON.stringify(relevantPages, null, 2)}
47
-
48
- Please:
49
- 1. Review the page titles and descriptions to identify the most relevant ones
50
- 2. Use get_page to retrieve full content of the top 2-3 most relevant pages
51
- 3. ${detail === "summary" ? "Provide a concise summary (2-3 paragraphs)" : detail === "comprehensive" ? "Provide a comprehensive explanation with all details and examples from the pages" : "Provide a detailed explanation covering the key points"}
52
- 4. Always cite which pages the information came from`
53
- }
54
- }
55
- ]
56
- };
57
- }
58
- });
package/dist/runtime/server/mcp/prompts/search-content.d.ts REMOVED
@@ -1,2 +0,0 @@
1
- declare const _default: any;
2
- export default _default;
package/dist/runtime/server/mcp/prompts/search-content.js REMOVED
@@ -1,59 +0,0 @@
1
- import { defineMcpPrompt } from "#imports";
2
- import { streamBulkDocuments } from "../../utils/db.js";
3
- export default defineMcpPrompt({
4
- name: "browse_pages",
5
- description: "Browse and discover pages by topic, with results ready for exploration",
6
- arguments: [
7
- {
8
- name: "topic",
9
- description: "Topic or keyword to search for in page titles/descriptions",
10
- required: true
11
- },
12
- {
13
- name: "maxResults",
14
- description: "Maximum number of pages to retrieve",
15
- required: false
16
- }
17
- ],
18
- // @ts-expect-error untyped
19
- handler: async ({ topic, maxResults = 10 }) => {
20
- const searchLower = topic.toLowerCase();
21
- const seenRoutes = /* @__PURE__ */ new Set();
22
- const filteredPages = [];
23
- let total = 0;
24
- for await (const doc of streamBulkDocuments()) {
25
- total++;
26
- if (seenRoutes.has(doc.route))
27
- continue;
28
- const matches = doc.title?.toLowerCase().includes(searchLower) || doc.description?.toLowerCase().includes(searchLower) || doc.route?.toLowerCase().includes(searchLower);
29
- if (matches) {
30
- seenRoutes.add(doc.route);
31
- filteredPages.push({
32
- route: doc.route,
33
- title: doc.title,
34
- description: doc.description
35
- });
36
- if (filteredPages.length >= maxResults)
37
- break;
38
- }
39
- }
40
- return {
41
- messages: [
42
- {
43
- role: "user",
44
- content: {
45
- type: "text",
46
- text: `Help the user find pages about: "${topic}"
47
-
48
- Here are ${filteredPages.length} pages found (out of ${total} total pages): ${JSON.stringify(filteredPages, null, 2)}
49
-
50
- Please:
51
- 1. Review the filtered results and identify the most relevant pages
52
- 2. If specific pages look relevant, use get_page to retrieve their full content
53
- 3. Summarize findings and reference the source pages`
54
- }
55
- }
56
- ]
57
- };
58
- }
59
- });
package/dist/runtime/server/mcp/resources/all-content.d.ts REMOVED
@@ -1,2 +0,0 @@
1
- declare const _default: any;
2
- export default _default;
package/dist/runtime/server/mcp/resources/all-content.js REMOVED
@@ -1,14 +0,0 @@
1
- import { defineMcpResource, textResult } from "#imports";
2
- import { streamBulkDocuments } from "../../utils/db.js";
3
- export default defineMcpResource({
4
- uri: "content://all",
5
- name: "All Site Content",
6
- description: "Complete indexed site content in JSONL format (newline-delimited JSON)",
7
- mimeType: "application/x-ndjson",
8
- handler: async () => {
9
- const lines = [];
10
- for await (const doc of streamBulkDocuments())
11
- lines.push(JSON.stringify(doc));
12
- return textResult(lines.join("\n"));
13
- }
14
- });
package/dist/runtime/server/mcp/tools/get-page.d.ts REMOVED
@@ -1,2 +0,0 @@
1
- declare const _default: any;
2
- export default _default;
package/dist/runtime/server/mcp/tools/get-page.js REMOVED
@@ -1,43 +0,0 @@
1
- import { defineMcpTool, errorResult, jsonResult } from "#imports";
2
- import { streamBulkDocuments } from "../../utils/db.js";
3
- export default defineMcpTool({
4
- name: "get_page",
5
- description: `Retrieves the full content and details of a specific page by its route.
6
-
7
- WHEN TO USE: Use this tool when you know the EXACT route to a page. Common scenarios:
8
- - User asks for a specific page: "Get the /about page"
9
- - You found a relevant route from list_pages and want full content
10
- - You need complete page details including markdown content
11
-
12
- WHEN NOT TO USE: If you don't know the exact route, use list_pages first to discover available pages.
13
-
14
- OUTPUT: Returns complete page data including:
15
- - route: Page URL path
16
- - title: Page title
17
- - description: Page meta description
18
- - markdown: Full markdown content
19
- - headings: Document structure
20
- - id: Document identifier
21
- - chunkIds: Associated chunk identifiers`,
22
- parameters: {
23
- type: "object",
24
- properties: {
25
- route: {
26
- type: "string",
27
- description: 'The exact route/path to the page (e.g., "/docs/getting-started", "/about", "/blog/my-post")'
28
- }
29
- },
30
- required: ["route"]
31
- },
32
- // @ts-expect-error untyped
33
- handler: async ({ route }) => {
34
- const normalizedRoute = route.startsWith("/") ? route : `/${route}`;
35
- const cleanRoute = normalizedRoute.replace(/\/$/, "") || "/";
36
- for await (const doc of streamBulkDocuments()) {
37
- const docRoute = doc.route?.replace(/\/$/, "") || "/";
38
- if (docRoute === cleanRoute || doc.route === route)
39
- return jsonResult(doc);
40
- }
41
- return errorResult(`Page not found: ${route}. Use list_pages to discover available pages.`);
42
- }
43
- });
package/dist/runtime/server/utils/db.d.ts REMOVED
@@ -1,8 +0,0 @@
1
- import type { BulkChunk } from '../../types.js';
2
- declare module 'nitropack' {
3
- interface NitroApp {
4
- _bulkDocuments?: Promise<BulkChunk[]>;
5
- }
6
- }
7
- export declare function streamBulkDocuments(): AsyncGenerator<BulkChunk>;
8
- export declare function useBulkDocuments(): Promise<BulkChunk[]>;
package/dist/runtime/server/utils/db.js REMOVED
@@ -1,48 +0,0 @@
1
- import { useNitroApp, useRuntimeConfig } from "nitropack/runtime";
2
- import { logger } from "../logger.js";
3
- export async function* streamBulkDocuments() {
4
- const config = useRuntimeConfig();
5
- const bulkRoute = config["nuxt-ai-ready"]?.bulkRoute;
6
- const response = await fetch(bulkRoute).catch((err) => {
7
- logger.warn("Documents loading failed:", err);
8
- throw err;
9
- });
10
- if (!response.ok || !response.body)
11
- throw new Error(`Failed to fetch bulk documents: ${response.statusText}`);
12
- const reader = response.body.getReader();
13
- const decoder = new TextDecoder();
14
- let buffer = "";
15
- try {
16
- while (true) {
17
- const { done, value } = await reader.read();
18
- if (done)
19
- break;
20
- buffer += decoder.decode(value, { stream: true });
21
- let newlineIndex = buffer.indexOf("\n");
22
- while (newlineIndex !== -1) {
23
- const line = buffer.slice(0, newlineIndex).trim();
24
- buffer = buffer.slice(newlineIndex + 1);
25
- if (line)
26
- yield JSON.parse(line);
27
- newlineIndex = buffer.indexOf("\n");
28
- }
29
- }
30
- if (buffer.trim())
31
- yield JSON.parse(buffer.trim());
32
- } finally {
33
- reader.releaseLock();
34
- }
35
- }
36
- export async function useBulkDocuments() {
37
- const nitroApp = useNitroApp();
38
- if (nitroApp._bulkDocuments)
39
- return await nitroApp._bulkDocuments;
40
- logger.debug("Lazy loading bulk documents...");
41
- nitroApp._bulkDocuments = (async () => {
42
- const documents = [];
43
- for await (const chunk of streamBulkDocuments())
44
- documents.push(chunk);
45
- return documents;
46
- })();
47
- return await nitroApp._bulkDocuments;
48
- }