nuxt-ai-ready 0.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34) hide show
  1. package/LICENSE.md +9 -0
  2. package/README.md +63 -0
  3. package/dist/module.d.mts +62 -0
  4. package/dist/module.json +12 -0
  5. package/dist/module.mjs +406 -0
  6. package/dist/runtime/nuxt/plugins/prerender.d.ts +2 -0
  7. package/dist/runtime/nuxt/plugins/prerender.js +20 -0
  8. package/dist/runtime/server/logger.d.ts +1 -0
  9. package/dist/runtime/server/logger.js +4 -0
  10. package/dist/runtime/server/mcp/prompts/explain-concept.d.ts +2 -0
  11. package/dist/runtime/server/mcp/prompts/explain-concept.js +62 -0
  12. package/dist/runtime/server/mcp/prompts/find-information.d.ts +2 -0
  13. package/dist/runtime/server/mcp/prompts/find-information.js +57 -0
  14. package/dist/runtime/server/mcp/prompts/search-content.d.ts +2 -0
  15. package/dist/runtime/server/mcp/prompts/search-content.js +58 -0
  16. package/dist/runtime/server/mcp/resources/all-content.d.ts +2 -0
  17. package/dist/runtime/server/mcp/resources/all-content.js +14 -0
  18. package/dist/runtime/server/mcp/resources/pages.d.ts +2 -0
  19. package/dist/runtime/server/mcp/resources/pages.js +23 -0
  20. package/dist/runtime/server/mcp/tools/get-page.d.ts +2 -0
  21. package/dist/runtime/server/mcp/tools/get-page.js +42 -0
  22. package/dist/runtime/server/mcp/tools/list-pages.d.ts +2 -0
  23. package/dist/runtime/server/mcp/tools/list-pages.js +78 -0
  24. package/dist/runtime/server/middleware/mdream.d.ts +2 -0
  25. package/dist/runtime/server/middleware/mdream.js +132 -0
  26. package/dist/runtime/server/routes/llms.txt.get.d.ts +2 -0
  27. package/dist/runtime/server/routes/llms.txt.get.js +23 -0
  28. package/dist/runtime/server/tsconfig.json +3 -0
  29. package/dist/runtime/server/utils/db.d.ts +8 -0
  30. package/dist/runtime/server/utils/db.js +48 -0
  31. package/dist/runtime/types.d.ts +166 -0
  32. package/dist/runtime/types.js +0 -0
  33. package/dist/types.d.mts +12 -0
  34. package/package.json +99 -0
@@ -0,0 +1,57 @@
1
import { defineMcpPrompt } from "#imports";
import { streamBulkDocuments } from "../../utils/db.js";

// MCP prompt: scan the bulk document stream for pages matching a topic and
// hand the candidates to the model with instructions on how deep to answer.
export default defineMcpPrompt({
  name: "find_information",
  description: "Find information about a specific topic by searching site pages and retrieving relevant content",
  arguments: [
    {
      name: "topic",
      description: "Topic, feature, or question to find information about",
      required: true
    },
    {
      name: "detail",
      description: "Level of detail needed: summary, detailed, or comprehensive",
      required: false
    }
  ],
  handler: async ({ topic, detail = "detailed" }) => {
    const needle = topic.toLowerCase();
    const visitedRoutes = new Set();
    const relevantPages = [];
    // Collect up to 10 unique routes whose title, description, or route
    // contains the topic (case-insensitive substring match).
    for await (const doc of streamBulkDocuments()) {
      if (visitedRoutes.has(doc.route))
        continue;
      const hit = [doc.title, doc.description, doc.route]
        .some((field) => field?.toLowerCase().includes(needle));
      if (!hit)
        continue;
      visitedRoutes.add(doc.route);
      relevantPages.push({
        route: doc.route,
        title: doc.title,
        description: doc.description
      });
      if (relevantPages.length >= 10)
        break;
    }
    return {
      messages: [
        {
          role: "user",
          content: {
            type: "text",
            text: `Help me find information about: "${topic}"

Here are the relevant pages found: ${JSON.stringify(relevantPages, null, 2)}

Please:
1. Review the page titles and descriptions to identify the most relevant ones
2. Use get_page to retrieve full content of the top 2-3 most relevant pages
3. ${detail === "summary" ? "Provide a concise summary (2-3 paragraphs)" : detail === "comprehensive" ? "Provide a comprehensive explanation with all details and examples from the pages" : "Provide a detailed explanation covering the key points"}
4. Always cite which pages the information came from`
          }
        }
      ]
    };
  }
});
@@ -0,0 +1,2 @@
1
// Generated declaration stub; the concrete prompt type is resolved at runtime.
declare const _default: any;
export default _default;
@@ -0,0 +1,58 @@
1
import { defineMcpPrompt } from "#imports";
import { streamBulkDocuments } from "../../utils/db.js";

// MCP prompt: browse indexed pages matching a topic keyword, returning a
// filtered page list plus exploration instructions for the model.
export default defineMcpPrompt({
  name: "browse_pages",
  description: "Browse and discover pages by topic, with results ready for exploration",
  arguments: [
    {
      name: "topic",
      description: "Topic or keyword to search for in page titles/descriptions",
      required: true
    },
    {
      name: "maxResults",
      description: "Maximum number of pages to retrieve",
      required: false
    }
  ],
  handler: async ({ topic, maxResults = 10 }) => {
    // MCP prompt arguments are transmitted as strings (TODO confirm against
    // the MCP SDK in use). Coerce so the cap comparison below is numeric and
    // a malformed value falls back to the default instead of disabling the cap.
    const coerced = Number(maxResults);
    const limit = Number.isFinite(coerced) && coerced > 0 ? coerced : 10;
    const searchLower = topic.toLowerCase();
    const seenRoutes = new Set();
    const filteredPages = [];
    let total = 0;
    // Stream every document, counting the total while collecting matches.
    for await (const doc of streamBulkDocuments()) {
      total++;
      if (seenRoutes.has(doc.route))
        continue;
      const matches = doc.title?.toLowerCase().includes(searchLower) || doc.description?.toLowerCase().includes(searchLower) || doc.route?.toLowerCase().includes(searchLower);
      if (matches) {
        seenRoutes.add(doc.route);
        filteredPages.push({
          route: doc.route,
          title: doc.title,
          description: doc.description
        });
        if (filteredPages.length >= limit)
          break;
      }
    }
    return {
      messages: [
        {
          role: "user",
          content: {
            type: "text",
            text: `Help the user find pages about: "${topic}"

Here are ${filteredPages.length} pages found (out of ${total} total pages): ${JSON.stringify(filteredPages, null, 2)}

Please:
1. Review the filtered results and identify the most relevant pages
2. If specific pages look relevant, use get_page to retrieve their full content
3. Summarize findings and reference the source pages`
          }
        }
      ]
    };
  }
});
@@ -0,0 +1,2 @@
1
// Generated declaration stub; the concrete prompt type is resolved at runtime.
declare const _default: any;
export default _default;
@@ -0,0 +1,14 @@
1
import { defineMcpResource, textResult } from "#imports";
import { streamBulkDocuments } from "../../utils/db.js";

// MCP resource: every indexed document serialized as newline-delimited JSON.
export default defineMcpResource({
  uri: "content://all",
  name: "All Site Content",
  description: "Complete indexed site content in JSONL format (newline-delimited JSON)",
  mimeType: "application/x-ndjson",
  handler: async () => {
    const serialized = [];
    for await (const document of streamBulkDocuments()) {
      serialized.push(JSON.stringify(document));
    }
    return textResult(serialized.join("\n"));
  }
});
@@ -0,0 +1,2 @@
1
// Generated declaration stub; the concrete resource type is resolved at runtime.
declare const _default: any;
export default _default;
@@ -0,0 +1,23 @@
1
import { defineMcpResource, jsonResult } from "#imports";
import { streamBulkDocuments } from "../../utils/db.js";

// MCP resource: lightweight listing of all indexed pages (no markdown bodies).
export default defineMcpResource({
  uri: "pages://list",
  name: "All Pages",
  description: "Complete list of all indexed pages with basic metadata (route, title, description)",
  mimeType: "application/json",
  handler: async () => {
    const pages = [];
    // Project each streamed document down to its listing metadata.
    for await (const { route, title, description, id } of streamBulkDocuments()) {
      pages.push({ route, title, description, id });
    }
    return jsonResult({ total: pages.length, pages });
  }
});
@@ -0,0 +1,2 @@
1
// Generated declaration stub; the concrete tool type is resolved at runtime.
declare const _default: any;
export default _default;
@@ -0,0 +1,42 @@
1
import { defineMcpTool, errorResult, jsonResult } from "#imports";
import { streamBulkDocuments } from "../../utils/db.js";

// MCP tool: look up one page by exact route (trailing-slash insensitive).
export default defineMcpTool({
  name: "get_page",
  description: `Retrieves the full content and details of a specific page by its route.

WHEN TO USE: Use this tool when you know the EXACT route to a page. Common scenarios:
- User asks for a specific page: "Get the /about page"
- You found a relevant route from list_pages and want full content
- You need complete page details including markdown content

WHEN NOT TO USE: If you don't know the exact route, use list_pages first to discover available pages.

OUTPUT: Returns complete page data including:
- route: Page URL path
- title: Page title
- description: Page meta description
- markdown: Full markdown content
- headings: Document structure
- id: Document identifier
- chunkIds: Associated chunk identifiers`,
  parameters: {
    type: "object",
    properties: {
      route: {
        type: "string",
        description: 'The exact route/path to the page (e.g., "/docs/getting-started", "/about", "/blog/my-post")'
      }
    },
    required: ["route"]
  },
  handler: async ({ route }) => {
    // Normalize: ensure a leading slash, drop a trailing slash ("/" stays "/").
    const withLeadingSlash = route.startsWith("/") ? route : `/${route}`;
    const cleanRoute = withLeadingSlash.replace(/\/$/, "") || "/";
    for await (const doc of streamBulkDocuments()) {
      const candidate = doc.route?.replace(/\/$/, "") || "/";
      // Accept either the normalized form or the caller's literal route.
      if (candidate === cleanRoute || doc.route === route) {
        return jsonResult(doc);
      }
    }
    return errorResult(`Page not found: ${route}. Use list_pages to discover available pages.`);
  }
});
@@ -0,0 +1,2 @@
1
// Generated declaration stub; the concrete tool type is resolved at runtime.
declare const _default: any;
export default _default;
@@ -0,0 +1,78 @@
1
import { defineMcpTool, jsonResult } from "#imports";
import { streamBulkDocuments } from "../../utils/db.js";

// MCP tool: enumerate indexed pages with optional search filtering and
// caller-selected output fields.
export default defineMcpTool({
  name: "list_pages",
  description: `Lists all indexed pages from the site with configurable output fields.

WHEN TO USE: Use this tool when you need to DISCOVER or SEARCH for pages. Common scenarios:
- "What pages are available?" - browse all pages
- "Find pages about X topic" - search by title/description
- "Show me the site structure" - explore content organization
- "What documentation exists?" - discover available content

WHEN NOT TO USE: If you already know the exact page route, use get_page directly.

WORKFLOW: This tool returns page metadata (route, title, description, etc.). After finding relevant pages, use get_page to retrieve full content.

FIELD OPTIONS: Control which fields to include in the output:
- route: Page URL path (always included)
- title: Page title
- description: Page meta description
- headings: Document structure (h1, h2, h3, etc.)
- markdown: Full markdown content (warning: can be large, avoid unless needed)
- id: Document identifier
- chunkIds: Associated chunk identifiers`,
  parameters: {
    type: "object",
    properties: {
      fields: {
        type: "array",
        description: "Fields to include in output. Defaults to [route, title, description]",
        items: {
          type: "string",
          enum: ["route", "title", "description", "headings", "markdown", "id", "chunkIds"]
        },
        default: ["route", "title", "description"]
      },
      search: {
        type: "string",
        description: "Optional search term to filter pages by title or description"
      },
      limit: {
        type: "number",
        description: "Maximum number of pages to return",
        minimum: 1,
        maximum: 1000,
        default: 100
      }
    }
  },
  handler: async ({ fields = ["route", "title", "description"], search, limit = 100 }) => {
    const needle = search?.toLowerCase();
    const pages = [];
    let total = 0;
    let filtered = 0;
    for await (const doc of streamBulkDocuments()) {
      total += 1;
      if (needle) {
        const hit = [doc.title, doc.description, doc.route]
          .some((value) => value?.toLowerCase().includes(needle));
        if (!hit)
          continue;
      }
      // Keep counting matches past the cap so `filtered` reflects every hit.
      filtered += 1;
      if (pages.length >= limit)
        continue;
      // Project only the requested fields; route is always present.
      const row = { route: doc.route };
      for (const field of fields) {
        if (field !== "route" && field in doc)
          row[field] = doc[field];
      }
      pages.push(row);
    }
    return jsonResult({ total, filtered, pages });
  }
});
@@ -0,0 +1,2 @@
1
// Generated declaration for the markdown middleware: resolves to markdown
// text, an H3Error, or undefined (when the request is passed through).
declare const _default: import("h3").EventHandler<import("h3").EventHandlerRequest, Promise<string | import("h3").H3Error<unknown> | undefined>>;
export default _default;
@@ -0,0 +1,132 @@
1
+ import { withSiteUrl } from "#site-config/server/composables/utils";
2
+ import { createError, defineEventHandler, getHeader, setHeader } from "h3";
3
+ import { htmlToMarkdown } from "mdream";
4
+ import { extractionPlugin } from "mdream/plugins";
5
+ import { withMinimalPreset } from "mdream/preset/minimal";
6
+ import { useNitroApp, useRuntimeConfig } from "nitropack/runtime";
7
+ import { logger } from "../logger.js";
8
// Content negotiation: should this request receive markdown instead of HTML?
// Browser navigations (sec-fetch-dest: document) and explicit text/html
// accepts get HTML; wildcard or text/markdown accepts get markdown.
function shouldServeMarkdown(event) {
  const accept = getHeader(event, "accept") || "";
  const dest = getHeader(event, "sec-fetch-dest") || "";
  if (dest === "document" || accept.includes("text/html"))
    return false;
  return accept.includes("*/*") || accept.includes("text/markdown");
}
19
/**
 * Convert a rendered HTML page to markdown via mdream, extracting the page
 * title and meta description along the way. User hooks can adjust the
 * mdream options ("ai-ready:mdreamConfig") and post-process the markdown
 * ("ai-ready:markdown").
 */
async function convertHtmlToMarkdown(html, url, config, route, event) {
  const nitroApp = useNitroApp();
  let title = "";
  let description = "";
  // Captures <title> text and meta[name=description] content during conversion.
  const extractPlugin = extractionPlugin({
    title(el) {
      title = el.textContent;
    },
    'meta[name="description"]': (el) => {
      description = el.attributes.content || "";
    }
  });
  let options = {
    origin: url,
    ...config.mdreamOptions
  };
  if (config.mdreamOptions?.preset === "minimal") {
    options = withMinimalPreset(options);
  }
  // Both branches of the former if/else assigned plugins identically, so the
  // duplication is hoisted to a single assignment after the preset is applied
  // (preserving any plugins the preset itself contributed).
  options.plugins = [extractPlugin, ...options.plugins || []];
  await nitroApp.hooks.callHook("ai-ready:mdreamConfig", options);
  let markdown = htmlToMarkdown(html, options);
  const context = {
    html,
    markdown,
    route,
    title,
    description,
    isPrerender: Boolean(import.meta.prerender),
    event
  };
  // Hook may mutate context.markdown; use whatever it left behind.
  await nitroApp.hooks.callHook("ai-ready:markdown", context);
  markdown = context.markdown;
  return { markdown, title, description };
}
56
// Middleware: serve a markdown rendition of any HTML page, triggered either
// by an explicit `/page.md` URL or by Accept-header negotiation.
export default defineEventHandler(async (event) => {
  let path = event.path;
  const config = useRuntimeConfig(event)["nuxt-ai-ready"];
  // Never intercept API routes, Nuxt/Nitro internals (/_*) or Vite ids (/@*).
  if (path.startsWith("/api") || path.startsWith("/_") || path.startsWith("/@")) {
    return;
  }
  // Only extension-less routes or explicit .md requests are handled; other
  // assets (.js, .css, images, ...) pass through untouched.
  const lastSegment = path.split("/").pop() || "";
  const hasExtension = lastSegment.includes(".");
  const extension = hasExtension ? lastSegment.substring(lastSegment.lastIndexOf(".")) : "";
  if (hasExtension && extension !== ".md") {
    return;
  }
  const hasMarkdownExtension = path.endsWith(".md");
  const clientPrefersMarkdown = shouldServeMarkdown(event);
  if (!hasMarkdownExtension && !clientPrefersMarkdown) {
    return;
  }
  if (hasMarkdownExtension) {
    // Strip the ".md" suffix to get the underlying HTML route.
    path = path.slice(0, -3);
  }
  if (path === "/index") {
    path = "/";
  }
  // Fetch the rendered HTML for the route via the app's internal fetcher.
  let html;
  try {
    const response = await globalThis.$fetch.raw(path);
    if (!response.ok) {
      // Explicit .md requests surface the upstream failure as an error;
      // negotiated requests fall through to normal handling instead.
      if (hasMarkdownExtension) {
        return createError({
          statusCode: response.status,
          statusMessage: response.statusText,
          message: `Failed to fetch HTML for ${path}`
        });
      }
      return;
    }
    const contentType = response.headers.get("content-type") || "";
    if (!contentType.includes("text/html")) {
      if (hasMarkdownExtension) {
        return createError({
          statusCode: 415,
          statusMessage: "Unsupported Media Type",
          message: `Expected text/html but got ${contentType} for ${path}`
        });
      }
      return;
    }
    html = response._data;
  } catch (e) {
    logger.error(`Failed to fetch HTML for ${path}`, e);
    if (hasMarkdownExtension) {
      return createError({
        statusCode: 500,
        statusMessage: "Internal Server Error",
        message: `Failed to fetch HTML for ${path}`
      });
    }
    return;
  }
  const result = await convertHtmlToMarkdown(
    html,
    withSiteUrl(event, path),
    config,
    path,
    event
  );
  setHeader(event, "content-type", "text/markdown; charset=utf-8");
  // Optional cache headers apply at runtime only, never during prerender.
  if (!import.meta.prerender && config.markdownCacheHeaders) {
    const { maxAge, swr } = config.markdownCacheHeaders;
    const cacheControl = swr ? `public, max-age=${maxAge}, stale-while-revalidate=${maxAge}` : `public, max-age=${maxAge}`;
    setHeader(event, "cache-control", cacheControl);
  }
  // During prerender the full result (markdown + title + description) is
  // serialized as JSON — presumably consumed by the prerender plugin; TODO
  // confirm. At runtime only the markdown body is returned.
  if (import.meta.prerender) {
    return JSON.stringify(result);
  }
  return result.markdown;
});
@@ -0,0 +1,2 @@
1
// Generated declaration for the llms.txt route handler (always plain text).
declare const _default: import("h3").EventHandler<import("h3").EventHandlerRequest, Promise<string>>;
export default _default;
@@ -0,0 +1,23 @@
1
import { getSiteConfig } from "#site-config/server/composables/getSiteConfig";
import { eventHandler, setHeader } from "h3";
import { useRuntimeConfig } from "nitropack/runtime";
import { normalizeLlmsTxtConfig } from "../../../utils";

// Serves /llms.txt: a site heading, an optional blockquote description,
// a build-time placeholder for the page list, and any configured extras.
export default eventHandler(async (event) => {
  const runtimeConfig = useRuntimeConfig(event)["nuxt-ai-ready"];
  const siteConfig = getSiteConfig(event);
  const sections = [`# ${siteConfig.name || siteConfig.url}`];
  if (siteConfig.description) {
    sections.push(`
> ${siteConfig.description}
`);
  }
  sections.push("<!-- Pages will be generated at build time -->\n");
  const extra = normalizeLlmsTxtConfig(runtimeConfig.llmsTxt);
  if (extra) {
    sections.push(extra);
  }
  setHeader(event, "Content-Type", "text/plain; charset=utf-8");
  return sections.join("\n");
});
@@ -0,0 +1,3 @@
1
+ {
2
+ "extends": "../../../.nuxt/tsconfig.server.json"
3
+ }
@@ -0,0 +1,8 @@
1
import type { BulkChunk } from '../../types.js';
// Augment NitroApp with a memoized promise holding the loaded document list,
// so concurrent callers of useBulkDocuments share a single fetch.
declare module 'nitropack' {
  interface NitroApp {
    _bulkDocuments?: Promise<BulkChunk[]>;
  }
}
// Streams documents one at a time from the bulk NDJSON route.
export declare function streamBulkDocuments(): AsyncGenerator<BulkChunk>;
// Loads and caches the full document list on the Nitro app instance.
export declare function useBulkDocuments(): Promise<BulkChunk[]>;
@@ -0,0 +1,48 @@
1
+ import { useNitroApp, useRuntimeConfig } from "nitropack/runtime";
2
+ import { logger } from "../logger.js";
3
/**
 * Streams documents from the configured bulk NDJSON route, yielding one
 * parsed JSON object per line as chunks arrive from the network.
 *
 * Throws if the fetch fails or responds without an OK status/body; a parse
 * error on a malformed line also propagates to the consumer.
 */
export async function* streamBulkDocuments() {
  const config = useRuntimeConfig();
  const bulkRoute = config["nuxt-ai-ready"]?.bulkRoute;
  const response = await fetch(bulkRoute).catch((err) => {
    logger.warn("Documents loading failed:", err);
    throw err;
  });
  if (!response.ok || !response.body)
    throw new Error(`Failed to fetch bulk documents: ${response.statusText}`);
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = "";
  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done)
        break;
      // stream:true lets the decoder hold incomplete multi-byte sequences
      // across chunk boundaries instead of emitting replacement characters.
      buffer += decoder.decode(value, { stream: true });
      let newlineIndex = buffer.indexOf("\n");
      while (newlineIndex !== -1) {
        const line = buffer.slice(0, newlineIndex).trim();
        buffer = buffer.slice(newlineIndex + 1);
        if (line)
          yield JSON.parse(line);
        newlineIndex = buffer.indexOf("\n");
      }
    }
    // Flush any bytes the streaming decoder is still holding — a multi-byte
    // UTF-8 sequence split at the final chunk boundary would otherwise be
    // silently dropped from the last document.
    buffer += decoder.decode();
    if (buffer.trim())
      yield JSON.parse(buffer.trim());
  } finally {
    reader.releaseLock();
  }
}
36
// Returns all bulk documents, materializing the stream at most once per
// Nitro app: the in-flight promise is stored on the app so that concurrent
// callers await the same load instead of fetching repeatedly.
export async function useBulkDocuments() {
  const nitroApp = useNitroApp();
  if (!nitroApp._bulkDocuments) {
    logger.debug("Lazy loading bulk documents...");
    nitroApp._bulkDocuments = (async () => {
      const collected = [];
      for await (const chunk of streamBulkDocuments())
        collected.push(chunk);
      return collected;
    })();
  }
  return await nitroApp._bulkDocuments;
}