@agent-seo/core 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/dist/edge-AywqjCEh.d.cts +198 -0
- package/dist/edge-AywqjCEh.d.ts +198 -0
- package/dist/edge.cjs +247 -0
- package/dist/edge.cjs.map +1 -0
- package/dist/edge.d.cts +1 -0
- package/dist/edge.d.ts +1 -0
- package/dist/edge.js +214 -0
- package/dist/edge.js.map +1 -0
- package/dist/index.cjs +918 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +108 -0
- package/dist/index.d.ts +108 -0
- package/dist/index.js +867 -0
- package/dist/index.js.map +1 -0
- package/package.json +75 -0
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/index.ts","../src/detect.ts","../src/transform.ts","../src/sanitize.ts","../src/markdown.ts","../src/json-ld.ts","../src/tokens.ts","../src/frontmatter.ts","../src/llms-txt.ts","../src/discover.ts","../src/headers.ts","../src/cache.ts"],"sourcesContent":["// Types\nexport type {\n BotPurpose,\n BotInfo,\n AIRequestContext,\n TransformOptions,\n TurndownRule,\n TransformResult,\n LlmsTxtRoute,\n LlmsTxtOptions,\n LlmsTxtResult,\n AgentSeoHeaders,\n AgentSeoOptions,\n} from './types.js';\n\n// Bot Detection\nexport { detectAgent, shouldServeMarkdown, AI_BOT_REGISTRY } from './detect.js';\n\n// Transformation Pipeline\nexport { transform } from './transform.js';\n\n// Sanitization\nexport { sanitizeHtml } from './sanitize.js';\n\n// Markdown Conversion\nexport { htmlToMarkdown } from './markdown.js';\n\n// Token Estimation\nexport { estimateTokens } from './tokens.js';\n\n// JSON-LD Extraction\nexport { extractJsonLdBlocks } from './json-ld.js';\n\n// llms.txt Generation\nexport { generateLlmsTxt } from './llms-txt.js';\n\n// Route Discovery\nexport {\n discoverNextRoutes,\n discoverFilesystemRoutes,\n extractMetadataFromSource,\n} from './discover.js';\nexport type { DiscoverOptions } from './discover.js';\n\n// Response Headers\nexport { buildMarkdownHeaders, buildAlternateLinkHeader } from './headers.js';\n\n// Cache\nexport { createCache } from './cache.js';\nexport type { CacheOptions } from './cache.js';\n","import type { AIRequestContext, BotInfo, BotPurpose } from './types.js';\n\ninterface BotEntry {\n /** Regex pattern to match against User-Agent string */\n pattern: RegExp;\n /** Bot metadata */\n info: BotInfo;\n}\n\nconst AI_BOT_REGISTRY: BotEntry[] = [\n // === OpenAI ===\n {\n pattern: /GPTBot/i,\n info: { name: 'GPTBot', operator: 'OpenAI', purpose: 'training', rendersJs: false },\n },\n {\n pattern: /OAI-SearchBot/i,\n info: { name: 'OAI-SearchBot', operator: 'OpenAI', purpose: 'search', rendersJs: false },\n },\n {\n 
pattern: /ChatGPT-User/i,\n info: { name: 'ChatGPT-User', operator: 'OpenAI', purpose: 'agent-browsing', rendersJs: true },\n },\n\n // === Anthropic ===\n {\n pattern: /ClaudeBot/i,\n info: { name: 'ClaudeBot', operator: 'Anthropic', purpose: 'training', rendersJs: false },\n },\n {\n pattern: /Claude-User/i,\n info: { name: 'Claude-User', operator: 'Anthropic', purpose: 'agent-browsing', rendersJs: true },\n },\n {\n pattern: /Claude-SearchBot/i,\n info: { name: 'Claude-SearchBot', operator: 'Anthropic', purpose: 'search', rendersJs: false },\n },\n {\n pattern: /anthropic-ai/i,\n info: { name: 'anthropic-ai', operator: 'Anthropic', purpose: 'training', rendersJs: false },\n },\n\n // === Perplexity ===\n {\n pattern: /PerplexityBot/i,\n info: { name: 'PerplexityBot', operator: 'Perplexity', purpose: 'search', rendersJs: false },\n },\n {\n pattern: /Perplexity-User/i,\n info: { name: 'Perplexity-User', operator: 'Perplexity', purpose: 'agent-browsing', rendersJs: true },\n },\n\n // === Google ===\n {\n pattern: /Google-Extended/i,\n info: { name: 'Google-Extended', operator: 'Google', purpose: 'training', rendersJs: true },\n },\n\n // === Apple ===\n {\n pattern: /Applebot-Extended/i,\n info: { name: 'Applebot-Extended', operator: 'Apple', purpose: 'training', rendersJs: true },\n },\n\n // === Meta ===\n {\n pattern: /meta-externalagent/i,\n info: { name: 'Meta-ExternalAgent', operator: 'Meta', purpose: 'training', rendersJs: false },\n },\n {\n pattern: /FacebookBot/i,\n info: { name: 'FacebookBot', operator: 'Meta', purpose: 'search', rendersJs: false },\n },\n\n // === Common Crawl ===\n {\n pattern: /CCBot/i,\n info: { name: 'CCBot', operator: 'Common Crawl', purpose: 'training', rendersJs: false },\n },\n\n // === Cohere ===\n {\n pattern: /cohere-ai/i,\n info: { name: 'cohere-ai', operator: 'Cohere', purpose: 'training', rendersJs: false },\n },\n\n // === Amazon ===\n {\n pattern: /Amazonbot/i,\n info: { name: 'Amazonbot', operator: 'Amazon', purpose: 
'search', rendersJs: false },\n },\n\n // === Bytedance ===\n {\n pattern: /Bytespider/i,\n info: { name: 'Bytespider', operator: 'ByteDance', purpose: 'training', rendersJs: false },\n },\n\n // === You.com ===\n {\n pattern: /YouBot/i,\n info: { name: 'YouBot', operator: 'You.com', purpose: 'search', rendersJs: false },\n },\n\n // === DeepSeek ===\n {\n pattern: /Deepseek/i,\n info: { name: 'DeepSeekBot', operator: 'DeepSeek', purpose: 'training', rendersJs: false },\n },\n];\n\nconst TOKEN_REGISTRY = AI_BOT_REGISTRY.map((entry) => ({\n entry,\n token: regexToToken(entry.pattern),\n}));\n\n/**\n * Detect if an incoming request is from an AI bot.\n *\n * @param userAgent - The User-Agent header value\n * @param acceptHeader - The Accept header value (optional)\n * @returns AIRequestContext with bot detection results\n */\nexport function detectAgent(\n userAgent: string | null | undefined,\n acceptHeader?: string | null\n): AIRequestContext {\n const wantsMarkdown = acceptHeader\n ? /text\\/markdown/i.test(acceptHeader)\n : false;\n\n if (!userAgent) {\n return { isAIBot: false, bot: null, wantsMarkdown };\n }\n\n const ua = userAgent.toLowerCase();\n\n // Check against our AI-specific registry first (more precise)\n for (const { entry, token } of TOKEN_REGISTRY) {\n if (token) {\n if (ua.includes(token)) {\n return { isAIBot: true, bot: entry.info, wantsMarkdown };\n }\n continue;\n }\n if (entry.pattern.test(userAgent)) {\n return { isAIBot: true, bot: entry.info, wantsMarkdown };\n }\n }\n\n // Fallback: we do NOT serve markdown to generic bots — only AI bots get the special treatment\n return { isAIBot: false, bot: null, wantsMarkdown };\n}\n\n/**\n * Check if a request should receive Markdown.\n * Returns true if: the request is from a known AI bot OR the Accept header requests text/markdown.\n */\nexport function shouldServeMarkdown(\n userAgent: string | null | undefined,\n acceptHeader?: string | null\n): boolean {\n const ctx = detectAgent(userAgent, 
acceptHeader);\n return ctx.isAIBot || ctx.wantsMarkdown;\n}\n\nexport { AI_BOT_REGISTRY };\n\nfunction regexToToken(pattern: RegExp): string | null {\n const source = pattern.source;\n if (/^[A-Za-z0-9-]+$/.test(source)) return source.toLowerCase();\n return null;\n}\n","import { JSDOM } from 'jsdom';\nimport { Readability, isProbablyReaderable } from '@mozilla/readability';\nimport { sanitizeHtml } from './sanitize.js';\nimport { htmlToMarkdown } from './markdown.js';\nimport { extractJsonLdBlocks } from './json-ld.js';\nimport { estimateTokens } from './tokens.js';\nimport { buildFrontmatter } from './frontmatter.js';\nimport type { TransformOptions, TransformResult } from './types.js';\n\n/**\n * Transform an HTML string into clean, LLM-optimized Markdown.\n *\n * Pipeline: Parse DOM → Extract JSON-LD → Readability extract → Sanitize → Turndown → Frontmatter → Token budget\n */\nexport async function transform(\n html: string,\n options: TransformOptions = {}\n): Promise<TransformResult> {\n const {\n url,\n tokenBudget,\n extractJsonLd = true,\n stripSelectors = [],\n preserveSelectors = [],\n frontmatter = true,\n turndownRules = [],\n } = options;\n\n // Stage 1: DOM Construction\n const dom = new JSDOM(html, { url: url || 'https://localhost' });\n const document = dom.window.document;\n\n // Extract metadata before Readability mutates the DOM\n const title =\n document.querySelector('title')?.textContent?.trim() ||\n document.querySelector('h1')?.textContent?.trim() ||\n '';\n const description =\n document.querySelector('meta[name=\"description\"]')?.getAttribute('content')?.trim() || '';\n const canonicalUrl =\n document.querySelector('link[rel=\"canonical\"]')?.getAttribute('href') || null;\n const lang = document.documentElement.getAttribute('lang') || null;\n const lastModified =\n document.querySelector('meta[property=\"article:modified_time\"]')?.getAttribute('content') ||\n 
document.querySelector('meta[name=\"last-modified\"]')?.getAttribute('content') ||\n null;\n\n // Stage 2: JSON-LD Extraction\n const jsonLd = extractJsonLd ? extractJsonLdBlocks(document) : [];\n\n // Stage 3: Content Extraction via Readability\n let contentHtml: string;\n if (isProbablyReaderable(document)) {\n const reader = new Readability(document, { charThreshold: 100 });\n const article = reader.parse();\n contentHtml = article?.content || document.body?.innerHTML || html;\n } else {\n // Page is not article-like (e.g., docs, API reference) — use the body directly\n contentHtml = document.body?.innerHTML || html;\n }\n\n // Stage 4: HTML Sanitization\n const cleanHtml = sanitizeHtml(contentHtml, {\n stripSelectors,\n preserveSelectors,\n });\n\n // Stage 5: Markdown Conversion\n let markdown = htmlToMarkdown(cleanHtml, { url, customRules: turndownRules });\n\n // Stage 6: Add Frontmatter\n if (frontmatter) {\n const fm = buildFrontmatter({ title, description, url, lang, lastModified, jsonLd });\n markdown = fm + '\\n\\n' + markdown;\n }\n\n // Stage 7: Token Budget Enforcement\n let tokenEstimate = estimateTokens(markdown);\n if (tokenBudget && tokenEstimate > tokenBudget) {\n markdown = truncateToTokenBudget(markdown, tokenBudget);\n tokenEstimate = estimateTokens(markdown);\n }\n\n dom.window.close();\n\n return {\n markdown,\n tokenEstimate,\n title,\n description,\n jsonLd,\n canonicalUrl,\n lastModified,\n lang,\n };\n}\n\n/**\n * Intelligent truncation: preserve headings and first sentences of paragraphs,\n * remove later paragraphs.\n */\nfunction truncateToTokenBudget(markdown: string, budget: number): string {\n const lines = markdown.split('\\n');\n const result: string[] = [];\n let currentTokens = 0;\n\n for (const line of lines) {\n const lineTokens = estimateTokens(line);\n if (currentTokens + lineTokens > budget) {\n if (/^#{1,6}\\s/.test(line)) {\n result.push(line);\n result.push('\\n*[Content truncated for token budget]*\\n');\n }\n 
break;\n }\n result.push(line);\n currentTokens += lineTokens;\n }\n\n return result.join('\\n');\n}\n","import { JSDOM } from 'jsdom';\n\ninterface SanitizeOptions {\n stripSelectors?: string[];\n preserveSelectors?: string[];\n}\n\n/**\n * Elements that are always removed (noise generators).\n */\nconst DEFAULT_STRIP_TAGS = [\n 'script',\n 'style',\n 'noscript',\n 'iframe',\n 'svg',\n 'canvas',\n 'video',\n 'audio',\n 'map',\n 'object',\n 'embed',\n 'applet',\n 'link[rel=\"stylesheet\"]',\n 'meta',\n];\n\n/**\n * CSS selectors that match common noise patterns.\n * These are stripped regardless of their tag type.\n */\nconst DEFAULT_STRIP_SELECTORS = [\n // Navigation & chrome\n 'nav',\n 'header:not(article header)',\n 'footer:not(article footer)',\n '[role=\"navigation\"]',\n '[role=\"banner\"]',\n '[role=\"contentinfo\"]',\n '[role=\"complementary\"]',\n 'aside',\n\n // Ads, cookies, popups\n '.advertisement', '.ad', '.ads', '[class*=\"ad-\"]', '[class*=\"ad_\"]',\n '.cookie-banner', '.cookie-consent', '[class*=\"cookie\"]',\n '.popup', '.modal', '[class*=\"popup\"]', '[class*=\"modal\"]',\n '.overlay',\n\n // Social & sharing\n '.social-share', '.share-buttons', '[class*=\"social\"]', '[class*=\"share\"]',\n '.follow-us',\n\n // Comments & forms (not the content)\n '.comments', '#comments', '.comment-form',\n 'form:not([class*=\"search\"])',\n\n // Related content / sidebar noise\n '.related-posts', '.recommended', '.sidebar', '.widget',\n '[class*=\"related\"]', '[class*=\"sidebar\"]', '[class*=\"widget\"]',\n '.newsletter', '.subscribe', '[class*=\"newsletter\"]',\n '.cta', '[class*=\"cta\"]',\n\n // Visual-only elements\n '.breadcrumb', '.breadcrumbs',\n '.pagination',\n '.skip-link',\n '[aria-hidden=\"true\"]',\n\n // JS framework artifacts\n '[data-reactroot] > noscript',\n '.hydration-overlay',\n];\n\n/**\n * Sanitize HTML by removing noise elements.\n * This is run AFTER Readability extraction (which does the heavy lifting)\n * to catch remaining noise 
that Readability missed.\n */\nexport function sanitizeHtml(html: string, options: SanitizeOptions = {}): string {\n const { stripSelectors = [], preserveSelectors = [] } = options;\n\n const dom = new JSDOM(html);\n const document = dom.window.document;\n\n // Build the preserve set first\n const preserveSet = new Set<Node>();\n for (const selector of preserveSelectors) {\n try {\n document.querySelectorAll(selector).forEach((el) => preserveSet.add(el));\n } catch {\n // invalid selector — skip\n }\n }\n\n // Strip default tags\n for (const tag of DEFAULT_STRIP_TAGS) {\n try {\n document.querySelectorAll(tag).forEach((el) => {\n if (!preserveSet.has(el)) el.remove();\n });\n } catch {\n // skip invalid\n }\n }\n\n // Strip default selectors + custom selectors\n const allStripSelectors = [...DEFAULT_STRIP_SELECTORS, ...stripSelectors];\n for (const selector of allStripSelectors) {\n try {\n document.querySelectorAll(selector).forEach((el) => {\n if (!preserveSet.has(el)) el.remove();\n });\n } catch {\n // skip invalid\n }\n }\n\n // Strip elements with zero or near-zero text density\n stripLowDensityElements(document);\n\n // Remove empty elements left behind\n removeEmptyElements(document);\n\n // Remove all class/id/style/data-* attributes (noise for Markdown)\n cleanAttributes(document);\n\n const result = document.body?.innerHTML || '';\n dom.window.close();\n return result;\n}\n\nfunction stripLowDensityElements(document: Document): void {\n const candidates = document.querySelectorAll('div, section, span');\n for (const el of candidates) {\n const textLength = (el.textContent || '').trim().length;\n const childElementCount = el.querySelectorAll('*').length;\n\n if (childElementCount > 10 && textLength < 50) {\n el.remove();\n }\n }\n}\n\nfunction removeEmptyElements(document: Document): void {\n const candidates = document.querySelectorAll('div, span, p, section, article');\n for (const el of candidates) {\n if (!(el.textContent || '').trim() && 
!el.querySelector('img, table, pre, code')) {\n el.remove();\n }\n }\n}\n\nfunction cleanAttributes(document: Document): void {\n const all = document.querySelectorAll('*');\n const keepAttrs = new Set(['href', 'src', 'alt', 'title', 'colspan', 'rowspan', 'scope', 'headers', 'lang', 'dir', 'type']);\n\n for (const el of all) {\n const attrs = Array.from(el.attributes);\n for (const attr of attrs) {\n // Preserve class on code elements (needed for language hints like class=\"language-js\")\n if (attr.name === 'class' && el.tagName === 'CODE') continue;\n if (!keepAttrs.has(attr.name)) {\n el.removeAttribute(attr.name);\n }\n }\n }\n}\n","import TurndownService from 'turndown';\nimport { gfm } from 'turndown-plugin-gfm';\nimport type { TurndownRule } from './types.js';\n\ninterface MarkdownOptions {\n url?: string;\n customRules?: TurndownRule[];\n}\n\nexport function htmlToMarkdown(html: string, options: MarkdownOptions = {}): string {\n const { url, customRules = [] } = options;\n\n const turndown = new TurndownService({\n headingStyle: 'atx',\n codeBlockStyle: 'fenced',\n bulletListMarker: '-',\n emDelimiter: '*',\n strongDelimiter: '**',\n linkStyle: 'inlined',\n hr: '---',\n });\n\n // Enable GFM (tables, strikethrough, task lists)\n turndown.use(gfm);\n\n // Custom rule: preserve code block language hints\n turndown.addRule('fencedCodeBlock', {\n filter: (node) => {\n return (\n node.nodeName === 'PRE' &&\n node.firstChild !== null &&\n node.firstChild.nodeName === 'CODE'\n );\n },\n replacement: (_content, node) => {\n const codeEl = node.firstChild as Element;\n const className = codeEl?.getAttribute?.('class') || '';\n\n // Extract language from class=\"language-xxx\" or \"hljs xxx\" or \"highlight-xxx\"\n const langMatch = className.match(\n /(?:language-|lang-|hljs\\s+|highlight-)([a-zA-Z0-9_+-]+)/\n );\n const lang = langMatch ? 
langMatch[1] : '';\n const code = codeEl?.textContent || '';\n\n return `\\n\\n\\`\\`\\`${lang}\\n${code.replace(/\\n+$/, '')}\\n\\`\\`\\`\\n\\n`;\n },\n });\n\n // Custom rule: images with alt text only (skip decorative images)\n turndown.addRule('meaningfulImages', {\n filter: (node) => node.nodeName === 'IMG',\n replacement: (_content, node) => {\n const el = node as Element;\n const alt = el.getAttribute('alt')?.trim();\n const src = el.getAttribute('src')?.trim();\n\n // Skip decorative images (no alt text or empty alt)\n if (!alt) return '';\n\n // Resolve relative URLs\n let resolvedSrc = src || '';\n if (url && src && !src.startsWith('http') && !src.startsWith('data:')) {\n try {\n resolvedSrc = new URL(src, url).href;\n } catch {\n resolvedSrc = src;\n }\n }\n\n return ``;\n },\n });\n\n // Custom rule: resolve relative links\n turndown.addRule('resolveLinks', {\n filter: 'a',\n replacement: (content, node) => {\n const el = node as Element;\n const href = el.getAttribute('href');\n if (!href || !content.trim()) return content;\n\n // Skip anchor-only links\n if (href.startsWith('#')) return content;\n\n // Strip dangerous protocol links entirely\n if (href.startsWith('javascript:') || href.startsWith('data:text/html')) return '';\n\n let resolvedHref = href;\n if (url && !href.startsWith('http') && !href.startsWith('mailto:')) {\n try {\n resolvedHref = new URL(href, url).href;\n } catch {\n resolvedHref = href;\n }\n }\n\n const title = el.getAttribute('title');\n return title\n ? 
`[${content}](${resolvedHref} \"${title}\")`\n : `[${content}](${resolvedHref})`;\n },\n });\n\n // Add user-supplied custom rules\n for (const rule of customRules) {\n turndown.addRule(rule.name, {\n filter: rule.filter as any,\n replacement: rule.replacement as any,\n });\n }\n\n let markdown = turndown.turndown(html);\n\n // Post-processing: collapse excessive blank lines\n markdown = markdown.replace(/\\n{3,}/g, '\\n\\n').trim();\n\n return markdown;\n}\n","/**\n * Extract all JSON-LD blocks from a document.\n * These are <script type=\"application/ld+json\"> elements.\n */\nexport function extractJsonLdBlocks(document: Document): Record<string, unknown>[] {\n const results: Record<string, unknown>[] = [];\n const scripts = document.querySelectorAll('script[type=\"application/ld+json\"]');\n\n for (const script of scripts) {\n try {\n const data = JSON.parse(script.textContent || '');\n // Handle @graph arrays\n if (data['@graph'] && Array.isArray(data['@graph'])) {\n results.push(...data['@graph']);\n } else {\n results.push(data);\n }\n } catch {\n // Invalid JSON-LD — skip silently\n }\n }\n\n return results;\n}\n","/**\n * Estimate token count using the chars/4 heuristic.\n * This is the same heuristic used by Cloudflare's X-Markdown-Tokens header.\n * Accurate to within ~10% for English text across GPT and Claude tokenizers.\n */\nexport function estimateTokens(text: string): number {\n return Math.ceil(text.length / 4);\n}\n","interface FrontmatterInput {\n title: string;\n description: string;\n url?: string;\n lang?: string | null;\n lastModified?: string | null;\n jsonLd?: Record<string, unknown>[];\n}\n\n/**\n * Build a YAML frontmatter block for the Markdown output.\n */\nexport function buildFrontmatter(input: FrontmatterInput): string {\n const lines: string[] = ['---'];\n\n if (input.title) lines.push(`title: \"${escapeYaml(input.title)}\"`);\n if (input.description)\n lines.push(`description: \"${escapeYaml(input.description)}\"`);\n if 
(input.url) lines.push(`url: \"${escapeYaml(input.url)}\"`);\n if (input.lang) lines.push(`lang: \"${escapeYaml(input.lang)}\"`);\n if (input.lastModified) lines.push(`lastModified: \"${escapeYaml(input.lastModified)}\"`);\n\n // Extract structured data from JSON-LD\n if (input.jsonLd?.length) {\n const primary = input.jsonLd[0];\n\n const primaryType = primary?.['@type'];\n if (primaryType) {\n const typeStr = Array.isArray(primaryType) ? primaryType[0] : primaryType;\n if (typeof typeStr === 'string') {\n lines.push(`schema: \"${escapeYaml(typeStr)}\"`);\n }\n }\n\n // Author\n const author = primary?.author as Record<string, unknown> | undefined;\n if (author) {\n const authorName = author.name as string | undefined;\n if (authorName) lines.push(`author: \"${escapeYaml(authorName)}\"`);\n }\n\n // Dates\n const datePublished = primary?.datePublished as string | undefined;\n if (datePublished) lines.push(`datePublished: \"${escapeYaml(datePublished)}\"`);\n\n const dateModified = primary?.dateModified as string | undefined;\n if (dateModified) lines.push(`dateModified: \"${escapeYaml(dateModified)}\"`);\n }\n\n lines.push('---');\n return lines.join('\\n');\n}\n\nfunction escapeYaml(str: string): string {\n // Replace backslashes first, then double quotes, then collapse newlines\n return str\n .replace(/\\\\/g, '\\\\\\\\')\n .replace(/\"/g, '\\\\\"')\n .replace(/\\n/g, ' ');\n}\n","import type { LlmsTxtOptions, LlmsTxtRoute, LlmsTxtResult } from './types.js';\n\n/**\n * Generate llms.txt and llms-full.txt content from route data.\n *\n * llms.txt format (per spec at llmstxt.org):\n * # Site Name\n * > Site description blockquote\n *\n * ## Section Name\n * - [Page Title](url): Description\n */\nexport function generateLlmsTxt(\n options: LlmsTxtOptions,\n routes: LlmsTxtRoute[],\n fullTextContents?: Map<string, string>\n): LlmsTxtResult {\n const { siteName, siteDescription, baseUrl, markdownExtension = '.md' } = options;\n\n // Group routes by section\n const 
sections = new Map<string, LlmsTxtRoute[]>();\n for (const route of routes) {\n const section = route.section || 'Pages';\n if (!sections.has(section)) sections.set(section, []);\n sections.get(section)!.push(route);\n }\n\n // Build llms.txt\n const lines: string[] = [];\n lines.push(`# ${siteName}`);\n lines.push('');\n lines.push(`> ${siteDescription}`);\n lines.push('');\n\n for (const [section, sectionRoutes] of sections) {\n lines.push(`## ${section}`);\n lines.push('');\n for (const route of sectionRoutes) {\n const url = `${baseUrl}${route.path}${markdownExtension}`;\n const desc = route.description ? `: ${route.description}` : '';\n lines.push(`- [${route.title}](${url})${desc}`);\n }\n lines.push('');\n }\n\n const llmsTxt = lines.join('\\n').trim() + '\\n';\n\n // Build llms-full.txt (concatenated content of all routes)\n const fullLines: string[] = [];\n fullLines.push(`# ${siteName}`);\n fullLines.push('');\n fullLines.push(`> ${siteDescription}`);\n fullLines.push('');\n\n if (fullTextContents) {\n for (const route of routes) {\n const content = fullTextContents.get(route.path);\n if (content) {\n fullLines.push(`---`);\n fullLines.push('');\n fullLines.push(`## ${route.title}`);\n fullLines.push(`Source: ${baseUrl}${route.path}`);\n fullLines.push('');\n fullLines.push(content);\n fullLines.push('');\n }\n }\n }\n\n const llmsFullTxt = fullLines.join('\\n').trim() + '\\n';\n\n return {\n llmsTxt,\n llmsFullTxt,\n routeCount: routes.length,\n };\n}\n","import { readFileSync, existsSync, readdirSync, lstatSync, realpathSync } from 'node:fs';\nimport type { Stats } from 'node:fs';\nimport { join, sep } from 'node:path';\nimport type { LlmsTxtRoute } from './types.js';\n\n// ============================================================\n// ROUTE DISCOVERY\n// ============================================================\n\n/**\n * Options for route discovery.\n */\nexport interface DiscoverOptions {\n /**\n * Glob-like patterns to exclude from discovery.\n 
* Matches against the URL path, e.g. [\"/api\", \"/admin\", \"/_internal\"].\n * Default: [\"/api\", \"/_*\"]\n */\n exclude?: string[];\n\n /**\n * How to derive the section name from a route path.\n * - 'directory': uses the first path segment (e.g., \"/docs/intro\" → \"Docs\")\n * - A custom function: (path: string) => string\n * Default: 'directory'\n */\n sectionStrategy?: 'directory' | ((path: string) => string);\n\n /**\n * Default section name for top-level pages (e.g., \"/\" or \"/about\").\n * Default: \"Pages\"\n */\n defaultSection?: string;\n}\n\n// ============================================================\n// NEXT.JS APP ROUTER DISCOVERY\n// ============================================================\n\nconst PAGE_FILE_PATTERNS = [\n 'page.tsx',\n 'page.ts',\n 'page.jsx',\n 'page.js',\n 'page.mdx',\n 'page.md',\n];\n\nconst SKIP_DIRS = new Set([\n 'node_modules',\n '.next',\n '.git',\n 'dist',\n '.turbo',\n '_components',\n '_lib',\n '_utils',\n '_hooks',\n '_actions',\n 'components',\n 'lib',\n 'utils',\n 'hooks',\n 'actions',\n 'api', // API routes are not pages\n]);\n\n/**\n * Scan a Next.js `app/` directory and discover all page routes.\n *\n * For each `page.tsx` found, it:\n * 1. Derives the URL path from the directory structure\n * 2. Attempts to extract `metadata.title` and `metadata.description` from the file\n * 3. 
Assigns a section based on the top-level directory\n *\n * @param appDir - Absolute path to the `app/` directory\n * @param options - Discovery options\n * @returns Array of discovered routes\n *\n * @example\n * ```ts\n * const routes = discoverNextRoutes('/path/to/app');\n * // [\n * // { path: '/', title: 'Home', description: '...', section: 'Pages' },\n * // { path: '/docs/intro', title: 'Introduction', description: '...', section: 'Docs' },\n * // ]\n * ```\n */\nexport function discoverNextRoutes(\n appDir: string,\n options: DiscoverOptions = {},\n): LlmsTxtRoute[] {\n const {\n exclude = ['/api'],\n sectionStrategy = 'directory',\n defaultSection = 'Pages',\n } = options;\n\n if (!existsSync(appDir)) {\n return [];\n }\n\n const routes: LlmsTxtRoute[] = [];\n let rootReal = appDir;\n try {\n rootReal = realpathSync(appDir);\n } catch {\n return routes;\n }\n scanAppDir(appDir, appDir, rootReal, routes, exclude, sectionStrategy, defaultSection);\n\n // Sort: homepage first, then alphabetically\n routes.sort((a, b) => {\n if (a.path === '/') return -1;\n if (b.path === '/') return 1;\n return a.path.localeCompare(b.path);\n });\n\n return routes;\n}\n\nfunction scanAppDir(\n rootDir: string,\n currentDir: string,\n rootReal: string,\n routes: LlmsTxtRoute[],\n exclude: string[],\n sectionStrategy: 'directory' | ((path: string) => string),\n defaultSection: string,\n): void {\n const entries = readdirSync(currentDir);\n\n for (const entry of entries) {\n const fullPath = join(currentDir, entry);\n let stat: Stats;\n try {\n stat = lstatSync(fullPath);\n } catch {\n continue;\n }\n\n if (stat.isSymbolicLink()) continue;\n\n if (stat.isDirectory()) {\n let realDir = fullPath;\n try {\n realDir = realpathSync(fullPath);\n } catch {\n continue;\n }\n if (!isWithinRoot(realDir, rootReal)) continue;\n\n // Skip private directories (prefixed with _) and known non-page dirs\n const baseName = entry.toLowerCase();\n if (\n baseName.startsWith('_') ||\n 
baseName.startsWith('.') ||\n SKIP_DIRS.has(baseName)\n )\n continue;\n\n // Skip route groups like (marketing) — they don't affect the URL path\n // but we still need to scan inside them\n scanAppDir(\n rootDir,\n fullPath,\n rootReal,\n routes,\n exclude,\n sectionStrategy,\n defaultSection,\n );\n continue;\n }\n\n // Check if this is a page file\n if (!PAGE_FILE_PATTERNS.includes(entry)) continue;\n\n // Derive URL path from directory structure\n const relativePath = currentDir.substring(rootDir.length);\n let urlPath = relativePath.replace(/\\\\/g, '/');\n\n // Handle route groups: strip (groupName) segments\n urlPath = urlPath.replace(/\\/\\([^)]+\\)/g, '');\n\n // Handle dynamic segments: [slug] → keep as-is for now\n // Handle catch-all: [...slug] and [[...slug]] → skip (too dynamic)\n if (urlPath.includes('[...') || urlPath.includes('[[...')) continue;\n\n // Root page\n if (urlPath === '') urlPath = '/';\n\n // Ensure leading slash\n if (!urlPath.startsWith('/')) urlPath = '/' + urlPath;\n\n // Check exclusions\n if (shouldExclude(urlPath, exclude)) continue;\n\n // Skip llms.txt route handler itself\n if (urlPath === '/llms.txt' || urlPath === '/llms-full.txt') continue;\n\n // Extract metadata from the page file\n const { title, description } = extractMetadataFromFile(fullPath);\n\n // Derive section\n const section = deriveSection(urlPath, sectionStrategy, defaultSection);\n\n routes.push({\n path: urlPath,\n title: title || pathToTitle(urlPath),\n description: description || undefined,\n section,\n });\n }\n}\n\n/**\n * Extract metadata.title and metadata.description from a Next.js page file.\n *\n * This uses simple regex parsing — it doesn't evaluate the code.\n * It handles the two common patterns:\n *\n * 1. `export const metadata = { title: \"...\", description: \"...\" }`\n * 2. 
`export const metadata: Metadata = { title: \"...\", description: \"...\" }`\n */\nfunction extractMetadataFromFile(filePath: string): {\n title: string;\n description: string;\n} {\n try {\n const content = readFileSync(filePath, 'utf-8');\n return extractMetadataFromSource(content);\n } catch {\n return { title: '', description: '' };\n }\n}\n\n/**\n * Parse metadata from source code string.\n * Exported for testing.\n */\nexport function extractMetadataFromSource(source: string): {\n title: string;\n description: string;\n} {\n let title = '';\n let description = '';\n\n // Match `export const metadata = { ... }` or `export const metadata: Metadata = { ... }`\n // We need to handle multi-line objects, so we find the opening { and match to closing }\n const metadataMatch = source.match(\n /export\\s+const\\s+metadata[\\s:]*(?:Metadata\\s*)?=\\s*\\{/,\n );\n\n if (!metadataMatch) {\n return { title, description };\n }\n\n // Extract the object content by counting braces\n const startIdx = metadataMatch.index! 
+ metadataMatch[0].length - 1; // position of {\n const objectStr = extractBalancedBraces(source, startIdx);\n\n if (!objectStr) {\n return { title, description };\n }\n\n // Extract title\n const titleMatch = objectStr.match(\n /title\\s*:\\s*(?:'([^']*)'|\"([^\"]*)\"|`([^`]*)`)/,\n );\n if (titleMatch) {\n title = sanitizeMetadataValue(titleMatch[1] || titleMatch[2] || titleMatch[3] || '');\n }\n\n // Extract description\n const descMatch = objectStr.match(\n /description\\s*:\\s*(?:'([^']*)'|\"([^\"]*)\"|`([^`]*)`)/,\n );\n if (descMatch) {\n description = sanitizeMetadataValue(descMatch[1] || descMatch[2] || descMatch[3] || '');\n }\n\n return { title, description };\n}\n\n/**\n * Extract a balanced brace block starting from position `start` (which should be `{`).\n */\nfunction extractBalancedBraces(source: string, start: number): string | null {\n if (source[start] !== '{') return null;\n\n let depth = 0;\n for (let i = start; i < source.length; i++) {\n if (source[i] === '{') depth++;\n else if (source[i] === '}') depth--;\n\n if (depth === 0) {\n return source.substring(start, i + 1);\n }\n }\n\n return null;\n}\n\n/**\n * Derive a section name from a URL path.\n */\nfunction deriveSection(\n urlPath: string,\n strategy: 'directory' | ((path: string) => string),\n defaultSection: string,\n): string {\n if (typeof strategy === 'function') {\n return strategy(urlPath);\n }\n\n // 'directory' strategy: use the first path segment, capitalize it\n const segments = urlPath.split('/').filter(Boolean);\n if (segments.length === 0) return defaultSection;\n\n const firstSegment = segments[0];\n\n // If the first segment contains a dynamic param like [slug], use default\n if (firstSegment.startsWith('[')) return defaultSection;\n\n // Capitalize and humanize: \"tools\" → \"Tools\", \"api-reference\" → \"Api Reference\"\n return firstSegment\n .split('-')\n .map((word) => word.charAt(0).toUpperCase() + word.slice(1))\n .join(' ');\n}\n\n/**\n * Convert a URL path into 
a human-readable title.\n * \"/docs/getting-started\" → \"Getting Started\"\n * \"/\" → \"Home\"\n */\nfunction pathToTitle(urlPath: string): string {\n if (urlPath === '/') return 'Home';\n\n const lastSegment = urlPath.split('/').filter(Boolean).pop() || '';\n\n // Skip dynamic segments\n if (lastSegment.startsWith('[')) {\n return lastSegment.replace(/^\\[|\\]$/g, '');\n }\n\n return lastSegment\n .split('-')\n .map((word) => word.charAt(0).toUpperCase() + word.slice(1))\n .join(' ');\n}\n\n/**\n * Check if a path should be excluded based on patterns.\n */\nfunction shouldExclude(urlPath: string, patterns: string[]): boolean {\n for (const pattern of patterns) {\n // Simple prefix matching\n if (pattern.endsWith('/**') || pattern.endsWith('/*')) {\n const prefix = pattern.replace(/\\/\\*\\*?$/, '');\n if (urlPath === prefix || urlPath.startsWith(prefix + '/')) return true;\n } else if (pattern.startsWith('_')) {\n // Exclude paths starting with _\n if (\n urlPath.startsWith('/' + pattern) ||\n urlPath.includes('/' + pattern + '/')\n )\n return true;\n } else if (urlPath === pattern || urlPath.startsWith(pattern + '/')) {\n return true;\n }\n }\n return false;\n}\n\n// ============================================================\n// FILESYSTEM DISCOVERY (HTML files)\n// ============================================================\n\n/**\n * Discover routes by scanning a directory for HTML files.\n * Useful for static sites or build output directories.\n */\nexport function discoverFilesystemRoutes(\n dir: string,\n options: DiscoverOptions = {},\n): LlmsTxtRoute[] {\n const {\n exclude = [],\n sectionStrategy = 'directory',\n defaultSection = 'Pages',\n } = options;\n\n if (!existsSync(dir)) {\n return [];\n }\n\n let rootReal = dir;\n try {\n rootReal = realpathSync(dir);\n } catch {\n return [];\n }\n\n const htmlFiles = findHtmlFiles(dir, rootReal);\n const routes: LlmsTxtRoute[] = [];\n\n for (const filePath of htmlFiles) {\n const relativePath = 
filePath.substring(dir.length);\n let urlPath = relativePath\n .replace(/\\\\/g, '/')\n .replace(/\\/index\\.html$/, '/')\n .replace(/\\.html$/, '');\n\n if (urlPath === '') urlPath = '/';\n if (!urlPath.startsWith('/')) urlPath = '/' + urlPath;\n\n if (shouldExclude(urlPath, exclude)) continue;\n\n const section = deriveSection(urlPath, sectionStrategy, defaultSection);\n\n // Try to extract title from HTML\n let title = '';\n try {\n const html = readFileSync(filePath, 'utf-8');\n const titleMatch = html.match(/<title[^>]*>([^<]+)<\\/title>/i);\n title = sanitizeMetadataValue(titleMatch?.[1]?.trim() || '');\n } catch {\n // ignore\n }\n\n routes.push({\n path: urlPath,\n title: title || pathToTitle(urlPath),\n section,\n });\n }\n\n routes.sort((a, b) => {\n if (a.path === '/') return -1;\n if (b.path === '/') return 1;\n return a.path.localeCompare(b.path);\n });\n\n return routes;\n}\n\nfunction findHtmlFiles(dir: string, rootReal: string): string[] {\n const results: string[] = [];\n if (!existsSync(dir)) return results;\n\n const entries = readdirSync(dir);\n for (const entry of entries) {\n const fullPath = join(dir, entry);\n let stat: Stats;\n try {\n stat = lstatSync(fullPath);\n } catch {\n continue;\n }\n\n if (stat.isSymbolicLink()) continue;\n\n if (stat.isDirectory()) {\n if (['node_modules', '.next', '.git', 'dist', '.turbo'].includes(entry))\n continue;\n let realDir = fullPath;\n try {\n realDir = realpathSync(fullPath);\n } catch {\n continue;\n }\n if (!isWithinRoot(realDir, rootReal)) continue;\n results.push(...findHtmlFiles(fullPath, rootReal));\n } else if (entry.endsWith('.html')) {\n results.push(fullPath);\n }\n }\n\n return results;\n}\n\nfunction isWithinRoot(realPath: string, rootReal: string): boolean {\n if (realPath === rootReal) return true;\n const normalizedRoot = rootReal.endsWith(sep) ? 
rootReal : rootReal + sep;\n return realPath.startsWith(normalizedRoot);\n}\n\nfunction sanitizeMetadataValue(value: string): string {\n if (!value) return '';\n const sanitized = value\n .replace(/[\\u0000-\\u001F\\u007F]/g, ' ')\n .replace(/[<>]/g, '')\n .replace(/\\s+/g, ' ')\n .trim();\n return sanitized.length > 200 ? sanitized.slice(0, 200) : sanitized;\n}\n","import type { TransformResult, AgentSeoHeaders, AgentSeoOptions } from './types.js';\n\n/**\n * Build the response headers for a Markdown response.\n */\nexport function buildMarkdownHeaders(\n result: TransformResult,\n options: Pick<AgentSeoOptions, 'contentSignal'>,\n originalPath?: string\n): AgentSeoHeaders {\n const headers: AgentSeoHeaders = {\n 'Content-Type': 'text/markdown; charset=utf-8',\n 'Content-Disposition': 'inline',\n 'Vary': 'Accept, User-Agent',\n 'X-Markdown-Tokens': String(result.tokenEstimate),\n };\n\n // Content-Signal header (Cloudflare convention)\n const signal = options.contentSignal ?? { aiTrain: true, search: true, aiInput: true };\n const signalParts: string[] = [];\n if (signal.aiTrain !== false) signalParts.push('ai-train=yes');\n if (signal.search !== false) signalParts.push('search=yes');\n if (signal.aiInput !== false) signalParts.push('ai-input=yes');\n if (signalParts.length > 0) {\n headers['Content-Signal'] = signalParts.join(', ');\n }\n\n // X-Robots-Tag: let all AI bots index\n headers['X-Robots-Tag'] = 'all';\n\n return headers;\n}\n\n/**\n * Build a Link header pointing to the Markdown alternate.\n * This is injected into ALL HTML responses (not just Markdown ones)\n * so crawlers can discover the alternate representation.\n */\nexport function buildAlternateLinkHeader(path: string, ext: string = '.md'): string {\n // /docs/getting-started → /docs/getting-started.md\n const mdPath = path.endsWith('/') ? 
`${path}index${ext}` : `${path}${ext}`;\n return `<${mdPath}>; rel=\"alternate\"; type=\"text/markdown\"`;\n}\n","import { LRUCache } from 'lru-cache';\nimport type { TransformResult } from './types.js';\n\nexport interface CacheOptions {\n /** Max number of entries. Default: 100 */\n maxEntries?: number;\n /** TTL in milliseconds. Default: 300_000 (5 minutes) */\n ttl?: number;\n}\n\nexport function createCache(options: CacheOptions = {}) {\n const { maxEntries = 100, ttl = 300_000 } = options;\n\n const cache = new LRUCache<string, TransformResult>({\n max: maxEntries,\n ttl,\n });\n\n return {\n get: (key: string) => cache.get(key),\n set: (key: string, value: TransformResult) => cache.set(key, value),\n has: (key: string) => cache.has(key),\n clear: () => cache.clear(),\n size: () => cache.size,\n };\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACSA,IAAM,kBAA8B;AAAA;AAAA,EAElC;AAAA,IACE,SAAS;AAAA,IACT,MAAM,EAAE,MAAM,UAAU,UAAU,UAAU,SAAS,YAAY,WAAW,MAAM;AAAA,EACpF;AAAA,EACA;AAAA,IACE,SAAS;AAAA,IACT,MAAM,EAAE,MAAM,iBAAiB,UAAU,UAAU,SAAS,UAAU,WAAW,MAAM;AAAA,EACzF;AAAA,EACA;AAAA,IACE,SAAS;AAAA,IACT,MAAM,EAAE,MAAM,gBAAgB,UAAU,UAAU,SAAS,kBAAkB,WAAW,KAAK;AAAA,EAC/F;AAAA;AAAA,EAGA;AAAA,IACE,SAAS;AAAA,IACT,MAAM,EAAE,MAAM,aAAa,UAAU,aAAa,SAAS,YAAY,WAAW,MAAM;AAAA,EAC1F;AAAA,EACA;AAAA,IACE,SAAS;AAAA,IACT,MAAM,EAAE,MAAM,eAAe,UAAU,aAAa,SAAS,kBAAkB,WAAW,KAAK;AAAA,EACjG;AAAA,EACA;AAAA,IACE,SAAS;AAAA,IACT,MAAM,EAAE,MAAM,oBAAoB,UAAU,aAAa,SAAS,UAAU,WAAW,MAAM;AAAA,EAC/F;AAAA,EACA;AAAA,IACE,SAAS;AAAA,IACT,MAAM,EAAE,MAAM,gBAAgB,UAAU,aAAa,SAAS,YAAY,WAAW,MAAM;AAAA,EAC7F;AAAA;AAAA,EAGA;AAAA,IACE,SAAS;AAAA,IACT,MAAM,EAAE,MAAM,iBAAiB,UAAU,cAAc,SAAS,UAAU,WAAW,MAAM;AAAA,EAC7F;AAAA,EACA;AAAA,IACE,SAAS;AAAA,IACT,MAAM,EAAE,MAAM,mBAAmB,UAAU,cAAc,SAAS,kBAAkB,WAAW,KAAK;AAAA,EACtG;AAAA;AAAA,EAGA;AAAA,IACE,SAAS;AAAA,IACT,MAAM,EAAE,MAAM,mBAAmB,UAAU,UAAU,SAAS,YAAY,WAAW,KAAK;AAAA,EAC5F;AAAA;AAAA,EAGA;AAAA,
IACE,SAAS;AAAA,IACT,MAAM,EAAE,MAAM,qBAAqB,UAAU,SAAS,SAAS,YAAY,WAAW,KAAK;AAAA,EAC7F;AAAA;AAAA,EAGA;AAAA,IACE,SAAS;AAAA,IACT,MAAM,EAAE,MAAM,sBAAsB,UAAU,QAAQ,SAAS,YAAY,WAAW,MAAM;AAAA,EAC9F;AAAA,EACA;AAAA,IACE,SAAS;AAAA,IACT,MAAM,EAAE,MAAM,eAAe,UAAU,QAAQ,SAAS,UAAU,WAAW,MAAM;AAAA,EACrF;AAAA;AAAA,EAGA;AAAA,IACE,SAAS;AAAA,IACT,MAAM,EAAE,MAAM,SAAS,UAAU,gBAAgB,SAAS,YAAY,WAAW,MAAM;AAAA,EACzF;AAAA;AAAA,EAGA;AAAA,IACE,SAAS;AAAA,IACT,MAAM,EAAE,MAAM,aAAa,UAAU,UAAU,SAAS,YAAY,WAAW,MAAM;AAAA,EACvF;AAAA;AAAA,EAGA;AAAA,IACE,SAAS;AAAA,IACT,MAAM,EAAE,MAAM,aAAa,UAAU,UAAU,SAAS,UAAU,WAAW,MAAM;AAAA,EACrF;AAAA;AAAA,EAGA;AAAA,IACE,SAAS;AAAA,IACT,MAAM,EAAE,MAAM,cAAc,UAAU,aAAa,SAAS,YAAY,WAAW,MAAM;AAAA,EAC3F;AAAA;AAAA,EAGA;AAAA,IACE,SAAS;AAAA,IACT,MAAM,EAAE,MAAM,UAAU,UAAU,WAAW,SAAS,UAAU,WAAW,MAAM;AAAA,EACnF;AAAA;AAAA,EAGA;AAAA,IACE,SAAS;AAAA,IACT,MAAM,EAAE,MAAM,eAAe,UAAU,YAAY,SAAS,YAAY,WAAW,MAAM;AAAA,EAC3F;AACF;AAEA,IAAM,iBAAiB,gBAAgB,IAAI,CAAC,WAAW;AAAA,EACrD;AAAA,EACA,OAAO,aAAa,MAAM,OAAO;AACnC,EAAE;AASK,SAAS,YACd,WACA,cACkB;AAClB,QAAM,gBAAgB,eAClB,kBAAkB,KAAK,YAAY,IACnC;AAEJ,MAAI,CAAC,WAAW;AACd,WAAO,EAAE,SAAS,OAAO,KAAK,MAAM,cAAc;AAAA,EACpD;AAEA,QAAM,KAAK,UAAU,YAAY;AAGjC,aAAW,EAAE,OAAO,MAAM,KAAK,gBAAgB;AAC7C,QAAI,OAAO;AACT,UAAI,GAAG,SAAS,KAAK,GAAG;AACtB,eAAO,EAAE,SAAS,MAAM,KAAK,MAAM,MAAM,cAAc;AAAA,MACzD;AACA;AAAA,IACF;AACA,QAAI,MAAM,QAAQ,KAAK,SAAS,GAAG;AACjC,aAAO,EAAE,SAAS,MAAM,KAAK,MAAM,MAAM,cAAc;AAAA,IACzD;AAAA,EACF;AAGA,SAAO,EAAE,SAAS,OAAO,KAAK,MAAM,cAAc;AACpD;AAMO,SAAS,oBACd,WACA,cACS;AACT,QAAM,MAAM,YAAY,WAAW,YAAY;AAC/C,SAAO,IAAI,WAAW,IAAI;AAC5B;AAIA,SAAS,aAAa,SAAgC;AACpD,QAAM,SAAS,QAAQ;AACvB,MAAI,kBAAkB,KAAK,MAAM,EAAG,QAAO,OAAO,YAAY;AAC9D,SAAO;AACT;;;AC5KA,IAAAA,gBAAsB;AACtB,yBAAkD;;;ACDlD,mBAAsB;AAUtB,IAAM,qBAAqB;AAAA,EACzB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF;AAMA,IAAM,0BAA0B;AAAA;AAAA,EAE9B;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA;AAAA,EAGA;AAAA,EAAkB;AAAA,
EAAO;AAAA,EAAQ;AAAA,EAAkB;AAAA,EACnD;AAAA,EAAkB;AAAA,EAAmB;AAAA,EACrC;AAAA,EAAU;AAAA,EAAU;AAAA,EAAoB;AAAA,EACxC;AAAA;AAAA,EAGA;AAAA,EAAiB;AAAA,EAAkB;AAAA,EAAqB;AAAA,EACxD;AAAA;AAAA,EAGA;AAAA,EAAa;AAAA,EAAa;AAAA,EAC1B;AAAA;AAAA,EAGA;AAAA,EAAkB;AAAA,EAAgB;AAAA,EAAY;AAAA,EAC9C;AAAA,EAAsB;AAAA,EAAsB;AAAA,EAC5C;AAAA,EAAe;AAAA,EAAc;AAAA,EAC7B;AAAA,EAAQ;AAAA;AAAA,EAGR;AAAA,EAAe;AAAA,EACf;AAAA,EACA;AAAA,EACA;AAAA;AAAA,EAGA;AAAA,EACA;AACF;AAOO,SAAS,aAAa,MAAc,UAA2B,CAAC,GAAW;AAChF,QAAM,EAAE,iBAAiB,CAAC,GAAG,oBAAoB,CAAC,EAAE,IAAI;AAExD,QAAM,MAAM,IAAI,mBAAM,IAAI;AAC1B,QAAM,WAAW,IAAI,OAAO;AAG5B,QAAM,cAAc,oBAAI,IAAU;AAClC,aAAW,YAAY,mBAAmB;AACxC,QAAI;AACF,eAAS,iBAAiB,QAAQ,EAAE,QAAQ,CAAC,OAAO,YAAY,IAAI,EAAE,CAAC;AAAA,IACzE,QAAQ;AAAA,IAER;AAAA,EACF;AAGA,aAAW,OAAO,oBAAoB;AACpC,QAAI;AACF,eAAS,iBAAiB,GAAG,EAAE,QAAQ,CAAC,OAAO;AAC7C,YAAI,CAAC,YAAY,IAAI,EAAE,EAAG,IAAG,OAAO;AAAA,MACtC,CAAC;AAAA,IACH,QAAQ;AAAA,IAER;AAAA,EACF;AAGA,QAAM,oBAAoB,CAAC,GAAG,yBAAyB,GAAG,cAAc;AACxE,aAAW,YAAY,mBAAmB;AACxC,QAAI;AACF,eAAS,iBAAiB,QAAQ,EAAE,QAAQ,CAAC,OAAO;AAClD,YAAI,CAAC,YAAY,IAAI,EAAE,EAAG,IAAG,OAAO;AAAA,MACtC,CAAC;AAAA,IACH,QAAQ;AAAA,IAER;AAAA,EACF;AAGA,0BAAwB,QAAQ;AAGhC,sBAAoB,QAAQ;AAG5B,kBAAgB,QAAQ;AAExB,QAAM,SAAS,SAAS,MAAM,aAAa;AAC3C,MAAI,OAAO,MAAM;AACjB,SAAO;AACT;AAEA,SAAS,wBAAwB,UAA0B;AACzD,QAAM,aAAa,SAAS,iBAAiB,oBAAoB;AACjE,aAAW,MAAM,YAAY;AAC3B,UAAM,cAAc,GAAG,eAAe,IAAI,KAAK,EAAE;AACjD,UAAM,oBAAoB,GAAG,iBAAiB,GAAG,EAAE;AAEnD,QAAI,oBAAoB,MAAM,aAAa,IAAI;AAC7C,SAAG,OAAO;AAAA,IACZ;AAAA,EACF;AACF;AAEA,SAAS,oBAAoB,UAA0B;AACrD,QAAM,aAAa,SAAS,iBAAiB,gCAAgC;AAC7E,aAAW,MAAM,YAAY;AAC3B,QAAI,EAAE,GAAG,eAAe,IAAI,KAAK,KAAK,CAAC,GAAG,cAAc,uBAAuB,GAAG;AAChF,SAAG,OAAO;AAAA,IACZ;AAAA,EACF;AACF;AAEA,SAAS,gBAAgB,UAA0B;AACjD,QAAM,MAAM,SAAS,iBAAiB,GAAG;AACzC,QAAM,YAAY,oBAAI,IAAI,CAAC,QAAQ,OAAO,OAAO,SAAS,WAAW,WAAW,SAAS,WAAW,QAAQ,OAAO,MAAM,CAAC;AAE1H,aAAW,MAAM,KAAK;AACpB,UAAM,QAAQ,MAAM,KAAK,GAAG,UAAU;AACtC,eAAW,QAAQ,OAAO;AAExB,UAAI,KAAK,SAAS,WAAW,GAAG,YAAY,OAAQ;AACpD,UAAI,CAAC,UAAU,IAAI,KAAK,IAAI,GAAG;AAC7B,WAAG,gBAAgB,
KAAK,IAAI;AAAA,MAC9B;AAAA,IACF;AAAA,EACF;AACF;;;ACtKA,sBAA4B;AAC5B,iCAAoB;AAQb,SAAS,eAAe,MAAc,UAA2B,CAAC,GAAW;AAClF,QAAM,EAAE,KAAK,cAAc,CAAC,EAAE,IAAI;AAElC,QAAM,WAAW,IAAI,gBAAAC,QAAgB;AAAA,IACnC,cAAc;AAAA,IACd,gBAAgB;AAAA,IAChB,kBAAkB;AAAA,IAClB,aAAa;AAAA,IACb,iBAAiB;AAAA,IACjB,WAAW;AAAA,IACX,IAAI;AAAA,EACN,CAAC;AAGD,WAAS,IAAI,8BAAG;AAGhB,WAAS,QAAQ,mBAAmB;AAAA,IAClC,QAAQ,CAAC,SAAS;AAChB,aACE,KAAK,aAAa,SAClB,KAAK,eAAe,QACpB,KAAK,WAAW,aAAa;AAAA,IAEjC;AAAA,IACA,aAAa,CAAC,UAAU,SAAS;AAC/B,YAAM,SAAS,KAAK;AACpB,YAAM,YAAY,QAAQ,eAAe,OAAO,KAAK;AAGrD,YAAM,YAAY,UAAU;AAAA,QAC1B;AAAA,MACF;AACA,YAAM,OAAO,YAAY,UAAU,CAAC,IAAI;AACxC,YAAM,OAAO,QAAQ,eAAe;AAEpC,aAAO;AAAA;AAAA,QAAa,IAAI;AAAA,EAAK,KAAK,QAAQ,QAAQ,EAAE,CAAC;AAAA;AAAA;AAAA;AAAA,IACvD;AAAA,EACF,CAAC;AAGD,WAAS,QAAQ,oBAAoB;AAAA,IACnC,QAAQ,CAAC,SAAS,KAAK,aAAa;AAAA,IACpC,aAAa,CAAC,UAAU,SAAS;AAC/B,YAAM,KAAK;AACX,YAAM,MAAM,GAAG,aAAa,KAAK,GAAG,KAAK;AACzC,YAAM,MAAM,GAAG,aAAa,KAAK,GAAG,KAAK;AAGzC,UAAI,CAAC,IAAK,QAAO;AAGjB,UAAI,cAAc,OAAO;AACzB,UAAI,OAAO,OAAO,CAAC,IAAI,WAAW,MAAM,KAAK,CAAC,IAAI,WAAW,OAAO,GAAG;AACrE,YAAI;AACF,wBAAc,IAAI,IAAI,KAAK,GAAG,EAAE;AAAA,QAClC,QAAQ;AACN,wBAAc;AAAA,QAChB;AAAA,MACF;AAEA,aAAO,KAAK,GAAG,KAAK,WAAW;AAAA,IACjC;AAAA,EACF,CAAC;AAGD,WAAS,QAAQ,gBAAgB;AAAA,IAC/B,QAAQ;AAAA,IACR,aAAa,CAAC,SAAS,SAAS;AAC9B,YAAM,KAAK;AACX,YAAM,OAAO,GAAG,aAAa,MAAM;AACnC,UAAI,CAAC,QAAQ,CAAC,QAAQ,KAAK,EAAG,QAAO;AAGrC,UAAI,KAAK,WAAW,GAAG,EAAG,QAAO;AAGjC,UAAI,KAAK,WAAW,aAAa,KAAK,KAAK,WAAW,gBAAgB,EAAG,QAAO;AAEhF,UAAI,eAAe;AACnB,UAAI,OAAO,CAAC,KAAK,WAAW,MAAM,KAAK,CAAC,KAAK,WAAW,SAAS,GAAG;AAClE,YAAI;AACF,yBAAe,IAAI,IAAI,MAAM,GAAG,EAAE;AAAA,QACpC,QAAQ;AACN,yBAAe;AAAA,QACjB;AAAA,MACF;AAEA,YAAM,QAAQ,GAAG,aAAa,OAAO;AACrC,aAAO,QACH,IAAI,OAAO,KAAK,YAAY,KAAK,KAAK,OACtC,IAAI,OAAO,KAAK,YAAY;AAAA,IAClC;AAAA,EACF,CAAC;AAGD,aAAW,QAAQ,aAAa;AAC9B,aAAS,QAAQ,KAAK,MAAM;AAAA,MAC1B,QAAQ,KAAK;AAAA,MACb,aAAa,KAAK;AAAA,IACpB,CAAC;AAAA,EACH;AAEA,MAAI,WAAW,SAAS,SAAS,IAAI;AAGrC,aAAW,SAAS,QAAQ,WAAW,MAAM,EAAE,KAAK;AAEpD,SAAO;AACT;;;AClHO,SAAS,oBAAoB,UAA+C;AACjF,QAA
M,UAAqC,CAAC;AAC5C,QAAM,UAAU,SAAS,iBAAiB,oCAAoC;AAE9E,aAAW,UAAU,SAAS;AAC5B,QAAI;AACF,YAAM,OAAO,KAAK,MAAM,OAAO,eAAe,EAAE;AAEhD,UAAI,KAAK,QAAQ,KAAK,MAAM,QAAQ,KAAK,QAAQ,CAAC,GAAG;AACnD,gBAAQ,KAAK,GAAG,KAAK,QAAQ,CAAC;AAAA,MAChC,OAAO;AACL,gBAAQ,KAAK,IAAI;AAAA,MACnB;AAAA,IACF,QAAQ;AAAA,IAER;AAAA,EACF;AAEA,SAAO;AACT;;;AClBO,SAAS,eAAe,MAAsB;AACnD,SAAO,KAAK,KAAK,KAAK,SAAS,CAAC;AAClC;;;ACKO,SAAS,iBAAiB,OAAiC;AAChE,QAAM,QAAkB,CAAC,KAAK;AAE9B,MAAI,MAAM,MAAO,OAAM,KAAK,WAAW,WAAW,MAAM,KAAK,CAAC,GAAG;AACjE,MAAI,MAAM;AACR,UAAM,KAAK,iBAAiB,WAAW,MAAM,WAAW,CAAC,GAAG;AAC9D,MAAI,MAAM,IAAK,OAAM,KAAK,SAAS,WAAW,MAAM,GAAG,CAAC,GAAG;AAC3D,MAAI,MAAM,KAAM,OAAM,KAAK,UAAU,WAAW,MAAM,IAAI,CAAC,GAAG;AAC9D,MAAI,MAAM,aAAc,OAAM,KAAK,kBAAkB,WAAW,MAAM,YAAY,CAAC,GAAG;AAGtF,MAAI,MAAM,QAAQ,QAAQ;AACxB,UAAM,UAAU,MAAM,OAAO,CAAC;AAE9B,UAAM,cAAc,UAAU,OAAO;AACrC,QAAI,aAAa;AACf,YAAM,UAAU,MAAM,QAAQ,WAAW,IAAI,YAAY,CAAC,IAAI;AAC9D,UAAI,OAAO,YAAY,UAAU;AAC/B,cAAM,KAAK,YAAY,WAAW,OAAO,CAAC,GAAG;AAAA,MAC/C;AAAA,IACF;AAGA,UAAM,SAAS,SAAS;AACxB,QAAI,QAAQ;AACV,YAAM,aAAa,OAAO;AAC1B,UAAI,WAAY,OAAM,KAAK,YAAY,WAAW,UAAU,CAAC,GAAG;AAAA,IAClE;AAGA,UAAM,gBAAgB,SAAS;AAC/B,QAAI,cAAe,OAAM,KAAK,mBAAmB,WAAW,aAAa,CAAC,GAAG;AAE7E,UAAM,eAAe,SAAS;AAC9B,QAAI,aAAc,OAAM,KAAK,kBAAkB,WAAW,YAAY,CAAC,GAAG;AAAA,EAC5E;AAEA,QAAM,KAAK,KAAK;AAChB,SAAO,MAAM,KAAK,IAAI;AACxB;AAEA,SAAS,WAAW,KAAqB;AAEvC,SAAO,IACJ,QAAQ,OAAO,MAAM,EACrB,QAAQ,MAAM,KAAK,EACnB,QAAQ,OAAO,GAAG;AACvB;;;AL7CA,eAAsB,UACpB,MACA,UAA4B,CAAC,GACH;AAC1B,QAAM;AAAA,IACJ;AAAA,IACA;AAAA,IACA,gBAAgB;AAAA,IAChB,iBAAiB,CAAC;AAAA,IAClB,oBAAoB,CAAC;AAAA,IACrB,cAAc;AAAA,IACd,gBAAgB,CAAC;AAAA,EACnB,IAAI;AAGJ,QAAM,MAAM,IAAI,oBAAM,MAAM,EAAE,KAAK,OAAO,oBAAoB,CAAC;AAC/D,QAAM,WAAW,IAAI,OAAO;AAG5B,QAAM,QACJ,SAAS,cAAc,OAAO,GAAG,aAAa,KAAK,KACnD,SAAS,cAAc,IAAI,GAAG,aAAa,KAAK,KAChD;AACF,QAAM,cACJ,SAAS,cAAc,0BAA0B,GAAG,aAAa,SAAS,GAAG,KAAK,KAAK;AACzF,QAAM,eACJ,SAAS,cAAc,uBAAuB,GAAG,aAAa,MAAM,KAAK;AAC3E,QAAM,OAAO,SAAS,gBAAgB,aAAa,MAAM,KAAK;AAC9D,QAAM,eACJ,SAAS,cAAc,wCAAwC,GAAG,aAAa,SAAS,KACxF,SAAS,cAAc,4BAA4B,GAAG,
aAAa,SAAS,KAC5E;AAGF,QAAM,SAAS,gBAAgB,oBAAoB,QAAQ,IAAI,CAAC;AAGhE,MAAI;AACJ,UAAI,yCAAqB,QAAQ,GAAG;AAClC,UAAM,SAAS,IAAI,+BAAY,UAAU,EAAE,eAAe,IAAI,CAAC;AAC/D,UAAM,UAAU,OAAO,MAAM;AAC7B,kBAAc,SAAS,WAAW,SAAS,MAAM,aAAa;AAAA,EAChE,OAAO;AAEL,kBAAc,SAAS,MAAM,aAAa;AAAA,EAC5C;AAGA,QAAM,YAAY,aAAa,aAAa;AAAA,IAC1C;AAAA,IACA;AAAA,EACF,CAAC;AAGD,MAAI,WAAW,eAAe,WAAW,EAAE,KAAK,aAAa,cAAc,CAAC;AAG5E,MAAI,aAAa;AACf,UAAM,KAAK,iBAAiB,EAAE,OAAO,aAAa,KAAK,MAAM,cAAc,OAAO,CAAC;AACnF,eAAW,KAAK,SAAS;AAAA,EAC3B;AAGA,MAAI,gBAAgB,eAAe,QAAQ;AAC3C,MAAI,eAAe,gBAAgB,aAAa;AAC9C,eAAW,sBAAsB,UAAU,WAAW;AACtD,oBAAgB,eAAe,QAAQ;AAAA,EACzC;AAEA,MAAI,OAAO,MAAM;AAEjB,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;AAMA,SAAS,sBAAsB,UAAkB,QAAwB;AACvE,QAAM,QAAQ,SAAS,MAAM,IAAI;AACjC,QAAM,SAAmB,CAAC;AAC1B,MAAI,gBAAgB;AAEpB,aAAW,QAAQ,OAAO;AACxB,UAAM,aAAa,eAAe,IAAI;AACtC,QAAI,gBAAgB,aAAa,QAAQ;AACvC,UAAI,YAAY,KAAK,IAAI,GAAG;AAC1B,eAAO,KAAK,IAAI;AAChB,eAAO,KAAK,4CAA4C;AAAA,MAC1D;AACA;AAAA,IACF;AACA,WAAO,KAAK,IAAI;AAChB,qBAAiB;AAAA,EACnB;AAEA,SAAO,OAAO,KAAK,IAAI;AACzB;;;AM5GO,SAAS,gBACd,SACA,QACA,kBACe;AACf,QAAM,EAAE,UAAU,iBAAiB,SAAS,oBAAoB,MAAM,IAAI;AAG1E,QAAM,WAAW,oBAAI,IAA4B;AACjD,aAAW,SAAS,QAAQ;AAC1B,UAAM,UAAU,MAAM,WAAW;AACjC,QAAI,CAAC,SAAS,IAAI,OAAO,EAAG,UAAS,IAAI,SAAS,CAAC,CAAC;AACpD,aAAS,IAAI,OAAO,EAAG,KAAK,KAAK;AAAA,EACnC;AAGA,QAAM,QAAkB,CAAC;AACzB,QAAM,KAAK,KAAK,QAAQ,EAAE;AAC1B,QAAM,KAAK,EAAE;AACb,QAAM,KAAK,KAAK,eAAe,EAAE;AACjC,QAAM,KAAK,EAAE;AAEb,aAAW,CAAC,SAAS,aAAa,KAAK,UAAU;AAC/C,UAAM,KAAK,MAAM,OAAO,EAAE;AAC1B,UAAM,KAAK,EAAE;AACb,eAAW,SAAS,eAAe;AACjC,YAAM,MAAM,GAAG,OAAO,GAAG,MAAM,IAAI,GAAG,iBAAiB;AACvD,YAAM,OAAO,MAAM,cAAc,KAAK,MAAM,WAAW,KAAK;AAC5D,YAAM,KAAK,MAAM,MAAM,KAAK,KAAK,GAAG,IAAI,IAAI,EAAE;AAAA,IAChD;AACA,UAAM,KAAK,EAAE;AAAA,EACf;AAEA,QAAM,UAAU,MAAM,KAAK,IAAI,EAAE,KAAK,IAAI;AAG1C,QAAM,YAAsB,CAAC;AAC7B,YAAU,KAAK,KAAK,QAAQ,EAAE;AAC9B,YAAU,KAAK,EAAE;AACjB,YAAU,KAAK,KAAK,eAAe,EAAE;AACrC,YAAU,KAAK,EAAE;AAEjB,MAAI,kBAAkB;AACpB,eAAW,SAAS,QAAQ;AAC1B,YAAM,UAAU
,iBAAiB,IAAI,MAAM,IAAI;AAC/C,UAAI,SAAS;AACX,kBAAU,KAAK,KAAK;AACpB,kBAAU,KAAK,EAAE;AACjB,kBAAU,KAAK,MAAM,MAAM,KAAK,EAAE;AAClC,kBAAU,KAAK,WAAW,OAAO,GAAG,MAAM,IAAI,EAAE;AAChD,kBAAU,KAAK,EAAE;AACjB,kBAAU,KAAK,OAAO;AACtB,kBAAU,KAAK,EAAE;AAAA,MACnB;AAAA,IACF;AAAA,EACF;AAEA,QAAM,cAAc,UAAU,KAAK,IAAI,EAAE,KAAK,IAAI;AAElD,SAAO;AAAA,IACL;AAAA,IACA;AAAA,IACA,YAAY,OAAO;AAAA,EACrB;AACF;;;AC5EA,qBAA+E;AAE/E,uBAA0B;AAqC1B,IAAM,qBAAqB;AAAA,EACzB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF;AAEA,IAAM,YAAY,oBAAI,IAAI;AAAA,EACxB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA;AACF,CAAC;AAuBM,SAAS,mBACd,QACA,UAA2B,CAAC,GACZ;AAChB,QAAM;AAAA,IACJ,UAAU,CAAC,MAAM;AAAA,IACjB,kBAAkB;AAAA,IAClB,iBAAiB;AAAA,EACnB,IAAI;AAEJ,MAAI,KAAC,2BAAW,MAAM,GAAG;AACvB,WAAO,CAAC;AAAA,EACV;AAEA,QAAM,SAAyB,CAAC;AAChC,MAAI,WAAW;AACf,MAAI;AACF,mBAAW,6BAAa,MAAM;AAAA,EAChC,QAAQ;AACN,WAAO;AAAA,EACT;AACA,aAAW,QAAQ,QAAQ,UAAU,QAAQ,SAAS,iBAAiB,cAAc;AAGrF,SAAO,KAAK,CAAC,GAAG,MAAM;AACpB,QAAI,EAAE,SAAS,IAAK,QAAO;AAC3B,QAAI,EAAE,SAAS,IAAK,QAAO;AAC3B,WAAO,EAAE,KAAK,cAAc,EAAE,IAAI;AAAA,EACpC,CAAC;AAED,SAAO;AACT;AAEA,SAAS,WACP,SACA,YACA,UACA,QACA,SACA,iBACA,gBACM;AACN,QAAM,cAAU,4BAAY,UAAU;AAEtC,aAAW,SAAS,SAAS;AAC3B,UAAM,eAAW,uBAAK,YAAY,KAAK;AACvC,QAAI;AACJ,QAAI;AACF,iBAAO,0BAAU,QAAQ;AAAA,IAC3B,QAAQ;AACN;AAAA,IACF;AAEA,QAAI,KAAK,eAAe,EAAG;AAE3B,QAAI,KAAK,YAAY,GAAG;AACtB,UAAI,UAAU;AACd,UAAI;AACF,sBAAU,6BAAa,QAAQ;AAAA,MACjC,QAAQ;AACN;AAAA,MACF;AACA,UAAI,CAAC,aAAa,SAAS,QAAQ,EAAG;AAGtC,YAAM,WAAW,MAAM,YAAY;AACnC,UACE,SAAS,WAAW,GAAG,KACvB,SAAS,WAAW,GAAG,KACvB,UAAU,IAAI,QAAQ;AAEtB;AAIF;AAAA,QACE;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MACF;AACA;AAAA,IACF;AAGA,QAAI,CAAC,mBAAmB,SAAS,KAAK,EAAG;AAGzC,UAAM,eAAe,WAAW,UAAU,QAAQ,MAAM;AACxD,QAAI,UAAU,aAAa,QAAQ,OAAO,GAAG;AAG7C,cAAU,QAAQ,QAAQ,gBAAgB,EAAE;AAI5C,QAAI,QAAQ,SAAS,MAAM,KAAK,QAAQ,SAAS,OAAO,EAAG;AAG3D,QAAI,YAAY,GAAI,WAAU;AAG9B,QAAI,CAAC,QAAQ,WAAW,
GAAG,EAAG,WAAU,MAAM;AAG9C,QAAI,cAAc,SAAS,OAAO,EAAG;AAGrC,QAAI,YAAY,eAAe,YAAY,iBAAkB;AAG7D,UAAM,EAAE,OAAO,YAAY,IAAI,wBAAwB,QAAQ;AAG/D,UAAM,UAAU,cAAc,SAAS,iBAAiB,cAAc;AAEtE,WAAO,KAAK;AAAA,MACV,MAAM;AAAA,MACN,OAAO,SAAS,YAAY,OAAO;AAAA,MACnC,aAAa,eAAe;AAAA,MAC5B;AAAA,IACF,CAAC;AAAA,EACH;AACF;AAWA,SAAS,wBAAwB,UAG/B;AACA,MAAI;AACF,UAAM,cAAU,6BAAa,UAAU,OAAO;AAC9C,WAAO,0BAA0B,OAAO;AAAA,EAC1C,QAAQ;AACN,WAAO,EAAE,OAAO,IAAI,aAAa,GAAG;AAAA,EACtC;AACF;AAMO,SAAS,0BAA0B,QAGxC;AACA,MAAI,QAAQ;AACZ,MAAI,cAAc;AAIlB,QAAM,gBAAgB,OAAO;AAAA,IAC3B;AAAA,EACF;AAEA,MAAI,CAAC,eAAe;AAClB,WAAO,EAAE,OAAO,YAAY;AAAA,EAC9B;AAGA,QAAM,WAAW,cAAc,QAAS,cAAc,CAAC,EAAE,SAAS;AAClE,QAAM,YAAY,sBAAsB,QAAQ,QAAQ;AAExD,MAAI,CAAC,WAAW;AACd,WAAO,EAAE,OAAO,YAAY;AAAA,EAC9B;AAGA,QAAM,aAAa,UAAU;AAAA,IAC3B;AAAA,EACF;AACA,MAAI,YAAY;AACd,YAAQ,sBAAsB,WAAW,CAAC,KAAK,WAAW,CAAC,KAAK,WAAW,CAAC,KAAK,EAAE;AAAA,EACrF;AAGA,QAAM,YAAY,UAAU;AAAA,IAC1B;AAAA,EACF;AACA,MAAI,WAAW;AACb,kBAAc,sBAAsB,UAAU,CAAC,KAAK,UAAU,CAAC,KAAK,UAAU,CAAC,KAAK,EAAE;AAAA,EACxF;AAEA,SAAO,EAAE,OAAO,YAAY;AAC9B;AAKA,SAAS,sBAAsB,QAAgB,OAA8B;AAC3E,MAAI,OAAO,KAAK,MAAM,IAAK,QAAO;AAElC,MAAI,QAAQ;AACZ,WAAS,IAAI,OAAO,IAAI,OAAO,QAAQ,KAAK;AAC1C,QAAI,OAAO,CAAC,MAAM,IAAK;AAAA,aACd,OAAO,CAAC,MAAM,IAAK;AAE5B,QAAI,UAAU,GAAG;AACf,aAAO,OAAO,UAAU,OAAO,IAAI,CAAC;AAAA,IACtC;AAAA,EACF;AAEA,SAAO;AACT;AAKA,SAAS,cACP,SACA,UACA,gBACQ;AACR,MAAI,OAAO,aAAa,YAAY;AAClC,WAAO,SAAS,OAAO;AAAA,EACzB;AAGA,QAAM,WAAW,QAAQ,MAAM,GAAG,EAAE,OAAO,OAAO;AAClD,MAAI,SAAS,WAAW,EAAG,QAAO;AAElC,QAAM,eAAe,SAAS,CAAC;AAG/B,MAAI,aAAa,WAAW,GAAG,EAAG,QAAO;AAGzC,SAAO,aACJ,MAAM,GAAG,EACT,IAAI,CAAC,SAAS,KAAK,OAAO,CAAC,EAAE,YAAY,IAAI,KAAK,MAAM,CAAC,CAAC,EAC1D,KAAK,GAAG;AACb;AAOA,SAAS,YAAY,SAAyB;AAC5C,MAAI,YAAY,IAAK,QAAO;AAE5B,QAAM,cAAc,QAAQ,MAAM,GAAG,EAAE,OAAO,OAAO,EAAE,IAAI,KAAK;AAGhE,MAAI,YAAY,WAAW,GAAG,GAAG;AAC/B,WAAO,YAAY,QAAQ,YAAY,EAAE;AAAA,EAC3C;AAEA,SAAO,YACJ,MAAM,GAAG,EACT,IAAI,CAAC,SAAS,KAAK,OAAO,CAAC,EAAE,YAAY,IAAI,KAAK,MAAM,CAAC,CAAC,EAC1D,KAAK,GAAG;AACb;AAKA,SAAS,cAAc,SAAiB,UAA6B;AACnE,aAAW,WAAW,UAAU;AAE9B,Q
AAI,QAAQ,SAAS,KAAK,KAAK,QAAQ,SAAS,IAAI,GAAG;AACrD,YAAM,SAAS,QAAQ,QAAQ,YAAY,EAAE;AAC7C,UAAI,YAAY,UAAU,QAAQ,WAAW,SAAS,GAAG,EAAG,QAAO;AAAA,IACrE,WAAW,QAAQ,WAAW,GAAG,GAAG;AAElC,UACE,QAAQ,WAAW,MAAM,OAAO,KAChC,QAAQ,SAAS,MAAM,UAAU,GAAG;AAEpC,eAAO;AAAA,IACX,WAAW,YAAY,WAAW,QAAQ,WAAW,UAAU,GAAG,GAAG;AACnE,aAAO;AAAA,IACT;AAAA,EACF;AACA,SAAO;AACT;AAUO,SAAS,yBACd,KACA,UAA2B,CAAC,GACZ;AAChB,QAAM;AAAA,IACJ,UAAU,CAAC;AAAA,IACX,kBAAkB;AAAA,IAClB,iBAAiB;AAAA,EACnB,IAAI;AAEJ,MAAI,KAAC,2BAAW,GAAG,GAAG;AACpB,WAAO,CAAC;AAAA,EACV;AAEA,MAAI,WAAW;AACf,MAAI;AACF,mBAAW,6BAAa,GAAG;AAAA,EAC7B,QAAQ;AACN,WAAO,CAAC;AAAA,EACV;AAEA,QAAM,YAAY,cAAc,KAAK,QAAQ;AAC7C,QAAM,SAAyB,CAAC;AAEhC,aAAW,YAAY,WAAW;AAChC,UAAM,eAAe,SAAS,UAAU,IAAI,MAAM;AAClD,QAAI,UAAU,aACX,QAAQ,OAAO,GAAG,EAClB,QAAQ,kBAAkB,GAAG,EAC7B,QAAQ,WAAW,EAAE;AAExB,QAAI,YAAY,GAAI,WAAU;AAC9B,QAAI,CAAC,QAAQ,WAAW,GAAG,EAAG,WAAU,MAAM;AAE9C,QAAI,cAAc,SAAS,OAAO,EAAG;AAErC,UAAM,UAAU,cAAc,SAAS,iBAAiB,cAAc;AAGtE,QAAI,QAAQ;AACZ,QAAI;AACF,YAAM,WAAO,6BAAa,UAAU,OAAO;AAC3C,YAAM,aAAa,KAAK,MAAM,+BAA+B;AAC7D,cAAQ,sBAAsB,aAAa,CAAC,GAAG,KAAK,KAAK,EAAE;AAAA,IAC7D,QAAQ;AAAA,IAER;AAEA,WAAO,KAAK;AAAA,MACV,MAAM;AAAA,MACN,OAAO,SAAS,YAAY,OAAO;AAAA,MACnC;AAAA,IACF,CAAC;AAAA,EACH;AAEA,SAAO,KAAK,CAAC,GAAG,MAAM;AACpB,QAAI,EAAE,SAAS,IAAK,QAAO;AAC3B,QAAI,EAAE,SAAS,IAAK,QAAO;AAC3B,WAAO,EAAE,KAAK,cAAc,EAAE,IAAI;AAAA,EACpC,CAAC;AAED,SAAO;AACT;AAEA,SAAS,cAAc,KAAa,UAA4B;AAC9D,QAAM,UAAoB,CAAC;AAC3B,MAAI,KAAC,2BAAW,GAAG,EAAG,QAAO;AAE7B,QAAM,cAAU,4BAAY,GAAG;AAC/B,aAAW,SAAS,SAAS;AAC3B,UAAM,eAAW,uBAAK,KAAK,KAAK;AAChC,QAAI;AACJ,QAAI;AACF,iBAAO,0BAAU,QAAQ;AAAA,IAC3B,QAAQ;AACN;AAAA,IACF;AAEA,QAAI,KAAK,eAAe,EAAG;AAE3B,QAAI,KAAK,YAAY,GAAG;AACtB,UAAI,CAAC,gBAAgB,SAAS,QAAQ,QAAQ,QAAQ,EAAE,SAAS,KAAK;AACpE;AACF,UAAI,UAAU;AACd,UAAI;AACF,sBAAU,6BAAa,QAAQ;AAAA,MACjC,QAAQ;AACN;AAAA,MACF;AACA,UAAI,CAAC,aAAa,SAAS,QAAQ,EAAG;AACtC,cAAQ,KAAK,GAAG,cAAc,UAAU,QAAQ,CAAC;AAAA,IACnD,WAAW,MAAM,SAAS,OAAO,GAAG;AAClC,cAAQ,KAAK,QAAQ;AAAA,IACvB;AAAA,EACF;AAEA,SAAO;AACT;AAEA,SAAS,aAAa,UAAkB,UAA2B;AACjE,MAAI,aAAa,SAAU,QA
AO;AAClC,QAAM,iBAAiB,SAAS,SAAS,oBAAG,IAAI,WAAW,WAAW;AACtE,SAAO,SAAS,WAAW,cAAc;AAC3C;AAEA,SAAS,sBAAsB,OAAuB;AACpD,MAAI,CAAC,MAAO,QAAO;AACnB,QAAM,YAAY,MACf,QAAQ,0BAA0B,GAAG,EACrC,QAAQ,SAAS,EAAE,EACnB,QAAQ,QAAQ,GAAG,EACnB,KAAK;AACR,SAAO,UAAU,SAAS,MAAM,UAAU,MAAM,GAAG,GAAG,IAAI;AAC5D;;;AC5eO,SAAS,qBACd,QACA,SACA,cACiB;AACjB,QAAM,UAA2B;AAAA,IAC/B,gBAAgB;AAAA,IAChB,uBAAuB;AAAA,IACvB,QAAQ;AAAA,IACR,qBAAqB,OAAO,OAAO,aAAa;AAAA,EAClD;AAGA,QAAM,SAAS,QAAQ,iBAAiB,EAAE,SAAS,MAAM,QAAQ,MAAM,SAAS,KAAK;AACrF,QAAM,cAAwB,CAAC;AAC/B,MAAI,OAAO,YAAY,MAAO,aAAY,KAAK,cAAc;AAC7D,MAAI,OAAO,WAAW,MAAO,aAAY,KAAK,YAAY;AAC1D,MAAI,OAAO,YAAY,MAAO,aAAY,KAAK,cAAc;AAC7D,MAAI,YAAY,SAAS,GAAG;AAC1B,YAAQ,gBAAgB,IAAI,YAAY,KAAK,IAAI;AAAA,EACnD;AAGA,UAAQ,cAAc,IAAI;AAE1B,SAAO;AACT;AAOO,SAAS,yBAAyB,MAAc,MAAc,OAAe;AAElF,QAAM,SAAS,KAAK,SAAS,GAAG,IAAI,GAAG,IAAI,QAAQ,GAAG,KAAK,GAAG,IAAI,GAAG,GAAG;AACxE,SAAO,IAAI,MAAM;AACnB;;;AC1CA,uBAAyB;AAUlB,SAAS,YAAY,UAAwB,CAAC,GAAG;AACtD,QAAM,EAAE,aAAa,KAAK,MAAM,IAAQ,IAAI;AAE5C,QAAM,QAAQ,IAAI,0BAAkC;AAAA,IAClD,KAAK;AAAA,IACL;AAAA,EACF,CAAC;AAED,SAAO;AAAA,IACL,KAAK,CAAC,QAAgB,MAAM,IAAI,GAAG;AAAA,IACnC,KAAK,CAAC,KAAa,UAA2B,MAAM,IAAI,KAAK,KAAK;AAAA,IAClE,KAAK,CAAC,QAAgB,MAAM,IAAI,GAAG;AAAA,IACnC,OAAO,MAAM,MAAM,MAAM;AAAA,IACzB,MAAM,MAAM,MAAM;AAAA,EACpB;AACF;","names":["import_jsdom","TurndownService"]}
|
package/dist/index.d.cts
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
import { T as TransformOptions, a as TransformResult, b as TurndownRule, L as LlmsTxtRoute } from './edge-AywqjCEh.cjs';
|
|
2
|
+
export { A as AIRequestContext, c as AI_BOT_REGISTRY, d as AgentSeoHeaders, e as AgentSeoOptions, B as BotInfo, f as BotPurpose, g as LlmsTxtOptions, h as LlmsTxtResult, i as buildAlternateLinkHeader, j as buildMarkdownHeaders, k as detectAgent, l as estimateTokens, m as generateLlmsTxt, s as shouldServeMarkdown } from './edge-AywqjCEh.cjs';
|
|
3
|
+
import { LRUCache } from 'lru-cache';
|
|
4
|
+
|
|
5
|
+
/**
|
|
6
|
+
* Transform an HTML string into clean, LLM-optimized Markdown.
|
|
7
|
+
*
|
|
8
|
+
* Pipeline: Parse DOM → Extract JSON-LD → Readability extract → Sanitize → Turndown → Frontmatter → Token budget
|
|
9
|
+
*/
|
|
10
|
+
declare function transform(html: string, options?: TransformOptions): Promise<TransformResult>;
|
|
11
|
+
|
|
12
|
+
interface SanitizeOptions {
|
|
13
|
+
stripSelectors?: string[];
|
|
14
|
+
preserveSelectors?: string[];
|
|
15
|
+
}
|
|
16
|
+
/**
|
|
17
|
+
* Sanitize HTML by removing noise elements.
|
|
18
|
+
* This is run AFTER Readability extraction (which does the heavy lifting)
|
|
19
|
+
* to catch remaining noise that Readability missed.
|
|
20
|
+
*/
|
|
21
|
+
declare function sanitizeHtml(html: string, options?: SanitizeOptions): string;
|
|
22
|
+
|
|
23
|
+
interface MarkdownOptions {
|
|
24
|
+
url?: string;
|
|
25
|
+
customRules?: TurndownRule[];
|
|
26
|
+
}
|
|
27
|
+
declare function htmlToMarkdown(html: string, options?: MarkdownOptions): string;
|
|
28
|
+
|
|
29
|
+
/**
|
|
30
|
+
* Extract all JSON-LD blocks from a document.
|
|
31
|
+
* These are <script type="application/ld+json"> elements.
|
|
32
|
+
*/
|
|
33
|
+
declare function extractJsonLdBlocks(document: Document): Record<string, unknown>[];
|
|
34
|
+
|
|
35
|
+
/**
|
|
36
|
+
* Options for route discovery.
|
|
37
|
+
*/
|
|
38
|
+
interface DiscoverOptions {
|
|
39
|
+
/**
|
|
40
|
+
* Glob-like patterns to exclude from discovery.
|
|
41
|
+
* Matches against the URL path, e.g. ["/api", "/admin", "/_internal"].
|
|
42
|
+
* Default: ["/api", "/_*"]
|
|
43
|
+
*/
|
|
44
|
+
exclude?: string[];
|
|
45
|
+
/**
|
|
46
|
+
* How to derive the section name from a route path.
|
|
47
|
+
* - 'directory': uses the first path segment (e.g., "/docs/intro" → "Docs")
|
|
48
|
+
* - A custom function: (path: string) => string
|
|
49
|
+
* Default: 'directory'
|
|
50
|
+
*/
|
|
51
|
+
sectionStrategy?: 'directory' | ((path: string) => string);
|
|
52
|
+
/**
|
|
53
|
+
* Default section name for top-level pages (e.g., "/" or "/about").
|
|
54
|
+
* Default: "Pages"
|
|
55
|
+
*/
|
|
56
|
+
defaultSection?: string;
|
|
57
|
+
}
|
|
58
|
+
/**
|
|
59
|
+
* Scan a Next.js `app/` directory and discover all page routes.
|
|
60
|
+
*
|
|
61
|
+
* For each `page.tsx` found, it:
|
|
62
|
+
* 1. Derives the URL path from the directory structure
|
|
63
|
+
* 2. Attempts to extract `metadata.title` and `metadata.description` from the file
|
|
64
|
+
* 3. Assigns a section based on the top-level directory
|
|
65
|
+
*
|
|
66
|
+
* @param appDir - Absolute path to the `app/` directory
|
|
67
|
+
* @param options - Discovery options
|
|
68
|
+
* @returns Array of discovered routes
|
|
69
|
+
*
|
|
70
|
+
* @example
|
|
71
|
+
* ```ts
|
|
72
|
+
* const routes = discoverNextRoutes('/path/to/app');
|
|
73
|
+
* // [
|
|
74
|
+
* // { path: '/', title: 'Home', description: '...', section: 'Pages' },
|
|
75
|
+
* // { path: '/docs/intro', title: 'Introduction', description: '...', section: 'Docs' },
|
|
76
|
+
* // ]
|
|
77
|
+
* ```
|
|
78
|
+
*/
|
|
79
|
+
declare function discoverNextRoutes(appDir: string, options?: DiscoverOptions): LlmsTxtRoute[];
|
|
80
|
+
/**
|
|
81
|
+
* Parse metadata from source code string.
|
|
82
|
+
* Exported for testing.
|
|
83
|
+
*/
|
|
84
|
+
declare function extractMetadataFromSource(source: string): {
|
|
85
|
+
title: string;
|
|
86
|
+
description: string;
|
|
87
|
+
};
|
|
88
|
+
/**
|
|
89
|
+
* Discover routes by scanning a directory for HTML files.
|
|
90
|
+
* Useful for static sites or build output directories.
|
|
91
|
+
*/
|
|
92
|
+
declare function discoverFilesystemRoutes(dir: string, options?: DiscoverOptions): LlmsTxtRoute[];
|
|
93
|
+
|
|
94
|
+
interface CacheOptions {
|
|
95
|
+
/** Max number of entries. Default: 100 */
|
|
96
|
+
maxEntries?: number;
|
|
97
|
+
/** TTL in milliseconds. Default: 300_000 (5 minutes) */
|
|
98
|
+
ttl?: number;
|
|
99
|
+
}
|
|
100
|
+
declare function createCache(options?: CacheOptions): {
|
|
101
|
+
get: (key: string) => TransformResult | undefined;
|
|
102
|
+
set: (key: string, value: TransformResult) => LRUCache<string, TransformResult, unknown>;
|
|
103
|
+
has: (key: string) => boolean;
|
|
104
|
+
clear: () => void;
|
|
105
|
+
size: () => number;
|
|
106
|
+
};
|
|
107
|
+
|
|
108
|
+
export { type CacheOptions, type DiscoverOptions, LlmsTxtRoute, TransformOptions, TransformResult, TurndownRule, createCache, discoverFilesystemRoutes, discoverNextRoutes, extractJsonLdBlocks, extractMetadataFromSource, htmlToMarkdown, sanitizeHtml, transform };
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,108 @@
|
|
|
1
|
+
import { T as TransformOptions, a as TransformResult, b as TurndownRule, L as LlmsTxtRoute } from './edge-AywqjCEh.js';
|
|
2
|
+
export { A as AIRequestContext, c as AI_BOT_REGISTRY, d as AgentSeoHeaders, e as AgentSeoOptions, B as BotInfo, f as BotPurpose, g as LlmsTxtOptions, h as LlmsTxtResult, i as buildAlternateLinkHeader, j as buildMarkdownHeaders, k as detectAgent, l as estimateTokens, m as generateLlmsTxt, s as shouldServeMarkdown } from './edge-AywqjCEh.js';
|
|
3
|
+
import { LRUCache } from 'lru-cache';
|
|
4
|
+
|
|
5
|
+
/**
|
|
6
|
+
* Transform an HTML string into clean, LLM-optimized Markdown.
|
|
7
|
+
*
|
|
8
|
+
* Pipeline: Parse DOM → Extract JSON-LD → Readability extract → Sanitize → Turndown → Frontmatter → Token budget
|
|
9
|
+
*/
|
|
10
|
+
declare function transform(html: string, options?: TransformOptions): Promise<TransformResult>;
|
|
11
|
+
|
|
12
|
+
interface SanitizeOptions {
|
|
13
|
+
stripSelectors?: string[];
|
|
14
|
+
preserveSelectors?: string[];
|
|
15
|
+
}
|
|
16
|
+
/**
|
|
17
|
+
* Sanitize HTML by removing noise elements.
|
|
18
|
+
* This is run AFTER Readability extraction (which does the heavy lifting)
|
|
19
|
+
* to catch remaining noise that Readability missed.
|
|
20
|
+
*/
|
|
21
|
+
declare function sanitizeHtml(html: string, options?: SanitizeOptions): string;
|
|
22
|
+
|
|
23
|
+
interface MarkdownOptions {
|
|
24
|
+
url?: string;
|
|
25
|
+
customRules?: TurndownRule[];
|
|
26
|
+
}
|
|
27
|
+
declare function htmlToMarkdown(html: string, options?: MarkdownOptions): string;
|
|
28
|
+
|
|
29
|
+
/**
|
|
30
|
+
* Extract all JSON-LD blocks from a document.
|
|
31
|
+
* These are <script type="application/ld+json"> elements.
|
|
32
|
+
*/
|
|
33
|
+
declare function extractJsonLdBlocks(document: Document): Record<string, unknown>[];
|
|
34
|
+
|
|
35
|
+
/**
 * Options for route discovery.
 */
interface DiscoverOptions {
    /**
     * Glob-like patterns to exclude from discovery.
     * Matches against the URL path, e.g. ["/api", "/admin", "/_internal"].
     * Default: ["/api", "/_*"]
     */
    exclude?: string[];
    /**
     * How to derive the section name from a route path.
     * - 'directory': uses the first path segment (e.g., "/docs/intro" → "Docs")
     * - A custom function: (path: string) => string
     * Default: 'directory'
     */
    sectionStrategy?: 'directory' | ((path: string) => string);
    /**
     * Default section name for top-level pages (e.g., "/" or "/about").
     * Default: "Pages"
     */
    defaultSection?: string;
}
|
|
58
|
+
/**
 * Scan a Next.js `app/` directory and discover all page routes.
 *
 * For each `page.tsx` found, it:
 * 1. Derives the URL path from the directory structure
 * 2. Attempts to extract `metadata.title` and `metadata.description` from the file
 * 3. Assigns a section based on the top-level directory
 *
 * @param appDir - Absolute path to the `app/` directory
 * @param options - Discovery options
 * @returns Array of discovered routes
 *
 * @example
 * ```ts
 * const routes = discoverNextRoutes('/path/to/app');
 * // [
 * //   { path: '/', title: 'Home', description: '...', section: 'Pages' },
 * //   { path: '/docs/intro', title: 'Introduction', description: '...', section: 'Docs' },
 * // ]
 * ```
 */
declare function discoverNextRoutes(appDir: string, options?: DiscoverOptions): LlmsTxtRoute[];
|
|
80
|
+
/**
 * Parse metadata from source code string.
 * Exported for testing.
 *
 * @param source - Source text of a page module (e.g., a Next.js `page.tsx` file).
 * @returns The extracted `title` and `description`.
 *   NOTE(review): both fields are non-optional strings — presumably empty strings
 *   when no metadata is found; confirm in discover.ts.
 */
declare function extractMetadataFromSource(source: string): {
    title: string;
    description: string;
};
|
|
88
|
+
/**
 * Discover routes by scanning a directory for HTML files.
 * Useful for static sites or build output directories.
 *
 * @param dir - Directory to scan for HTML files.
 * @param options - Discovery options; see {@link DiscoverOptions}.
 * @returns Array of discovered routes.
 */
declare function discoverFilesystemRoutes(dir: string, options?: DiscoverOptions): LlmsTxtRoute[];
|
|
93
|
+
|
|
94
|
+
/**
 * Options for {@link createCache}.
 */
interface CacheOptions {
    /** Max number of entries. Default: 100 */
    maxEntries?: number;
    /** TTL in milliseconds. Default: 300_000 (5 minutes) */
    ttl?: number;
}
|
|
100
|
+
/**
 * Create an in-memory cache for transform results, backed by `lru-cache`.
 *
 * @param options - Capacity and TTL settings; see {@link CacheOptions} for defaults.
 * @returns A cache facade with `get`/`set`/`has`/`clear`/`size` operations.
 *   NOTE(review): `set` leaks the underlying `LRUCache` instance as its return
 *   type; callers should treat that return value as opaque.
 */
declare function createCache(options?: CacheOptions): {
    get: (key: string) => TransformResult | undefined;
    set: (key: string, value: TransformResult) => LRUCache<string, TransformResult, unknown>;
    has: (key: string) => boolean;
    clear: () => void;
    size: () => number;
};
|
|
107
|
+
|
|
108
|
+
export { type CacheOptions, type DiscoverOptions, LlmsTxtRoute, TransformOptions, TransformResult, TurndownRule, createCache, discoverFilesystemRoutes, discoverNextRoutes, extractJsonLdBlocks, extractMetadataFromSource, htmlToMarkdown, sanitizeHtml, transform };
|