aeorank 3.0.2 → 3.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/browser.js CHANGED
@@ -3,7 +3,7 @@ import {
   extractAllUrlsFromSitemap,
   extractInternalLinks,
   inferCategory
-} from "./chunk-OCLAIHX6.js";
+} from "./chunk-RYV25AUV.js";
 
 // src/parked-domain.ts
 var PARKING_PATHS = ["/lander", "/parking", "/park", "/sedoparking"];
@@ -6049,7 +6049,7 @@ async function audit(domain, options) {
     }
   }
   if (options?.fullCrawl) {
-    const { crawlFullSite: crawlFullSite2 } = await import("./full-site-crawler-BCJS67WQ.js");
+    const { crawlFullSite: crawlFullSite2 } = await import("./full-site-crawler-TQ35TB2X.js");
     const crawlResult = await crawlFullSite2(siteData, {
      maxPages: options.maxPages ?? 200,
      concurrency: options.concurrency ?? 5
@@ -1,6 +1,7 @@
 // src/full-site-crawler.ts
 var RESOURCE_EXTENSIONS = /\.(js|css|png|jpg|jpeg|gif|svg|ico|pdf|xml|txt|woff|woff2|ttf|eot|mp4|mp3|webp|avif|zip|gz|tar|json)$/i;
-var SKIP_PATH_PATTERNS = /^\/(api|wp-admin|wp-json|static|assets|_next|auth|login|signup|cart|checkout|admin|feed|xmlrpc)\b/i;
+var SKIP_PATH_PATTERNS = /^\/(api|wp-admin|wp-json|wp-content|wp-includes|static|assets|_next|auth|login|signup|cart|checkout|admin|feed|xmlrpc|tag|tags|author|authors|category|categories|attachment|embed|trackback|comments|search|print|amp)\b/i;
+var SKIP_URL_PATTERNS = /\/page\/\d+\/?$|[?&](s|replytocom|p|preview|share|like|amp)=/i;
 function parseRobotsTxt(robotsText) {
   const lines = robotsText.split("\n");
   const rules = { disallow: [], allow: [] };
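The expanded SKIP_PATH_PATTERNS now also rejects WordPress taxonomy and system paths (/tag, /category, /author, /wp-content, and so on), and the new SKIP_URL_PATTERNS catches pagination and query-string junk that survives path filtering. A quick check of both patterns, copied verbatim from the hunk above, against hypothetical URLs:

```ts
// Patterns copied from the diff above; the sample paths and URLs are hypothetical.
const SKIP_PATH_PATTERNS = /^\/(api|wp-admin|wp-json|wp-content|wp-includes|static|assets|_next|auth|login|signup|cart|checkout|admin|feed|xmlrpc|tag|tags|author|authors|category|categories|attachment|embed|trackback|comments|search|print|amp)\b/i;
const SKIP_URL_PATTERNS = /\/page\/\d+\/?$|[?&](s|replytocom|p|preview|share|like|amp)=/i;

console.log(SKIP_PATH_PATTERNS.test("/category/news"));                        // true: new taxonomy skip
console.log(SKIP_PATH_PATTERNS.test("/blog/my-post"));                         // false: real content survives
console.log(SKIP_URL_PATTERNS.test("https://example.com/blog/page/3"));        // true: WP pagination
console.log(SKIP_URL_PATTERNS.test("https://example.com/post?replytocom=42")); // true: comment-reply query
console.log(SKIP_URL_PATTERNS.test("https://example.com/blog/my-post"));       // false: kept for the crawl
```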
@@ -141,6 +142,7 @@ function extractInternalLinks(html, domain) {
       if (RESOURCE_EXTENSIONS.test(path)) continue;
       if (SKIP_PATH_PATTERNS.test(path)) continue;
       const normalized = parsed.origin + path.replace(/\/+$/, "") + parsed.search;
+      if (SKIP_URL_PATTERNS.test(normalized)) continue;
       urls.add(normalized);
     } catch {
       continue;
@@ -299,4 +301,4 @@ export {
   inferCategory,
   crawlFullSite
 };
-//# sourceMappingURL=chunk-OCLAIHX6.js.map
+//# sourceMappingURL=chunk-RYV25AUV.js.map
@@ -0,0 +1 @@
+{"version":3,"sources":["../src/full-site-crawler.ts"],"sourcesContent":["…"],"mappings":";AA+CA,…","names":[]}
package/dist/cli.js CHANGED
@@ -4496,7 +4496,7 @@ async function audit(domain, options) {
     }
   }
   if (options?.fullCrawl) {
-    const { crawlFullSite } = await import("./full-site-crawler-5AYKCZQY.js");
+    const { crawlFullSite } = await import("./full-site-crawler-OBECS7AT.js");
     const crawlResult = await crawlFullSite(siteData, {
      maxPages: options.maxPages ?? 200,
      concurrency: options.concurrency ?? 5
@@ -2,7 +2,8 @@
 
 // src/full-site-crawler.ts
 var RESOURCE_EXTENSIONS = /\.(js|css|png|jpg|jpeg|gif|svg|ico|pdf|xml|txt|woff|woff2|ttf|eot|mp4|mp3|webp|avif|zip|gz|tar|json)$/i;
-var SKIP_PATH_PATTERNS = /^\/(api|wp-admin|wp-json|static|assets|_next|auth|login|signup|cart|checkout|admin|feed|xmlrpc)\b/i;
+var SKIP_PATH_PATTERNS = /^\/(api|wp-admin|wp-json|wp-content|wp-includes|static|assets|_next|auth|login|signup|cart|checkout|admin|feed|xmlrpc|tag|tags|author|authors|category|categories|attachment|embed|trackback|comments|search|print|amp)\b/i;
+var SKIP_URL_PATTERNS = /\/page\/\d+\/?$|[?&](s|replytocom|p|preview|share|like|amp)=/i;
 function parseRobotsTxt(robotsText) {
   const lines = robotsText.split("\n");
   const rules = { disallow: [], allow: [] };
@@ -143,6 +144,7 @@ function extractInternalLinks(html, domain) {
       if (RESOURCE_EXTENSIONS.test(path)) continue;
       if (SKIP_PATH_PATTERNS.test(path)) continue;
       const normalized = parsed.origin + path.replace(/\/+$/, "") + parsed.search;
+      if (SKIP_URL_PATTERNS.test(normalized)) continue;
       urls.add(normalized);
     } catch {
       continue;
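This hunk is the behavioral core of the release: extractInternalLinks now applies SKIP_URL_PATTERNS after normalization (hash stripped, trailing slash removed, query string preserved), so pagination junk is filtered even when its path passes SKIP_PATH_PATTERNS. A hedged sketch of the resulting behavior, with hypothetical HTML and domain:

```ts
import { extractInternalLinks } from "./full-site-crawler.js";

// Hypothetical page: two real links plus one WordPress pagination link.
const html = `
  <a href="/blog/my-post/">Post</a>
  <a href="/blog/page/2/">Older posts</a>
  <a href="https://example.com/about#team">About</a>
`;

console.log(extractInternalLinks(html, "example.com"));
// → ["https://example.com/blog/my-post", "https://example.com/about"]
//   /blog/page/2/ now trips SKIP_URL_PATTERNS; the #team fragment is stripped.
```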
@@ -300,4 +302,4 @@ export {
   isDisallowedByRobots,
   parseRobotsTxt
 };
-//# sourceMappingURL=full-site-crawler-5AYKCZQY.js.map
+//# sourceMappingURL=full-site-crawler-OBECS7AT.js.map
@@ -0,0 +1 @@
+{"version":3,"sources":["../src/full-site-crawler.ts"],"sourcesContent":["…"],"mappings":";;;AA+CA,…","names":[]}
@@ -5,7 +5,7 @@ import {
   inferCategory,
   isDisallowedByRobots,
   parseRobotsTxt
-} from "./chunk-OCLAIHX6.js";
+} from "./chunk-RYV25AUV.js";
 export {
   crawlFullSite,
   extractAllUrlsFromSitemap,
@@ -14,4 +14,4 @@ export {
   isDisallowedByRobots,
   parseRobotsTxt
 };
-//# sourceMappingURL=full-site-crawler-BCJS67WQ.js.map
+//# sourceMappingURL=full-site-crawler-TQ35TB2X.js.map
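Both bundles re-export crawlFullSite, which the audit hunks above load lazily behind options.fullCrawl. A usage sketch based on the CrawlOptions and CrawlResult interfaces embedded in the source maps; siteData stands in for the structure produced by the package's earlier crawl stage:

```ts
import { crawlFullSite } from "./full-site-crawler.js";
import type { SiteData } from "./site-crawler.js";

// Stand-in for the SiteData built earlier in the audit pipeline
// (domain, protocol, homepage, sitemapXml, robotsTxt, blogSample).
declare const siteData: SiteData;

const result = await crawlFullSite(siteData, {
  maxPages: 200,  // default per CrawlOptions
  concurrency: 5, // parallel fetches per batch
  onProgress: (e) =>
    console.log(`${e.fetched}/${e.maxPages} fetched, ${e.discovered} discovered`),
});
console.log(`${result.pages.length} pages in ${result.elapsed}s`);
```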
package/dist/index.cjs CHANGED
@@ -170,6 +170,7 @@ function extractInternalLinks(html, domain) {
       if (RESOURCE_EXTENSIONS.test(path)) continue;
       if (SKIP_PATH_PATTERNS.test(path)) continue;
       const normalized = parsed.origin + path.replace(/\/+$/, "") + parsed.search;
+      if (SKIP_URL_PATTERNS.test(normalized)) continue;
       urls.add(normalized);
     } catch {
       continue;
@@ -307,12 +308,13 @@ function normalizeUrl(url) {
     return url.toLowerCase();
   }
 }
-var RESOURCE_EXTENSIONS, SKIP_PATH_PATTERNS, CATEGORY_PATTERNS;
+var RESOURCE_EXTENSIONS, SKIP_PATH_PATTERNS, SKIP_URL_PATTERNS, CATEGORY_PATTERNS;
 var init_full_site_crawler = __esm({
   "src/full-site-crawler.ts"() {
     "use strict";
     RESOURCE_EXTENSIONS = /\.(js|css|png|jpg|jpeg|gif|svg|ico|pdf|xml|txt|woff|woff2|ttf|eot|mp4|mp3|webp|avif|zip|gz|tar|json)$/i;
-    SKIP_PATH_PATTERNS = /^\/(api|wp-admin|wp-json|static|assets|_next|auth|login|signup|cart|checkout|admin|feed|xmlrpc)\b/i;
+    SKIP_PATH_PATTERNS = /^\/(api|wp-admin|wp-json|wp-content|wp-includes|static|assets|_next|auth|login|signup|cart|checkout|admin|feed|xmlrpc|tag|tags|author|authors|category|categories|attachment|embed|trackback|comments|search|print|amp)\b/i;
+    SKIP_URL_PATTERNS = /\/page\/\d+\/?$|[?&](s|replytocom|p|preview|share|like|amp)=/i;
     CATEGORY_PATTERNS = [
       [/\/([^/]*-?)?(blog|articles?|posts?|news|insights|guides)\b/i, "blog"],
       [/\/(about|about-us|company|who-we-are)\b/i, "about"],