aeorank 1.6.0 → 2.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -149,7 +149,7 @@ function extractInternalLinks(html, domain) {
  return Array.from(urls);
  }
  var CATEGORY_PATTERNS = [
- [/\/(blog|articles?|posts?|news|insights|guides)\b/i, "blog"],
+ [/\/([^/]*-?)?(blog|articles?|posts?|news|insights|guides)\b/i, "blog"],
  [/\/(about|about-us|company|who-we-are)\b/i, "about"],
  [/\/(pricing|plans|packages)\b/i, "pricing"],
  [/\/(services?|features?|solutions?|products?|what-we-do|offerings?)\b/i, "services"],
@@ -288,4 +288,4 @@ export {
  inferCategory,
  crawlFullSite
  };
- //# sourceMappingURL=chunk-3IJISYWT.js.map
+ //# sourceMappingURL=chunk-PKJIKMLV.js.map
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/full-site-crawler.ts"],"sourcesContent":["/**\n * Full-site crawler for deep AEO audits.\n * BFS crawl that discovers all internal pages up to a configurable limit.\n */\n\nimport type { FetchResult, SiteData, PageCategory } from './site-crawler.js';\n\n// ─── Types ──────────────────────────────────────────────────────────────────\n\nexport interface CrawlOptions {\n /** Maximum pages to fetch (default 200) */\n maxPages?: number;\n /** Per-page fetch timeout in ms (default 10000) */\n timeoutMs?: number;\n /** Parallel fetches (default 5) */\n concurrency?: number;\n /** Honor robots.txt Disallow rules (default true) */\n respectRobots?: boolean;\n /** Include asset files — skipped by default */\n includeAssets?: boolean;\n}\n\nexport interface CrawlResult {\n pages: FetchResult[];\n discoveredUrls: string[];\n fetchedUrls: string[];\n skippedUrls: string[];\n elapsed: number;\n}\n\n// ─── Resource file extensions to skip ────────────────────────────────────────\n\nconst RESOURCE_EXTENSIONS = /\\.(js|css|png|jpg|jpeg|gif|svg|ico|pdf|xml|txt|woff|woff2|ttf|eot|mp4|mp3|webp|avif|zip|gz|tar|json)$/i;\n\nconst SKIP_PATH_PATTERNS = /^\\/(api|wp-admin|wp-json|static|assets|_next|auth|login|signup|cart|checkout|admin|feed|xmlrpc)\\b/i;\n\n// ─── Robots.txt parsing ─────────────────────────────────────────────────────\n\ninterface RobotsRules {\n disallow: string[];\n allow: string[];\n}\n\nexport function parseRobotsTxt(robotsText: string): RobotsRules {\n const lines = robotsText.split('\\n');\n const rules: RobotsRules = { disallow: [], allow: [] };\n\n // Collect rules for User-agent: * and User-agent: AEO-Visibility-Bot\n let inRelevantSection = false;\n\n for (const rawLine of lines) {\n const line = rawLine.trim();\n if (!line || line.startsWith('#')) continue;\n\n const uaMatch = line.match(/^user-agent:\\s*(.+)/i);\n if (uaMatch) {\n const agent = uaMatch[1].trim().toLowerCase();\n inRelevantSection = agent === '*' || agent === 'aeo-visibility-bot';\n continue;\n }\n\n if (!inRelevantSection) continue;\n\n const disallowMatch = line.match(/^disallow:\\s*(.*)/i);\n if (disallowMatch) {\n const path = disallowMatch[1].trim();\n if (path) rules.disallow.push(path);\n continue;\n }\n\n const allowMatch = line.match(/^allow:\\s*(.*)/i);\n if (allowMatch) {\n const path = allowMatch[1].trim();\n if (path) rules.allow.push(path);\n }\n }\n\n return rules;\n}\n\nexport function isDisallowedByRobots(urlPath: string, rules: RobotsRules): boolean {\n // Check allow rules first — more specific (longer) rules take precedence\n let longestAllow = 0;\n let longestDisallow = 0;\n\n for (const pattern of rules.allow) {\n if (urlPath.startsWith(pattern) && pattern.length > longestAllow) {\n longestAllow = pattern.length;\n }\n }\n\n for (const pattern of rules.disallow) {\n if (urlPath.startsWith(pattern) && pattern.length > longestDisallow) {\n longestDisallow = pattern.length;\n }\n }\n\n // More specific (longer) rule wins; if equal length, allow wins\n if (longestAllow === 0 && longestDisallow === 0) return false;\n return longestDisallow > longestAllow;\n}\n\n// ─── Fetch helper (matches multi-page-fetcher.ts fetchPage) ──────────────────\n\nasync function fetchPage(url: string, timeoutMs = 10000): Promise<FetchResult | null> {\n try {\n const res = await fetch(url, {\n signal: AbortSignal.timeout(timeoutMs),\n headers: { 'User-Agent': 'AEO-Visibility-Bot/1.0' },\n redirect: 'follow',\n });\n if (res.status !== 200) return null;\n const text = await res.text();\n if 
(text.length < 200) return null;\n return { text: text.slice(0, 500_000), status: res.status, finalUrl: res.url };\n } catch {\n return null;\n }\n}\n\nasync function fetchSitemapXml(url: string, timeoutMs = 10000): Promise<string | null> {\n try {\n const res = await fetch(url, {\n signal: AbortSignal.timeout(timeoutMs),\n headers: { 'User-Agent': 'AEO-Visibility-Bot/1.0' },\n redirect: 'follow',\n });\n if (res.status !== 200) return null;\n return await res.text();\n } catch {\n return null;\n }\n}\n\n// ─── Sitemap parsing ────────────────────────────────────────────────────────\n\n/**\n * Extract all page URLs from sitemap XML (handles sitemapindex with sub-sitemaps).\n * Filters to same domain only, skips resource files.\n */\nexport async function extractAllUrlsFromSitemap(\n sitemapText: string,\n domain: string,\n timeoutMs = 10000,\n): Promise<string[]> {\n const cleanDomain = domain.replace(/^www\\./, '').toLowerCase();\n const urls = new Set<string>();\n\n // Check for sitemapindex — fetch sub-sitemaps\n const subSitemapLocs = sitemapText.match(/<sitemap>[\\s\\S]*?<loc>([^<]+)<\\/loc>[\\s\\S]*?<\\/sitemap>/gi) || [];\n if (subSitemapLocs.length > 0) {\n const subUrls: string[] = [];\n for (const block of subSitemapLocs) {\n const locMatch = block.match(/<loc>([^<]+)<\\/loc>/i);\n if (locMatch) subUrls.push(locMatch[1].trim());\n }\n\n // Fetch sub-sitemaps in parallel (limit to 10)\n const fetches = subUrls.slice(0, 10).map(u => fetchSitemapXml(u, timeoutMs));\n const results = await Promise.all(fetches);\n for (const text of results) {\n if (text) {\n extractLocsFromXml(text, cleanDomain, urls);\n }\n }\n }\n\n // Also extract <url><loc> from the main sitemap text (could be a regular sitemap)\n extractLocsFromXml(sitemapText, cleanDomain, urls);\n\n return Array.from(urls);\n}\n\nfunction extractLocsFromXml(xml: string, cleanDomain: string, urls: Set<string>): void {\n const locMatches = xml.match(/<url>[\\s\\S]*?<loc>([^<]+)<\\/loc>[\\s\\S]*?<\\/url>/gi) || [];\n for (const block of locMatches) {\n const locMatch = block.match(/<loc>([^<]+)<\\/loc>/i);\n if (!locMatch) continue;\n const url = locMatch[1].trim();\n\n try {\n const parsed = new URL(url);\n const urlDomain = parsed.hostname.replace(/^www\\./, '').toLowerCase();\n if (urlDomain !== cleanDomain) continue;\n if (RESOURCE_EXTENSIONS.test(parsed.pathname)) continue;\n urls.add(url);\n } catch {\n continue;\n }\n }\n}\n\n// ─── Internal link extraction ───────────────────────────────────────────────\n\n/**\n * Extract ALL internal links from HTML (not just nav).\n * Returns deduplicated full URLs for the same domain.\n */\nexport function extractInternalLinks(html: string, domain: string): string[] {\n const cleanDomain = domain.replace(/^www\\./, '').toLowerCase();\n const hrefMatches = html.match(/href=\"([^\"]*)\"/gi) || [];\n const urls = new Set<string>();\n\n for (const match of hrefMatches) {\n const href = match.match(/href=\"([^\"]*)\"/i)?.[1];\n if (!href || !href.trim()) continue;\n\n let fullUrl: string;\n\n if (href.startsWith('//')) {\n fullUrl = `https:${href}`;\n } else if (href.startsWith('/')) {\n // Skip fragment-only, query-only, and anchor links\n if (href === '/' || href.startsWith('/#')) continue;\n fullUrl = `https://${domain}${href}`;\n } else if (href.startsWith('http')) {\n fullUrl = href;\n } else if (href.startsWith('#') || href.startsWith('?') || href.startsWith('mailto:') || href.startsWith('tel:') || href.startsWith('javascript:')) {\n continue;\n } else {\n // Relative path\n fullUrl = 
`https://${domain}/${href}`;\n }\n\n try {\n const parsed = new URL(fullUrl);\n const linkDomain = parsed.hostname.replace(/^www\\./, '').toLowerCase();\n if (linkDomain !== cleanDomain) continue;\n\n // Strip hash and normalize\n parsed.hash = '';\n const path = parsed.pathname;\n\n if (path === '/' || path === '') continue;\n if (RESOURCE_EXTENSIONS.test(path)) continue;\n if (SKIP_PATH_PATTERNS.test(path)) continue;\n\n // Normalize: strip trailing slash\n const normalized = parsed.origin + path.replace(/\\/+$/, '') + parsed.search;\n urls.add(normalized);\n } catch {\n continue;\n }\n }\n\n return Array.from(urls);\n}\n\n// ─── Category inference ─────────────────────────────────────────────────────\n\nconst CATEGORY_PATTERNS: Array<[RegExp, PageCategory]> = [\n [/\\/([^/]*-?)?(blog|articles?|posts?|news|insights|guides)\\b/i, 'blog'],\n [/\\/(about|about-us|company|who-we-are)\\b/i, 'about'],\n [/\\/(pricing|plans|packages)\\b/i, 'pricing'],\n [/\\/(services?|features?|solutions?|products?|what-we-do|offerings?)\\b/i, 'services'],\n [/\\/(contact|contact-us|get-in-touch)\\b/i, 'contact'],\n [/\\/(team|our-team|authors?|people|leadership|staff)\\b/i, 'team'],\n [/\\/(resources?|resource-center|library|downloads?)\\b/i, 'resources'],\n [/\\/(docs?|documentation|help|help-center|support|knowledge-base)\\b/i, 'docs'],\n [/\\/(case-stud\\w*|cases|customers?|success-stor\\w*|testimonials?)\\b/i, 'cases'],\n [/\\/(faq|frequently-asked|questions)\\b/i, 'faq'],\n];\n\n/**\n * Infer PageCategory from URL path patterns.\n */\nexport function inferCategory(url: string): PageCategory {\n try {\n const path = new URL(url).pathname;\n for (const [pattern, category] of CATEGORY_PATTERNS) {\n if (pattern.test(path)) return category;\n }\n } catch {\n // Fall through to default\n }\n return 'content';\n}\n\n// ─── Main crawler ───────────────────────────────────────────────────────────\n\n/**\n * BFS crawl of a site, discovering all internal pages up to maxPages.\n * Seeds from sitemap URLs + homepage internal links.\n * Skips URLs already in siteData.blogSample and homepage.\n */\nexport async function crawlFullSite(\n siteData: SiteData,\n options?: CrawlOptions,\n): Promise<CrawlResult> {\n const startTime = Date.now();\n const maxPages = options?.maxPages ?? 200;\n const timeoutMs = options?.timeoutMs ?? 10000;\n const concurrency = options?.concurrency ?? 5;\n const respectRobots = options?.respectRobots ?? 
true;\n\n const pages: FetchResult[] = [];\n const discoveredUrls = new Set<string>();\n const fetchedUrls = new Set<string>();\n const skippedUrls = new Set<string>();\n const visited = new Set<string>();\n\n // Parse robots.txt rules\n let robotsRules: RobotsRules = { disallow: [], allow: [] };\n if (respectRobots && siteData.robotsTxt?.text) {\n robotsRules = parseRobotsTxt(siteData.robotsTxt.text);\n }\n\n const baseUrl = `${siteData.protocol}://${siteData.domain}`;\n\n // Mark already-fetched URLs as visited\n visited.add(normalizeUrl(baseUrl));\n visited.add(normalizeUrl(baseUrl + '/'));\n if (siteData.blogSample) {\n for (const page of siteData.blogSample) {\n if (page.finalUrl) visited.add(normalizeUrl(page.finalUrl));\n }\n }\n\n // Seed the queue from sitemap\n const queue: string[] = [];\n if (siteData.sitemapXml?.text) {\n const sitemapUrls = await extractAllUrlsFromSitemap(\n siteData.sitemapXml.text,\n siteData.domain,\n timeoutMs,\n );\n for (const url of sitemapUrls) {\n const norm = normalizeUrl(url);\n if (!visited.has(norm)) {\n discoveredUrls.add(url);\n if (!queue.includes(url)) queue.push(url);\n }\n }\n }\n\n // Seed from homepage internal links\n if (siteData.homepage?.text) {\n const homeLinks = extractInternalLinks(siteData.homepage.text, siteData.domain);\n for (const url of homeLinks) {\n const norm = normalizeUrl(url);\n if (!visited.has(norm) && !discoveredUrls.has(url)) {\n discoveredUrls.add(url);\n if (!queue.includes(url)) queue.push(url);\n }\n }\n }\n\n // BFS loop\n while (queue.length > 0 && fetchedUrls.size < maxPages) {\n // Take a batch\n const batchSize = Math.min(concurrency, maxPages - fetchedUrls.size, queue.length);\n const batch: string[] = [];\n\n while (batch.length < batchSize && queue.length > 0) {\n const url = queue.shift()!;\n const norm = normalizeUrl(url);\n\n if (visited.has(norm)) continue;\n visited.add(norm);\n\n // Check robots.txt\n if (respectRobots) {\n try {\n const path = new URL(url).pathname;\n if (isDisallowedByRobots(path, robotsRules)) {\n skippedUrls.add(url);\n continue;\n }\n } catch {\n // Skip malformed URLs\n continue;\n }\n }\n\n batch.push(url);\n }\n\n if (batch.length === 0) continue;\n\n // Fetch batch in parallel\n const results = await Promise.all(batch.map(url => fetchPage(url, timeoutMs)));\n\n for (let i = 0; i < results.length; i++) {\n const result = results[i];\n const url = batch[i];\n fetchedUrls.add(url);\n\n if (!result) continue;\n\n result.category = inferCategory(url);\n pages.push(result);\n\n // Extract new internal links from fetched page\n const newLinks = extractInternalLinks(result.text, siteData.domain);\n for (const link of newLinks) {\n const norm = normalizeUrl(link);\n if (!visited.has(norm) && !discoveredUrls.has(link)) {\n discoveredUrls.add(link);\n queue.push(link);\n }\n }\n }\n }\n\n // Any remaining queued URLs count as discovered but skipped\n for (const url of queue) {\n if (!fetchedUrls.has(url)) {\n skippedUrls.add(url);\n }\n }\n\n return {\n pages,\n discoveredUrls: Array.from(discoveredUrls),\n fetchedUrls: Array.from(fetchedUrls),\n skippedUrls: Array.from(skippedUrls),\n elapsed: Math.round((Date.now() - startTime) / 100) / 10,\n };\n}\n\nfunction normalizeUrl(url: string): string {\n try {\n const parsed = new URL(url);\n // Normalize: lowercase host, strip trailing slash, strip hash\n return (parsed.origin + parsed.pathname.replace(/\\/+$/, '') + parsed.search).toLowerCase();\n } catch {\n return url.toLowerCase();\n 
}\n}\n"],"mappings":";AAgCA,IAAM,sBAAsB;AAE5B,IAAM,qBAAqB;AASpB,SAAS,eAAe,YAAiC;AAC9D,QAAM,QAAQ,WAAW,MAAM,IAAI;AACnC,QAAM,QAAqB,EAAE,UAAU,CAAC,GAAG,OAAO,CAAC,EAAE;AAGrD,MAAI,oBAAoB;AAExB,aAAW,WAAW,OAAO;AAC3B,UAAM,OAAO,QAAQ,KAAK;AAC1B,QAAI,CAAC,QAAQ,KAAK,WAAW,GAAG,EAAG;AAEnC,UAAM,UAAU,KAAK,MAAM,sBAAsB;AACjD,QAAI,SAAS;AACX,YAAM,QAAQ,QAAQ,CAAC,EAAE,KAAK,EAAE,YAAY;AAC5C,0BAAoB,UAAU,OAAO,UAAU;AAC/C;AAAA,IACF;AAEA,QAAI,CAAC,kBAAmB;AAExB,UAAM,gBAAgB,KAAK,MAAM,oBAAoB;AACrD,QAAI,eAAe;AACjB,YAAM,OAAO,cAAc,CAAC,EAAE,KAAK;AACnC,UAAI,KAAM,OAAM,SAAS,KAAK,IAAI;AAClC;AAAA,IACF;AAEA,UAAM,aAAa,KAAK,MAAM,iBAAiB;AAC/C,QAAI,YAAY;AACd,YAAM,OAAO,WAAW,CAAC,EAAE,KAAK;AAChC,UAAI,KAAM,OAAM,MAAM,KAAK,IAAI;AAAA,IACjC;AAAA,EACF;AAEA,SAAO;AACT;AAEO,SAAS,qBAAqB,SAAiB,OAA6B;AAEjF,MAAI,eAAe;AACnB,MAAI,kBAAkB;AAEtB,aAAW,WAAW,MAAM,OAAO;AACjC,QAAI,QAAQ,WAAW,OAAO,KAAK,QAAQ,SAAS,cAAc;AAChE,qBAAe,QAAQ;AAAA,IACzB;AAAA,EACF;AAEA,aAAW,WAAW,MAAM,UAAU;AACpC,QAAI,QAAQ,WAAW,OAAO,KAAK,QAAQ,SAAS,iBAAiB;AACnE,wBAAkB,QAAQ;AAAA,IAC5B;AAAA,EACF;AAGA,MAAI,iBAAiB,KAAK,oBAAoB,EAAG,QAAO;AACxD,SAAO,kBAAkB;AAC3B;AAIA,eAAe,UAAU,KAAa,YAAY,KAAoC;AACpF,MAAI;AACF,UAAM,MAAM,MAAM,MAAM,KAAK;AAAA,MAC3B,QAAQ,YAAY,QAAQ,SAAS;AAAA,MACrC,SAAS,EAAE,cAAc,yBAAyB;AAAA,MAClD,UAAU;AAAA,IACZ,CAAC;AACD,QAAI,IAAI,WAAW,IAAK,QAAO;AAC/B,UAAM,OAAO,MAAM,IAAI,KAAK;AAC5B,QAAI,KAAK,SAAS,IAAK,QAAO;AAC9B,WAAO,EAAE,MAAM,KAAK,MAAM,GAAG,GAAO,GAAG,QAAQ,IAAI,QAAQ,UAAU,IAAI,IAAI;AAAA,EAC/E,QAAQ;AACN,WAAO;AAAA,EACT;AACF;AAEA,eAAe,gBAAgB,KAAa,YAAY,KAA+B;AACrF,MAAI;AACF,UAAM,MAAM,MAAM,MAAM,KAAK;AAAA,MAC3B,QAAQ,YAAY,QAAQ,SAAS;AAAA,MACrC,SAAS,EAAE,cAAc,yBAAyB;AAAA,MAClD,UAAU;AAAA,IACZ,CAAC;AACD,QAAI,IAAI,WAAW,IAAK,QAAO;AAC/B,WAAO,MAAM,IAAI,KAAK;AAAA,EACxB,QAAQ;AACN,WAAO;AAAA,EACT;AACF;AAQA,eAAsB,0BACpB,aACA,QACA,YAAY,KACO;AACnB,QAAM,cAAc,OAAO,QAAQ,UAAU,EAAE,EAAE,YAAY;AAC7D,QAAM,OAAO,oBAAI,IAAY;AAG7B,QAAM,iBAAiB,YAAY,MAAM,2DAA2D,KAAK,CAAC;AAC1G,MAAI,eAAe,SAAS,GAAG;AAC7B,UAAM,UAAoB,CAAC;AAC3B,eAAW,SAAS,gBAAgB;AAClC,YAAM,WAAW,MAAM,MAAM,sBAAsB;AACnD,UAAI,SAAU,SAAQ,KAAK,SAAS,CAAC,EAAE,KAAK,CAAC;AAAA,IAC/C;AAGA,UAAM,UAAU,QAAQ,MAAM,GAAG,EAAE,EAAE,IAAI,OAAK,gBAAgB,GAAG,SAAS,CAAC;AAC3E,UAAM,UAAU,MAAM,QAAQ,IAAI,OAAO;AACzC,eAAW,QAAQ,SAAS;AAC1B,UAAI,MAAM;AACR,2BAAmB,MAAM,aAAa,IAAI;AAAA,MAC5C;AAAA,IACF;AAAA,EACF;AAGA,qBAAmB,aAAa,aAAa,IAAI;AAEjD,SAAO,MAAM,KAAK,IAAI;AACxB;AAEA,SAAS,mBAAmB,KAAa,aAAqB,MAAyB;AACrF,QAAM,aAAa,IAAI,MAAM,mDAAmD,KAAK,CAAC;AACtF,aAAW,SAAS,YAAY;AAC9B,UAAM,WAAW,MAAM,MAAM,sBAAsB;AACnD,QAAI,CAAC,SAAU;AACf,UAAM,MAAM,SAAS,CAAC,EAAE,KAAK;AAE7B,QAAI;AACF,YAAM,SAAS,IAAI,IAAI,GAAG;AAC1B,YAAM,YAAY,OAAO,SAAS,QAAQ,UAAU,EAAE,EAAE,YAAY;AACpE,UAAI,cAAc,YAAa;AAC/B,UAAI,oBAAoB,KAAK,OAAO,QAAQ,EAAG;AAC/C,WAAK,IAAI,GAAG;AAAA,IACd,QAAQ;AACN;AAAA,IACF;AAAA,EACF;AACF;AAQO,SAAS,qBAAqB,MAAc,QAA0B;AAC3E,QAAM,cAAc,OAAO,QAAQ,UAAU,EAAE,EAAE,YAAY;AAC7D,QAAM,cAAc,KAAK,MAAM,kBAAkB,KAAK,CAAC;AACvD,QAAM,OAAO,oBAAI,IAAY;AAE7B,aAAW,SAAS,aAAa;AAC/B,UAAM,OAAO,MAAM,MAAM,iBAAiB,IAAI,CAAC;AAC/C,QAAI,CAAC,QAAQ,CAAC,KAAK,KAAK,EAAG;AAE3B,QAAI;AAEJ,QAAI,KAAK,WAAW,IAAI,GAAG;AACzB,gBAAU,SAAS,IAAI;AAAA,IACzB,WAAW,KAAK,WAAW,GAAG,GAAG;AAE/B,UAAI,SAAS,OAAO,KAAK,WAAW,IAAI,EAAG;AAC3C,gBAAU,WAAW,MAAM,GAAG,IAAI;AAAA,IACpC,WAAW,KAAK,WAAW,MAAM,GAAG;AAClC,gBAAU;AAAA,IACZ,WAAW,KAAK,WAAW,GAAG,KAAK,KAAK,WAAW,GAAG,KAAK,KAAK,WAAW,SAAS,KAAK,KAAK,WAAW,MAAM,KAAK,KAAK,WAAW,aAAa,GAAG;AAClJ;AAAA,IACF,OAAO;AAEL,gBAAU,WAAW,MAAM,IAAI,IAAI;AAAA,IACrC;AAEA,QAAI;AACF,YAAM,SAAS,IAAI,IAAI,OAAO;AAC9B,YAAM,aAAa,OAAO,SAAS,QAAQ,UAAU,EAAE,EAAE,YAAY;AACrE,UAAI,eAAe,YAAa;AAGhC,aAAO,OAAO;AACd,YAAM,OAAO,OAAO;AAEpB,UAAI,SAAS,OAAO,SAAS,GAAI;AACjC,UAAI,oBAAoB,KAAK,IAAI,
EAAG;AACpC,UAAI,mBAAmB,KAAK,IAAI,EAAG;AAGnC,YAAM,aAAa,OAAO,SAAS,KAAK,QAAQ,QAAQ,EAAE,IAAI,OAAO;AACrE,WAAK,IAAI,UAAU;AAAA,IACrB,QAAQ;AACN;AAAA,IACF;AAAA,EACF;AAEA,SAAO,MAAM,KAAK,IAAI;AACxB;AAIA,IAAM,oBAAmD;AAAA,EACvD,CAAC,+DAA+D,MAAM;AAAA,EACtE,CAAC,4CAA4C,OAAO;AAAA,EACpD,CAAC,iCAAiC,SAAS;AAAA,EAC3C,CAAC,yEAAyE,UAAU;AAAA,EACpF,CAAC,0CAA0C,SAAS;AAAA,EACpD,CAAC,yDAAyD,MAAM;AAAA,EAChE,CAAC,wDAAwD,WAAW;AAAA,EACpE,CAAC,sEAAsE,MAAM;AAAA,EAC7E,CAAC,sEAAsE,OAAO;AAAA,EAC9E,CAAC,yCAAyC,KAAK;AACjD;AAKO,SAAS,cAAc,KAA2B;AACvD,MAAI;AACF,UAAM,OAAO,IAAI,IAAI,GAAG,EAAE;AAC1B,eAAW,CAAC,SAAS,QAAQ,KAAK,mBAAmB;AACnD,UAAI,QAAQ,KAAK,IAAI,EAAG,QAAO;AAAA,IACjC;AAAA,EACF,QAAQ;AAAA,EAER;AACA,SAAO;AACT;AASA,eAAsB,cACpB,UACA,SACsB;AACtB,QAAM,YAAY,KAAK,IAAI;AAC3B,QAAM,WAAW,SAAS,YAAY;AACtC,QAAM,YAAY,SAAS,aAAa;AACxC,QAAM,cAAc,SAAS,eAAe;AAC5C,QAAM,gBAAgB,SAAS,iBAAiB;AAEhD,QAAM,QAAuB,CAAC;AAC9B,QAAM,iBAAiB,oBAAI,IAAY;AACvC,QAAM,cAAc,oBAAI,IAAY;AACpC,QAAM,cAAc,oBAAI,IAAY;AACpC,QAAM,UAAU,oBAAI,IAAY;AAGhC,MAAI,cAA2B,EAAE,UAAU,CAAC,GAAG,OAAO,CAAC,EAAE;AACzD,MAAI,iBAAiB,SAAS,WAAW,MAAM;AAC7C,kBAAc,eAAe,SAAS,UAAU,IAAI;AAAA,EACtD;AAEA,QAAM,UAAU,GAAG,SAAS,QAAQ,MAAM,SAAS,MAAM;AAGzD,UAAQ,IAAI,aAAa,OAAO,CAAC;AACjC,UAAQ,IAAI,aAAa,UAAU,GAAG,CAAC;AACvC,MAAI,SAAS,YAAY;AACvB,eAAW,QAAQ,SAAS,YAAY;AACtC,UAAI,KAAK,SAAU,SAAQ,IAAI,aAAa,KAAK,QAAQ,CAAC;AAAA,IAC5D;AAAA,EACF;AAGA,QAAM,QAAkB,CAAC;AACzB,MAAI,SAAS,YAAY,MAAM;AAC7B,UAAM,cAAc,MAAM;AAAA,MACxB,SAAS,WAAW;AAAA,MACpB,SAAS;AAAA,MACT;AAAA,IACF;AACA,eAAW,OAAO,aAAa;AAC7B,YAAM,OAAO,aAAa,GAAG;AAC7B,UAAI,CAAC,QAAQ,IAAI,IAAI,GAAG;AACtB,uBAAe,IAAI,GAAG;AACtB,YAAI,CAAC,MAAM,SAAS,GAAG,EAAG,OAAM,KAAK,GAAG;AAAA,MAC1C;AAAA,IACF;AAAA,EACF;AAGA,MAAI,SAAS,UAAU,MAAM;AAC3B,UAAM,YAAY,qBAAqB,SAAS,SAAS,MAAM,SAAS,MAAM;AAC9E,eAAW,OAAO,WAAW;AAC3B,YAAM,OAAO,aAAa,GAAG;AAC7B,UAAI,CAAC,QAAQ,IAAI,IAAI,KAAK,CAAC,eAAe,IAAI,GAAG,GAAG;AAClD,uBAAe,IAAI,GAAG;AACtB,YAAI,CAAC,MAAM,SAAS,GAAG,EAAG,OAAM,KAAK,GAAG;AAAA,MAC1C;AAAA,IACF;AAAA,EACF;AAGA,SAAO,MAAM,SAAS,KAAK,YAAY,OAAO,UAAU;AAEtD,UAAM,YAAY,KAAK,IAAI,aAAa,WAAW,YAAY,MAAM,MAAM,MAAM;AACjF,UAAM,QAAkB,CAAC;AAEzB,WAAO,MAAM,SAAS,aAAa,MAAM,SAAS,GAAG;AACnD,YAAM,MAAM,MAAM,MAAM;AACxB,YAAM,OAAO,aAAa,GAAG;AAE7B,UAAI,QAAQ,IAAI,IAAI,EAAG;AACvB,cAAQ,IAAI,IAAI;AAGhB,UAAI,eAAe;AACjB,YAAI;AACF,gBAAM,OAAO,IAAI,IAAI,GAAG,EAAE;AAC1B,cAAI,qBAAqB,MAAM,WAAW,GAAG;AAC3C,wBAAY,IAAI,GAAG;AACnB;AAAA,UACF;AAAA,QACF,QAAQ;AAEN;AAAA,QACF;AAAA,MACF;AAEA,YAAM,KAAK,GAAG;AAAA,IAChB;AAEA,QAAI,MAAM,WAAW,EAAG;AAGxB,UAAM,UAAU,MAAM,QAAQ,IAAI,MAAM,IAAI,SAAO,UAAU,KAAK,SAAS,CAAC,CAAC;AAE7E,aAAS,IAAI,GAAG,IAAI,QAAQ,QAAQ,KAAK;AACvC,YAAM,SAAS,QAAQ,CAAC;AACxB,YAAM,MAAM,MAAM,CAAC;AACnB,kBAAY,IAAI,GAAG;AAEnB,UAAI,CAAC,OAAQ;AAEb,aAAO,WAAW,cAAc,GAAG;AACnC,YAAM,KAAK,MAAM;AAGjB,YAAM,WAAW,qBAAqB,OAAO,MAAM,SAAS,MAAM;AAClE,iBAAW,QAAQ,UAAU;AAC3B,cAAM,OAAO,aAAa,IAAI;AAC9B,YAAI,CAAC,QAAQ,IAAI,IAAI,KAAK,CAAC,eAAe,IAAI,IAAI,GAAG;AACnD,yBAAe,IAAI,IAAI;AACvB,gBAAM,KAAK,IAAI;AAAA,QACjB;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAGA,aAAW,OAAO,OAAO;AACvB,QAAI,CAAC,YAAY,IAAI,GAAG,GAAG;AACzB,kBAAY,IAAI,GAAG;AAAA,IACrB;AAAA,EACF;AAEA,SAAO;AAAA,IACL;AAAA,IACA,gBAAgB,MAAM,KAAK,cAAc;AAAA,IACzC,aAAa,MAAM,KAAK,WAAW;AAAA,IACnC,aAAa,MAAM,KAAK,WAAW;AAAA,IACnC,SAAS,KAAK,OAAO,KAAK,IAAI,IAAI,aAAa,GAAG,IAAI;AAAA,EACxD;AACF;AAEA,SAAS,aAAa,KAAqB;AACzC,MAAI;AACF,UAAM,SAAS,IAAI,IAAI,GAAG;AAE1B,YAAQ,OAAO,SAAS,OAAO,SAAS,QAAQ,QAAQ,EAAE,IAAI,OAAO,QAAQ,YAAY;AAAA,EAC3F,QAAQ;AACN,WAAO,IAAI,YAAY;AAAA,EACzB;AACF;","names":[]}
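A note on the crawler source embedded in the new map above: robots.txt handling uses longest-match precedence, with Allow winning ties. A minimal sketch of how the two exported helpers compose — the robots.txt content and the import path here are illustrative assumptions, not taken from the package:

import { parseRobotsTxt, isDisallowedByRobots } from './full-site-crawler.js';

// Hypothetical rules, for illustration only.
const rules = parseRobotsTxt([
  'User-agent: *',
  'Disallow: /blog',      // matches /blog and everything beneath it
  'Allow: /blog/public',  // longer rule, so it wins under /blog/public
].join('\n'));

isDisallowedByRobots('/blog/private-post', rules); // true: only Disallow matches
isDisallowedByRobots('/blog/public/post', rules);  // false: Allow match is longer
isDisallowedByRobots('/pricing', rules);           // false: no rule matches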
package/dist/cli.js CHANGED
@@ -193,7 +193,7 @@ async function prefetchSiteData(domain) {
  sitemapForBlog = subSitemap.text;
  }
  }
- const blogUrls = extractBlogUrlsFromSitemap(sitemapForBlog, domain, 10);
+ const blogUrls = extractBlogUrlsFromSitemap(sitemapForBlog, domain, 50);
  if (blogUrls.length > 0) {
  const fetched = await Promise.all(blogUrls.map((url) => fetchText(url)));
  blogSample = fetched.filter(
@@ -550,15 +550,17 @@ function checkOriginalData(data) {
  findings.push({ severity: "critical", detail: "Could not fetch homepage" });
  return { criterion: "original_data", criterion_label: "Original Data & Expert Content", score: 0, status: "not_found", findings, fix_priority: "P2" };
  }
+ const allPages = [data.homepage, ...data.blogSample || []].filter(Boolean);
  const html = data.homepage.text;
- const text = html.replace(/<[^>]*>/g, " ").replace(/\s+/g, " ");
+ const allText = allPages.map((p) => p.text.replace(/<[^>]*>/g, " ").replace(/\s+/g, " ")).join(" ");
+ const text = data.homepage.text.replace(/<[^>]*>/g, " ").replace(/\s+/g, " ");
  let score = 0;
  const statPatterns = /\d+%|\d+\s*(patients|clients|customers|cases|years|professionals|specialists|companies|users|businesses|domains|audits)/i;
- if (statPatterns.test(text)) {
+ if (statPatterns.test(allText)) {
  const researchContext = /\b(our\s+(?:study|analysis|research|data|survey|findings|report)|we\s+(?:surveyed|analyzed|studied|measured|tracked)|proprietary|methodology|original\s+research)\b/i;
- if (researchContext.test(text)) {
+ if (researchContext.test(allText)) {
  score += 3;
- findings.push({ severity: "info", detail: "Proprietary statistics with research context found on homepage" });
+ findings.push({ severity: "info", detail: "Proprietary statistics with research context found" });
  } else {
  score += 1;
  findings.push({ severity: "low", detail: 'Statistics found but without research context (e.g., "500+ clients")', fix: 'Add context about your methodology: "Our analysis of X found..." or "We surveyed Y..."' });
@@ -1073,20 +1075,24 @@ function checkFactDensity(data) {
  findings.push({ severity: "critical", detail: "Could not fetch homepage" });
  return { criterion: "fact_density", criterion_label: "Fact & Data Density", score: 0, status: "not_found", findings, fix_priority: "P2" };
  }
- const text = data.homepage.text.replace(/<[^>]*>/g, " ").replace(/\s+/g, " ");
+ const allPages = [data.homepage, ...data.blogSample || []].filter(Boolean);
+ const allText = allPages.map((p) => p.text.replace(/<[^>]*>/g, " ").replace(/\s+/g, " ")).join(" ");
+ const text = allText;
+ const pageCount = allPages.length;
  let score = 0;
  const dataPoints = text.match(/\d+(?:\.\d+)?(?:\s*%|\s*\$|\s*USD|\s*EUR)/g) || [];
  const countPhrases = text.match(/\d+(?:,\d{3})*\+?\s+(?:users?|clients?|customers?|companies|businesses|patients?|members?|employees?|projects?|downloads?)/gi) || [];
  const totalDataPoints = dataPoints.length + countPhrases.length;
- if (totalDataPoints >= 6) {
+ const avgPerPage = pageCount > 0 ? totalDataPoints / pageCount : 0;
+ if (avgPerPage >= 4) {
  score += 5;
- findings.push({ severity: "info", detail: `${totalDataPoints} quantitative data points found on homepage` });
- } else if (totalDataPoints >= 3) {
+ findings.push({ severity: "info", detail: `${totalDataPoints} quantitative data points found across ${pageCount} pages (avg ${avgPerPage.toFixed(1)}/page)` });
+ } else if (avgPerPage >= 2) {
  score += 3;
- findings.push({ severity: "info", detail: `${totalDataPoints} quantitative data points found` });
+ findings.push({ severity: "info", detail: `${totalDataPoints} quantitative data points found across ${pageCount} pages` });
  } else if (totalDataPoints >= 1) {
  score += 1;
- findings.push({ severity: "low", detail: `Only ${totalDataPoints} quantitative data point(s) found`, fix: "Add more specific numbers, percentages, and metrics to strengthen credibility" });
+ findings.push({ severity: "low", detail: `Only ${totalDataPoints} quantitative data point(s) found across ${pageCount} pages`, fix: "Add more specific numbers, percentages, and metrics to strengthen credibility" });
  } else {
  findings.push({ severity: "high", detail: "No quantitative data points found", fix: "Add specific statistics (percentages, counts, comparisons) that AI engines can cite" });
  }
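To see what the reworked fact-density thresholds do, a sketch with invented counts — v1.6 scored the raw homepage total, while v2.0 scores per-page density across the homepage plus the blog sample:

// Hypothetical audit: 42 data points across 12 fetched pages.
const totalDataPoints = 42;
const pageCount = 12;
const avgPerPage = pageCount > 0 ? totalDataPoints / pageCount : 0; // 3.5

// 2 <= 3.5 < 4, so v2.0 lands in the middle band: score += 3.
// v1.6 would have given the top score (5) to any homepage with >= 6 points.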
@@ -1192,9 +1198,9 @@ function countRecentSitemapDates(sitemapText) {
  distinctRecentDays: recentDays.size
  };
  }
- var BLOG_PATH_PATTERNS = /\/(?:blog|articles?|insights?|guides?|resources?|news|posts?|learn|help|how-?to|tutorials?|case-stud|whitepapers?)\b/i;
+ var BLOG_PATH_PATTERNS = /\/(?:[^/]*-?)?(?:blog|articles?|insights?|guides?|resources?|news|posts?|learn|help|how-?to|tutorials?|case-stud|whitepapers?)\b/i;
  var EXCLUDE_PATH_PATTERNS = /\/(?:tag|category|author|page|feed|wp-content|wp-admin|wp-json|cart|checkout|login|search|api|static|assets|_next)\b/i;
- function extractBlogUrlsFromSitemap(sitemapText, domain, limit = 5) {
+ function extractBlogUrlsFromSitemap(sitemapText, domain, limit = 50) {
  const urlBlocks = sitemapText.match(/<url>([\s\S]*?)<\/url>/gi) || [];
  const candidates = [];
  const cleanDomain = domain.replace(/^www\./, "").toLowerCase();
@@ -1490,7 +1496,7 @@ function jaccardSimilarity(a, b) {
  const union = a.size + b.size - intersection;
  return union === 0 ? 0 : intersection / union;
  }
- function checkContentCannibalization(data) {
+ function checkContentCannibalization(data, topicCoherenceScore) {
  const findings = [];
  if (!data.homepage) {
  findings.push({ severity: "critical", detail: "No homepage available for cannibalization analysis" });
@@ -1500,7 +1506,7 @@ function checkContentCannibalization(data) {
  { html: data.homepage.text, url: data.homepage.finalUrl || `https://${data.domain}/` }
  ];
  if (data.blogSample) {
- for (const page of data.blogSample.slice(0, 5)) {
+ for (const page of data.blogSample) {
  pages.push({ html: page.text, url: page.finalUrl || "" });
  }
  }
@@ -1510,10 +1516,29 @@ function checkContentCannibalization(data) {
  }
  const pageTitles = pages.map((p) => ({ title: extractPageTitle(p.html), url: p.url }));
  const wordSets = pageTitles.map((p) => titleToWordSet(p.title));
+ const termPageCount = /* @__PURE__ */ new Map();
+ for (const ws of wordSets) {
+ for (const w of ws) {
+ termPageCount.set(w, (termPageCount.get(w) || 0) + 1);
+ }
+ }
+ const commonTermThreshold = Math.max(3, pages.length * 0.4);
+ const siteThemeTerms = /* @__PURE__ */ new Set();
+ for (const [term, count] of termPageCount) {
+ if (count >= commonTermThreshold) siteThemeTerms.add(term);
+ }
+ const filteredSets = wordSets.map((ws) => {
+ const filtered = /* @__PURE__ */ new Set();
+ for (const w of ws) {
+ if (!siteThemeTerms.has(w)) filtered.add(w);
+ }
+ return filtered;
+ });
  const cannibalPairs = [];
  for (let i = 0; i < pages.length; i++) {
  for (let j = i + 1; j < pages.length; j++) {
- const sim = jaccardSimilarity(wordSets[i], wordSets[j]);
+ if (filteredSets[i].size === 0 && filteredSets[j].size === 0) continue;
+ const sim = jaccardSimilarity(filteredSets[i], filteredSets[j]);
  if (sim > 0.6) {
  cannibalPairs.push({
  urlA: pageTitles[i].url.slice(0, 60),
@@ -1523,23 +1548,39 @@ function checkContentCannibalization(data) {
  }
  }
  }
+ const cannibalUrls = /* @__PURE__ */ new Set();
+ for (const pair of cannibalPairs) {
+ cannibalUrls.add(pair.urlA);
+ cannibalUrls.add(pair.urlB);
+ }
+ const cannibalRatio = pages.length > 0 ? cannibalUrls.size / pages.length : 0;
  let score;
  if (cannibalPairs.length === 0) {
  score = 10;
  findings.push({ severity: "info", detail: `${pages.length} pages analyzed - no content cannibalization detected` });
- } else if (cannibalPairs.length === 1) {
- score = 8;
- findings.push({ severity: "low", detail: `1 pair of pages with overlapping topics (${cannibalPairs[0].similarity}% similarity)`, fix: "Differentiate titles and H1 headings to reduce topic overlap" });
- } else if (cannibalPairs.length === 2) {
+ } else if (cannibalRatio <= 0.05) {
+ score = 9;
+ findings.push({ severity: "info", detail: `${cannibalPairs.length} pair(s) of pages with minor topic overlap (${cannibalUrls.size}/${pages.length} pages affected)` });
+ } else if (cannibalRatio <= 0.1) {
+ score = 7;
+ findings.push({ severity: "low", detail: `${cannibalUrls.size} pages (${Math.round(cannibalRatio * 100)}%) have overlapping topics`, fix: "Differentiate titles and H1 headings to reduce topic overlap" });
+ } else if (cannibalRatio <= 0.2) {
  score = 5;
- findings.push({ severity: "medium", detail: `${cannibalPairs.length} pairs of pages with overlapping topics`, fix: "Consolidate overlapping pages or differentiate their titles and content focus" });
+ findings.push({ severity: "medium", detail: `${cannibalUrls.size} pages (${Math.round(cannibalRatio * 100)}%) competing for overlapping topics`, fix: "Consolidate overlapping pages or differentiate their titles and content focus" });
+ } else if (cannibalRatio <= 0.4) {
+ score = 3;
+ findings.push({ severity: "medium", detail: `${cannibalUrls.size} pages (${Math.round(cannibalRatio * 100)}%) have significant content overlap`, fix: "Many pages compete for the same topics - consolidate or clearly differentiate them" });
  } else {
  score = 0;
- findings.push({ severity: "high", detail: `${cannibalPairs.length} pairs of pages competing for the same topics`, fix: "Significant content overlap detected - consolidate or clearly differentiate competing pages" });
+ findings.push({ severity: "high", detail: `${cannibalUrls.size} pages (${Math.round(cannibalRatio * 100)}%) competing for the same topics`, fix: "Severe content cannibalization - consolidate overlapping pages or create clear topic differentiation" });
  }
  for (const pair of cannibalPairs.slice(0, 3)) {
  findings.push({ severity: "low", detail: `Overlap (${pair.similarity}%): ${pair.urlA} vs ${pair.urlB}` });
  }
+ if (topicCoherenceScore !== void 0 && topicCoherenceScore <= 4 && score >= 8) {
+ score = 6;
+ findings.push({ severity: "low", detail: "Low topic overlap but content lacks coherent focus - not a strong signal for AI authority", fix: "Focus content on fewer core topics to build topical authority that AI engines can identify" });
+ }
  return { criterion: "content_cannibalization", criterion_label: "Content Cannibalization", score, status: score >= 7 ? "pass" : score >= 4 ? "partial" : "fail", findings, fix_priority: score >= 7 ? "P3" : "P1" };
  }
  function checkVisibleDateSignal(data) {
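The rewritten cannibalization scoring keys off the share of pages involved in an overlapping pair rather than the raw pair count. A worked sketch with invented numbers:

// Hypothetical crawl: 40 pages analyzed; 3 overlapping title pairs
// touch 4 distinct URLs.
const pagesAnalyzed = 40;
const affectedUrls = 4;
const cannibalRatio = affectedUrls / pagesAnalyzed; // 0.1

// ratio <= 0.1 => score 7 with a "low" finding. Under the v1.6 pair-count
// logic, 3 pairs would have fallen through to the score-0 branch.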
@@ -1765,7 +1806,233 @@ function extractRawDataSummary(data) {
  crawl_skipped: data.crawlStats?.skipped ?? 0
  };
  }
+ function getPageTopicText(html) {
+ const titleMatch = html.match(/<title[^>]*>([^<]+)<\/title>/i);
+ const h1Match = html.match(/<h1[^>]*>([\s\S]*?)<\/h1>/i);
+ return [
+ titleMatch?.[1] || "",
+ h1Match?.[1]?.replace(/<[^>]*>/g, "") || ""
+ ].join(" ").toLowerCase().trim();
+ }
+ function extractBigrams(text) {
+ const words = text.split(/[\s,.!?;:()\[\]{}"'\/&]+/).filter((w) => w.length > 2 && !STOP_WORDS.has(w) && !/^\d+$/.test(w));
+ const bigrams = [];
+ for (let i = 0; i < words.length - 1; i++) {
+ bigrams.push(words[i] + " " + words[i + 1]);
+ }
+ return bigrams;
+ }
+ function checkTopicCoherence(data) {
+ const findings = [];
+ if (!data.homepage) {
+ findings.push({ severity: "critical", detail: "Could not fetch homepage" });
+ return { criterion: "topic_coherence", criterion_label: "Topic Coherence", score: 0, status: "not_found", findings, fix_priority: "P0" };
+ }
+ if (!data.blogSample || data.blogSample.length < 3) {
+ findings.push({ severity: "info", detail: `Only ${data.blogSample?.length || 0} blog pages found - insufficient for topic coherence analysis` });
+ return { criterion: "topic_coherence", criterion_label: "Topic Coherence", score: 5, status: "partial", findings, fix_priority: "P2" };
+ }
+ const blogPages = data.blogSample;
+ const domainBase = data.domain.replace(/^www\./, "").replace(/\.(com|org|net|io|co|ai)$/i, "").toLowerCase();
+ const brandWords = /* @__PURE__ */ new Set();
+ brandWords.add(domainBase);
+ for (const part of domainBase.split(/[-_]/)) {
+ if (part.length > 2) brandWords.add(part);
+ }
+ const rawTermFreq = /* @__PURE__ */ new Map();
+ const pageTitleTexts = [];
+ for (const page of blogPages) {
+ const topicText = getPageTopicText(page.text);
+ pageTitleTexts.push(topicText);
+ const words = topicText.split(/[\s,.!?;:()\[\]{}"'\/&]+/).filter((w) => w.length > 2 && !STOP_WORDS.has(w) && !/^\d+$/.test(w));
+ const uniqueWords = new Set(words);
+ for (const w of uniqueWords) {
+ rawTermFreq.set(w, (rawTermFreq.get(w) || 0) + 1);
+ }
+ }
+ for (const [term, count] of rawTermFreq) {
+ if (count / blogPages.length >= 0.8 && domainBase.includes(term)) {
+ brandWords.add(term);
+ }
+ }
+ const termFreq = /* @__PURE__ */ new Map();
+ for (const page of blogPages) {
+ const topicText = getPageTopicText(page.text);
+ const words = topicText.split(/[\s,.!?;:()\[\]{}"'\/&]+/).filter((w) => w.length > 2 && !STOP_WORDS.has(w) && !/^\d+$/.test(w) && !brandWords.has(w));
+ const uniqueWords = new Set(words);
+ for (const w of uniqueWords) {
+ termFreq.set(w, (termFreq.get(w) || 0) + 1);
+ }
+ }
+ const sortedTerms = [...termFreq.entries()].sort((a, b) => b[1] - a[1]);
+ const topTerm = sortedTerms[0];
+ const bigramFreq = /* @__PURE__ */ new Map();
+ const pageBigrams = [];
+ for (const topicText of pageTitleTexts) {
+ const bigrams = extractBigrams(topicText).filter((bg) => !bg.split(" ").some((w) => brandWords.has(w)));
+ pageBigrams.push(bigrams);
+ const uniqueBigrams = new Set(bigrams);
+ for (const bg of uniqueBigrams) {
+ bigramFreq.set(bg, (bigramFreq.get(bg) || 0) + 1);
+ }
+ }
+ const sortedBigrams = [...bigramFreq.entries()].sort((a, b) => b[1] - a[1]);
+ const topBigram = sortedBigrams[0];
+ const significantBigrams = sortedBigrams.filter(([, count]) => count >= 2);
+ const clusterRoots = [];
+ const assigned = /* @__PURE__ */ new Set();
+ for (const [bg] of significantBigrams) {
+ if (assigned.has(bg)) continue;
+ clusterRoots.push(bg);
+ assigned.add(bg);
+ const [w1, w2] = bg.split(" ");
+ for (const [otherBg] of significantBigrams) {
+ if (assigned.has(otherBg)) continue;
+ if (otherBg.includes(w1) || otherBg.includes(w2)) {
+ assigned.add(otherBg);
+ }
+ }
+ }
+ const topicClusterCount = clusterRoots.length;
+ const dominantTerm = topTerm?.[0] || "";
+ const dominantTermCount = topTerm?.[1] || 0;
+ const focusRatio = blogPages.length > 0 ? dominantTermCount / blogPages.length : 0;
+ const dominantBigram = topBigram?.[0] || "";
+ const dominantBigramCount = topBigram?.[1] || 0;
+ const bigramFocusRatio = blogPages.length > 0 ? dominantBigramCount / blogPages.length : 0;
+ let score = 0;
+ const bestFocusRatio = Math.max(focusRatio, bigramFocusRatio);
+ if (bestFocusRatio >= 0.8) {
+ score += 7;
+ } else if (bestFocusRatio >= 0.6) {
+ score += 6;
+ } else if (bestFocusRatio >= 0.45) {
+ score += 5;
+ } else if (bestFocusRatio >= 0.3) {
+ score += 3;
+ } else if (bestFocusRatio >= 0.15) {
+ score += 2;
+ } else {
+ score += 1;
+ }
+ const clusterPenaltyReduced = focusRatio >= 0.7;
+ if (topicClusterCount <= 3) {
+ score += 3;
+ findings.push({ severity: "info", detail: `${topicClusterCount} topic cluster(s) - tightly focused content` });
+ } else if (topicClusterCount <= 6) {
+ score += clusterPenaltyReduced ? 2 : 1;
+ findings.push({ severity: "info", detail: `${topicClusterCount} topic clusters${clusterPenaltyReduced ? " within a focused niche" : " - moderately focused"}` });
+ } else if (topicClusterCount <= 10) {
+ score += clusterPenaltyReduced ? 1 : 0;
+ if (!clusterPenaltyReduced) {
+ findings.push({ severity: "low", detail: `${topicClusterCount} topic clusters - scattered content`, fix: "Reduce the number of distinct topics. Focus blog content on 2-3 core expertise areas." });
+ } else {
+ findings.push({ severity: "info", detail: `${topicClusterCount} topic clusters but strong core topic focus (${Math.round(focusRatio * 100)}%)` });
+ }
+ } else {
+ score += clusterPenaltyReduced ? 0 : -2;
+ if (!clusterPenaltyReduced) {
+ findings.push({ severity: "medium", detail: `${topicClusterCount} topic clusters - highly scattered content`, fix: "Content covers too many unrelated topics. AI engines cannot identify your expertise. Focus on your core niche." });
+ } else {
+ findings.push({ severity: "low", detail: `${topicClusterCount} topic clusters despite strong core topic focus`, fix: "Consider narrowing subtopics within your niche for even stronger AI visibility." });
+ }
+ }
+ score = Math.max(0, Math.min(10, score));
+ if (dominantTerm) {
+ const focusPct = Math.round(focusRatio * 100);
+ findings.push({ severity: "info", detail: `Dominant topic term: "${dominantTerm}" (${focusPct}% of ${blogPages.length} pages)` });
+ }
+ if (dominantBigram && dominantBigramCount >= 2) {
+ findings.push({ severity: "info", detail: `Dominant topic phrase: "${dominantBigram}" (${dominantBigramCount}/${blogPages.length} pages)` });
+ }
+ const offTopicExamples = [];
+ for (let i = 0; i < pageTitleTexts.length && offTopicExamples.length < 3; i++) {
+ if (dominantTerm && !pageTitleTexts[i].includes(dominantTerm)) {
+ const title = blogPages[i].text.match(/<title[^>]*>([^<]+)<\/title>/i)?.[1]?.trim();
+ if (title && title.length > 3) offTopicExamples.push(title.slice(0, 60));
+ }
+ }
+ if (offTopicExamples.length > 0 && score < 8) {
+ findings.push({ severity: "low", detail: `Off-topic examples: ${offTopicExamples.join("; ")}` });
+ }
+ return { criterion: "topic_coherence", criterion_label: "Topic Coherence", score, status: score >= 7 ? "pass" : score >= 4 ? "partial" : "fail", findings, fix_priority: score >= 7 ? "P3" : "P0" };
+ }
+ function countWords(html) {
+ const text = html.replace(/<script[^>]*>[\s\S]*?<\/script>/gi, "").replace(/<style[^>]*>[\s\S]*?<\/style>/gi, "").replace(/<[^>]*>/g, " ").replace(/\s+/g, " ").trim();
+ return text.split(/\s+/).filter((w) => w.length > 0).length;
+ }
+ function countHeadings(html) {
+ const headings = html.match(/<h[2-6][^>]*>/gi) || [];
+ return headings.length;
+ }
+ function checkContentDepth(data, topicCoherenceScore) {
+ const findings = [];
+ if (!data.blogSample || data.blogSample.length < 2) {
+ findings.push({ severity: "info", detail: `Only ${data.blogSample?.length || 0} blog pages found - insufficient for depth analysis` });
+ return { criterion: "content_depth", criterion_label: "Content Depth", score: 3, status: "partial", findings, fix_priority: "P2" };
+ }
+ const blogPages = data.blogSample;
+ const wordCounts = blogPages.map((p) => countWords(p.text));
+ const headingCounts = blogPages.map((p) => countHeadings(p.text));
+ const avgWords = wordCounts.reduce((a, b) => a + b, 0) / wordCounts.length;
+ const avgHeadings = headingCounts.reduce((a, b) => a + b, 0) / headingCounts.length;
+ const deepPages = wordCounts.filter((w) => w >= 1e3).length;
+ const thinPages = wordCounts.filter((w) => w < 300).length;
+ const deepRatio = deepPages / blogPages.length;
+ const thinRatio = thinPages / blogPages.length;
+ let score = 0;
+ if (avgWords >= 2e3) {
+ score += 5;
+ findings.push({ severity: "info", detail: `Average ${Math.round(avgWords)} words per page across ${blogPages.length} pages - excellent depth` });
+ } else if (avgWords >= 1200) {
+ score += 4;
+ findings.push({ severity: "info", detail: `Average ${Math.round(avgWords)} words per page across ${blogPages.length} pages - good depth` });
+ } else if (avgWords >= 800) {
+ score += 3;
+ findings.push({ severity: "info", detail: `Average ${Math.round(avgWords)} words per page - moderate depth` });
+ } else if (avgWords >= 400) {
+ score += 2;
+ findings.push({ severity: "low", detail: `Average ${Math.round(avgWords)} words per page - shallow content`, fix: "Expand articles with more detail, examples, and expert analysis to build AI citation authority" });
+ } else {
+ score += 1;
+ findings.push({ severity: "medium", detail: `Average ${Math.round(avgWords)} words per page - very thin content`, fix: "Content is too thin for AI engines to cite. Aim for 1000+ words per article with structured sections." });
+ }
+ if (avgHeadings >= 8) {
+ score += 3;
+ findings.push({ severity: "info", detail: `Average ${avgHeadings.toFixed(1)} subheadings per page - well-structured` });
+ } else if (avgHeadings >= 5) {
+ score += 2;
+ findings.push({ severity: "info", detail: `Average ${avgHeadings.toFixed(1)} subheadings per page - decent structure` });
+ } else if (avgHeadings >= 2) {
+ score += 1;
+ findings.push({ severity: "low", detail: `Average ${avgHeadings.toFixed(1)} subheadings per page`, fix: "Add more H2/H3 headings to break content into extractable sections" });
+ } else {
+ findings.push({ severity: "medium", detail: `Average ${avgHeadings.toFixed(1)} subheadings per page - minimal structure`, fix: "Add question-format H2/H3 headings so AI engines can extract specific answers" });
+ }
+ if (deepRatio >= 0.5) {
+ score += 2;
+ findings.push({ severity: "info", detail: `${deepPages}/${blogPages.length} pages (${Math.round(deepRatio * 100)}%) have 1000+ words` });
+ } else if (deepRatio >= 0.25) {
+ score += 1;
+ findings.push({ severity: "info", detail: `${deepPages}/${blogPages.length} pages have 1000+ words` });
+ }
+ if (thinRatio >= 0.5) {
+ score = Math.max(0, score - 2);
+ findings.push({ severity: "medium", detail: `${thinPages}/${blogPages.length} pages (${Math.round(thinRatio * 100)}%) have under 300 words - high thin content ratio`, fix: "Remove or expand thin pages. Thin content dilutes site quality for AI engines." });
+ } else if (thinRatio >= 0.25) {
+ score = Math.max(0, score - 1);
+ findings.push({ severity: "low", detail: `${thinPages}/${blogPages.length} pages have under 300 words` });
+ }
+ let finalScore = Math.min(10, score);
+ if (topicCoherenceScore !== void 0 && topicCoherenceScore <= 4 && finalScore >= 8) {
+ finalScore = 7;
+ findings.push({ severity: "low", detail: "Deep content but low topic coherence - depth on scattered topics has reduced AI citation value", fix: "Focus content depth on your core expertise area for maximum AI visibility" });
+ }
+ return { criterion: "content_depth", criterion_label: "Content Depth", score: finalScore, status: finalScore >= 7 ? "pass" : finalScore >= 4 ? "partial" : "fail", findings, fix_priority: finalScore >= 7 ? "P3" : "P1" };
+ }
  function auditSiteFromData(data) {
+ const topicCoherence = checkTopicCoherence(data);
+ const cannibalization = checkContentCannibalization(data, topicCoherence.score);
  return [
  checkLlmsTxt(data),
  checkSchemaMarkup(data),
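How the new checkTopicCoherence score composes: the best focus ratio (dominant term or bigram share of the blog sample) sets a base of 1-7 points, and the bigram cluster count adjusts it. A sketch with invented inputs:

// Hypothetical blog sample: 10 posts; the dominant term appears on 7,
// the dominant bigram on 5, and the titles form 5 bigram clusters.
const focusRatio = 7 / 10;       // 0.7
const bigramFocusRatio = 5 / 10; // 0.5
const bestFocusRatio = Math.max(focusRatio, bigramFocusRatio); // 0.7

let score = 6;                   // 0.6 <= 0.7 < 0.8 band
const clusterPenaltyReduced = focusRatio >= 0.7; // true
score += clusterPenaltyReduced ? 2 : 1;          // 5 clusters falls in the <= 6 band
// score = 8 => status "pass", fix_priority "P3"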
@@ -1791,47 +2058,55 @@ function auditSiteFromData(data) {
  checkSchemaCoverage(data),
  checkSpeakableSchema(data),
  checkQueryAnswerAlignment(data),
- checkContentCannibalization(data),
- checkVisibleDateSignal(data)
+ cannibalization,
+ checkVisibleDateSignal(data),
+ topicCoherence,
+ checkContentDepth(data, topicCoherence.score)
  ];
  }

  // src/scoring.ts
  var WEIGHTS = {
- // Original 10
- llms_txt: 0.1,
- schema_markup: 0.15,
- qa_content_format: 0.15,
- clean_html: 0.1,
- entity_consistency: 0.1,
- robots_txt: 0.05,
- faq_section: 0.1,
- original_data: 0.1,
- internal_linking: 0.1,
- semantic_html: 0.05,
- // New 12
- content_freshness: 0.07,
- sitemap_completeness: 0.05,
- rss_feed: 0.03,
- table_list_extractability: 0.07,
- definition_patterns: 0.04,
+ // ─── Core Content (high weight - these determine real AI citation quality) ──
+ qa_content_format: 0.12,
+ original_data: 0.12,
+ topic_coherence: 0.14,
+ // NEW v2.0: biggest predictor of AI citation quality
+ fact_density: 0.08,
  direct_answer_density: 0.07,
- content_licensing: 0.04,
+ content_depth: 0.06,
+ // NEW v2.0: substantive content vs thin pages
+ // ─── Structure & Discovery (medium weight - technical readiness) ────────────
+ schema_markup: 0.08,
+ llms_txt: 0.08,
+ clean_html: 0.08,
+ entity_consistency: 0.08,
+ faq_section: 0.08,
+ internal_linking: 0.08,
+ // ─── Content Signals (moderate weight) ──────────────────────────────────────
+ content_freshness: 0.06,
+ table_list_extractability: 0.05,
+ query_answer_alignment: 0.06,
+ definition_patterns: 0.04,
  author_schema_depth: 0.04,
- fact_density: 0.05,
- canonical_url: 0.04,
- content_velocity: 0.03,
- schema_coverage: 0.03,
- speakable_schema: 0.03,
- query_answer_alignment: 0.08,
  content_cannibalization: 0.05,
- visible_date_signal: 0.04
+ visible_date_signal: 0.04,
+ semantic_html: 0.04,
+ // ─── Plumbing (low weight - nice to have but not what drives citations) ─────
+ robots_txt: 0.03,
+ sitemap_completeness: 0.03,
+ content_velocity: 0.03,
+ rss_feed: 0.02,
+ content_licensing: 0.03,
+ canonical_url: 0.02,
+ schema_coverage: 0.02,
+ speakable_schema: 0.02
  };
  function calculateOverallScore(criteria) {
  let totalWeight = 0;
  let weightedSum = 0;
  for (const c of criteria) {
- const weight = WEIGHTS[c.criterion] ?? 0.1;
+ const weight = WEIGHTS[c.criterion] ?? 0.05;
  weightedSum += c.score / 10 * weight * 100;
  totalWeight += weight;
  }
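calculateOverallScore is a weighted mean of per-criterion scores; the running totalWeight suggests the final value is normalized by the weights actually present, though the return statement falls outside this hunk. A two-criterion sketch with invented scores:

// Hypothetical results under the v2.0 weights shown above.
const criteria = [
  { criterion: 'topic_coherence', score: 8 }, // weight 0.14
  { criterion: 'rss_feed', score: 4 },        // weight 0.02
];
// weightedSum = 8/10 * 0.14 * 100 + 4/10 * 0.02 * 100 = 11.2 + 0.8 = 12
// totalWeight = 0.16; normalizing would give 12 / 0.16 = 75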
@@ -1950,7 +2225,9 @@ var CRITERION_LABELS = {
  "Speakable Schema": "Speakable Schema",
  "Query-Answer Alignment": "Query-Answer Alignment",
  "Content Cannibalization": "Content Cannibalization",
- "Visible Date Signal": "Visible Date Signal"
+ "Visible Date Signal": "Visible Date Signal",
+ "Topic Coherence": "Topic Coherence",
+ "Content Depth": "Content Depth"
  };
  function scoreToStatus(score) {
  if (score === 0) return "MISSING";
@@ -2845,7 +3122,7 @@ function extractTitle(html) {
  function getTextContent2(html) {
  return html.replace(/<script[\s\S]*?<\/script>/gi, "").replace(/<style[\s\S]*?<\/style>/gi, "").replace(/<[^>]*>/g, " ").replace(/\s+/g, " ").trim();
  }
- function countWords(text) {
+ function countWords2(text) {
  if (!text) return 0;
  return text.split(/\s+/).filter((w) => w.length > 0).length;
  }
@@ -2996,7 +3273,7 @@ function checkHasQuestionHeadings(html) {
  function analyzePage(html, url, category) {
  const title = extractTitle(html);
  const textContent = getTextContent2(html);
- const wordCount = countWords(textContent);
+ const wordCount = countWords2(textContent);
  const issues = [];
  const strengths = [];
  const issueChecks = [
@@ -3076,7 +3353,7 @@ async function audit(domain, options) {
  }
  }
  if (options?.fullCrawl) {
- const { crawlFullSite } = await import("./full-site-crawler-F7J2HRL4.js");
+ const { crawlFullSite } = await import("./full-site-crawler-FQYO46YV.js");
  const crawlResult = await crawlFullSite(siteData, {
  maxPages: options.maxPages ?? 200,
  concurrency: options.concurrency ?? 5