scrapex 0.5.2 → 1.0.0-alpha.1
This diff shows the published contents of two package versions as they appear in their public registries. It is provided for informational purposes only.
- package/LICENSE +1 -1
- package/README.md +392 -145
- package/dist/enhancer-Q6CSc1gA.mjs +220 -0
- package/dist/enhancer-Q6CSc1gA.mjs.map +1 -0
- package/dist/enhancer-oM4BhYYS.cjs +268 -0
- package/dist/enhancer-oM4BhYYS.cjs.map +1 -0
- package/dist/index.cjs +852 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +264 -0
- package/dist/index.d.cts.map +1 -0
- package/dist/index.d.mts +264 -0
- package/dist/index.d.mts.map +1 -0
- package/dist/index.mjs +798 -0
- package/dist/index.mjs.map +1 -0
- package/dist/llm/index.cjs +316 -0
- package/dist/llm/index.cjs.map +1 -0
- package/dist/llm/index.d.cts +211 -0
- package/dist/llm/index.d.cts.map +1 -0
- package/dist/llm/index.d.mts +211 -0
- package/dist/llm/index.d.mts.map +1 -0
- package/dist/llm/index.mjs +310 -0
- package/dist/llm/index.mjs.map +1 -0
- package/dist/parsers/index.cjs +200 -0
- package/dist/parsers/index.cjs.map +1 -0
- package/dist/parsers/index.d.cts +133 -0
- package/dist/parsers/index.d.cts.map +1 -0
- package/dist/parsers/index.d.mts +133 -0
- package/dist/parsers/index.d.mts.map +1 -0
- package/dist/parsers/index.mjs +192 -0
- package/dist/parsers/index.mjs.map +1 -0
- package/dist/types-CNQZVW36.d.mts +150 -0
- package/dist/types-CNQZVW36.d.mts.map +1 -0
- package/dist/types-D0HYR95H.d.cts +150 -0
- package/dist/types-D0HYR95H.d.cts.map +1 -0
- package/package.json +80 -100
- package/dist/index.d.ts +0 -45
- package/dist/index.js +0 -8
- package/dist/scrapex.cjs.development.js +0 -1128
- package/dist/scrapex.cjs.development.js.map +0 -1
- package/dist/scrapex.cjs.production.min.js +0 -2
- package/dist/scrapex.cjs.production.min.js.map +0 -1
- package/dist/scrapex.esm.js +0 -1120
- package/dist/scrapex.esm.js.map +0 -1
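Before the per-file hunks, note what the new dist/ layout implies: the old tsdx-style bundles (scrapex.cjs.*, scrapex.esm.js) are replaced by parallel ESM (.mjs/.d.mts) and CJS (.cjs/.d.cts) builds, plus separate llm/ and parsers/ bundles. A minimal consumer sketch, assuming the rewritten package.json wires these up as conditional and subpath exports (the exact "exports" map is not shown in this diff):

```ts
// ESM: resolves dist/index.mjs with types from dist/index.d.mts;
// a CJS require('scrapex') resolves dist/index.cjs / index.d.cts instead.
import { scrape, scrapeHtml, ScrapeError } from 'scrapex';

// Assumed subpath entry backed by dist/llm/index.mjs; the names it exports
// are not visible in this diff, hence the namespace import.
import * as llm from 'scrapex/llm';
```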
package/dist/index.cjs.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"index.cjs","names":["jsdomModule: typeof import('jsdom') | null","document: Document | null","turndown","TurndownService","Readability","jsonLd: Record<string, unknown>[]","result: Partial<ScrapedData>","links: ExtractedLink[]","parsedUrl: URL","ScrapeError","responseHeaders: Record<string, string>","rules: RobotsRules","ScrapeError","extractors: Extractor[]","intermediateResult: ScrapedData","enhance","extract"],"sources":["../src/core/context.ts","../src/extractors/content.ts","../src/utils/url.ts","../src/extractors/favicon.ts","../src/extractors/jsonld.ts","../src/extractors/links.ts","../src/extractors/meta.ts","../src/extractors/index.ts","../src/fetchers/types.ts","../src/fetchers/fetch.ts","../src/fetchers/robots.ts","../src/core/scrape.ts"],"sourcesContent":["import type { CheerioAPI } from 'cheerio';\nimport * as cheerio from 'cheerio';\nimport type { ExtractionContext, ScrapedData, ScrapeOptions } from './types.js';\n\n// Cached JSDOM module for lazy loading\nlet jsdomModule: typeof import('jsdom') | null = null;\n\n/**\n * Preload JSDOM module (called once during scrape initialization)\n */\nexport async function preloadJsdom(): Promise<void> {\n if (!jsdomModule) {\n jsdomModule = await import('jsdom');\n }\n}\n\n/**\n * Create an extraction context with lazy JSDOM loading.\n *\n * Cheerio is always available for fast DOM queries.\n * JSDOM is only loaded when getDocument() is called (for Readability).\n */\nexport function createExtractionContext(\n url: string,\n finalUrl: string,\n html: string,\n options: ScrapeOptions\n): ExtractionContext {\n // Lazy-loaded JSDOM document\n let document: Document | null = null;\n\n // Parse HTML with Cheerio (fast, always available)\n const $: CheerioAPI = cheerio.load(html);\n\n return {\n url,\n finalUrl,\n html,\n $,\n options,\n results: {},\n\n getDocument(): Document {\n // Use preloaded JSDOM module\n if (!document) {\n if (!jsdomModule) {\n throw new Error('JSDOM not preloaded. Call preloadJsdom() before using getDocument().');\n }\n const dom = new jsdomModule.JSDOM(html, { url: finalUrl });\n document = dom.window.document;\n }\n return document;\n },\n };\n}\n\n/**\n * Merge partial results into the context\n */\nexport function mergeResults(\n context: ExtractionContext,\n extracted: Partial<ScrapedData>\n): ExtractionContext {\n return {\n ...context,\n results: {\n ...context.results,\n ...extracted,\n // Merge custom fields if both exist\n custom:\n extracted.custom || context.results.custom\n ? 
{ ...context.results.custom, ...extracted.custom }\n : undefined,\n },\n };\n}\n","import { Readability } from '@mozilla/readability';\nimport TurndownService from 'turndown';\nimport type { ContentType, ExtractionContext, Extractor, ScrapedData } from '@/core/types.js';\n\n// Initialize Turndown with sensible defaults\nconst turndown = new TurndownService({\n headingStyle: 'atx',\n codeBlockStyle: 'fenced',\n bulletListMarker: '-',\n emDelimiter: '_',\n strongDelimiter: '**',\n linkStyle: 'inlined',\n});\n\n// Remove script, style, and other noise\nturndown.remove(['script', 'style', 'noscript', 'iframe', 'nav', 'footer']);\n\n/**\n * Extracts main content using Mozilla Readability.\n * Converts HTML to Markdown for LLM consumption.\n */\nexport class ContentExtractor implements Extractor {\n readonly name = 'content';\n readonly priority = 50; // Medium priority - runs after meta\n\n async extract(context: ExtractionContext): Promise<Partial<ScrapedData>> {\n const { options } = context;\n\n // Skip if content extraction is disabled\n if (options.extractContent === false) {\n return {};\n }\n\n // Use JSDOM for Readability (lazy-loaded)\n const document = context.getDocument();\n const clonedDoc = document.cloneNode(true) as Document;\n\n // Run Readability\n const reader = new Readability(clonedDoc);\n const article = reader.parse();\n\n if (!article || !article.content) {\n // Fallback: extract body text\n return this.extractFallback(context);\n }\n\n // Convert to markdown\n let content = turndown.turndown(article.content);\n\n // Truncate if needed\n const maxLength = options.maxContentLength ?? 50000;\n if (content.length > maxLength) {\n content = `${content.slice(0, maxLength)}\\n\\n[Content truncated...]`;\n }\n\n // Plain text content\n const textContent = (article.textContent ?? '').trim();\n\n // Create excerpt\n const excerpt = this.createExcerpt(textContent);\n\n // Word count\n const wordCount = textContent.split(/\\s+/).filter(Boolean).length;\n\n // Detect content type\n const contentType = this.detectContentType(context);\n\n return {\n content,\n textContent,\n excerpt: article.excerpt || excerpt,\n wordCount,\n contentType,\n // Readability may provide better values than meta tags\n title: article.title || undefined,\n author: article.byline || undefined,\n siteName: article.siteName || undefined,\n };\n }\n\n private extractFallback(context: ExtractionContext): Partial<ScrapedData> {\n const { $ } = context;\n\n // Try to get body content\n const bodyHtml = $('body').html() || '';\n const content = turndown.turndown(bodyHtml);\n const textContent = $('body').text().replace(/\\s+/g, ' ').trim();\n\n return {\n content: content.slice(0, context.options.maxContentLength ?? 50000),\n textContent,\n excerpt: this.createExcerpt(textContent),\n wordCount: textContent.split(/\\s+/).filter(Boolean).length,\n contentType: 'unknown',\n };\n }\n\n private createExcerpt(text: string, maxLength = 300): string {\n if (text.length <= maxLength) {\n return text;\n }\n // Try to break at word boundary\n const truncated = text.slice(0, maxLength);\n const lastSpace = truncated.lastIndexOf(' ');\n return `${lastSpace > 0 ? 
truncated.slice(0, lastSpace) : truncated}...`;\n }\n\n private detectContentType(context: ExtractionContext): ContentType {\n const { $, finalUrl } = context;\n const url = finalUrl.toLowerCase();\n\n // GitHub repo\n if (url.includes('github.com') && !url.includes('/blob/') && !url.includes('/issues/')) {\n const repoMeta = $('meta[property=\"og:type\"]').attr('content');\n if (repoMeta === 'object' || url.match(/github\\.com\\/[^/]+\\/[^/]+\\/?$/)) {\n return 'repo';\n }\n }\n\n // npm package\n if (url.includes('npmjs.com/package/')) {\n return 'package';\n }\n\n // PyPI package\n if (url.includes('pypi.org/project/')) {\n return 'package';\n }\n\n // Documentation sites\n if (\n url.includes('/docs/') ||\n url.includes('.readthedocs.') ||\n url.includes('/documentation/')\n ) {\n return 'docs';\n }\n\n // Video platforms\n if (url.includes('youtube.com') || url.includes('vimeo.com') || url.includes('youtu.be')) {\n return 'video';\n }\n\n // Product pages (heuristic)\n const hasPrice = $('[class*=\"price\"], [data-price], [itemprop=\"price\"]').length > 0;\n const hasAddToCart = $('[class*=\"cart\"], [class*=\"buy\"], button:contains(\"Add\")').length > 0;\n if (hasPrice || hasAddToCart) {\n return 'product';\n }\n\n // Article detection (Open Graph type)\n const ogType = $('meta[property=\"og:type\"]').attr('content')?.toLowerCase();\n if (ogType === 'article' || ogType === 'blog' || ogType === 'news') {\n return 'article';\n }\n\n // Article heuristics\n const hasArticleTag = $('article').length > 0;\n const hasDateline = $('time[datetime], [class*=\"date\"], [class*=\"byline\"]').length > 0;\n if (hasArticleTag && hasDateline) {\n return 'article';\n }\n\n return 'unknown';\n }\n}\n","/**\n * Common tracking parameters to remove from URLs\n */\nconst TRACKING_PARAMS = [\n 'utm_source',\n 'utm_medium',\n 'utm_campaign',\n 'utm_term',\n 'utm_content',\n 'utm_id',\n 'ref',\n 'fbclid',\n 'gclid',\n 'gclsrc',\n 'dclid',\n 'msclkid',\n 'mc_cid',\n 'mc_eid',\n '_ga',\n '_gl',\n 'source',\n 'referrer',\n];\n\n/**\n * Validate if a string is a valid URL\n */\nexport function isValidUrl(url: string): boolean {\n try {\n const parsed = new URL(url);\n return ['http:', 'https:'].includes(parsed.protocol);\n } catch {\n return false;\n }\n}\n\n/**\n * Normalize URL by removing tracking params and trailing slashes\n */\nexport function normalizeUrl(url: string): string {\n try {\n const parsed = new URL(url);\n\n // Remove common tracking parameters\n for (const param of TRACKING_PARAMS) {\n parsed.searchParams.delete(param);\n }\n\n // Remove trailing slash for consistency (except for root)\n let normalized = parsed.toString();\n if (normalized.endsWith('/') && parsed.pathname !== '/') {\n normalized = normalized.slice(0, -1);\n }\n\n return normalized;\n } catch {\n return url;\n }\n}\n\n/**\n * Extract domain from URL (without www prefix)\n */\nexport function extractDomain(url: string): string {\n try {\n const parsed = new URL(url);\n return parsed.hostname.replace(/^www\\./, '');\n } catch {\n return '';\n }\n}\n\n/**\n * Resolve a potentially relative URL against a base URL\n */\nexport function resolveUrl(url: string | undefined | null, baseUrl: string): string | undefined {\n if (!url) return undefined;\n\n try {\n return new URL(url, baseUrl).href;\n } catch {\n return url;\n }\n}\n\n/**\n * Check if a URL is external relative to a domain\n */\nexport function isExternalUrl(url: string, baseDomain: string): boolean {\n try {\n const parsed = new URL(url);\n const urlDomain = 
parsed.hostname.replace(/^www\\./, '');\n return urlDomain !== baseDomain;\n } catch {\n return false;\n }\n}\n\n/**\n * Extract protocol from URL\n */\nexport function getProtocol(url: string): string {\n try {\n return new URL(url).protocol;\n } catch {\n return '';\n }\n}\n\n/**\n * Get the path portion of a URL\n */\nexport function getPath(url: string): string {\n try {\n return new URL(url).pathname;\n } catch {\n return '';\n }\n}\n\n/**\n * Check if URL matches a pattern (supports * wildcard)\n */\nexport function matchesUrlPattern(url: string, pattern: string): boolean {\n if (!pattern.includes('*')) {\n return url === pattern || url.startsWith(pattern);\n }\n\n const regexPattern = pattern.replace(/[.+?^${}()|[\\]\\\\]/g, '\\\\$&').replace(/\\*/g, '.*');\n\n return new RegExp(`^${regexPattern}`).test(url);\n}\n","import type { ExtractionContext, Extractor, ScrapedData } from '@/core/types.js';\nimport { resolveUrl } from '@/utils/url.js';\n\n/**\n * Extracts favicon URL from the page.\n * Checks multiple sources in order of preference.\n */\nexport class FaviconExtractor implements Extractor {\n readonly name = 'favicon';\n readonly priority = 70;\n\n async extract(context: ExtractionContext): Promise<Partial<ScrapedData>> {\n const { $, finalUrl } = context;\n\n // Check various favicon link relations in order of preference\n const faviconSelectors = [\n 'link[rel=\"icon\"][type=\"image/svg+xml\"]', // SVG (best quality)\n 'link[rel=\"icon\"][sizes=\"192x192\"]',\n 'link[rel=\"icon\"][sizes=\"180x180\"]',\n 'link[rel=\"icon\"][sizes=\"128x128\"]',\n 'link[rel=\"icon\"][sizes=\"96x96\"]',\n 'link[rel=\"apple-touch-icon\"][sizes=\"180x180\"]',\n 'link[rel=\"apple-touch-icon\"]',\n 'link[rel=\"icon\"][sizes=\"32x32\"]',\n 'link[rel=\"icon\"]',\n 'link[rel=\"shortcut icon\"]',\n ];\n\n for (const selector of faviconSelectors) {\n const href = $(selector).first().attr('href');\n if (href) {\n return {\n favicon: resolveUrl(finalUrl, href),\n };\n }\n }\n\n // Fallback: try /favicon.ico\n try {\n const url = new URL(finalUrl);\n return {\n favicon: `${url.protocol}//${url.host}/favicon.ico`,\n };\n } catch {\n return {};\n }\n }\n}\n","import type { ExtractionContext, Extractor, ScrapedData } from '@/core/types.js';\n\n/**\n * Extracts JSON-LD structured data from the page.\n * Also extracts additional metadata from structured data.\n */\nexport class JsonLdExtractor implements Extractor {\n readonly name = 'jsonld';\n readonly priority = 80; // After meta, before content\n\n async extract(context: ExtractionContext): Promise<Partial<ScrapedData>> {\n const { $ } = context;\n const jsonLd: Record<string, unknown>[] = [];\n\n // Find all JSON-LD scripts\n $('script[type=\"application/ld+json\"]').each((_, el) => {\n const content = $(el).html();\n if (!content) return;\n\n try {\n const parsed = JSON.parse(content);\n // Handle both single objects and arrays\n if (Array.isArray(parsed)) {\n jsonLd.push(...parsed);\n } else if (typeof parsed === 'object' && parsed !== null) {\n jsonLd.push(parsed);\n }\n } catch {\n // Invalid JSON-LD, skip\n }\n });\n\n if (jsonLd.length === 0) {\n return {};\n }\n\n // Extract useful metadata from JSON-LD\n const metadata = this.extractMetadata(jsonLd);\n\n return {\n jsonLd,\n ...metadata,\n };\n }\n\n private extractMetadata(jsonLd: Record<string, unknown>[]): Partial<ScrapedData> {\n const result: Partial<ScrapedData> = {};\n\n for (const item of jsonLd) {\n const type = this.getType(item);\n\n // Extract from Article/BlogPosting/NewsArticle\n if 
(type?.match(/Article|BlogPosting|NewsArticle|WebPage/i)) {\n result.title = result.title || this.getString(item, 'headline', 'name');\n result.description = result.description || this.getString(item, 'description');\n result.author = result.author || this.getAuthor(item);\n result.publishedAt = result.publishedAt || this.getString(item, 'datePublished');\n result.modifiedAt = result.modifiedAt || this.getString(item, 'dateModified');\n result.image = result.image || this.getImage(item);\n }\n\n // Extract from Organization\n if (type === 'Organization') {\n result.siteName = result.siteName || this.getString(item, 'name');\n }\n\n // Extract from Product\n if (type === 'Product') {\n result.title = result.title || this.getString(item, 'name');\n result.description = result.description || this.getString(item, 'description');\n result.image = result.image || this.getImage(item);\n }\n\n // Extract from SoftwareApplication\n if (type === 'SoftwareApplication') {\n result.title = result.title || this.getString(item, 'name');\n result.description = result.description || this.getString(item, 'description');\n }\n\n // Extract keywords from any type\n const keywords = this.getKeywords(item);\n if (keywords.length > 0) {\n result.keywords = [...(result.keywords || []), ...keywords];\n }\n }\n\n // Deduplicate keywords\n if (result.keywords) {\n result.keywords = [...new Set(result.keywords)];\n }\n\n return result;\n }\n\n private getType(item: Record<string, unknown>): string | undefined {\n const type = item['@type'];\n if (typeof type === 'string') return type;\n if (Array.isArray(type)) return type[0] as string;\n return undefined;\n }\n\n private getString(item: Record<string, unknown>, ...keys: string[]): string | undefined {\n for (const key of keys) {\n const value = item[key];\n if (typeof value === 'string') return value;\n if (typeof value === 'object' && value !== null && '@value' in value) {\n return String((value as { '@value': unknown })['@value']);\n }\n }\n return undefined;\n }\n\n private getAuthor(item: Record<string, unknown>): string | undefined {\n const author = item.author;\n if (typeof author === 'string') return author;\n // Check array BEFORE object since typeof [] === 'object'\n if (Array.isArray(author)) {\n const names = author\n .map((a) =>\n typeof a === 'string' ? 
a : this.getString(a as Record<string, unknown>, 'name')\n )\n .filter(Boolean);\n return names.join(', ') || undefined;\n }\n if (typeof author === 'object' && author !== null) {\n const authorObj = author as Record<string, unknown>;\n return this.getString(authorObj, 'name') || undefined;\n }\n return undefined;\n }\n\n private getImage(item: Record<string, unknown>): string | undefined {\n const image = item.image;\n if (typeof image === 'string') return image;\n // Check array BEFORE object since typeof [] === 'object'\n if (Array.isArray(image) && image.length > 0) {\n return this.getImage({ image: image[0] });\n }\n if (typeof image === 'object' && image !== null) {\n const imageObj = image as Record<string, unknown>;\n return this.getString(imageObj, 'url', 'contentUrl') || undefined;\n }\n return undefined;\n }\n\n private getKeywords(item: Record<string, unknown>): string[] {\n const keywords = item.keywords;\n if (typeof keywords === 'string') {\n return keywords\n .split(',')\n .map((k) => k.trim())\n .filter(Boolean);\n }\n if (Array.isArray(keywords)) {\n return keywords.filter((k): k is string => typeof k === 'string');\n }\n return [];\n }\n}\n","import type { ExtractedLink, ExtractionContext, Extractor, ScrapedData } from '@/core/types.js';\nimport { extractDomain, isExternalUrl, isValidUrl, resolveUrl } from '@/utils/url.js';\n\n/**\n * Extracts links from the page content.\n * Filters out navigation/footer links and focuses on content links.\n */\nexport class LinksExtractor implements Extractor {\n readonly name = 'links';\n readonly priority = 30; // Runs last\n\n async extract(context: ExtractionContext): Promise<Partial<ScrapedData>> {\n const { $, finalUrl } = context;\n const links: ExtractedLink[] = [];\n const seen = new Set<string>();\n\n // Extract links from main content area (article, main, or body)\n const contentArea = $('article, main, [role=\"main\"]').first();\n const container = contentArea.length > 0 ? 
contentArea : $('body');\n\n // Skip links in navigation, header, footer, sidebar\n const skipSelectors =\n 'nav, header, footer, aside, [role=\"navigation\"], [class*=\"nav\"], [class*=\"footer\"], [class*=\"header\"], [class*=\"sidebar\"], [class*=\"menu\"]';\n\n container.find('a[href]').each((_, el) => {\n const $el = $(el);\n\n // Skip if inside navigation/footer elements\n if ($el.closest(skipSelectors).length > 0) {\n return;\n }\n\n const href = $el.attr('href');\n if (!href) return;\n\n // Skip anchors, javascript, mailto, tel\n if (\n href.startsWith('#') ||\n href.startsWith('javascript:') ||\n href.startsWith('mailto:') ||\n href.startsWith('tel:')\n ) {\n return;\n }\n\n // Resolve relative URLs\n const resolvedUrl = resolveUrl(href, finalUrl);\n if (!resolvedUrl || !isValidUrl(resolvedUrl)) return;\n\n // Skip duplicates\n if (seen.has(resolvedUrl)) return;\n seen.add(resolvedUrl);\n\n // Get link text\n const text = $el.text().trim() || $el.attr('title') || $el.attr('aria-label') || '';\n\n // Skip empty or very short link text (likely icons)\n if (text.length < 2) return;\n\n const baseDomain = extractDomain(finalUrl);\n links.push({\n url: resolvedUrl,\n text: text.slice(0, 200), // Limit text length\n isExternal: isExternalUrl(resolvedUrl, baseDomain),\n });\n });\n\n return {\n links: links.slice(0, 100), // Limit to 100 links\n };\n }\n}\n","import type { ExtractionContext, Extractor, ScrapedData } from '@/core/types.js';\n\n/**\n * Extracts metadata from HTML meta tags, Open Graph, and Twitter cards.\n * Runs first to provide basic metadata for other extractors.\n */\nexport class MetaExtractor implements Extractor {\n readonly name = 'meta';\n readonly priority = 100; // High priority - runs first\n\n async extract(context: ExtractionContext): Promise<Partial<ScrapedData>> {\n const { $ } = context;\n\n // Helper to get meta content by name or property\n const getMeta = (nameOrProperty: string): string | undefined => {\n const value =\n $(`meta[name=\"${nameOrProperty}\"]`).attr('content') ||\n $(`meta[property=\"${nameOrProperty}\"]`).attr('content') ||\n $(`meta[itemprop=\"${nameOrProperty}\"]`).attr('content');\n return value?.trim() || undefined;\n };\n\n // Title (priority: og:title > twitter:title > <title>)\n const title =\n getMeta('og:title') || getMeta('twitter:title') || $('title').first().text().trim() || '';\n\n // Description (priority: og:description > twitter:description > meta description)\n const description =\n getMeta('og:description') || getMeta('twitter:description') || getMeta('description') || '';\n\n // Image (priority: og:image > twitter:image)\n const image =\n getMeta('og:image') || getMeta('twitter:image') || getMeta('twitter:image:src') || undefined;\n\n // Canonical URL\n const canonicalUrl =\n $('link[rel=\"canonical\"]').attr('href') || getMeta('og:url') || context.finalUrl;\n\n // Author\n const author =\n getMeta('author') ||\n getMeta('article:author') ||\n getMeta('twitter:creator') ||\n $('[rel=\"author\"]').first().text().trim() ||\n undefined;\n\n // Site name\n const siteName = getMeta('og:site_name') || getMeta('application-name') || undefined;\n\n // Published/Modified dates\n const publishedAt =\n getMeta('article:published_time') ||\n getMeta('datePublished') ||\n getMeta('date') ||\n $('time[datetime]').first().attr('datetime') ||\n undefined;\n\n const modifiedAt = getMeta('article:modified_time') || getMeta('dateModified') || undefined;\n\n // Language\n const language =\n $('html').attr('lang') || getMeta('og:locale') 
|| getMeta('language') || undefined;\n\n // Keywords\n const keywordsRaw = getMeta('keywords') || getMeta('article:tag') || '';\n const keywords = keywordsRaw\n ? keywordsRaw\n .split(',')\n .map((k) => k.trim())\n .filter(Boolean)\n : [];\n\n return {\n title,\n description,\n image,\n canonicalUrl,\n author,\n siteName,\n publishedAt,\n modifiedAt,\n language,\n keywords,\n };\n }\n}\n","import type { Extractor } from '@/core/types.js';\nimport { ContentExtractor } from './content.js';\nimport { FaviconExtractor } from './favicon.js';\nimport { JsonLdExtractor } from './jsonld.js';\nimport { LinksExtractor } from './links.js';\nimport { MetaExtractor } from './meta.js';\n\nexport { ContentExtractor } from './content.js';\nexport { FaviconExtractor } from './favicon.js';\nexport { JsonLdExtractor } from './jsonld.js';\nexport { LinksExtractor } from './links.js';\n// Export all extractors\nexport { MetaExtractor } from './meta.js';\n\n/**\n * Default extractors in priority order.\n * Higher priority runs first.\n */\nexport function createDefaultExtractors(): Extractor[] {\n return [\n new MetaExtractor(), // priority: 100\n new JsonLdExtractor(), // priority: 80\n new FaviconExtractor(), // priority: 70\n new ContentExtractor(), // priority: 50\n new LinksExtractor(), // priority: 30\n ];\n}\n\n/**\n * Sort extractors by priority (higher first).\n */\nexport function sortExtractors(extractors: Extractor[]): Extractor[] {\n return [...extractors].sort((a, b) => (b.priority ?? 0) - (a.priority ?? 0));\n}\n","/**\n * Fetcher interface - allows swapping fetch implementation\n * for Puppeteer, Playwright, or custom solutions\n */\nexport interface Fetcher {\n /**\n * Fetch HTML from a URL\n * @returns HTML content and final URL (after redirects)\n */\n fetch(url: string, options?: FetchOptions): Promise<FetchResult>;\n\n /** Fetcher name for logging */\n readonly name: string;\n}\n\n/**\n * Options for fetching\n */\nexport interface FetchOptions {\n /** Timeout in milliseconds (default: 10000) */\n timeout?: number;\n\n /** User agent string */\n userAgent?: string;\n\n /** Additional headers to send */\n headers?: Record<string, string>;\n}\n\n/**\n * Result from fetching a URL\n */\nexport interface FetchResult {\n /** Raw HTML content */\n html: string;\n\n /** Final URL after redirects */\n finalUrl: string;\n\n /** HTTP status code */\n statusCode: number;\n\n /** Content-Type header */\n contentType: string;\n\n /** Response headers (optional) */\n headers?: Record<string, string>;\n}\n\n/**\n * Default user agent string\n */\nexport const DEFAULT_USER_AGENT =\n 'Scrapex-Bot/2.0 (+https://github.com/developer-rakeshpaul/scrapex)';\n\n/**\n * Default timeout in milliseconds\n */\nexport const DEFAULT_TIMEOUT = 10000;\n","import { ScrapeError } from '@/core/errors.js';\nimport {\n DEFAULT_TIMEOUT,\n DEFAULT_USER_AGENT,\n type Fetcher,\n type FetchOptions,\n type FetchResult,\n} from './types.js';\n\n/**\n * Default fetcher using native fetch API.\n * Works in Node.js 18+ without polyfills.\n */\nexport class NativeFetcher implements Fetcher {\n readonly name = 'native-fetch';\n\n async fetch(url: string, options: FetchOptions = {}): Promise<FetchResult> {\n const { timeout = DEFAULT_TIMEOUT, userAgent = DEFAULT_USER_AGENT, headers = {} } = options;\n\n // Validate URL\n let parsedUrl: URL;\n try {\n parsedUrl = new URL(url);\n } catch {\n throw new ScrapeError(`Invalid URL: ${url}`, 'INVALID_URL');\n }\n\n // Only allow http/https\n if (!['http:', 'https:'].includes(parsedUrl.protocol)) 
{\n throw new ScrapeError(`Invalid protocol: ${parsedUrl.protocol}`, 'INVALID_URL');\n }\n\n // Setup abort controller for timeout\n const controller = new AbortController();\n const timeoutId = setTimeout(() => controller.abort(), timeout);\n\n try {\n const response = await fetch(url, {\n signal: controller.signal,\n headers: {\n 'User-Agent': userAgent,\n Accept: 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'en-US,en;q=0.5',\n ...headers,\n },\n redirect: 'follow',\n });\n\n clearTimeout(timeoutId);\n\n // Handle error status codes\n if (!response.ok) {\n if (response.status === 404) {\n throw new ScrapeError(`Page not found: ${url}`, 'NOT_FOUND', 404);\n }\n if (response.status === 403 || response.status === 401) {\n throw new ScrapeError(`Access blocked: ${url}`, 'BLOCKED', response.status);\n }\n if (response.status === 429) {\n throw new ScrapeError(`Rate limited: ${url}`, 'BLOCKED', 429);\n }\n throw new ScrapeError(\n `HTTP error ${response.status}: ${url}`,\n 'FETCH_FAILED',\n response.status\n );\n }\n\n const contentType = response.headers.get('content-type') || '';\n\n // Ensure we're getting HTML\n if (!contentType.includes('text/html') && !contentType.includes('application/xhtml')) {\n throw new ScrapeError(`Unexpected content type: ${contentType}`, 'PARSE_ERROR');\n }\n\n const html = await response.text();\n\n // Convert headers to plain object\n const responseHeaders: Record<string, string> = {};\n response.headers.forEach((value, key) => {\n responseHeaders[key] = value;\n });\n\n return {\n html,\n finalUrl: response.url,\n statusCode: response.status,\n contentType,\n headers: responseHeaders,\n };\n } catch (error) {\n clearTimeout(timeoutId);\n\n // Re-throw ScrapeErrors\n if (error instanceof ScrapeError) {\n throw error;\n }\n\n // Handle abort (timeout)\n if (error instanceof Error && error.name === 'AbortError') {\n throw new ScrapeError(`Request timed out after ${timeout}ms`, 'TIMEOUT');\n }\n\n // Handle other errors\n if (error instanceof Error) {\n throw new ScrapeError(`Fetch failed: ${error.message}`, 'FETCH_FAILED', undefined, error);\n }\n\n throw new ScrapeError('Unknown fetch error', 'FETCH_FAILED');\n }\n }\n}\n\n/**\n * Default fetcher instance\n */\nexport const defaultFetcher = new NativeFetcher();\n","import { DEFAULT_USER_AGENT } from './types.js';\n\n/**\n * Result of robots.txt check\n */\nexport interface RobotsCheckResult {\n allowed: boolean;\n reason?: string;\n}\n\n/**\n * Parsed robots.txt rules\n */\ninterface RobotsRules {\n disallow: string[];\n allow: string[];\n}\n\n/**\n * Check if URL is allowed by robots.txt\n *\n * @param url - The URL to check\n * @param userAgent - User agent to check rules for\n * @returns Whether the URL is allowed and optional reason\n */\nexport async function checkRobotsTxt(\n url: string,\n userAgent: string = DEFAULT_USER_AGENT\n): Promise<RobotsCheckResult> {\n try {\n const parsedUrl = new URL(url);\n const robotsUrl = `${parsedUrl.protocol}//${parsedUrl.host}/robots.txt`;\n\n // Fetch robots.txt with short timeout\n const response = await fetch(robotsUrl, {\n headers: { 'User-Agent': userAgent },\n signal: AbortSignal.timeout(5000),\n });\n\n // No robots.txt = allowed\n if (!response.ok) {\n return { allowed: true };\n }\n\n const robotsTxt = await response.text();\n const rules = parseRobotsTxt(robotsTxt, userAgent);\n\n const path = parsedUrl.pathname + parsedUrl.search;\n const allowed = isPathAllowed(rules, path);\n\n return {\n allowed,\n reason: allowed 
? undefined : 'Blocked by robots.txt',\n };\n } catch {\n // On error (timeout, network issue), assume allowed\n return { allowed: true };\n }\n}\n\n/**\n * Parse robots.txt content for a specific user agent\n */\nfunction parseRobotsTxt(content: string, userAgent: string): RobotsRules {\n const rules: RobotsRules = { disallow: [], allow: [] };\n const lines = content.split('\\n');\n\n // Extract the bot name from user agent (first word or before /)\n const botName = userAgent.split(/[\\s/]/)[0]?.toLowerCase() || '';\n\n let currentAgent = '';\n let isMatchingAgent = false;\n let hasFoundSpecificAgent = false;\n\n for (const rawLine of lines) {\n const line = rawLine.trim();\n\n // Skip empty lines and comments\n if (!line || line.startsWith('#')) {\n continue;\n }\n\n // Parse directive\n const colonIndex = line.indexOf(':');\n if (colonIndex === -1) continue;\n\n const directive = line.slice(0, colonIndex).trim().toLowerCase();\n const value = line.slice(colonIndex + 1).trim();\n\n if (directive === 'user-agent') {\n currentAgent = value.toLowerCase();\n // Check if this agent applies to us\n isMatchingAgent =\n currentAgent === '*' || currentAgent === botName || botName.includes(currentAgent);\n\n // Prefer specific agent rules over wildcard\n if (currentAgent !== '*' && isMatchingAgent) {\n hasFoundSpecificAgent = true;\n // Reset rules if we found a more specific match\n rules.disallow = [];\n rules.allow = [];\n }\n } else if (isMatchingAgent && (!hasFoundSpecificAgent || currentAgent !== '*')) {\n if (directive === 'disallow' && value) {\n rules.disallow.push(value);\n } else if (directive === 'allow' && value) {\n rules.allow.push(value);\n }\n }\n }\n\n return rules;\n}\n\n/**\n * Check if a path is allowed based on robots.txt rules\n */\nfunction isPathAllowed(rules: RobotsRules, path: string): boolean {\n // No rules = allowed\n if (rules.disallow.length === 0 && rules.allow.length === 0) {\n return true;\n }\n\n // Check allow rules first (they take precedence for more specific matches)\n for (const pattern of rules.allow) {\n if (matchesPattern(path, pattern)) {\n return true;\n }\n }\n\n // Check disallow rules\n for (const pattern of rules.disallow) {\n if (matchesPattern(path, pattern)) {\n return false;\n }\n }\n\n // Default: allowed\n return true;\n}\n\n/**\n * Check if a path matches a robots.txt pattern\n */\nfunction matchesPattern(path: string, pattern: string): boolean {\n // Empty pattern matches nothing\n if (!pattern) return false;\n\n // Handle wildcard at end\n if (pattern.endsWith('*')) {\n return path.startsWith(pattern.slice(0, -1));\n }\n\n // Handle $ anchor\n if (pattern.endsWith('$')) {\n return path === pattern.slice(0, -1);\n }\n\n // Handle wildcards in middle\n if (pattern.includes('*')) {\n const regex = new RegExp(`^${pattern.replace(/\\*/g, '.*').replace(/\\?/g, '\\\\?')}.*`);\n return regex.test(path);\n }\n\n // Simple prefix match\n return path.startsWith(pattern);\n}\n","import { createDefaultExtractors, sortExtractors } from '@/extractors/index.js';\nimport { checkRobotsTxt, defaultFetcher } from '@/fetchers/index.js';\nimport { enhance, extract } from '@/llm/enhancer.js';\nimport { extractDomain, isValidUrl, normalizeUrl } from '@/utils/url.js';\nimport { createExtractionContext, mergeResults, preloadJsdom } from './context.js';\nimport { ScrapeError } from './errors.js';\nimport type { Extractor, ScrapedData, ScrapeOptions } from './types.js';\n\n/**\n * Scrape a URL and extract metadata and content.\n *\n * @param url - The URL to scrape\n 
* @param options - Scraping options\n * @returns Scraped data with metadata and content\n *\n * @example\n * ```ts\n * const result = await scrape('https://example.com/article');\n * console.log(result.title, result.content);\n * ```\n */\nexport async function scrape(url: string, options: ScrapeOptions = {}): Promise<ScrapedData> {\n const startTime = Date.now();\n\n // Validate URL\n if (!isValidUrl(url)) {\n throw new ScrapeError('Invalid URL provided', 'INVALID_URL');\n }\n\n // Normalize URL\n const normalizedUrl = normalizeUrl(url);\n\n // Check robots.txt if requested\n if (options.respectRobots) {\n const robotsResult = await checkRobotsTxt(normalizedUrl, options.userAgent);\n if (!robotsResult.allowed) {\n throw new ScrapeError(\n `URL blocked by robots.txt: ${robotsResult.reason || 'disallowed'}`,\n 'ROBOTS_BLOCKED'\n );\n }\n }\n\n // Fetch the page\n const fetcher = options.fetcher ?? defaultFetcher;\n const fetchResult = await fetcher.fetch(normalizedUrl, {\n timeout: options.timeout,\n userAgent: options.userAgent,\n });\n\n // Preload JSDOM for content extraction (async dynamic import)\n await preloadJsdom();\n\n // Create extraction context\n let context = createExtractionContext(\n normalizedUrl,\n fetchResult.finalUrl,\n fetchResult.html,\n options\n );\n\n // Prepare extractors\n let extractors: Extractor[];\n if (options.replaceDefaultExtractors) {\n extractors = options.extractors ?? [];\n } else {\n const defaults = createDefaultExtractors();\n extractors = options.extractors ? [...defaults, ...options.extractors] : defaults;\n }\n\n // Sort by priority and run extractors\n extractors = sortExtractors(extractors);\n\n for (const extractor of extractors) {\n try {\n const extracted = await extractor.extract(context);\n context = mergeResults(context, extracted);\n } catch (error) {\n // Log error but continue with other extractors\n console.error(`Extractor \"${extractor.name}\" failed:`, error);\n // Store error in results\n context = mergeResults(context, {\n error: context.results.error\n ? `${context.results.error}; ${extractor.name}: ${error instanceof Error ? error.message : String(error)}`\n : `${extractor.name}: ${error instanceof Error ? 
error.message : String(error)}`,\n });\n }\n }\n\n // Build intermediate result for LLM enhancement\n const intermediateResult: ScrapedData = {\n url: normalizedUrl,\n canonicalUrl: context.results.canonicalUrl || fetchResult.finalUrl,\n domain: extractDomain(fetchResult.finalUrl),\n title: context.results.title || '',\n description: context.results.description || '',\n image: context.results.image,\n favicon: context.results.favicon,\n content: context.results.content || '',\n textContent: context.results.textContent || '',\n excerpt: context.results.excerpt || '',\n wordCount: context.results.wordCount || 0,\n author: context.results.author,\n publishedAt: context.results.publishedAt,\n modifiedAt: context.results.modifiedAt,\n siteName: context.results.siteName,\n language: context.results.language,\n contentType: context.results.contentType || 'unknown',\n keywords: context.results.keywords || [],\n jsonLd: context.results.jsonLd,\n links: context.results.links,\n custom: context.results.custom,\n scrapedAt: new Date().toISOString(),\n scrapeTimeMs: 0,\n error: context.results.error,\n };\n\n // LLM Enhancement\n if (options.llm && options.enhance && options.enhance.length > 0) {\n try {\n const enhanced = await enhance(intermediateResult, options.llm, options.enhance);\n Object.assign(intermediateResult, enhanced);\n } catch (error) {\n console.error('LLM enhancement failed:', error);\n intermediateResult.error = intermediateResult.error\n ? `${intermediateResult.error}; LLM: ${error instanceof Error ? error.message : String(error)}`\n : `LLM: ${error instanceof Error ? error.message : String(error)}`;\n }\n }\n\n // LLM Extraction\n if (options.llm && options.extract) {\n try {\n const extracted = await extract(intermediateResult, options.llm, options.extract);\n intermediateResult.extracted = extracted as Record<string, unknown>;\n } catch (error) {\n console.error('LLM extraction failed:', error);\n intermediateResult.error = intermediateResult.error\n ? `${intermediateResult.error}; LLM extraction: ${error instanceof Error ? error.message : String(error)}`\n : `LLM extraction: ${error instanceof Error ? error.message : String(error)}`;\n }\n }\n\n // Build final result with timing\n const scrapeTimeMs = Date.now() - startTime;\n\n const result: ScrapedData = {\n ...intermediateResult,\n scrapeTimeMs,\n };\n\n return result;\n}\n\n/**\n * Scrape from raw HTML string (no fetch).\n *\n * @param html - The HTML content\n * @param url - The URL (for resolving relative links)\n * @param options - Scraping options\n * @returns Scraped data with metadata and content\n *\n * @example\n * ```ts\n * const html = await fetchSomehow('https://example.com');\n * const result = await scrapeHtml(html, 'https://example.com');\n * ```\n */\nexport async function scrapeHtml(\n html: string,\n url: string,\n options: ScrapeOptions = {}\n): Promise<ScrapedData> {\n const startTime = Date.now();\n\n // Validate URL\n if (!isValidUrl(url)) {\n throw new ScrapeError('Invalid URL provided', 'INVALID_URL');\n }\n\n const normalizedUrl = normalizeUrl(url);\n\n // Preload JSDOM for content extraction (async dynamic import)\n await preloadJsdom();\n\n // Create extraction context\n let context = createExtractionContext(normalizedUrl, normalizedUrl, html, options);\n\n // Prepare extractors\n let extractors: Extractor[];\n if (options.replaceDefaultExtractors) {\n extractors = options.extractors ?? [];\n } else {\n const defaults = createDefaultExtractors();\n extractors = options.extractors ? 
[...defaults, ...options.extractors] : defaults;\n }\n\n // Sort by priority and run extractors\n extractors = sortExtractors(extractors);\n\n for (const extractor of extractors) {\n try {\n const extracted = await extractor.extract(context);\n context = mergeResults(context, extracted);\n } catch (error) {\n console.error(`Extractor \"${extractor.name}\" failed:`, error);\n context = mergeResults(context, {\n error: context.results.error\n ? `${context.results.error}; ${extractor.name}: ${error instanceof Error ? error.message : String(error)}`\n : `${extractor.name}: ${error instanceof Error ? error.message : String(error)}`,\n });\n }\n }\n\n const scrapeTimeMs = Date.now() - startTime;\n const domain = extractDomain(normalizedUrl);\n\n const result: ScrapedData = {\n url: normalizedUrl,\n canonicalUrl: context.results.canonicalUrl || normalizedUrl,\n domain,\n title: context.results.title || '',\n description: context.results.description || '',\n image: context.results.image,\n favicon: context.results.favicon,\n content: context.results.content || '',\n textContent: context.results.textContent || '',\n excerpt: context.results.excerpt || '',\n wordCount: context.results.wordCount || 0,\n author: context.results.author,\n publishedAt: context.results.publishedAt,\n modifiedAt: context.results.modifiedAt,\n siteName: context.results.siteName,\n language: context.results.language,\n contentType: context.results.contentType || 'unknown',\n keywords: context.results.keywords || [],\n jsonLd: context.results.jsonLd,\n links: context.results.links,\n summary: context.results.summary,\n suggestedTags: context.results.suggestedTags,\n entities: context.results.entities,\n extracted: context.results.extracted,\n custom: context.results.custom,\n scrapedAt: new Date().toISOString(),\n scrapeTimeMs,\n error: context.results.error,\n };\n\n return 
result;\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAKA,IAAIA,cAA6C;;;;AAKjD,eAAsB,eAA8B;AAClD,KAAI,CAAC,YACH,eAAc,MAAM,OAAO;;;;;;;;AAU/B,SAAgB,wBACd,KACA,UACA,MACA,SACmB;CAEnB,IAAIC,WAA4B;AAKhC,QAAO;EACL;EACA;EACA;EACA,GANoB,QAAQ,KAAK,KAAK;EAOtC;EACA,SAAS,EAAE;EAEX,cAAwB;AAEtB,OAAI,CAAC,UAAU;AACb,QAAI,CAAC,YACH,OAAM,IAAI,MAAM,uEAAuE;AAGzF,eADY,IAAI,YAAY,MAAM,MAAM,EAAE,KAAK,UAAU,CAAC,CAC3C,OAAO;;AAExB,UAAO;;EAEV;;;;;AAMH,SAAgB,aACd,SACA,WACmB;AACnB,QAAO;EACL,GAAG;EACH,SAAS;GACP,GAAG,QAAQ;GACX,GAAG;GAEH,QACE,UAAU,UAAU,QAAQ,QAAQ,SAChC;IAAE,GAAG,QAAQ,QAAQ;IAAQ,GAAG,UAAU;IAAQ,GAClD;GACP;EACF;;;;;ACrEH,MAAMC,aAAW,IAAIC,iBAAgB;CACnC,cAAc;CACd,gBAAgB;CAChB,kBAAkB;CAClB,aAAa;CACb,iBAAiB;CACjB,WAAW;CACZ,CAAC;AAGFD,WAAS,OAAO;CAAC;CAAU;CAAS;CAAY;CAAU;CAAO;CAAS,CAAC;;;;;AAM3E,IAAa,mBAAb,MAAmD;CACjD,AAAS,OAAO;CAChB,AAAS,WAAW;CAEpB,MAAM,QAAQ,SAA2D;EACvE,MAAM,EAAE,YAAY;AAGpB,MAAI,QAAQ,mBAAmB,MAC7B,QAAO,EAAE;EASX,MAAM,UADS,IAAIE,kCAJF,QAAQ,aAAa,CACX,UAAU,KAAK,CAGD,CAClB,OAAO;AAE9B,MAAI,CAAC,WAAW,CAAC,QAAQ,QAEvB,QAAO,KAAK,gBAAgB,QAAQ;EAItC,IAAI,UAAUF,WAAS,SAAS,QAAQ,QAAQ;EAGhD,MAAM,YAAY,QAAQ,oBAAoB;AAC9C,MAAI,QAAQ,SAAS,UACnB,WAAU,GAAG,QAAQ,MAAM,GAAG,UAAU,CAAC;EAI3C,MAAM,eAAe,QAAQ,eAAe,IAAI,MAAM;EAGtD,MAAM,UAAU,KAAK,cAAc,YAAY;EAG/C,MAAM,YAAY,YAAY,MAAM,MAAM,CAAC,OAAO,QAAQ,CAAC;EAG3D,MAAM,cAAc,KAAK,kBAAkB,QAAQ;AAEnD,SAAO;GACL;GACA;GACA,SAAS,QAAQ,WAAW;GAC5B;GACA;GAEA,OAAO,QAAQ,SAAS;GACxB,QAAQ,QAAQ,UAAU;GAC1B,UAAU,QAAQ,YAAY;GAC/B;;CAGH,AAAQ,gBAAgB,SAAkD;EACxE,MAAM,EAAE,MAAM;EAGd,MAAM,WAAW,EAAE,OAAO,CAAC,MAAM,IAAI;EACrC,MAAM,UAAUA,WAAS,SAAS,SAAS;EAC3C,MAAM,cAAc,EAAE,OAAO,CAAC,MAAM,CAAC,QAAQ,QAAQ,IAAI,CAAC,MAAM;AAEhE,SAAO;GACL,SAAS,QAAQ,MAAM,GAAG,QAAQ,QAAQ,oBAAoB,IAAM;GACpE;GACA,SAAS,KAAK,cAAc,YAAY;GACxC,WAAW,YAAY,MAAM,MAAM,CAAC,OAAO,QAAQ,CAAC;GACpD,aAAa;GACd;;CAGH,AAAQ,cAAc,MAAc,YAAY,KAAa;AAC3D,MAAI,KAAK,UAAU,UACjB,QAAO;EAGT,MAAM,YAAY,KAAK,MAAM,GAAG,UAAU;EAC1C,MAAM,YAAY,UAAU,YAAY,IAAI;AAC5C,SAAO,GAAG,YAAY,IAAI,UAAU,MAAM,GAAG,UAAU,GAAG,UAAU;;CAGtE,AAAQ,kBAAkB,SAAyC;EACjE,MAAM,EAAE,GAAG,aAAa;EACxB,MAAM,MAAM,SAAS,aAAa;AAGlC,MAAI,IAAI,SAAS,aAAa,IAAI,CAAC,IAAI,SAAS,SAAS,IAAI,CAAC,IAAI,SAAS,WAAW,EAEpF;OADiB,EAAE,6BAA2B,CAAC,KAAK,UAAU,KAC7C,YAAY,IAAI,MAAM,gCAAgC,CACrE,QAAO;;AAKX,MAAI,IAAI,SAAS,qBAAqB,CACpC,QAAO;AAIT,MAAI,IAAI,SAAS,oBAAoB,CACnC,QAAO;AAIT,MACE,IAAI,SAAS,SAAS,IACtB,IAAI,SAAS,gBAAgB,IAC7B,IAAI,SAAS,kBAAkB,CAE/B,QAAO;AAIT,MAAI,IAAI,SAAS,cAAc,IAAI,IAAI,SAAS,YAAY,IAAI,IAAI,SAAS,WAAW,CACtF,QAAO;EAIT,MAAM,WAAW,EAAE,yDAAqD,CAAC,SAAS;EAClF,MAAM,eAAe,EAAE,gEAA0D,CAAC,SAAS;AAC3F,MAAI,YAAY,aACd,QAAO;EAIT,MAAM,SAAS,EAAE,6BAA2B,CAAC,KAAK,UAAU,EAAE,aAAa;AAC3E,MAAI,WAAW,aAAa,WAAW,UAAU,WAAW,OAC1D,QAAO;EAIT,MAAM,gBAAgB,EAAE,UAAU,CAAC,SAAS;EAC5C,MAAM,cAAc,EAAE,yDAAqD,CAAC,SAAS;AACrF,MAAI,iBAAiB,YACnB,QAAO;AAGT,SAAO;;;;;;;;;AChKX,MAAM,kBAAkB;CACtB;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACD;;;;AAKD,SAAgB,WAAW,KAAsB;AAC/C,KAAI;EACF,MAAM,SAAS,IAAI,IAAI,IAAI;AAC3B,SAAO,CAAC,SAAS,SAAS,CAAC,SAAS,OAAO,SAAS;SAC9C;AACN,SAAO;;;;;;AAOX,SAAgB,aAAa,KAAqB;AAChD,KAAI;EACF,MAAM,SAAS,IAAI,IAAI,IAAI;AAG3B,OAAK,MAAM,SAAS,gBAClB,QAAO,aAAa,OAAO,MAAM;EAInC,IAAI,aAAa,OAAO,UAAU;AAClC,MAAI,WAAW,SAAS,IAAI,IAAI,OAAO,aAAa,IAClD,cAAa,WAAW,MAAM,GAAG,GAAG;AAGtC,SAAO;SACD;AACN,SAAO;;;;;;AAOX,SAAgB,cAAc,KAAqB;AACjD,KAAI;AAEF,SADe,IAAI,IAAI,IAAI,CACb,SAAS,QAAQ,UAAU,GAAG;SACtC;AACN,SAAO;;;;;;AAOX,SAAgB,WAAW,KAAgC,SAAqC;AAC9F,KAAI,CAAC,IAAK,QAAO;AAEjB,KAAI;AACF,SAAO,IAAI,IAAI,KAAK,QAAQ,CAAC;SACvB;AACN,SAAO;;;;;;AAOX,SAAgB,cAAc,KAAa,YAA6B;AACtE,KAAI;AAGF,SAFe,IAAI,IAAI,IAAI,CACF,SAAS,QAAQ,UAAU,GAAG,KAClC;
SACf;AACN,SAAO;;;;;;AAOX,SAAgB,YAAY,KAAqB;AAC/C,KAAI;AACF,SAAO,IAAI,IAAI,IAAI,CAAC;SACd;AACN,SAAO;;;;;;AAOX,SAAgB,QAAQ,KAAqB;AAC3C,KAAI;AACF,SAAO,IAAI,IAAI,IAAI,CAAC;SACd;AACN,SAAO;;;;;;AAOX,SAAgB,kBAAkB,KAAa,SAA0B;AACvE,KAAI,CAAC,QAAQ,SAAS,IAAI,CACxB,QAAO,QAAQ,WAAW,IAAI,WAAW,QAAQ;CAGnD,MAAM,eAAe,QAAQ,QAAQ,sBAAsB,OAAO,CAAC,QAAQ,OAAO,KAAK;AAEvF,yBAAO,IAAI,OAAO,IAAI,eAAe,EAAC,KAAK,IAAI;;;;;;;;;AC3HjD,IAAa,mBAAb,MAAmD;CACjD,AAAS,OAAO;CAChB,AAAS,WAAW;CAEpB,MAAM,QAAQ,SAA2D;EACvE,MAAM,EAAE,GAAG,aAAa;AAgBxB,OAAK,MAAM,YAbc;GACvB;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACD,EAEwC;GACvC,MAAM,OAAO,EAAE,SAAS,CAAC,OAAO,CAAC,KAAK,OAAO;AAC7C,OAAI,KACF,QAAO,EACL,SAAS,WAAW,UAAU,KAAK,EACpC;;AAKL,MAAI;GACF,MAAM,MAAM,IAAI,IAAI,SAAS;AAC7B,UAAO,EACL,SAAS,GAAG,IAAI,SAAS,IAAI,IAAI,KAAK,eACvC;UACK;AACN,UAAO,EAAE;;;;;;;;;;;ACtCf,IAAa,kBAAb,MAAkD;CAChD,AAAS,OAAO;CAChB,AAAS,WAAW;CAEpB,MAAM,QAAQ,SAA2D;EACvE,MAAM,EAAE,MAAM;EACd,MAAMG,SAAoC,EAAE;AAG5C,IAAE,uCAAqC,CAAC,MAAM,GAAG,OAAO;GACtD,MAAM,UAAU,EAAE,GAAG,CAAC,MAAM;AAC5B,OAAI,CAAC,QAAS;AAEd,OAAI;IACF,MAAM,SAAS,KAAK,MAAM,QAAQ;AAElC,QAAI,MAAM,QAAQ,OAAO,CACvB,QAAO,KAAK,GAAG,OAAO;aACb,OAAO,WAAW,YAAY,WAAW,KAClD,QAAO,KAAK,OAAO;WAEf;IAGR;AAEF,MAAI,OAAO,WAAW,EACpB,QAAO,EAAE;AAMX,SAAO;GACL;GACA,GAJe,KAAK,gBAAgB,OAAO;GAK5C;;CAGH,AAAQ,gBAAgB,QAAyD;EAC/E,MAAMC,SAA+B,EAAE;AAEvC,OAAK,MAAM,QAAQ,QAAQ;GACzB,MAAM,OAAO,KAAK,QAAQ,KAAK;AAG/B,OAAI,MAAM,MAAM,2CAA2C,EAAE;AAC3D,WAAO,QAAQ,OAAO,SAAS,KAAK,UAAU,MAAM,YAAY,OAAO;AACvE,WAAO,cAAc,OAAO,eAAe,KAAK,UAAU,MAAM,cAAc;AAC9E,WAAO,SAAS,OAAO,UAAU,KAAK,UAAU,KAAK;AACrD,WAAO,cAAc,OAAO,eAAe,KAAK,UAAU,MAAM,gBAAgB;AAChF,WAAO,aAAa,OAAO,cAAc,KAAK,UAAU,MAAM,eAAe;AAC7E,WAAO,QAAQ,OAAO,SAAS,KAAK,SAAS,KAAK;;AAIpD,OAAI,SAAS,eACX,QAAO,WAAW,OAAO,YAAY,KAAK,UAAU,MAAM,OAAO;AAInE,OAAI,SAAS,WAAW;AACtB,WAAO,QAAQ,OAAO,SAAS,KAAK,UAAU,MAAM,OAAO;AAC3D,WAAO,cAAc,OAAO,eAAe,KAAK,UAAU,MAAM,cAAc;AAC9E,WAAO,QAAQ,OAAO,SAAS,KAAK,SAAS,KAAK;;AAIpD,OAAI,SAAS,uBAAuB;AAClC,WAAO,QAAQ,OAAO,SAAS,KAAK,UAAU,MAAM,OAAO;AAC3D,WAAO,cAAc,OAAO,eAAe,KAAK,UAAU,MAAM,cAAc;;GAIhF,MAAM,WAAW,KAAK,YAAY,KAAK;AACvC,OAAI,SAAS,SAAS,EACpB,QAAO,WAAW,CAAC,GAAI,OAAO,YAAY,EAAE,EAAG,GAAG,SAAS;;AAK/D,MAAI,OAAO,SACT,QAAO,WAAW,CAAC,GAAG,IAAI,IAAI,OAAO,SAAS,CAAC;AAGjD,SAAO;;CAGT,AAAQ,QAAQ,MAAmD;EACjE,MAAM,OAAO,KAAK;AAClB,MAAI,OAAO,SAAS,SAAU,QAAO;AACrC,MAAI,MAAM,QAAQ,KAAK,CAAE,QAAO,KAAK;;CAIvC,AAAQ,UAAU,MAA+B,GAAG,MAAoC;AACtF,OAAK,MAAM,OAAO,MAAM;GACtB,MAAM,QAAQ,KAAK;AACnB,OAAI,OAAO,UAAU,SAAU,QAAO;AACtC,OAAI,OAAO,UAAU,YAAY,UAAU,QAAQ,YAAY,MAC7D,QAAO,OAAQ,MAAgC,UAAU;;;CAM/D,AAAQ,UAAU,MAAmD;EACnE,MAAM,SAAS,KAAK;AACpB,MAAI,OAAO,WAAW,SAAU,QAAO;AAEvC,MAAI,MAAM,QAAQ,OAAO,CAMvB,QALc,OACX,KAAK,MACJ,OAAO,MAAM,WAAW,IAAI,KAAK,UAAU,GAA8B,OAAO,CACjF,CACA,OAAO,QAAQ,CACL,KAAK,KAAK,IAAI;AAE7B,MAAI,OAAO,WAAW,YAAY,WAAW,MAAM;GACjD,MAAM,YAAY;AAClB,UAAO,KAAK,UAAU,WAAW,OAAO,IAAI;;;CAKhD,AAAQ,SAAS,MAAmD;EAClE,MAAM,QAAQ,KAAK;AACnB,MAAI,OAAO,UAAU,SAAU,QAAO;AAEtC,MAAI,MAAM,QAAQ,MAAM,IAAI,MAAM,SAAS,EACzC,QAAO,KAAK,SAAS,EAAE,OAAO,MAAM,IAAI,CAAC;AAE3C,MAAI,OAAO,UAAU,YAAY,UAAU,MAAM;GAC/C,MAAM,WAAW;AACjB,UAAO,KAAK,UAAU,UAAU,OAAO,aAAa,IAAI;;;CAK5D,AAAQ,YAAY,MAAyC;EAC3D,MAAM,WAAW,KAAK;AACtB,MAAI,OAAO,aAAa,SACtB,QAAO,SACJ,MAAM,IAAI,CACV,KAAK,MAAM,EAAE,MAAM,CAAC,CACpB,OAAO,QAAQ;AAEpB,MAAI,MAAM,QAAQ,SAAS,CACzB,QAAO,SAAS,QAAQ,MAAmB,OAAO,MAAM,SAAS;AAEnE,SAAO,EAAE;;;;;;;;;;ACrJb,IAAa,iBAAb,MAAiD;CAC/C,AAAS,OAAO;CAChB,AAAS,WAAW;CAEpB,MAAM,QAAQ,SAA2D;EACvE,MAAM,EAAE,GAAG,aAAa;EACxB,MAAMC,QAAyB,EAAE;EACjC,MAAM,uBAAO,IAAI,KAAa;EAG9B,MAAM,cAAc,EAAE,iCAA+B,CAAC,OAAO;EAC7D,MAAM,YAAY,YAAY,SAAS,IAAI,cAAc,EAAE,OAAO;EAGlE,MAAM,gBACJ;AAEF,YAAU,KAAK,UAAU,CAAC,MAAM,G
AAG,OAAO;GACxC,MAAM,MAAM,EAAE,GAAG;AAGjB,OAAI,IAAI,QAAQ,cAAc,CAAC,SAAS,EACtC;GAGF,MAAM,OAAO,IAAI,KAAK,OAAO;AAC7B,OAAI,CAAC,KAAM;AAGX,OACE,KAAK,WAAW,IAAI,IACpB,KAAK,WAAW,cAAc,IAC9B,KAAK,WAAW,UAAU,IAC1B,KAAK,WAAW,OAAO,CAEvB;GAIF,MAAM,cAAc,WAAW,MAAM,SAAS;AAC9C,OAAI,CAAC,eAAe,CAAC,WAAW,YAAY,CAAE;AAG9C,OAAI,KAAK,IAAI,YAAY,CAAE;AAC3B,QAAK,IAAI,YAAY;GAGrB,MAAM,OAAO,IAAI,MAAM,CAAC,MAAM,IAAI,IAAI,KAAK,QAAQ,IAAI,IAAI,KAAK,aAAa,IAAI;AAGjF,OAAI,KAAK,SAAS,EAAG;GAErB,MAAM,aAAa,cAAc,SAAS;AAC1C,SAAM,KAAK;IACT,KAAK;IACL,MAAM,KAAK,MAAM,GAAG,IAAI;IACxB,YAAY,cAAc,aAAa,WAAW;IACnD,CAAC;IACF;AAEF,SAAO,EACL,OAAO,MAAM,MAAM,GAAG,IAAI,EAC3B;;;;;;;;;;AC/DL,IAAa,gBAAb,MAAgD;CAC9C,AAAS,OAAO;CAChB,AAAS,WAAW;CAEpB,MAAM,QAAQ,SAA2D;EACvE,MAAM,EAAE,MAAM;EAGd,MAAM,WAAW,mBAA+C;AAK9D,WAHE,EAAE,cAAc,eAAe,IAAI,CAAC,KAAK,UAAU,IACnD,EAAE,kBAAkB,eAAe,IAAI,CAAC,KAAK,UAAU,IACvD,EAAE,kBAAkB,eAAe,IAAI,CAAC,KAAK,UAAU,GAC3C,MAAM,IAAI;;EAI1B,MAAM,QACJ,QAAQ,WAAW,IAAI,QAAQ,gBAAgB,IAAI,EAAE,QAAQ,CAAC,OAAO,CAAC,MAAM,CAAC,MAAM,IAAI;EAGzF,MAAM,cACJ,QAAQ,iBAAiB,IAAI,QAAQ,sBAAsB,IAAI,QAAQ,cAAc,IAAI;EAG3F,MAAM,QACJ,QAAQ,WAAW,IAAI,QAAQ,gBAAgB,IAAI,QAAQ,oBAAoB,IAAI;EAGrF,MAAM,eACJ,EAAE,0BAAwB,CAAC,KAAK,OAAO,IAAI,QAAQ,SAAS,IAAI,QAAQ;EAG1E,MAAM,SACJ,QAAQ,SAAS,IACjB,QAAQ,iBAAiB,IACzB,QAAQ,kBAAkB,IAC1B,EAAE,mBAAiB,CAAC,OAAO,CAAC,MAAM,CAAC,MAAM,IACzC;EAGF,MAAM,WAAW,QAAQ,eAAe,IAAI,QAAQ,mBAAmB,IAAI;EAG3E,MAAM,cACJ,QAAQ,yBAAyB,IACjC,QAAQ,gBAAgB,IACxB,QAAQ,OAAO,IACf,EAAE,iBAAiB,CAAC,OAAO,CAAC,KAAK,WAAW,IAC5C;EAEF,MAAM,aAAa,QAAQ,wBAAwB,IAAI,QAAQ,eAAe,IAAI;EAGlF,MAAM,WACJ,EAAE,OAAO,CAAC,KAAK,OAAO,IAAI,QAAQ,YAAY,IAAI,QAAQ,WAAW,IAAI;EAG3E,MAAM,cAAc,QAAQ,WAAW,IAAI,QAAQ,cAAc,IAAI;AAQrE,SAAO;GACL;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA,UAjBe,cACb,YACG,MAAM,IAAI,CACV,KAAK,MAAM,EAAE,MAAM,CAAC,CACpB,OAAO,QAAQ,GAClB,EAAE;GAaL;;;;;;;;;;ACjEL,SAAgB,0BAAuC;AACrD,QAAO;EACL,IAAI,eAAe;EACnB,IAAI,iBAAiB;EACrB,IAAI,kBAAkB;EACtB,IAAI,kBAAkB;EACtB,IAAI,gBAAgB;EACrB;;;;;AAMH,SAAgB,eAAe,YAAsC;AACnE,QAAO,CAAC,GAAG,WAAW,CAAC,MAAM,GAAG,OAAO,EAAE,YAAY,MAAM,EAAE,YAAY,GAAG;;;;;;;;ACoB9E,MAAa,qBACX;;;;AAKF,MAAa,kBAAkB;;;;;;;;AC7C/B,IAAa,gBAAb,MAA8C;CAC5C,AAAS,OAAO;CAEhB,MAAM,MAAM,KAAa,UAAwB,EAAE,EAAwB;EACzE,MAAM,EAAE,UAAU,iBAAiB,YAAY,oBAAoB,UAAU,EAAE,KAAK;EAGpF,IAAIC;AACJ,MAAI;AACF,eAAY,IAAI,IAAI,IAAI;UAClB;AACN,SAAM,IAAIC,6BAAY,gBAAgB,OAAO,cAAc;;AAI7D,MAAI,CAAC,CAAC,SAAS,SAAS,CAAC,SAAS,UAAU,SAAS,CACnD,OAAM,IAAIA,6BAAY,qBAAqB,UAAU,YAAY,cAAc;EAIjF,MAAM,aAAa,IAAI,iBAAiB;EACxC,MAAM,YAAY,iBAAiB,WAAW,OAAO,EAAE,QAAQ;AAE/D,MAAI;GACF,MAAM,WAAW,MAAM,MAAM,KAAK;IAChC,QAAQ,WAAW;IACnB,SAAS;KACP,cAAc;KACd,QAAQ;KACR,mBAAmB;KACnB,GAAG;KACJ;IACD,UAAU;IACX,CAAC;AAEF,gBAAa,UAAU;AAGvB,OAAI,CAAC,SAAS,IAAI;AAChB,QAAI,SAAS,WAAW,IACtB,OAAM,IAAIA,6BAAY,mBAAmB,OAAO,aAAa,IAAI;AAEnE,QAAI,SAAS,WAAW,OAAO,SAAS,WAAW,IACjD,OAAM,IAAIA,6BAAY,mBAAmB,OAAO,WAAW,SAAS,OAAO;AAE7E,QAAI,SAAS,WAAW,IACtB,OAAM,IAAIA,6BAAY,iBAAiB,OAAO,WAAW,IAAI;AAE/D,UAAM,IAAIA,6BACR,cAAc,SAAS,OAAO,IAAI,OAClC,gBACA,SAAS,OACV;;GAGH,MAAM,cAAc,SAAS,QAAQ,IAAI,eAAe,IAAI;AAG5D,OAAI,CAAC,YAAY,SAAS,YAAY,IAAI,CAAC,YAAY,SAAS,oBAAoB,CAClF,OAAM,IAAIA,6BAAY,4BAA4B,eAAe,cAAc;GAGjF,MAAM,OAAO,MAAM,SAAS,MAAM;GAGlC,MAAMC,kBAA0C,EAAE;AAClD,YAAS,QAAQ,SAAS,OAAO,QAAQ;AACvC,oBAAgB,OAAO;KACvB;AAEF,UAAO;IACL;IACA,UAAU,SAAS;IACnB,YAAY,SAAS;IACrB;IACA,SAAS;IACV;WACM,OAAO;AACd,gBAAa,UAAU;AAGvB,OAAI,iBAAiBD,6BACnB,OAAM;AAIR,OAAI,iBAAiB,SAAS,MAAM,SAAS,aAC3C,OAAM,IAAIA,6BAAY,2BAA2B,QAAQ,KAAK,UAAU;AAI1E,OAAI,iBAAiB,MACnB,OAAM,IAAIA,6BAAY,iBAAiB,MAAM,WAAW,gBAAgB,QAAW,MAAM;AAG3F,SAAM,IAAIA,6BAAY,uBAAuB,eAAe;;;;;;;AAQlE,MAAa,iBAAiB,IAAI,eAAe;;;;;;;;;;;AC3FjD,eAAsB,eACpB,KACA,YAAoB
,oBACQ;AAC5B,KAAI;EACF,MAAM,YAAY,IAAI,IAAI,IAAI;EAC9B,MAAM,YAAY,GAAG,UAAU,SAAS,IAAI,UAAU,KAAK;EAG3D,MAAM,WAAW,MAAM,MAAM,WAAW;GACtC,SAAS,EAAE,cAAc,WAAW;GACpC,QAAQ,YAAY,QAAQ,IAAK;GAClC,CAAC;AAGF,MAAI,CAAC,SAAS,GACZ,QAAO,EAAE,SAAS,MAAM;EAO1B,MAAM,UAAU,cAHF,eADI,MAAM,SAAS,MAAM,EACC,UAAU,EAErC,UAAU,WAAW,UAAU,OACF;AAE1C,SAAO;GACL;GACA,QAAQ,UAAU,SAAY;GAC/B;SACK;AAEN,SAAO,EAAE,SAAS,MAAM;;;;;;AAO5B,SAAS,eAAe,SAAiB,WAAgC;CACvE,MAAME,QAAqB;EAAE,UAAU,EAAE;EAAE,OAAO,EAAE;EAAE;CACtD,MAAM,QAAQ,QAAQ,MAAM,KAAK;CAGjC,MAAM,UAAU,UAAU,MAAM,QAAQ,CAAC,IAAI,aAAa,IAAI;CAE9D,IAAI,eAAe;CACnB,IAAI,kBAAkB;CACtB,IAAI,wBAAwB;AAE5B,MAAK,MAAM,WAAW,OAAO;EAC3B,MAAM,OAAO,QAAQ,MAAM;AAG3B,MAAI,CAAC,QAAQ,KAAK,WAAW,IAAI,CAC/B;EAIF,MAAM,aAAa,KAAK,QAAQ,IAAI;AACpC,MAAI,eAAe,GAAI;EAEvB,MAAM,YAAY,KAAK,MAAM,GAAG,WAAW,CAAC,MAAM,CAAC,aAAa;EAChE,MAAM,QAAQ,KAAK,MAAM,aAAa,EAAE,CAAC,MAAM;AAE/C,MAAI,cAAc,cAAc;AAC9B,kBAAe,MAAM,aAAa;AAElC,qBACE,iBAAiB,OAAO,iBAAiB,WAAW,QAAQ,SAAS,aAAa;AAGpF,OAAI,iBAAiB,OAAO,iBAAiB;AAC3C,4BAAwB;AAExB,UAAM,WAAW,EAAE;AACnB,UAAM,QAAQ,EAAE;;aAET,oBAAoB,CAAC,yBAAyB,iBAAiB,MACxE;OAAI,cAAc,cAAc,MAC9B,OAAM,SAAS,KAAK,MAAM;YACjB,cAAc,WAAW,MAClC,OAAM,MAAM,KAAK,MAAM;;;AAK7B,QAAO;;;;;AAMT,SAAS,cAAc,OAAoB,MAAuB;AAEhE,KAAI,MAAM,SAAS,WAAW,KAAK,MAAM,MAAM,WAAW,EACxD,QAAO;AAIT,MAAK,MAAM,WAAW,MAAM,MAC1B,KAAI,eAAe,MAAM,QAAQ,CAC/B,QAAO;AAKX,MAAK,MAAM,WAAW,MAAM,SAC1B,KAAI,eAAe,MAAM,QAAQ,CAC/B,QAAO;AAKX,QAAO;;;;;AAMT,SAAS,eAAe,MAAc,SAA0B;AAE9D,KAAI,CAAC,QAAS,QAAO;AAGrB,KAAI,QAAQ,SAAS,IAAI,CACvB,QAAO,KAAK,WAAW,QAAQ,MAAM,GAAG,GAAG,CAAC;AAI9C,KAAI,QAAQ,SAAS,IAAI,CACvB,QAAO,SAAS,QAAQ,MAAM,GAAG,GAAG;AAItC,KAAI,QAAQ,SAAS,IAAI,CAEvB,yBADc,IAAI,OAAO,IAAI,QAAQ,QAAQ,OAAO,KAAK,CAAC,QAAQ,OAAO,MAAM,CAAC,IAAI,EACvE,KAAK,KAAK;AAIzB,QAAO,KAAK,WAAW,QAAQ;;;;;;;;;;;;;;;;;;AChJjC,eAAsB,OAAO,KAAa,UAAyB,EAAE,EAAwB;CAC3F,MAAM,YAAY,KAAK,KAAK;AAG5B,KAAI,CAAC,WAAW,IAAI,CAClB,OAAM,IAAIC,6BAAY,wBAAwB,cAAc;CAI9D,MAAM,gBAAgB,aAAa,IAAI;AAGvC,KAAI,QAAQ,eAAe;EACzB,MAAM,eAAe,MAAM,eAAe,eAAe,QAAQ,UAAU;AAC3E,MAAI,CAAC,aAAa,QAChB,OAAM,IAAIA,6BACR,8BAA8B,aAAa,UAAU,gBACrD,iBACD;;CAML,MAAM,cAAc,OADJ,QAAQ,WAAW,gBACD,MAAM,eAAe;EACrD,SAAS,QAAQ;EACjB,WAAW,QAAQ;EACpB,CAAC;AAGF,OAAM,cAAc;CAGpB,IAAI,UAAU,wBACZ,eACA,YAAY,UACZ,YAAY,MACZ,QACD;CAGD,IAAIC;AACJ,KAAI,QAAQ,yBACV,cAAa,QAAQ,cAAc,EAAE;MAChC;EACL,MAAM,WAAW,yBAAyB;AAC1C,eAAa,QAAQ,aAAa,CAAC,GAAG,UAAU,GAAG,QAAQ,WAAW,GAAG;;AAI3E,cAAa,eAAe,WAAW;AAEvC,MAAK,MAAM,aAAa,WACtB,KAAI;EACF,MAAM,YAAY,MAAM,UAAU,QAAQ,QAAQ;AAClD,YAAU,aAAa,SAAS,UAAU;UACnC,OAAO;AAEd,UAAQ,MAAM,cAAc,UAAU,KAAK,YAAY,MAAM;AAE7D,YAAU,aAAa,SAAS,EAC9B,OAAO,QAAQ,QAAQ,QACnB,GAAG,QAAQ,QAAQ,MAAM,IAAI,UAAU,KAAK,IAAI,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM,KACtG,GAAG,UAAU,KAAK,IAAI,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM,IACjF,CAAC;;CAKN,MAAMC,qBAAkC;EACtC,KAAK;EACL,cAAc,QAAQ,QAAQ,gBAAgB,YAAY;EAC1D,QAAQ,cAAc,YAAY,SAAS;EAC3C,OAAO,QAAQ,QAAQ,SAAS;EAChC,aAAa,QAAQ,QAAQ,eAAe;EAC5C,OAAO,QAAQ,QAAQ;EACvB,SAAS,QAAQ,QAAQ;EACzB,SAAS,QAAQ,QAAQ,WAAW;EACpC,aAAa,QAAQ,QAAQ,eAAe;EAC5C,SAAS,QAAQ,QAAQ,WAAW;EACpC,WAAW,QAAQ,QAAQ,aAAa;EACxC,QAAQ,QAAQ,QAAQ;EACxB,aAAa,QAAQ,QAAQ;EAC7B,YAAY,QAAQ,QAAQ;EAC5B,UAAU,QAAQ,QAAQ;EAC1B,UAAU,QAAQ,QAAQ;EAC1B,aAAa,QAAQ,QAAQ,eAAe;EAC5C,UAAU,QAAQ,QAAQ,YAAY,EAAE;EACxC,QAAQ,QAAQ,QAAQ;EACxB,OAAO,QAAQ,QAAQ;EACvB,QAAQ,QAAQ,QAAQ;EACxB,4BAAW,IAAI,MAAM,EAAC,aAAa;EACnC,cAAc;EACd,OAAO,QAAQ,QAAQ;EACxB;AAGD,KAAI,QAAQ,OAAO,QAAQ,WAAW,QAAQ,QAAQ,SAAS,EAC7D,KAAI;EACF,MAAM,WAAW,MAAMC,yBAAQ,oBAAoB,QAAQ,KAAK,QAAQ,QAAQ;AAChF,SAAO,OAAO,oBAAoB,SAAS;UACpC,OAAO;AACd,UAAQ,MAAM,2BAA2B,MAAM;AAC/C,qBAAmB,QAAQ,mBAAmB,QAC1C,GAAG,mBAAmB,MAAM,SAAS,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM,KAC3F,QAAQ,iBAAiB,QAAQ,MAAM,UAAU,OAAO,M
AAM;;AAKtE,KAAI,QAAQ,OAAO,QAAQ,QACzB,KAAI;AAEF,qBAAmB,YADD,MAAMC,yBAAQ,oBAAoB,QAAQ,KAAK,QAAQ,QAAQ;UAE1E,OAAO;AACd,UAAQ,MAAM,0BAA0B,MAAM;AAC9C,qBAAmB,QAAQ,mBAAmB,QAC1C,GAAG,mBAAmB,MAAM,oBAAoB,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM,KACtG,mBAAmB,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM;;CAKjF,MAAM,eAAe,KAAK,KAAK,GAAG;AAOlC,QAL4B;EAC1B,GAAG;EACH;EACD;;;;;;;;;;;;;;;;AAmBH,eAAsB,WACpB,MACA,KACA,UAAyB,EAAE,EACL;CACtB,MAAM,YAAY,KAAK,KAAK;AAG5B,KAAI,CAAC,WAAW,IAAI,CAClB,OAAM,IAAIJ,6BAAY,wBAAwB,cAAc;CAG9D,MAAM,gBAAgB,aAAa,IAAI;AAGvC,OAAM,cAAc;CAGpB,IAAI,UAAU,wBAAwB,eAAe,eAAe,MAAM,QAAQ;CAGlF,IAAIC;AACJ,KAAI,QAAQ,yBACV,cAAa,QAAQ,cAAc,EAAE;MAChC;EACL,MAAM,WAAW,yBAAyB;AAC1C,eAAa,QAAQ,aAAa,CAAC,GAAG,UAAU,GAAG,QAAQ,WAAW,GAAG;;AAI3E,cAAa,eAAe,WAAW;AAEvC,MAAK,MAAM,aAAa,WACtB,KAAI;EACF,MAAM,YAAY,MAAM,UAAU,QAAQ,QAAQ;AAClD,YAAU,aAAa,SAAS,UAAU;UACnC,OAAO;AACd,UAAQ,MAAM,cAAc,UAAU,KAAK,YAAY,MAAM;AAC7D,YAAU,aAAa,SAAS,EAC9B,OAAO,QAAQ,QAAQ,QACnB,GAAG,QAAQ,QAAQ,MAAM,IAAI,UAAU,KAAK,IAAI,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM,KACtG,GAAG,UAAU,KAAK,IAAI,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM,IACjF,CAAC;;CAIN,MAAM,eAAe,KAAK,KAAK,GAAG;CAClC,MAAM,SAAS,cAAc,cAAc;AAiC3C,QA/B4B;EAC1B,KAAK;EACL,cAAc,QAAQ,QAAQ,gBAAgB;EAC9C;EACA,OAAO,QAAQ,QAAQ,SAAS;EAChC,aAAa,QAAQ,QAAQ,eAAe;EAC5C,OAAO,QAAQ,QAAQ;EACvB,SAAS,QAAQ,QAAQ;EACzB,SAAS,QAAQ,QAAQ,WAAW;EACpC,aAAa,QAAQ,QAAQ,eAAe;EAC5C,SAAS,QAAQ,QAAQ,WAAW;EACpC,WAAW,QAAQ,QAAQ,aAAa;EACxC,QAAQ,QAAQ,QAAQ;EACxB,aAAa,QAAQ,QAAQ;EAC7B,YAAY,QAAQ,QAAQ;EAC5B,UAAU,QAAQ,QAAQ;EAC1B,UAAU,QAAQ,QAAQ;EAC1B,aAAa,QAAQ,QAAQ,eAAe;EAC5C,UAAU,QAAQ,QAAQ,YAAY,EAAE;EACxC,QAAQ,QAAQ,QAAQ;EACxB,OAAO,QAAQ,QAAQ;EACvB,SAAS,QAAQ,QAAQ;EACzB,eAAe,QAAQ,QAAQ;EAC/B,UAAU,QAAQ,QAAQ;EAC1B,WAAW,QAAQ,QAAQ;EAC3B,QAAQ,QAAQ,QAAQ;EACxB,4BAAW,IAAI,MAAM,EAAC,aAAa;EACnC;EACA,OAAO,QAAQ,QAAQ;EACxB"}
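The sources inlined in this map spell out the new extraction pipeline: scrape() validates and normalizes the URL, optionally checks robots.txt, fetches, preloads JSDOM, then runs extractors in descending priority order (meta 100, jsonld 80, favicon 70, content 50, links 30), merging each partial result into the context. A minimal custom-extractor sketch against that pipeline; it assumes Extractor, ExtractionContext, and ScrapedData are re-exported from the package root, as the import list in index.d.cts below suggests:

```ts
import { scrape, type Extractor, type ExtractionContext, type ScrapedData } from 'scrapex';

// Hypothetical extractor: copy the first <h1> into results.custom.
// It only touches context.$ (Cheerio), so it never triggers the JSDOM load.
const headingExtractor: Extractor = {
  name: 'heading',
  priority: 40, // between content (50) and links (30) in the default order
  async extract(context: ExtractionContext): Promise<Partial<ScrapedData>> {
    const h1 = context.$('h1').first().text().trim();
    return h1 ? { custom: { h1 } } : {};
  },
};

// Per src/core/scrape.ts, custom extractors are appended to the defaults
// unless options.replaceDefaultExtractors is set.
const result = await scrape('https://example.com', { extractors: [headingExtractor] });
console.log(result.custom?.h1);
```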
package/dist/index.d.cts
ADDED
@@ -0,0 +1,264 @@
import { a as ExtractedLink, c as ExtractionSchemaType, d as FetchResult, f as Fetcher, h as ScrapedData, i as ExtractedEntities, l as Extractor, m as ScrapeOptions, n as ContentType, o as ExtractionContext, p as LLMProvider, r as EnhancementType, s as ExtractionSchema, t as CompletionOptions, u as FetchOptions } from "./types-D0HYR95H.cjs";

//#region src/core/context.d.ts

/**
 * Create an extraction context with lazy JSDOM loading.
 *
 * Cheerio is always available for fast DOM queries.
 * JSDOM is only loaded when getDocument() is called (for Readability).
 */
declare function createExtractionContext(url: string, finalUrl: string, html: string, options: ScrapeOptions): ExtractionContext;
/**
 * Merge partial results into the context
 */
declare function mergeResults(context: ExtractionContext, extracted: Partial<ScrapedData>): ExtractionContext;
//#endregion
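A minimal usage sketch for the context helpers above, assuming these names are exported from the `scrapex` package root, that every `ScrapeOptions` field is optional (its declaration lives in `types-D0HYR95H`), and that `results` accumulates a `Partial<ScrapedData>`:

```ts
import type { ScrapeOptions } from 'scrapex';
import { MetaExtractor, createExtractionContext, mergeResults } from 'scrapex';

const html = '<html><head><title>Hello</title></head><body><p>Hi</p></body></html>';
const options: ScrapeOptions = {}; // assumption: all fields optional

// Build a context over raw HTML; cheerio is parsed eagerly, JSDOM stays lazy.
let ctx = createExtractionContext('https://example.com', 'https://example.com', html, options);

// Run one extractor (meta only needs the cheerio handle, so the lazy JSDOM
// path is assumed never to be touched) and fold its result back in.
const partial = await new MetaExtractor().extract(ctx);
ctx = mergeResults(ctx, partial);

console.log(ctx.results.title); // 'Hello', assuming meta falls back to <title>
```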
//#region src/core/errors.d.ts
/**
 * Error codes for scraping failures
 */
type ScrapeErrorCode = 'FETCH_FAILED' | 'TIMEOUT' | 'INVALID_URL' | 'BLOCKED' | 'NOT_FOUND' | 'ROBOTS_BLOCKED' | 'PARSE_ERROR' | 'LLM_ERROR' | 'VALIDATION_ERROR';
/**
 * Custom error class for scraping failures with structured error codes
 */
declare class ScrapeError extends Error {
  readonly code: ScrapeErrorCode;
  readonly statusCode?: number;
  constructor(message: string, code: ScrapeErrorCode, statusCode?: number, cause?: Error);
  /**
   * Create a ScrapeError from an unknown error
   */
  static from(error: unknown, code?: ScrapeErrorCode): ScrapeError;
  /**
   * Check if error is retryable (network issues, timeouts)
   */
  isRetryable(): boolean;
  /**
   * Convert to a plain object for serialization
   */
  toJSON(): Record<string, unknown>;
}
//#endregion
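A sketch of structured error handling with the class above, assuming `scrape` (declared in the next region) rejects with a `ScrapeError` on failure:

```ts
import { ScrapeError, scrape } from 'scrapex';

try {
  const data = await scrape('https://example.com/article');
  console.log(data.title);
} catch (err) {
  if (err instanceof ScrapeError) {
    // Structured code plus optional HTTP status, e.g. 'FETCH_FAILED' / 503.
    console.error(err.code, err.statusCode);
    if (err.isRetryable()) {
      // e.g. TIMEOUT or a transient network failure: schedule a retry here.
    }
    console.log(JSON.stringify(err.toJSON())); // plain-object serialization
  } else {
    // Normalize anything unexpected into a ScrapeError.
    throw ScrapeError.from(err);
  }
}
```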
//#region src/core/scrape.d.ts
/**
 * Scrape a URL and extract metadata and content.
 *
 * @param url - The URL to scrape
 * @param options - Scraping options
 * @returns Scraped data with metadata and content
 *
 * @example
 * ```ts
 * const result = await scrape('https://example.com/article');
 * console.log(result.title, result.content);
 * ```
 */
declare function scrape(url: string, options?: ScrapeOptions): Promise<ScrapedData>;
/**
 * Scrape from raw HTML string (no fetch).
 *
 * @param html - The HTML content
 * @param url - The URL (for resolving relative links)
 * @param options - Scraping options
 * @returns Scraped data with metadata and content
 *
 * @example
 * ```ts
 * const html = await fetchSomehow('https://example.com');
 * const result = await scrapeHtml(html, 'https://example.com');
 * ```
 */
declare function scrapeHtml(html: string, url: string, options?: ScrapeOptions): Promise<ScrapedData>;
//#endregion
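A sketch of `scrapeHtml` over an HTML fixture, per the JSDoc above; the fixture markup is invented, and the URL argument is only assumed to anchor relative links such as `/changelog`:

```ts
import { scrapeHtml } from 'scrapex';

const html = `
  <html>
    <head>
      <title>Release notes</title>
      <meta property="og:description" content="What changed in 1.0" />
    </head>
    <body>
      <article><h1>Release notes</h1><a href="/changelog">full changelog</a></article>
    </body>
  </html>`;

// Nothing is fetched here; the URL resolves relative links in the document.
const data = await scrapeHtml(html, 'https://example.com/releases/1.0');
console.log(data.title, data.content);
```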
//#region src/extractors/content.d.ts
/**
 * Extracts main content using Mozilla Readability.
 * Converts HTML to Markdown for LLM consumption.
 */
declare class ContentExtractor implements Extractor {
  readonly name = "content";
  readonly priority = 50;
  extract(context: ExtractionContext): Promise<Partial<ScrapedData>>;
  private extractFallback;
  private createExcerpt;
  private detectContentType;
}
//#endregion
//#region src/extractors/favicon.d.ts
/**
 * Extracts favicon URL from the page.
 * Checks multiple sources in order of preference.
 */
declare class FaviconExtractor implements Extractor {
  readonly name = "favicon";
  readonly priority = 70;
  extract(context: ExtractionContext): Promise<Partial<ScrapedData>>;
}
//#endregion
//#region src/extractors/jsonld.d.ts
/**
 * Extracts JSON-LD structured data from the page.
 * Also extracts additional metadata from structured data.
 */
declare class JsonLdExtractor implements Extractor {
  readonly name = "jsonld";
  readonly priority = 80;
  extract(context: ExtractionContext): Promise<Partial<ScrapedData>>;
  private extractMetadata;
  private getType;
  private getString;
  private getAuthor;
  private getImage;
  private getKeywords;
}
//#endregion
//#region src/extractors/links.d.ts
/**
 * Extracts links from the page content.
 * Filters out navigation/footer links and focuses on content links.
 */
declare class LinksExtractor implements Extractor {
  readonly name = "links";
  readonly priority = 30;
  extract(context: ExtractionContext): Promise<Partial<ScrapedData>>;
}
//#endregion
//#region src/extractors/meta.d.ts
/**
 * Extracts metadata from HTML meta tags, Open Graph, and Twitter cards.
 * Runs first to provide basic metadata for other extractors.
 */
declare class MetaExtractor implements Extractor {
  readonly name = "meta";
  readonly priority = 100;
  extract(context: ExtractionContext): Promise<Partial<ScrapedData>>;
}
//#endregion
//#region src/extractors/index.d.ts
/**
 * Default extractors in priority order.
 * Higher priority runs first.
 */
declare function createDefaultExtractors(): Extractor[];
/**
 * Sort extractors by priority (higher first).
 */
declare function sortExtractors(extractors: Extractor[]): Extractor[];
//#endregion
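A sketch of a custom extractor composed with the defaults, assuming the `Extractor` interface carries exactly the `name`/`priority`/`extract` members the built-in classes implement and that `ScrapedData.custom` accepts arbitrary keys; the `canonical` name and priority of 90 are invented (per the docs above, higher runs first, so it would sit between meta at 100 and jsonld at 80):

```ts
import type { ExtractionContext, Extractor, ScrapedData } from 'scrapex';
import { createDefaultExtractors, sortExtractors } from 'scrapex';

// Hypothetical extractor: pull the canonical URL out of the document head.
const canonicalExtractor: Extractor = {
  name: 'canonical',
  priority: 90,
  async extract(context: ExtractionContext): Promise<Partial<ScrapedData>> {
    const href = context.$('link[rel="canonical"]').attr('href');
    return href ? { custom: { canonical: href } } : {};
  },
};

// Highest priority first: meta (100), canonical (90), jsonld (80), ...
const pipeline = sortExtractors([...createDefaultExtractors(), canonicalExtractor]);
```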
//#region src/fetchers/types.d.ts
/**
 * Fetcher interface - allows swapping fetch implementation
 * for Puppeteer, Playwright, or custom solutions
 */
interface Fetcher$1 {
  /**
   * Fetch HTML from a URL
   * @returns HTML content and final URL (after redirects)
   */
  fetch(url: string, options?: FetchOptions$1): Promise<FetchResult$1>;
  /** Fetcher name for logging */
  readonly name: string;
}
/**
 * Options for fetching
 */
interface FetchOptions$1 {
  /** Timeout in milliseconds (default: 10000) */
  timeout?: number;
  /** User agent string */
  userAgent?: string;
  /** Additional headers to send */
  headers?: Record<string, string>;
}
/**
 * Result from fetching a URL
 */
interface FetchResult$1 {
  /** Raw HTML content */
  html: string;
  /** Final URL after redirects */
  finalUrl: string;
  /** HTTP status code */
  statusCode: number;
  /** Content-Type header */
  contentType: string;
  /** Response headers (optional) */
  headers?: Record<string, string>;
}
/**
 * Default user agent string
 */
declare const DEFAULT_USER_AGENT = "Scrapex-Bot/2.0 (+https://github.com/developer-rakeshpaul/scrapex)";
/**
 * Default timeout in milliseconds
 */
declare const DEFAULT_TIMEOUT = 10000;
//#endregion
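A sketch of a drop-in fetcher built on the native fetch API, assuming the exported `Fetcher`/`FetchOptions`/`FetchResult` types match the `$1` interfaces above; whether `ScrapeOptions` accepts a custom fetcher is not shown in this file. The class name `SimpleFetcher` is invented:

```ts
import type { FetchOptions, FetchResult, Fetcher } from 'scrapex';
import { DEFAULT_TIMEOUT, DEFAULT_USER_AGENT } from 'scrapex';

class SimpleFetcher implements Fetcher {
  readonly name = 'simple-fetch';

  async fetch(url: string, options?: FetchOptions): Promise<FetchResult> {
    const res = await fetch(url, {
      headers: {
        'User-Agent': options?.userAgent ?? DEFAULT_USER_AGENT,
        ...options?.headers,
      },
      // Abort when the configured timeout elapses (Node 18+).
      signal: AbortSignal.timeout(options?.timeout ?? DEFAULT_TIMEOUT),
      redirect: 'follow',
    });
    return {
      html: await res.text(),
      finalUrl: res.url, // final URL after redirects
      statusCode: res.status,
      contentType: res.headers.get('content-type') ?? '',
    };
  }
}
```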
//#region src/fetchers/fetch.d.ts
/**
 * Default fetcher using native fetch API.
 * Works in Node.js 18+ without polyfills.
 */
declare class NativeFetcher implements Fetcher$1 {
  readonly name = "native-fetch";
  fetch(url: string, options?: FetchOptions$1): Promise<FetchResult$1>;
}
/**
 * Default fetcher instance
 */
declare const defaultFetcher: NativeFetcher;
//#endregion
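Using the bundled instance directly, for example to prefetch HTML for `scrapeHtml` (a sketch; the 5000 ms timeout is an arbitrary illustration):

```ts
import { defaultFetcher, scrapeHtml } from 'scrapex';

const { html, finalUrl, statusCode } = await defaultFetcher.fetch(
  'https://example.com',
  { timeout: 5000 }
);
if (statusCode === 200) {
  // Use the post-redirect URL so relative links resolve correctly.
  const data = await scrapeHtml(html, finalUrl);
  console.log(data.title);
}
```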
//#region src/fetchers/robots.d.ts
/**
 * Result of robots.txt check
 */
interface RobotsCheckResult {
  allowed: boolean;
  reason?: string;
}
/**
 * Check if URL is allowed by robots.txt
 *
 * @param url - The URL to check
 * @param userAgent - User agent to check rules for
 * @returns Whether the URL is allowed and optional reason
 */
declare function checkRobotsTxt(url: string, userAgent?: string): Promise<RobotsCheckResult>;
//#endregion
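A pre-flight sketch for the check above; the declaration does not say which agent is assumed when the second argument is omitted, so it is passed explicitly here:

```ts
import { DEFAULT_USER_AGENT, checkRobotsTxt } from 'scrapex';

const { allowed, reason } = await checkRobotsTxt(
  'https://example.com/private/page',
  DEFAULT_USER_AGENT
);
if (!allowed) {
  console.warn(`Skipping fetch: ${reason ?? 'disallowed by robots.txt'}`);
}
```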
//#region src/utils/url.d.ts
/**
 * Validate if a string is a valid URL
 */
declare function isValidUrl(url: string): boolean;
/**
 * Normalize URL by removing tracking params and trailing slashes
 */
declare function normalizeUrl(url: string): string;
/**
 * Extract domain from URL (without www prefix)
 */
declare function extractDomain(url: string): string;
/**
 * Resolve a potentially relative URL against a base URL
 */
declare function resolveUrl(url: string | undefined | null, baseUrl: string): string | undefined;
/**
 * Check if a URL is external relative to a domain
 */
declare function isExternalUrl(url: string, baseDomain: string): boolean;
/**
 * Extract protocol from URL
 */
declare function getProtocol(url: string): string;
/**
 * Get the path portion of a URL
 */
declare function getPath(url: string): string;
/**
 * Check if URL matches a pattern (supports * wildcard)
 */
declare function matchesUrlPattern(url: string, pattern: string): boolean;
//#endregion
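Expected behaviour of the URL helpers, inferred from the doc comments above; the commented outputs are illustrative, not verified against the implementation:

```ts
import {
  extractDomain,
  isExternalUrl,
  isValidUrl,
  matchesUrlPattern,
  normalizeUrl,
  resolveUrl,
} from 'scrapex';

isValidUrl('not a url');                                 // false
normalizeUrl('https://example.com/post/?utm_source=x');  // tracking param and trailing slash removed
extractDomain('https://www.example.com/a/b');            // 'example.com' (www stripped)
resolveUrl('/about', 'https://example.com/blog/post');   // 'https://example.com/about'
isExternalUrl('https://other.org/x', 'example.com');     // true
matchesUrlPattern('https://example.com/blog/hello', 'https://example.com/blog/*'); // true
```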
export { type CompletionOptions, ContentExtractor, type ContentType, DEFAULT_TIMEOUT, DEFAULT_USER_AGENT, type EnhancementType, type ExtractedEntities, type ExtractedLink, type ExtractionContext, type ExtractionSchema, type ExtractionSchemaType, type Extractor, FaviconExtractor, type FetchOptions, type FetchResult, type Fetcher, JsonLdExtractor, type LLMProvider, LinksExtractor, MetaExtractor, NativeFetcher, type RobotsCheckResult, ScrapeError, type ScrapeErrorCode, type ScrapeOptions, type ScrapedData, checkRobotsTxt, createDefaultExtractors, createExtractionContext, defaultFetcher, extractDomain, getPath, getProtocol, isExternalUrl, isValidUrl, matchesUrlPattern, mergeResults, normalizeUrl, resolveUrl, scrape, scrapeHtml, sortExtractors };
//# sourceMappingURL=index.d.cts.map
package/dist/index.d.cts.map
ADDED
@@ -0,0 +1 @@
{"version":3,"file":"index.d.cts","names":[],"sources":["../src/core/context.ts","../src/core/errors.ts","../src/core/scrape.ts","../src/extractors/content.ts","../src/extractors/favicon.ts","../src/extractors/jsonld.ts","../src/extractors/links.ts","../src/extractors/meta.ts","../src/extractors/index.ts","../src/fetchers/types.ts","../src/fetchers/fetch.ts","../src/fetchers/robots.ts","../src/utils/url.ts"],"sourcesContent":[],"mappings":";;;;;;;;;;iBAsBgB,uBAAA,uDAIL,gBACR;;ACxBH;AAcA;AACwB,iBDyCR,YAAA,CCzCQ,OAAA,ED0Cb,iBC1Ca,EAAA,SAAA,ED2CX,OC3CW,CD2CH,WC3CG,CAAA,CAAA,ED4CrB,iBC5CqB;;;;;;ADIR,KCnBJ,eAAA,GDmB2B,cAI5B,GAAA,SAAA,GACR,aAAA,GAAiB,SAAA,GAAA,WAAA,GAAA,gBAAA,GAAA,aAAA,GAAA,WAAA,GAAA,kBAAA;AAgCpB;;;AAEa,cC5CA,WAAA,SAAoB,KAAA,CD4CpB;EACV,SAAA,IAAA,EC5CqB,eD4CrB;EAAiB,SAAA,UAAA,CAAA,EAAA,MAAA;qCCzCiB,8CAA8C;;;AAlBnF;EAca,OAAA,IAAA,CAAA,KAAY,EAAA,OAAA,EAAA,IAAA,CAAA,EAmBW,eAnBX,CAAA,EAmB8C,WAnB9C;EACD;;;EAkBY,WAAA,CAAA,CAAA,EAAA,OAAA;EAAmC;;;EAnBjC,MAAA,CAAA,CAAA,EAyC1B,MAzC0B,CAAA,MAAA,EAAA,OAAA,CAAA;;;;;;ADKtC;AAqCA;;;;;;;;;ACxDA;AAca,iBCIS,MAAA,CDJG,GAAA,EAAA,MAAA,EAAA,OAAA,CAAA,ECI0B,aDJ1B,CAAA,ECI+C,ODJ/C,CCIuD,WDJvD,CAAA;;;;;;;;;;;;ACIzB;;;AAAwE,iBAmJlD,UAAA,CAnJkD,IAAA,EAAA,MAAA,EAAA,GAAA,EAAA,MAAA,EAAA,OAAA,CAAA,EAsJ7D,aAtJ6D,CAAA,EAuJrE,OAvJqE,CAuJ7D,WAvJ6D,CAAA;;;;;AFCxE;AAqCA;AACW,cGvCE,gBAAA,YAA4B,SHuC9B,CAAA;EACU,SAAA,IAAA,GAAA,SAAA;EAAR,SAAA,QAAA,GAAA,EAAA;EACV,OAAA,CAAA,OAAA,EGrCsB,iBHqCtB,CAAA,EGrC0C,OHqC1C,CGrCkD,OHqClD,CGrC0D,WHqC1D,CAAA,CAAA;EAAiB,QAAA,eAAA;;;;;;;;AAxCpB;AAqCA;AACW,cIrDE,gBAAA,YAA4B,SJqD9B,CAAA;EACU,SAAA,IAAA,GAAA,SAAA;EAAR,SAAA,QAAA,GAAA,EAAA;EACV,OAAA,CAAA,OAAA,EInDsB,iBJmDtB,CAAA,EInD0C,OJmD1C,CInDkD,OJmDlD,CInD0D,WJmD1D,CAAA,CAAA;;;;;;AAxCH;AAqCA;AACW,cKtDE,eAAA,YAA2B,SLsD7B,CAAA;EACU,SAAA,IAAA,GAAA,QAAA;EAAR,SAAA,QAAA,GAAA,EAAA;EACV,OAAA,CAAA,OAAA,EKpDsB,iBLoDtB,CAAA,EKpD0C,OLoD1C,CKpDkD,OLoDlD,CKpD0D,WLoD1D,CAAA,CAAA;EAAiB,QAAA,eAAA;;;;EC3DR,QAAA,QAAA;EAcC,QAAA,WAAY;;;;;;ADKzB;AAqCA;AACW,cMrDE,cAAA,YAA0B,SNqD5B,CAAA;EACU,SAAA,IAAA,GAAA,OAAA;EAAR,SAAA,QAAA,GAAA,EAAA;EACV,OAAA,CAAA,OAAA,EMnDsB,iBNmDtB,CAAA,EMnD0C,ONmD1C,CMnDkD,ONmDlD,CMnD0D,WNmD1D,CAAA,CAAA;;;;;;AAxCH;AAqCA;AACW,cOtDE,aAAA,YAAyB,SPsD3B,CAAA;EACU,SAAA,IAAA,GAAA,MAAA;EAAR,SAAA,QAAA,GAAA,GAAA;EACV,OAAA,CAAA,OAAA,EOpDsB,iBPoDtB,CAAA,EOpD0C,OPoD1C,COpDkD,OPoDlD,COpD0D,WPoD1D,CAAA,CAAA;;;;;;;;iBQ5Ca,uBAAA,CAAA,GAA2B;;;APf3C;AAca,iBOcG,cAAA,CPdS,UAAA,EOckB,SPdlB,EAAA,CAAA,EOcgC,SPdhC,EAAA;;;;;;ADKzB;AAqCgB,USvDC,SAAA,CTuDW;EACjB;;;;EAES,KAAA,CAAA,GAAA,EAAA,MAAA,EAAA,OAAA,CAAA,ESrDW,cTqDX,CAAA,ESrD0B,OTqD1B,CSrDkC,aTqDlC,CAAA;;;;AC3DpB;AAcA;;AAIqC,UQHpB,cAAA,CRGoB;EAA8C;EAe/C,OAAA,CAAA,EAAA,MAAA;EAAmC;EAsB3D,SAAA,CAAA,EAAA,MAAA;EAzCqB;EAAK,OAAA,CAAA,EQS1B,MRT0B,CAAA,MAAA,EAAA,MAAA,CAAA;;;;ACItC;AAAmD,UOWlC,aAAA,CPXkC;EAA6B;EAAR,IAAA,EAAA,MAAA;EAAO;EAmJzD,QAAA,EAAA,MAAU;EAGrB;EACA,UAAA,EAAA,MAAA;EAAR;EAAO,WAAA,EAAA,MAAA;;YO9HE;;ANzBZ;;;AAIqD,cM2BxC,kBAAA,GN3BwC,oEAAA;;;;cMiCxC,eAAA;;;;;ATpCb;AAqCA;AACW,cU/CE,aAAA,YAAyB,SV+C3B,CAAA;EACU,SAAA,IAAA,GAAA,cAAA;EAAR,KAAA,CAAA,GAAA,EAAA,MAAA,EAAA,OAAA,CAAA,EU7CuB,cV6CvB,CAAA,EU7C2C,OV6C3C,CU7CmD,aV6CnD,CAAA;;;;;cUuDA,gBAAc;;;;;;AV9FX,UWjBC,iBAAA,CXiBsB;EAqCvB,OAAA,EAAA,OAAY;EACjB,MAAA,CAAA,EAAA,MAAA;;;;;;;;ACzDX;AAca,iBUQS,cAAA,CVRG,GAAA,EAAA,MAAA,EAAA,SAAA,CAAA,EAAA,MAAA,CAAA,EUWtB,OVXsB,CUWd,iBVXc,CAAA;;;;;;ADKT,iBYKA,UAAA,CZLuB,GAAA,EAAA,MAI5B,CAAA,EAAA,OAAA;AAiCX;;;AAEa,iBYtBG,YAAA,CZsBH,GAAA,EAAA,MAAA,CAAA,EAAA,MAAA;;;;iBYEG,aAAA;;AX5DhB;AAcA;AACwB,iBWyDR,UAAA,CXzDQ,GAAA,EAAA,MAAA,GAAA,SAAA,GAAA,IAAA,EAAA,OAAA,EAAA,MAAA,CAAA,EAAA,MAAA,GA
AA,SAAA;;;;AAkB+C,iBWoDvD,aAAA,CXpDuD,GAAA,EAAA,MAAA,EAAA,UAAA,EAAA,MAAA,CAAA,EAAA,OAAA;;;;iBWiEvD,WAAA;;;AVhFhB;AAAmD,iBU2FnC,OAAA,CV3FmC,GAAA,EAAA,MAAA,CAAA,EAAA,MAAA;;;;AAmJ7B,iBU7CN,iBAAA,CV6CgB,GAAA,EAAA,MAAA,EAAA,OAAA,EAAA,MAAA,CAAA,EAAA,OAAA"}