scrapex 0.5.2 → 1.0.0-alpha.1
This diff compares the published contents of these two package versions as they appear in their public registry, and is provided for informational purposes only.
- package/LICENSE +1 -1
- package/README.md +392 -145
- package/dist/enhancer-Q6CSc1gA.mjs +220 -0
- package/dist/enhancer-Q6CSc1gA.mjs.map +1 -0
- package/dist/enhancer-oM4BhYYS.cjs +268 -0
- package/dist/enhancer-oM4BhYYS.cjs.map +1 -0
- package/dist/index.cjs +852 -0
- package/dist/index.cjs.map +1 -0
- package/dist/index.d.cts +264 -0
- package/dist/index.d.cts.map +1 -0
- package/dist/index.d.mts +264 -0
- package/dist/index.d.mts.map +1 -0
- package/dist/index.mjs +798 -0
- package/dist/index.mjs.map +1 -0
- package/dist/llm/index.cjs +316 -0
- package/dist/llm/index.cjs.map +1 -0
- package/dist/llm/index.d.cts +211 -0
- package/dist/llm/index.d.cts.map +1 -0
- package/dist/llm/index.d.mts +211 -0
- package/dist/llm/index.d.mts.map +1 -0
- package/dist/llm/index.mjs +310 -0
- package/dist/llm/index.mjs.map +1 -0
- package/dist/parsers/index.cjs +200 -0
- package/dist/parsers/index.cjs.map +1 -0
- package/dist/parsers/index.d.cts +133 -0
- package/dist/parsers/index.d.cts.map +1 -0
- package/dist/parsers/index.d.mts +133 -0
- package/dist/parsers/index.d.mts.map +1 -0
- package/dist/parsers/index.mjs +192 -0
- package/dist/parsers/index.mjs.map +1 -0
- package/dist/types-CNQZVW36.d.mts +150 -0
- package/dist/types-CNQZVW36.d.mts.map +1 -0
- package/dist/types-D0HYR95H.d.cts +150 -0
- package/dist/types-D0HYR95H.d.cts.map +1 -0
- package/package.json +80 -100
- package/dist/index.d.ts +0 -45
- package/dist/index.js +0 -8
- package/dist/scrapex.cjs.development.js +0 -1128
- package/dist/scrapex.cjs.development.js.map +0 -1
- package/dist/scrapex.cjs.production.min.js +0 -2
- package/dist/scrapex.cjs.production.min.js.map +0 -1
- package/dist/scrapex.esm.js +0 -1120
- package/dist/scrapex.esm.js.map +0 -1
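
The new dist/ layout replaces the old tsdx-style bundles (scrapex.cjs.development.js, scrapex.esm.js) with dual ESM/CJS builds plus .d.mts/.d.cts typings, split into core, llm, and parsers entry points. A minimal consumption sketch, assuming the package.json "exports" map mirrors the dist/ directories (the scrapex/llm subpath specifier is an assumption, not confirmed by this diff):

```ts
// Assumed export map: "." -> dist/index, "./llm" -> dist/llm/index.
import { scrape } from 'scrapex';
import { AnthropicProvider } from 'scrapex/llm';

const provider = new AnthropicProvider(); // reads ANTHROPIC_API_KEY from the env
const result = await scrape('https://example.com/article', {
  llm: provider,
  enhance: ['summarize'], // mirrors the @example in the bundled JSDoc below
});
console.log(result.title, result.summary);
```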
package/dist/index.mjs.map
@@ -0,0 +1 @@
+
{"version":3,"file":"index.mjs","names":["jsdomModule: typeof import('jsdom') | null","document: Document | null","jsonLd: Record<string, unknown>[]","result: Partial<ScrapedData>","links: ExtractedLink[]","parsedUrl: URL","responseHeaders: Record<string, string>","rules: RobotsRules","extractors: Extractor[]","intermediateResult: ScrapedData"],"sources":["../src/core/context.ts","../src/extractors/content.ts","../src/utils/url.ts","../src/extractors/favicon.ts","../src/extractors/jsonld.ts","../src/extractors/links.ts","../src/extractors/meta.ts","../src/extractors/index.ts","../src/fetchers/types.ts","../src/fetchers/fetch.ts","../src/fetchers/robots.ts","../src/core/scrape.ts"],"sourcesContent":["import type { CheerioAPI } from 'cheerio';\nimport * as cheerio from 'cheerio';\nimport type { ExtractionContext, ScrapedData, ScrapeOptions } from './types.js';\n\n// Cached JSDOM module for lazy loading\nlet jsdomModule: typeof import('jsdom') | null = null;\n\n/**\n * Preload JSDOM module (called once during scrape initialization)\n */\nexport async function preloadJsdom(): Promise<void> {\n if (!jsdomModule) {\n jsdomModule = await import('jsdom');\n }\n}\n\n/**\n * Create an extraction context with lazy JSDOM loading.\n *\n * Cheerio is always available for fast DOM queries.\n * JSDOM is only loaded when getDocument() is called (for Readability).\n */\nexport function createExtractionContext(\n url: string,\n finalUrl: string,\n html: string,\n options: ScrapeOptions\n): ExtractionContext {\n // Lazy-loaded JSDOM document\n let document: Document | null = null;\n\n // Parse HTML with Cheerio (fast, always available)\n const $: CheerioAPI = cheerio.load(html);\n\n return {\n url,\n finalUrl,\n html,\n $,\n options,\n results: {},\n\n getDocument(): Document {\n // Use preloaded JSDOM module\n if (!document) {\n if (!jsdomModule) {\n throw new Error('JSDOM not preloaded. Call preloadJsdom() before using getDocument().');\n }\n const dom = new jsdomModule.JSDOM(html, { url: finalUrl });\n document = dom.window.document;\n }\n return document;\n },\n };\n}\n\n/**\n * Merge partial results into the context\n */\nexport function mergeResults(\n context: ExtractionContext,\n extracted: Partial<ScrapedData>\n): ExtractionContext {\n return {\n ...context,\n results: {\n ...context.results,\n ...extracted,\n // Merge custom fields if both exist\n custom:\n extracted.custom || context.results.custom\n ? 
{ ...context.results.custom, ...extracted.custom }\n : undefined,\n },\n };\n}\n","import { Readability } from '@mozilla/readability';\nimport TurndownService from 'turndown';\nimport type { ContentType, ExtractionContext, Extractor, ScrapedData } from '@/core/types.js';\n\n// Initialize Turndown with sensible defaults\nconst turndown = new TurndownService({\n headingStyle: 'atx',\n codeBlockStyle: 'fenced',\n bulletListMarker: '-',\n emDelimiter: '_',\n strongDelimiter: '**',\n linkStyle: 'inlined',\n});\n\n// Remove script, style, and other noise\nturndown.remove(['script', 'style', 'noscript', 'iframe', 'nav', 'footer']);\n\n/**\n * Extracts main content using Mozilla Readability.\n * Converts HTML to Markdown for LLM consumption.\n */\nexport class ContentExtractor implements Extractor {\n readonly name = 'content';\n readonly priority = 50; // Medium priority - runs after meta\n\n async extract(context: ExtractionContext): Promise<Partial<ScrapedData>> {\n const { options } = context;\n\n // Skip if content extraction is disabled\n if (options.extractContent === false) {\n return {};\n }\n\n // Use JSDOM for Readability (lazy-loaded)\n const document = context.getDocument();\n const clonedDoc = document.cloneNode(true) as Document;\n\n // Run Readability\n const reader = new Readability(clonedDoc);\n const article = reader.parse();\n\n if (!article || !article.content) {\n // Fallback: extract body text\n return this.extractFallback(context);\n }\n\n // Convert to markdown\n let content = turndown.turndown(article.content);\n\n // Truncate if needed\n const maxLength = options.maxContentLength ?? 50000;\n if (content.length > maxLength) {\n content = `${content.slice(0, maxLength)}\\n\\n[Content truncated...]`;\n }\n\n // Plain text content\n const textContent = (article.textContent ?? '').trim();\n\n // Create excerpt\n const excerpt = this.createExcerpt(textContent);\n\n // Word count\n const wordCount = textContent.split(/\\s+/).filter(Boolean).length;\n\n // Detect content type\n const contentType = this.detectContentType(context);\n\n return {\n content,\n textContent,\n excerpt: article.excerpt || excerpt,\n wordCount,\n contentType,\n // Readability may provide better values than meta tags\n title: article.title || undefined,\n author: article.byline || undefined,\n siteName: article.siteName || undefined,\n };\n }\n\n private extractFallback(context: ExtractionContext): Partial<ScrapedData> {\n const { $ } = context;\n\n // Try to get body content\n const bodyHtml = $('body').html() || '';\n const content = turndown.turndown(bodyHtml);\n const textContent = $('body').text().replace(/\\s+/g, ' ').trim();\n\n return {\n content: content.slice(0, context.options.maxContentLength ?? 50000),\n textContent,\n excerpt: this.createExcerpt(textContent),\n wordCount: textContent.split(/\\s+/).filter(Boolean).length,\n contentType: 'unknown',\n };\n }\n\n private createExcerpt(text: string, maxLength = 300): string {\n if (text.length <= maxLength) {\n return text;\n }\n // Try to break at word boundary\n const truncated = text.slice(0, maxLength);\n const lastSpace = truncated.lastIndexOf(' ');\n return `${lastSpace > 0 ? 
truncated.slice(0, lastSpace) : truncated}...`;\n }\n\n private detectContentType(context: ExtractionContext): ContentType {\n const { $, finalUrl } = context;\n const url = finalUrl.toLowerCase();\n\n // GitHub repo\n if (url.includes('github.com') && !url.includes('/blob/') && !url.includes('/issues/')) {\n const repoMeta = $('meta[property=\"og:type\"]').attr('content');\n if (repoMeta === 'object' || url.match(/github\\.com\\/[^/]+\\/[^/]+\\/?$/)) {\n return 'repo';\n }\n }\n\n // npm package\n if (url.includes('npmjs.com/package/')) {\n return 'package';\n }\n\n // PyPI package\n if (url.includes('pypi.org/project/')) {\n return 'package';\n }\n\n // Documentation sites\n if (\n url.includes('/docs/') ||\n url.includes('.readthedocs.') ||\n url.includes('/documentation/')\n ) {\n return 'docs';\n }\n\n // Video platforms\n if (url.includes('youtube.com') || url.includes('vimeo.com') || url.includes('youtu.be')) {\n return 'video';\n }\n\n // Product pages (heuristic)\n const hasPrice = $('[class*=\"price\"], [data-price], [itemprop=\"price\"]').length > 0;\n const hasAddToCart = $('[class*=\"cart\"], [class*=\"buy\"], button:contains(\"Add\")').length > 0;\n if (hasPrice || hasAddToCart) {\n return 'product';\n }\n\n // Article detection (Open Graph type)\n const ogType = $('meta[property=\"og:type\"]').attr('content')?.toLowerCase();\n if (ogType === 'article' || ogType === 'blog' || ogType === 'news') {\n return 'article';\n }\n\n // Article heuristics\n const hasArticleTag = $('article').length > 0;\n const hasDateline = $('time[datetime], [class*=\"date\"], [class*=\"byline\"]').length > 0;\n if (hasArticleTag && hasDateline) {\n return 'article';\n }\n\n return 'unknown';\n }\n}\n","/**\n * Common tracking parameters to remove from URLs\n */\nconst TRACKING_PARAMS = [\n 'utm_source',\n 'utm_medium',\n 'utm_campaign',\n 'utm_term',\n 'utm_content',\n 'utm_id',\n 'ref',\n 'fbclid',\n 'gclid',\n 'gclsrc',\n 'dclid',\n 'msclkid',\n 'mc_cid',\n 'mc_eid',\n '_ga',\n '_gl',\n 'source',\n 'referrer',\n];\n\n/**\n * Validate if a string is a valid URL\n */\nexport function isValidUrl(url: string): boolean {\n try {\n const parsed = new URL(url);\n return ['http:', 'https:'].includes(parsed.protocol);\n } catch {\n return false;\n }\n}\n\n/**\n * Normalize URL by removing tracking params and trailing slashes\n */\nexport function normalizeUrl(url: string): string {\n try {\n const parsed = new URL(url);\n\n // Remove common tracking parameters\n for (const param of TRACKING_PARAMS) {\n parsed.searchParams.delete(param);\n }\n\n // Remove trailing slash for consistency (except for root)\n let normalized = parsed.toString();\n if (normalized.endsWith('/') && parsed.pathname !== '/') {\n normalized = normalized.slice(0, -1);\n }\n\n return normalized;\n } catch {\n return url;\n }\n}\n\n/**\n * Extract domain from URL (without www prefix)\n */\nexport function extractDomain(url: string): string {\n try {\n const parsed = new URL(url);\n return parsed.hostname.replace(/^www\\./, '');\n } catch {\n return '';\n }\n}\n\n/**\n * Resolve a potentially relative URL against a base URL\n */\nexport function resolveUrl(url: string | undefined | null, baseUrl: string): string | undefined {\n if (!url) return undefined;\n\n try {\n return new URL(url, baseUrl).href;\n } catch {\n return url;\n }\n}\n\n/**\n * Check if a URL is external relative to a domain\n */\nexport function isExternalUrl(url: string, baseDomain: string): boolean {\n try {\n const parsed = new URL(url);\n const urlDomain = 
parsed.hostname.replace(/^www\\./, '');\n return urlDomain !== baseDomain;\n } catch {\n return false;\n }\n}\n\n/**\n * Extract protocol from URL\n */\nexport function getProtocol(url: string): string {\n try {\n return new URL(url).protocol;\n } catch {\n return '';\n }\n}\n\n/**\n * Get the path portion of a URL\n */\nexport function getPath(url: string): string {\n try {\n return new URL(url).pathname;\n } catch {\n return '';\n }\n}\n\n/**\n * Check if URL matches a pattern (supports * wildcard)\n */\nexport function matchesUrlPattern(url: string, pattern: string): boolean {\n if (!pattern.includes('*')) {\n return url === pattern || url.startsWith(pattern);\n }\n\n const regexPattern = pattern.replace(/[.+?^${}()|[\\]\\\\]/g, '\\\\$&').replace(/\\*/g, '.*');\n\n return new RegExp(`^${regexPattern}`).test(url);\n}\n","import type { ExtractionContext, Extractor, ScrapedData } from '@/core/types.js';\nimport { resolveUrl } from '@/utils/url.js';\n\n/**\n * Extracts favicon URL from the page.\n * Checks multiple sources in order of preference.\n */\nexport class FaviconExtractor implements Extractor {\n readonly name = 'favicon';\n readonly priority = 70;\n\n async extract(context: ExtractionContext): Promise<Partial<ScrapedData>> {\n const { $, finalUrl } = context;\n\n // Check various favicon link relations in order of preference\n const faviconSelectors = [\n 'link[rel=\"icon\"][type=\"image/svg+xml\"]', // SVG (best quality)\n 'link[rel=\"icon\"][sizes=\"192x192\"]',\n 'link[rel=\"icon\"][sizes=\"180x180\"]',\n 'link[rel=\"icon\"][sizes=\"128x128\"]',\n 'link[rel=\"icon\"][sizes=\"96x96\"]',\n 'link[rel=\"apple-touch-icon\"][sizes=\"180x180\"]',\n 'link[rel=\"apple-touch-icon\"]',\n 'link[rel=\"icon\"][sizes=\"32x32\"]',\n 'link[rel=\"icon\"]',\n 'link[rel=\"shortcut icon\"]',\n ];\n\n for (const selector of faviconSelectors) {\n const href = $(selector).first().attr('href');\n if (href) {\n return {\n favicon: resolveUrl(finalUrl, href),\n };\n }\n }\n\n // Fallback: try /favicon.ico\n try {\n const url = new URL(finalUrl);\n return {\n favicon: `${url.protocol}//${url.host}/favicon.ico`,\n };\n } catch {\n return {};\n }\n }\n}\n","import type { ExtractionContext, Extractor, ScrapedData } from '@/core/types.js';\n\n/**\n * Extracts JSON-LD structured data from the page.\n * Also extracts additional metadata from structured data.\n */\nexport class JsonLdExtractor implements Extractor {\n readonly name = 'jsonld';\n readonly priority = 80; // After meta, before content\n\n async extract(context: ExtractionContext): Promise<Partial<ScrapedData>> {\n const { $ } = context;\n const jsonLd: Record<string, unknown>[] = [];\n\n // Find all JSON-LD scripts\n $('script[type=\"application/ld+json\"]').each((_, el) => {\n const content = $(el).html();\n if (!content) return;\n\n try {\n const parsed = JSON.parse(content);\n // Handle both single objects and arrays\n if (Array.isArray(parsed)) {\n jsonLd.push(...parsed);\n } else if (typeof parsed === 'object' && parsed !== null) {\n jsonLd.push(parsed);\n }\n } catch {\n // Invalid JSON-LD, skip\n }\n });\n\n if (jsonLd.length === 0) {\n return {};\n }\n\n // Extract useful metadata from JSON-LD\n const metadata = this.extractMetadata(jsonLd);\n\n return {\n jsonLd,\n ...metadata,\n };\n }\n\n private extractMetadata(jsonLd: Record<string, unknown>[]): Partial<ScrapedData> {\n const result: Partial<ScrapedData> = {};\n\n for (const item of jsonLd) {\n const type = this.getType(item);\n\n // Extract from Article/BlogPosting/NewsArticle\n if 
(type?.match(/Article|BlogPosting|NewsArticle|WebPage/i)) {\n result.title = result.title || this.getString(item, 'headline', 'name');\n result.description = result.description || this.getString(item, 'description');\n result.author = result.author || this.getAuthor(item);\n result.publishedAt = result.publishedAt || this.getString(item, 'datePublished');\n result.modifiedAt = result.modifiedAt || this.getString(item, 'dateModified');\n result.image = result.image || this.getImage(item);\n }\n\n // Extract from Organization\n if (type === 'Organization') {\n result.siteName = result.siteName || this.getString(item, 'name');\n }\n\n // Extract from Product\n if (type === 'Product') {\n result.title = result.title || this.getString(item, 'name');\n result.description = result.description || this.getString(item, 'description');\n result.image = result.image || this.getImage(item);\n }\n\n // Extract from SoftwareApplication\n if (type === 'SoftwareApplication') {\n result.title = result.title || this.getString(item, 'name');\n result.description = result.description || this.getString(item, 'description');\n }\n\n // Extract keywords from any type\n const keywords = this.getKeywords(item);\n if (keywords.length > 0) {\n result.keywords = [...(result.keywords || []), ...keywords];\n }\n }\n\n // Deduplicate keywords\n if (result.keywords) {\n result.keywords = [...new Set(result.keywords)];\n }\n\n return result;\n }\n\n private getType(item: Record<string, unknown>): string | undefined {\n const type = item['@type'];\n if (typeof type === 'string') return type;\n if (Array.isArray(type)) return type[0] as string;\n return undefined;\n }\n\n private getString(item: Record<string, unknown>, ...keys: string[]): string | undefined {\n for (const key of keys) {\n const value = item[key];\n if (typeof value === 'string') return value;\n if (typeof value === 'object' && value !== null && '@value' in value) {\n return String((value as { '@value': unknown })['@value']);\n }\n }\n return undefined;\n }\n\n private getAuthor(item: Record<string, unknown>): string | undefined {\n const author = item.author;\n if (typeof author === 'string') return author;\n // Check array BEFORE object since typeof [] === 'object'\n if (Array.isArray(author)) {\n const names = author\n .map((a) =>\n typeof a === 'string' ? 
a : this.getString(a as Record<string, unknown>, 'name')\n )\n .filter(Boolean);\n return names.join(', ') || undefined;\n }\n if (typeof author === 'object' && author !== null) {\n const authorObj = author as Record<string, unknown>;\n return this.getString(authorObj, 'name') || undefined;\n }\n return undefined;\n }\n\n private getImage(item: Record<string, unknown>): string | undefined {\n const image = item.image;\n if (typeof image === 'string') return image;\n // Check array BEFORE object since typeof [] === 'object'\n if (Array.isArray(image) && image.length > 0) {\n return this.getImage({ image: image[0] });\n }\n if (typeof image === 'object' && image !== null) {\n const imageObj = image as Record<string, unknown>;\n return this.getString(imageObj, 'url', 'contentUrl') || undefined;\n }\n return undefined;\n }\n\n private getKeywords(item: Record<string, unknown>): string[] {\n const keywords = item.keywords;\n if (typeof keywords === 'string') {\n return keywords\n .split(',')\n .map((k) => k.trim())\n .filter(Boolean);\n }\n if (Array.isArray(keywords)) {\n return keywords.filter((k): k is string => typeof k === 'string');\n }\n return [];\n }\n}\n","import type { ExtractedLink, ExtractionContext, Extractor, ScrapedData } from '@/core/types.js';\nimport { extractDomain, isExternalUrl, isValidUrl, resolveUrl } from '@/utils/url.js';\n\n/**\n * Extracts links from the page content.\n * Filters out navigation/footer links and focuses on content links.\n */\nexport class LinksExtractor implements Extractor {\n readonly name = 'links';\n readonly priority = 30; // Runs last\n\n async extract(context: ExtractionContext): Promise<Partial<ScrapedData>> {\n const { $, finalUrl } = context;\n const links: ExtractedLink[] = [];\n const seen = new Set<string>();\n\n // Extract links from main content area (article, main, or body)\n const contentArea = $('article, main, [role=\"main\"]').first();\n const container = contentArea.length > 0 ? 
contentArea : $('body');\n\n // Skip links in navigation, header, footer, sidebar\n const skipSelectors =\n 'nav, header, footer, aside, [role=\"navigation\"], [class*=\"nav\"], [class*=\"footer\"], [class*=\"header\"], [class*=\"sidebar\"], [class*=\"menu\"]';\n\n container.find('a[href]').each((_, el) => {\n const $el = $(el);\n\n // Skip if inside navigation/footer elements\n if ($el.closest(skipSelectors).length > 0) {\n return;\n }\n\n const href = $el.attr('href');\n if (!href) return;\n\n // Skip anchors, javascript, mailto, tel\n if (\n href.startsWith('#') ||\n href.startsWith('javascript:') ||\n href.startsWith('mailto:') ||\n href.startsWith('tel:')\n ) {\n return;\n }\n\n // Resolve relative URLs\n const resolvedUrl = resolveUrl(href, finalUrl);\n if (!resolvedUrl || !isValidUrl(resolvedUrl)) return;\n\n // Skip duplicates\n if (seen.has(resolvedUrl)) return;\n seen.add(resolvedUrl);\n\n // Get link text\n const text = $el.text().trim() || $el.attr('title') || $el.attr('aria-label') || '';\n\n // Skip empty or very short link text (likely icons)\n if (text.length < 2) return;\n\n const baseDomain = extractDomain(finalUrl);\n links.push({\n url: resolvedUrl,\n text: text.slice(0, 200), // Limit text length\n isExternal: isExternalUrl(resolvedUrl, baseDomain),\n });\n });\n\n return {\n links: links.slice(0, 100), // Limit to 100 links\n };\n }\n}\n","import type { ExtractionContext, Extractor, ScrapedData } from '@/core/types.js';\n\n/**\n * Extracts metadata from HTML meta tags, Open Graph, and Twitter cards.\n * Runs first to provide basic metadata for other extractors.\n */\nexport class MetaExtractor implements Extractor {\n readonly name = 'meta';\n readonly priority = 100; // High priority - runs first\n\n async extract(context: ExtractionContext): Promise<Partial<ScrapedData>> {\n const { $ } = context;\n\n // Helper to get meta content by name or property\n const getMeta = (nameOrProperty: string): string | undefined => {\n const value =\n $(`meta[name=\"${nameOrProperty}\"]`).attr('content') ||\n $(`meta[property=\"${nameOrProperty}\"]`).attr('content') ||\n $(`meta[itemprop=\"${nameOrProperty}\"]`).attr('content');\n return value?.trim() || undefined;\n };\n\n // Title (priority: og:title > twitter:title > <title>)\n const title =\n getMeta('og:title') || getMeta('twitter:title') || $('title').first().text().trim() || '';\n\n // Description (priority: og:description > twitter:description > meta description)\n const description =\n getMeta('og:description') || getMeta('twitter:description') || getMeta('description') || '';\n\n // Image (priority: og:image > twitter:image)\n const image =\n getMeta('og:image') || getMeta('twitter:image') || getMeta('twitter:image:src') || undefined;\n\n // Canonical URL\n const canonicalUrl =\n $('link[rel=\"canonical\"]').attr('href') || getMeta('og:url') || context.finalUrl;\n\n // Author\n const author =\n getMeta('author') ||\n getMeta('article:author') ||\n getMeta('twitter:creator') ||\n $('[rel=\"author\"]').first().text().trim() ||\n undefined;\n\n // Site name\n const siteName = getMeta('og:site_name') || getMeta('application-name') || undefined;\n\n // Published/Modified dates\n const publishedAt =\n getMeta('article:published_time') ||\n getMeta('datePublished') ||\n getMeta('date') ||\n $('time[datetime]').first().attr('datetime') ||\n undefined;\n\n const modifiedAt = getMeta('article:modified_time') || getMeta('dateModified') || undefined;\n\n // Language\n const language =\n $('html').attr('lang') || getMeta('og:locale') 
|| getMeta('language') || undefined;\n\n // Keywords\n const keywordsRaw = getMeta('keywords') || getMeta('article:tag') || '';\n const keywords = keywordsRaw\n ? keywordsRaw\n .split(',')\n .map((k) => k.trim())\n .filter(Boolean)\n : [];\n\n return {\n title,\n description,\n image,\n canonicalUrl,\n author,\n siteName,\n publishedAt,\n modifiedAt,\n language,\n keywords,\n };\n }\n}\n","import type { Extractor } from '@/core/types.js';\nimport { ContentExtractor } from './content.js';\nimport { FaviconExtractor } from './favicon.js';\nimport { JsonLdExtractor } from './jsonld.js';\nimport { LinksExtractor } from './links.js';\nimport { MetaExtractor } from './meta.js';\n\nexport { ContentExtractor } from './content.js';\nexport { FaviconExtractor } from './favicon.js';\nexport { JsonLdExtractor } from './jsonld.js';\nexport { LinksExtractor } from './links.js';\n// Export all extractors\nexport { MetaExtractor } from './meta.js';\n\n/**\n * Default extractors in priority order.\n * Higher priority runs first.\n */\nexport function createDefaultExtractors(): Extractor[] {\n return [\n new MetaExtractor(), // priority: 100\n new JsonLdExtractor(), // priority: 80\n new FaviconExtractor(), // priority: 70\n new ContentExtractor(), // priority: 50\n new LinksExtractor(), // priority: 30\n ];\n}\n\n/**\n * Sort extractors by priority (higher first).\n */\nexport function sortExtractors(extractors: Extractor[]): Extractor[] {\n return [...extractors].sort((a, b) => (b.priority ?? 0) - (a.priority ?? 0));\n}\n","/**\n * Fetcher interface - allows swapping fetch implementation\n * for Puppeteer, Playwright, or custom solutions\n */\nexport interface Fetcher {\n /**\n * Fetch HTML from a URL\n * @returns HTML content and final URL (after redirects)\n */\n fetch(url: string, options?: FetchOptions): Promise<FetchResult>;\n\n /** Fetcher name for logging */\n readonly name: string;\n}\n\n/**\n * Options for fetching\n */\nexport interface FetchOptions {\n /** Timeout in milliseconds (default: 10000) */\n timeout?: number;\n\n /** User agent string */\n userAgent?: string;\n\n /** Additional headers to send */\n headers?: Record<string, string>;\n}\n\n/**\n * Result from fetching a URL\n */\nexport interface FetchResult {\n /** Raw HTML content */\n html: string;\n\n /** Final URL after redirects */\n finalUrl: string;\n\n /** HTTP status code */\n statusCode: number;\n\n /** Content-Type header */\n contentType: string;\n\n /** Response headers (optional) */\n headers?: Record<string, string>;\n}\n\n/**\n * Default user agent string\n */\nexport const DEFAULT_USER_AGENT =\n 'Scrapex-Bot/2.0 (+https://github.com/developer-rakeshpaul/scrapex)';\n\n/**\n * Default timeout in milliseconds\n */\nexport const DEFAULT_TIMEOUT = 10000;\n","import { ScrapeError } from '@/core/errors.js';\nimport {\n DEFAULT_TIMEOUT,\n DEFAULT_USER_AGENT,\n type Fetcher,\n type FetchOptions,\n type FetchResult,\n} from './types.js';\n\n/**\n * Default fetcher using native fetch API.\n * Works in Node.js 18+ without polyfills.\n */\nexport class NativeFetcher implements Fetcher {\n readonly name = 'native-fetch';\n\n async fetch(url: string, options: FetchOptions = {}): Promise<FetchResult> {\n const { timeout = DEFAULT_TIMEOUT, userAgent = DEFAULT_USER_AGENT, headers = {} } = options;\n\n // Validate URL\n let parsedUrl: URL;\n try {\n parsedUrl = new URL(url);\n } catch {\n throw new ScrapeError(`Invalid URL: ${url}`, 'INVALID_URL');\n }\n\n // Only allow http/https\n if (!['http:', 'https:'].includes(parsedUrl.protocol)) 
{\n throw new ScrapeError(`Invalid protocol: ${parsedUrl.protocol}`, 'INVALID_URL');\n }\n\n // Setup abort controller for timeout\n const controller = new AbortController();\n const timeoutId = setTimeout(() => controller.abort(), timeout);\n\n try {\n const response = await fetch(url, {\n signal: controller.signal,\n headers: {\n 'User-Agent': userAgent,\n Accept: 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'en-US,en;q=0.5',\n ...headers,\n },\n redirect: 'follow',\n });\n\n clearTimeout(timeoutId);\n\n // Handle error status codes\n if (!response.ok) {\n if (response.status === 404) {\n throw new ScrapeError(`Page not found: ${url}`, 'NOT_FOUND', 404);\n }\n if (response.status === 403 || response.status === 401) {\n throw new ScrapeError(`Access blocked: ${url}`, 'BLOCKED', response.status);\n }\n if (response.status === 429) {\n throw new ScrapeError(`Rate limited: ${url}`, 'BLOCKED', 429);\n }\n throw new ScrapeError(\n `HTTP error ${response.status}: ${url}`,\n 'FETCH_FAILED',\n response.status\n );\n }\n\n const contentType = response.headers.get('content-type') || '';\n\n // Ensure we're getting HTML\n if (!contentType.includes('text/html') && !contentType.includes('application/xhtml')) {\n throw new ScrapeError(`Unexpected content type: ${contentType}`, 'PARSE_ERROR');\n }\n\n const html = await response.text();\n\n // Convert headers to plain object\n const responseHeaders: Record<string, string> = {};\n response.headers.forEach((value, key) => {\n responseHeaders[key] = value;\n });\n\n return {\n html,\n finalUrl: response.url,\n statusCode: response.status,\n contentType,\n headers: responseHeaders,\n };\n } catch (error) {\n clearTimeout(timeoutId);\n\n // Re-throw ScrapeErrors\n if (error instanceof ScrapeError) {\n throw error;\n }\n\n // Handle abort (timeout)\n if (error instanceof Error && error.name === 'AbortError') {\n throw new ScrapeError(`Request timed out after ${timeout}ms`, 'TIMEOUT');\n }\n\n // Handle other errors\n if (error instanceof Error) {\n throw new ScrapeError(`Fetch failed: ${error.message}`, 'FETCH_FAILED', undefined, error);\n }\n\n throw new ScrapeError('Unknown fetch error', 'FETCH_FAILED');\n }\n }\n}\n\n/**\n * Default fetcher instance\n */\nexport const defaultFetcher = new NativeFetcher();\n","import { DEFAULT_USER_AGENT } from './types.js';\n\n/**\n * Result of robots.txt check\n */\nexport interface RobotsCheckResult {\n allowed: boolean;\n reason?: string;\n}\n\n/**\n * Parsed robots.txt rules\n */\ninterface RobotsRules {\n disallow: string[];\n allow: string[];\n}\n\n/**\n * Check if URL is allowed by robots.txt\n *\n * @param url - The URL to check\n * @param userAgent - User agent to check rules for\n * @returns Whether the URL is allowed and optional reason\n */\nexport async function checkRobotsTxt(\n url: string,\n userAgent: string = DEFAULT_USER_AGENT\n): Promise<RobotsCheckResult> {\n try {\n const parsedUrl = new URL(url);\n const robotsUrl = `${parsedUrl.protocol}//${parsedUrl.host}/robots.txt`;\n\n // Fetch robots.txt with short timeout\n const response = await fetch(robotsUrl, {\n headers: { 'User-Agent': userAgent },\n signal: AbortSignal.timeout(5000),\n });\n\n // No robots.txt = allowed\n if (!response.ok) {\n return { allowed: true };\n }\n\n const robotsTxt = await response.text();\n const rules = parseRobotsTxt(robotsTxt, userAgent);\n\n const path = parsedUrl.pathname + parsedUrl.search;\n const allowed = isPathAllowed(rules, path);\n\n return {\n allowed,\n reason: allowed 
? undefined : 'Blocked by robots.txt',\n };\n } catch {\n // On error (timeout, network issue), assume allowed\n return { allowed: true };\n }\n}\n\n/**\n * Parse robots.txt content for a specific user agent\n */\nfunction parseRobotsTxt(content: string, userAgent: string): RobotsRules {\n const rules: RobotsRules = { disallow: [], allow: [] };\n const lines = content.split('\\n');\n\n // Extract the bot name from user agent (first word or before /)\n const botName = userAgent.split(/[\\s/]/)[0]?.toLowerCase() || '';\n\n let currentAgent = '';\n let isMatchingAgent = false;\n let hasFoundSpecificAgent = false;\n\n for (const rawLine of lines) {\n const line = rawLine.trim();\n\n // Skip empty lines and comments\n if (!line || line.startsWith('#')) {\n continue;\n }\n\n // Parse directive\n const colonIndex = line.indexOf(':');\n if (colonIndex === -1) continue;\n\n const directive = line.slice(0, colonIndex).trim().toLowerCase();\n const value = line.slice(colonIndex + 1).trim();\n\n if (directive === 'user-agent') {\n currentAgent = value.toLowerCase();\n // Check if this agent applies to us\n isMatchingAgent =\n currentAgent === '*' || currentAgent === botName || botName.includes(currentAgent);\n\n // Prefer specific agent rules over wildcard\n if (currentAgent !== '*' && isMatchingAgent) {\n hasFoundSpecificAgent = true;\n // Reset rules if we found a more specific match\n rules.disallow = [];\n rules.allow = [];\n }\n } else if (isMatchingAgent && (!hasFoundSpecificAgent || currentAgent !== '*')) {\n if (directive === 'disallow' && value) {\n rules.disallow.push(value);\n } else if (directive === 'allow' && value) {\n rules.allow.push(value);\n }\n }\n }\n\n return rules;\n}\n\n/**\n * Check if a path is allowed based on robots.txt rules\n */\nfunction isPathAllowed(rules: RobotsRules, path: string): boolean {\n // No rules = allowed\n if (rules.disallow.length === 0 && rules.allow.length === 0) {\n return true;\n }\n\n // Check allow rules first (they take precedence for more specific matches)\n for (const pattern of rules.allow) {\n if (matchesPattern(path, pattern)) {\n return true;\n }\n }\n\n // Check disallow rules\n for (const pattern of rules.disallow) {\n if (matchesPattern(path, pattern)) {\n return false;\n }\n }\n\n // Default: allowed\n return true;\n}\n\n/**\n * Check if a path matches a robots.txt pattern\n */\nfunction matchesPattern(path: string, pattern: string): boolean {\n // Empty pattern matches nothing\n if (!pattern) return false;\n\n // Handle wildcard at end\n if (pattern.endsWith('*')) {\n return path.startsWith(pattern.slice(0, -1));\n }\n\n // Handle $ anchor\n if (pattern.endsWith('$')) {\n return path === pattern.slice(0, -1);\n }\n\n // Handle wildcards in middle\n if (pattern.includes('*')) {\n const regex = new RegExp(`^${pattern.replace(/\\*/g, '.*').replace(/\\?/g, '\\\\?')}.*`);\n return regex.test(path);\n }\n\n // Simple prefix match\n return path.startsWith(pattern);\n}\n","import { createDefaultExtractors, sortExtractors } from '@/extractors/index.js';\nimport { checkRobotsTxt, defaultFetcher } from '@/fetchers/index.js';\nimport { enhance, extract } from '@/llm/enhancer.js';\nimport { extractDomain, isValidUrl, normalizeUrl } from '@/utils/url.js';\nimport { createExtractionContext, mergeResults, preloadJsdom } from './context.js';\nimport { ScrapeError } from './errors.js';\nimport type { Extractor, ScrapedData, ScrapeOptions } from './types.js';\n\n/**\n * Scrape a URL and extract metadata and content.\n *\n * @param url - The URL to scrape\n 
* @param options - Scraping options\n * @returns Scraped data with metadata and content\n *\n * @example\n * ```ts\n * const result = await scrape('https://example.com/article');\n * console.log(result.title, result.content);\n * ```\n */\nexport async function scrape(url: string, options: ScrapeOptions = {}): Promise<ScrapedData> {\n const startTime = Date.now();\n\n // Validate URL\n if (!isValidUrl(url)) {\n throw new ScrapeError('Invalid URL provided', 'INVALID_URL');\n }\n\n // Normalize URL\n const normalizedUrl = normalizeUrl(url);\n\n // Check robots.txt if requested\n if (options.respectRobots) {\n const robotsResult = await checkRobotsTxt(normalizedUrl, options.userAgent);\n if (!robotsResult.allowed) {\n throw new ScrapeError(\n `URL blocked by robots.txt: ${robotsResult.reason || 'disallowed'}`,\n 'ROBOTS_BLOCKED'\n );\n }\n }\n\n // Fetch the page\n const fetcher = options.fetcher ?? defaultFetcher;\n const fetchResult = await fetcher.fetch(normalizedUrl, {\n timeout: options.timeout,\n userAgent: options.userAgent,\n });\n\n // Preload JSDOM for content extraction (async dynamic import)\n await preloadJsdom();\n\n // Create extraction context\n let context = createExtractionContext(\n normalizedUrl,\n fetchResult.finalUrl,\n fetchResult.html,\n options\n );\n\n // Prepare extractors\n let extractors: Extractor[];\n if (options.replaceDefaultExtractors) {\n extractors = options.extractors ?? [];\n } else {\n const defaults = createDefaultExtractors();\n extractors = options.extractors ? [...defaults, ...options.extractors] : defaults;\n }\n\n // Sort by priority and run extractors\n extractors = sortExtractors(extractors);\n\n for (const extractor of extractors) {\n try {\n const extracted = await extractor.extract(context);\n context = mergeResults(context, extracted);\n } catch (error) {\n // Log error but continue with other extractors\n console.error(`Extractor \"${extractor.name}\" failed:`, error);\n // Store error in results\n context = mergeResults(context, {\n error: context.results.error\n ? `${context.results.error}; ${extractor.name}: ${error instanceof Error ? error.message : String(error)}`\n : `${extractor.name}: ${error instanceof Error ? 
error.message : String(error)}`,\n });\n }\n }\n\n // Build intermediate result for LLM enhancement\n const intermediateResult: ScrapedData = {\n url: normalizedUrl,\n canonicalUrl: context.results.canonicalUrl || fetchResult.finalUrl,\n domain: extractDomain(fetchResult.finalUrl),\n title: context.results.title || '',\n description: context.results.description || '',\n image: context.results.image,\n favicon: context.results.favicon,\n content: context.results.content || '',\n textContent: context.results.textContent || '',\n excerpt: context.results.excerpt || '',\n wordCount: context.results.wordCount || 0,\n author: context.results.author,\n publishedAt: context.results.publishedAt,\n modifiedAt: context.results.modifiedAt,\n siteName: context.results.siteName,\n language: context.results.language,\n contentType: context.results.contentType || 'unknown',\n keywords: context.results.keywords || [],\n jsonLd: context.results.jsonLd,\n links: context.results.links,\n custom: context.results.custom,\n scrapedAt: new Date().toISOString(),\n scrapeTimeMs: 0,\n error: context.results.error,\n };\n\n // LLM Enhancement\n if (options.llm && options.enhance && options.enhance.length > 0) {\n try {\n const enhanced = await enhance(intermediateResult, options.llm, options.enhance);\n Object.assign(intermediateResult, enhanced);\n } catch (error) {\n console.error('LLM enhancement failed:', error);\n intermediateResult.error = intermediateResult.error\n ? `${intermediateResult.error}; LLM: ${error instanceof Error ? error.message : String(error)}`\n : `LLM: ${error instanceof Error ? error.message : String(error)}`;\n }\n }\n\n // LLM Extraction\n if (options.llm && options.extract) {\n try {\n const extracted = await extract(intermediateResult, options.llm, options.extract);\n intermediateResult.extracted = extracted as Record<string, unknown>;\n } catch (error) {\n console.error('LLM extraction failed:', error);\n intermediateResult.error = intermediateResult.error\n ? `${intermediateResult.error}; LLM extraction: ${error instanceof Error ? error.message : String(error)}`\n : `LLM extraction: ${error instanceof Error ? error.message : String(error)}`;\n }\n }\n\n // Build final result with timing\n const scrapeTimeMs = Date.now() - startTime;\n\n const result: ScrapedData = {\n ...intermediateResult,\n scrapeTimeMs,\n };\n\n return result;\n}\n\n/**\n * Scrape from raw HTML string (no fetch).\n *\n * @param html - The HTML content\n * @param url - The URL (for resolving relative links)\n * @param options - Scraping options\n * @returns Scraped data with metadata and content\n *\n * @example\n * ```ts\n * const html = await fetchSomehow('https://example.com');\n * const result = await scrapeHtml(html, 'https://example.com');\n * ```\n */\nexport async function scrapeHtml(\n html: string,\n url: string,\n options: ScrapeOptions = {}\n): Promise<ScrapedData> {\n const startTime = Date.now();\n\n // Validate URL\n if (!isValidUrl(url)) {\n throw new ScrapeError('Invalid URL provided', 'INVALID_URL');\n }\n\n const normalizedUrl = normalizeUrl(url);\n\n // Preload JSDOM for content extraction (async dynamic import)\n await preloadJsdom();\n\n // Create extraction context\n let context = createExtractionContext(normalizedUrl, normalizedUrl, html, options);\n\n // Prepare extractors\n let extractors: Extractor[];\n if (options.replaceDefaultExtractors) {\n extractors = options.extractors ?? [];\n } else {\n const defaults = createDefaultExtractors();\n extractors = options.extractors ? 
[...defaults, ...options.extractors] : defaults;\n }\n\n // Sort by priority and run extractors\n extractors = sortExtractors(extractors);\n\n for (const extractor of extractors) {\n try {\n const extracted = await extractor.extract(context);\n context = mergeResults(context, extracted);\n } catch (error) {\n console.error(`Extractor \"${extractor.name}\" failed:`, error);\n context = mergeResults(context, {\n error: context.results.error\n ? `${context.results.error}; ${extractor.name}: ${error instanceof Error ? error.message : String(error)}`\n : `${extractor.name}: ${error instanceof Error ? error.message : String(error)}`,\n });\n }\n }\n\n const scrapeTimeMs = Date.now() - startTime;\n const domain = extractDomain(normalizedUrl);\n\n const result: ScrapedData = {\n url: normalizedUrl,\n canonicalUrl: context.results.canonicalUrl || normalizedUrl,\n domain,\n title: context.results.title || '',\n description: context.results.description || '',\n image: context.results.image,\n favicon: context.results.favicon,\n content: context.results.content || '',\n textContent: context.results.textContent || '',\n excerpt: context.results.excerpt || '',\n wordCount: context.results.wordCount || 0,\n author: context.results.author,\n publishedAt: context.results.publishedAt,\n modifiedAt: context.results.modifiedAt,\n siteName: context.results.siteName,\n language: context.results.language,\n contentType: context.results.contentType || 'unknown',\n keywords: context.results.keywords || [],\n jsonLd: context.results.jsonLd,\n links: context.results.links,\n summary: context.results.summary,\n suggestedTags: context.results.suggestedTags,\n entities: context.results.entities,\n extracted: context.results.extracted,\n custom: context.results.custom,\n scrapedAt: new Date().toISOString(),\n scrapeTimeMs,\n error: context.results.error,\n };\n\n return 
result;\n}\n"],"mappings":";;;;;;AAKA,IAAIA,cAA6C;;;;AAKjD,eAAsB,eAA8B;AAClD,KAAI,CAAC,YACH,eAAc,MAAM,OAAO;;;;;;;;AAU/B,SAAgB,wBACd,KACA,UACA,MACA,SACmB;CAEnB,IAAIC,WAA4B;AAKhC,QAAO;EACL;EACA;EACA;EACA,GANoB,QAAQ,KAAK,KAAK;EAOtC;EACA,SAAS,EAAE;EAEX,cAAwB;AAEtB,OAAI,CAAC,UAAU;AACb,QAAI,CAAC,YACH,OAAM,IAAI,MAAM,uEAAuE;AAGzF,eADY,IAAI,YAAY,MAAM,MAAM,EAAE,KAAK,UAAU,CAAC,CAC3C,OAAO;;AAExB,UAAO;;EAEV;;;;;AAMH,SAAgB,aACd,SACA,WACmB;AACnB,QAAO;EACL,GAAG;EACH,SAAS;GACP,GAAG,QAAQ;GACX,GAAG;GAEH,QACE,UAAU,UAAU,QAAQ,QAAQ,SAChC;IAAE,GAAG,QAAQ,QAAQ;IAAQ,GAAG,UAAU;IAAQ,GAClD;GACP;EACF;;;;;ACrEH,MAAM,WAAW,IAAI,gBAAgB;CACnC,cAAc;CACd,gBAAgB;CAChB,kBAAkB;CAClB,aAAa;CACb,iBAAiB;CACjB,WAAW;CACZ,CAAC;AAGF,SAAS,OAAO;CAAC;CAAU;CAAS;CAAY;CAAU;CAAO;CAAS,CAAC;;;;;AAM3E,IAAa,mBAAb,MAAmD;CACjD,AAAS,OAAO;CAChB,AAAS,WAAW;CAEpB,MAAM,QAAQ,SAA2D;EACvE,MAAM,EAAE,YAAY;AAGpB,MAAI,QAAQ,mBAAmB,MAC7B,QAAO,EAAE;EASX,MAAM,UADS,IAAI,YAJF,QAAQ,aAAa,CACX,UAAU,KAAK,CAGD,CAClB,OAAO;AAE9B,MAAI,CAAC,WAAW,CAAC,QAAQ,QAEvB,QAAO,KAAK,gBAAgB,QAAQ;EAItC,IAAI,UAAU,SAAS,SAAS,QAAQ,QAAQ;EAGhD,MAAM,YAAY,QAAQ,oBAAoB;AAC9C,MAAI,QAAQ,SAAS,UACnB,WAAU,GAAG,QAAQ,MAAM,GAAG,UAAU,CAAC;EAI3C,MAAM,eAAe,QAAQ,eAAe,IAAI,MAAM;EAGtD,MAAM,UAAU,KAAK,cAAc,YAAY;EAG/C,MAAM,YAAY,YAAY,MAAM,MAAM,CAAC,OAAO,QAAQ,CAAC;EAG3D,MAAM,cAAc,KAAK,kBAAkB,QAAQ;AAEnD,SAAO;GACL;GACA;GACA,SAAS,QAAQ,WAAW;GAC5B;GACA;GAEA,OAAO,QAAQ,SAAS;GACxB,QAAQ,QAAQ,UAAU;GAC1B,UAAU,QAAQ,YAAY;GAC/B;;CAGH,AAAQ,gBAAgB,SAAkD;EACxE,MAAM,EAAE,MAAM;EAGd,MAAM,WAAW,EAAE,OAAO,CAAC,MAAM,IAAI;EACrC,MAAM,UAAU,SAAS,SAAS,SAAS;EAC3C,MAAM,cAAc,EAAE,OAAO,CAAC,MAAM,CAAC,QAAQ,QAAQ,IAAI,CAAC,MAAM;AAEhE,SAAO;GACL,SAAS,QAAQ,MAAM,GAAG,QAAQ,QAAQ,oBAAoB,IAAM;GACpE;GACA,SAAS,KAAK,cAAc,YAAY;GACxC,WAAW,YAAY,MAAM,MAAM,CAAC,OAAO,QAAQ,CAAC;GACpD,aAAa;GACd;;CAGH,AAAQ,cAAc,MAAc,YAAY,KAAa;AAC3D,MAAI,KAAK,UAAU,UACjB,QAAO;EAGT,MAAM,YAAY,KAAK,MAAM,GAAG,UAAU;EAC1C,MAAM,YAAY,UAAU,YAAY,IAAI;AAC5C,SAAO,GAAG,YAAY,IAAI,UAAU,MAAM,GAAG,UAAU,GAAG,UAAU;;CAGtE,AAAQ,kBAAkB,SAAyC;EACjE,MAAM,EAAE,GAAG,aAAa;EACxB,MAAM,MAAM,SAAS,aAAa;AAGlC,MAAI,IAAI,SAAS,aAAa,IAAI,CAAC,IAAI,SAAS,SAAS,IAAI,CAAC,IAAI,SAAS,WAAW,EAEpF;OADiB,EAAE,6BAA2B,CAAC,KAAK,UAAU,KAC7C,YAAY,IAAI,MAAM,gCAAgC,CACrE,QAAO;;AAKX,MAAI,IAAI,SAAS,qBAAqB,CACpC,QAAO;AAIT,MAAI,IAAI,SAAS,oBAAoB,CACnC,QAAO;AAIT,MACE,IAAI,SAAS,SAAS,IACtB,IAAI,SAAS,gBAAgB,IAC7B,IAAI,SAAS,kBAAkB,CAE/B,QAAO;AAIT,MAAI,IAAI,SAAS,cAAc,IAAI,IAAI,SAAS,YAAY,IAAI,IAAI,SAAS,WAAW,CACtF,QAAO;EAIT,MAAM,WAAW,EAAE,yDAAqD,CAAC,SAAS;EAClF,MAAM,eAAe,EAAE,gEAA0D,CAAC,SAAS;AAC3F,MAAI,YAAY,aACd,QAAO;EAIT,MAAM,SAAS,EAAE,6BAA2B,CAAC,KAAK,UAAU,EAAE,aAAa;AAC3E,MAAI,WAAW,aAAa,WAAW,UAAU,WAAW,OAC1D,QAAO;EAIT,MAAM,gBAAgB,EAAE,UAAU,CAAC,SAAS;EAC5C,MAAM,cAAc,EAAE,yDAAqD,CAAC,SAAS;AACrF,MAAI,iBAAiB,YACnB,QAAO;AAGT,SAAO;;;;;;;;;AChKX,MAAM,kBAAkB;CACtB;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACA;CACD;;;;AAKD,SAAgB,WAAW,KAAsB;AAC/C,KAAI;EACF,MAAM,SAAS,IAAI,IAAI,IAAI;AAC3B,SAAO,CAAC,SAAS,SAAS,CAAC,SAAS,OAAO,SAAS;SAC9C;AACN,SAAO;;;;;;AAOX,SAAgB,aAAa,KAAqB;AAChD,KAAI;EACF,MAAM,SAAS,IAAI,IAAI,IAAI;AAG3B,OAAK,MAAM,SAAS,gBAClB,QAAO,aAAa,OAAO,MAAM;EAInC,IAAI,aAAa,OAAO,UAAU;AAClC,MAAI,WAAW,SAAS,IAAI,IAAI,OAAO,aAAa,IAClD,cAAa,WAAW,MAAM,GAAG,GAAG;AAGtC,SAAO;SACD;AACN,SAAO;;;;;;AAOX,SAAgB,cAAc,KAAqB;AACjD,KAAI;AAEF,SADe,IAAI,IAAI,IAAI,CACb,SAAS,QAAQ,UAAU,GAAG;SACtC;AACN,SAAO;;;;;;AAOX,SAAgB,WAAW,KAAgC,SAAqC;AAC9F,KAAI,CAAC,IAAK,QAAO;AAEjB,KAAI;AACF,SAAO,IAAI,IAAI,KAAK,QAAQ,CAAC;SACvB;AACN,SAAO;;;;;;AAOX,SAAgB,cAAc,KAAa,YAA6B;AACtE,KAAI;AAGF,SAFe,IAAI,IAAI,IAAI,CACF,SAAS,QAAQ,UAAU,GAAG,KAClC;SACf;AACN,SAAO;;;;;;AAOX,SAAgB,YAAY,
KAAqB;AAC/C,KAAI;AACF,SAAO,IAAI,IAAI,IAAI,CAAC;SACd;AACN,SAAO;;;;;;AAOX,SAAgB,QAAQ,KAAqB;AAC3C,KAAI;AACF,SAAO,IAAI,IAAI,IAAI,CAAC;SACd;AACN,SAAO;;;;;;AAOX,SAAgB,kBAAkB,KAAa,SAA0B;AACvE,KAAI,CAAC,QAAQ,SAAS,IAAI,CACxB,QAAO,QAAQ,WAAW,IAAI,WAAW,QAAQ;CAGnD,MAAM,eAAe,QAAQ,QAAQ,sBAAsB,OAAO,CAAC,QAAQ,OAAO,KAAK;AAEvF,yBAAO,IAAI,OAAO,IAAI,eAAe,EAAC,KAAK,IAAI;;;;;;;;;AC3HjD,IAAa,mBAAb,MAAmD;CACjD,AAAS,OAAO;CAChB,AAAS,WAAW;CAEpB,MAAM,QAAQ,SAA2D;EACvE,MAAM,EAAE,GAAG,aAAa;AAgBxB,OAAK,MAAM,YAbc;GACvB;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACD,EAEwC;GACvC,MAAM,OAAO,EAAE,SAAS,CAAC,OAAO,CAAC,KAAK,OAAO;AAC7C,OAAI,KACF,QAAO,EACL,SAAS,WAAW,UAAU,KAAK,EACpC;;AAKL,MAAI;GACF,MAAM,MAAM,IAAI,IAAI,SAAS;AAC7B,UAAO,EACL,SAAS,GAAG,IAAI,SAAS,IAAI,IAAI,KAAK,eACvC;UACK;AACN,UAAO,EAAE;;;;;;;;;;;ACtCf,IAAa,kBAAb,MAAkD;CAChD,AAAS,OAAO;CAChB,AAAS,WAAW;CAEpB,MAAM,QAAQ,SAA2D;EACvE,MAAM,EAAE,MAAM;EACd,MAAMC,SAAoC,EAAE;AAG5C,IAAE,uCAAqC,CAAC,MAAM,GAAG,OAAO;GACtD,MAAM,UAAU,EAAE,GAAG,CAAC,MAAM;AAC5B,OAAI,CAAC,QAAS;AAEd,OAAI;IACF,MAAM,SAAS,KAAK,MAAM,QAAQ;AAElC,QAAI,MAAM,QAAQ,OAAO,CACvB,QAAO,KAAK,GAAG,OAAO;aACb,OAAO,WAAW,YAAY,WAAW,KAClD,QAAO,KAAK,OAAO;WAEf;IAGR;AAEF,MAAI,OAAO,WAAW,EACpB,QAAO,EAAE;AAMX,SAAO;GACL;GACA,GAJe,KAAK,gBAAgB,OAAO;GAK5C;;CAGH,AAAQ,gBAAgB,QAAyD;EAC/E,MAAMC,SAA+B,EAAE;AAEvC,OAAK,MAAM,QAAQ,QAAQ;GACzB,MAAM,OAAO,KAAK,QAAQ,KAAK;AAG/B,OAAI,MAAM,MAAM,2CAA2C,EAAE;AAC3D,WAAO,QAAQ,OAAO,SAAS,KAAK,UAAU,MAAM,YAAY,OAAO;AACvE,WAAO,cAAc,OAAO,eAAe,KAAK,UAAU,MAAM,cAAc;AAC9E,WAAO,SAAS,OAAO,UAAU,KAAK,UAAU,KAAK;AACrD,WAAO,cAAc,OAAO,eAAe,KAAK,UAAU,MAAM,gBAAgB;AAChF,WAAO,aAAa,OAAO,cAAc,KAAK,UAAU,MAAM,eAAe;AAC7E,WAAO,QAAQ,OAAO,SAAS,KAAK,SAAS,KAAK;;AAIpD,OAAI,SAAS,eACX,QAAO,WAAW,OAAO,YAAY,KAAK,UAAU,MAAM,OAAO;AAInE,OAAI,SAAS,WAAW;AACtB,WAAO,QAAQ,OAAO,SAAS,KAAK,UAAU,MAAM,OAAO;AAC3D,WAAO,cAAc,OAAO,eAAe,KAAK,UAAU,MAAM,cAAc;AAC9E,WAAO,QAAQ,OAAO,SAAS,KAAK,SAAS,KAAK;;AAIpD,OAAI,SAAS,uBAAuB;AAClC,WAAO,QAAQ,OAAO,SAAS,KAAK,UAAU,MAAM,OAAO;AAC3D,WAAO,cAAc,OAAO,eAAe,KAAK,UAAU,MAAM,cAAc;;GAIhF,MAAM,WAAW,KAAK,YAAY,KAAK;AACvC,OAAI,SAAS,SAAS,EACpB,QAAO,WAAW,CAAC,GAAI,OAAO,YAAY,EAAE,EAAG,GAAG,SAAS;;AAK/D,MAAI,OAAO,SACT,QAAO,WAAW,CAAC,GAAG,IAAI,IAAI,OAAO,SAAS,CAAC;AAGjD,SAAO;;CAGT,AAAQ,QAAQ,MAAmD;EACjE,MAAM,OAAO,KAAK;AAClB,MAAI,OAAO,SAAS,SAAU,QAAO;AACrC,MAAI,MAAM,QAAQ,KAAK,CAAE,QAAO,KAAK;;CAIvC,AAAQ,UAAU,MAA+B,GAAG,MAAoC;AACtF,OAAK,MAAM,OAAO,MAAM;GACtB,MAAM,QAAQ,KAAK;AACnB,OAAI,OAAO,UAAU,SAAU,QAAO;AACtC,OAAI,OAAO,UAAU,YAAY,UAAU,QAAQ,YAAY,MAC7D,QAAO,OAAQ,MAAgC,UAAU;;;CAM/D,AAAQ,UAAU,MAAmD;EACnE,MAAM,SAAS,KAAK;AACpB,MAAI,OAAO,WAAW,SAAU,QAAO;AAEvC,MAAI,MAAM,QAAQ,OAAO,CAMvB,QALc,OACX,KAAK,MACJ,OAAO,MAAM,WAAW,IAAI,KAAK,UAAU,GAA8B,OAAO,CACjF,CACA,OAAO,QAAQ,CACL,KAAK,KAAK,IAAI;AAE7B,MAAI,OAAO,WAAW,YAAY,WAAW,MAAM;GACjD,MAAM,YAAY;AAClB,UAAO,KAAK,UAAU,WAAW,OAAO,IAAI;;;CAKhD,AAAQ,SAAS,MAAmD;EAClE,MAAM,QAAQ,KAAK;AACnB,MAAI,OAAO,UAAU,SAAU,QAAO;AAEtC,MAAI,MAAM,QAAQ,MAAM,IAAI,MAAM,SAAS,EACzC,QAAO,KAAK,SAAS,EAAE,OAAO,MAAM,IAAI,CAAC;AAE3C,MAAI,OAAO,UAAU,YAAY,UAAU,MAAM;GAC/C,MAAM,WAAW;AACjB,UAAO,KAAK,UAAU,UAAU,OAAO,aAAa,IAAI;;;CAK5D,AAAQ,YAAY,MAAyC;EAC3D,MAAM,WAAW,KAAK;AACtB,MAAI,OAAO,aAAa,SACtB,QAAO,SACJ,MAAM,IAAI,CACV,KAAK,MAAM,EAAE,MAAM,CAAC,CACpB,OAAO,QAAQ;AAEpB,MAAI,MAAM,QAAQ,SAAS,CACzB,QAAO,SAAS,QAAQ,MAAmB,OAAO,MAAM,SAAS;AAEnE,SAAO,EAAE;;;;;;;;;;ACrJb,IAAa,iBAAb,MAAiD;CAC/C,AAAS,OAAO;CAChB,AAAS,WAAW;CAEpB,MAAM,QAAQ,SAA2D;EACvE,MAAM,EAAE,GAAG,aAAa;EACxB,MAAMC,QAAyB,EAAE;EACjC,MAAM,uBAAO,IAAI,KAAa;EAG9B,MAAM,cAAc,EAAE,iCAA+B,CAAC,OAAO;EAC7D,MAAM,YAAY,YAAY,SAAS,IAAI,cAAc,EAAE,OAAO;EAGlE,MAAM,gBACJ;AAEF,YAAU,KAAK,UAAU,CAAC,MAAM,GAAG,OAAO;GACxC,MAAM,MAAM,EAAE,GAAG;A
AGjB,OAAI,IAAI,QAAQ,cAAc,CAAC,SAAS,EACtC;GAGF,MAAM,OAAO,IAAI,KAAK,OAAO;AAC7B,OAAI,CAAC,KAAM;AAGX,OACE,KAAK,WAAW,IAAI,IACpB,KAAK,WAAW,cAAc,IAC9B,KAAK,WAAW,UAAU,IAC1B,KAAK,WAAW,OAAO,CAEvB;GAIF,MAAM,cAAc,WAAW,MAAM,SAAS;AAC9C,OAAI,CAAC,eAAe,CAAC,WAAW,YAAY,CAAE;AAG9C,OAAI,KAAK,IAAI,YAAY,CAAE;AAC3B,QAAK,IAAI,YAAY;GAGrB,MAAM,OAAO,IAAI,MAAM,CAAC,MAAM,IAAI,IAAI,KAAK,QAAQ,IAAI,IAAI,KAAK,aAAa,IAAI;AAGjF,OAAI,KAAK,SAAS,EAAG;GAErB,MAAM,aAAa,cAAc,SAAS;AAC1C,SAAM,KAAK;IACT,KAAK;IACL,MAAM,KAAK,MAAM,GAAG,IAAI;IACxB,YAAY,cAAc,aAAa,WAAW;IACnD,CAAC;IACF;AAEF,SAAO,EACL,OAAO,MAAM,MAAM,GAAG,IAAI,EAC3B;;;;;;;;;;AC/DL,IAAa,gBAAb,MAAgD;CAC9C,AAAS,OAAO;CAChB,AAAS,WAAW;CAEpB,MAAM,QAAQ,SAA2D;EACvE,MAAM,EAAE,MAAM;EAGd,MAAM,WAAW,mBAA+C;AAK9D,WAHE,EAAE,cAAc,eAAe,IAAI,CAAC,KAAK,UAAU,IACnD,EAAE,kBAAkB,eAAe,IAAI,CAAC,KAAK,UAAU,IACvD,EAAE,kBAAkB,eAAe,IAAI,CAAC,KAAK,UAAU,GAC3C,MAAM,IAAI;;EAI1B,MAAM,QACJ,QAAQ,WAAW,IAAI,QAAQ,gBAAgB,IAAI,EAAE,QAAQ,CAAC,OAAO,CAAC,MAAM,CAAC,MAAM,IAAI;EAGzF,MAAM,cACJ,QAAQ,iBAAiB,IAAI,QAAQ,sBAAsB,IAAI,QAAQ,cAAc,IAAI;EAG3F,MAAM,QACJ,QAAQ,WAAW,IAAI,QAAQ,gBAAgB,IAAI,QAAQ,oBAAoB,IAAI;EAGrF,MAAM,eACJ,EAAE,0BAAwB,CAAC,KAAK,OAAO,IAAI,QAAQ,SAAS,IAAI,QAAQ;EAG1E,MAAM,SACJ,QAAQ,SAAS,IACjB,QAAQ,iBAAiB,IACzB,QAAQ,kBAAkB,IAC1B,EAAE,mBAAiB,CAAC,OAAO,CAAC,MAAM,CAAC,MAAM,IACzC;EAGF,MAAM,WAAW,QAAQ,eAAe,IAAI,QAAQ,mBAAmB,IAAI;EAG3E,MAAM,cACJ,QAAQ,yBAAyB,IACjC,QAAQ,gBAAgB,IACxB,QAAQ,OAAO,IACf,EAAE,iBAAiB,CAAC,OAAO,CAAC,KAAK,WAAW,IAC5C;EAEF,MAAM,aAAa,QAAQ,wBAAwB,IAAI,QAAQ,eAAe,IAAI;EAGlF,MAAM,WACJ,EAAE,OAAO,CAAC,KAAK,OAAO,IAAI,QAAQ,YAAY,IAAI,QAAQ,WAAW,IAAI;EAG3E,MAAM,cAAc,QAAQ,WAAW,IAAI,QAAQ,cAAc,IAAI;AAQrE,SAAO;GACL;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA;GACA,UAjBe,cACb,YACG,MAAM,IAAI,CACV,KAAK,MAAM,EAAE,MAAM,CAAC,CACpB,OAAO,QAAQ,GAClB,EAAE;GAaL;;;;;;;;;;ACjEL,SAAgB,0BAAuC;AACrD,QAAO;EACL,IAAI,eAAe;EACnB,IAAI,iBAAiB;EACrB,IAAI,kBAAkB;EACtB,IAAI,kBAAkB;EACtB,IAAI,gBAAgB;EACrB;;;;;AAMH,SAAgB,eAAe,YAAsC;AACnE,QAAO,CAAC,GAAG,WAAW,CAAC,MAAM,GAAG,OAAO,EAAE,YAAY,MAAM,EAAE,YAAY,GAAG;;;;;;;;ACoB9E,MAAa,qBACX;;;;AAKF,MAAa,kBAAkB;;;;;;;;AC7C/B,IAAa,gBAAb,MAA8C;CAC5C,AAAS,OAAO;CAEhB,MAAM,MAAM,KAAa,UAAwB,EAAE,EAAwB;EACzE,MAAM,EAAE,UAAU,iBAAiB,YAAY,oBAAoB,UAAU,EAAE,KAAK;EAGpF,IAAIC;AACJ,MAAI;AACF,eAAY,IAAI,IAAI,IAAI;UAClB;AACN,SAAM,IAAI,YAAY,gBAAgB,OAAO,cAAc;;AAI7D,MAAI,CAAC,CAAC,SAAS,SAAS,CAAC,SAAS,UAAU,SAAS,CACnD,OAAM,IAAI,YAAY,qBAAqB,UAAU,YAAY,cAAc;EAIjF,MAAM,aAAa,IAAI,iBAAiB;EACxC,MAAM,YAAY,iBAAiB,WAAW,OAAO,EAAE,QAAQ;AAE/D,MAAI;GACF,MAAM,WAAW,MAAM,MAAM,KAAK;IAChC,QAAQ,WAAW;IACnB,SAAS;KACP,cAAc;KACd,QAAQ;KACR,mBAAmB;KACnB,GAAG;KACJ;IACD,UAAU;IACX,CAAC;AAEF,gBAAa,UAAU;AAGvB,OAAI,CAAC,SAAS,IAAI;AAChB,QAAI,SAAS,WAAW,IACtB,OAAM,IAAI,YAAY,mBAAmB,OAAO,aAAa,IAAI;AAEnE,QAAI,SAAS,WAAW,OAAO,SAAS,WAAW,IACjD,OAAM,IAAI,YAAY,mBAAmB,OAAO,WAAW,SAAS,OAAO;AAE7E,QAAI,SAAS,WAAW,IACtB,OAAM,IAAI,YAAY,iBAAiB,OAAO,WAAW,IAAI;AAE/D,UAAM,IAAI,YACR,cAAc,SAAS,OAAO,IAAI,OAClC,gBACA,SAAS,OACV;;GAGH,MAAM,cAAc,SAAS,QAAQ,IAAI,eAAe,IAAI;AAG5D,OAAI,CAAC,YAAY,SAAS,YAAY,IAAI,CAAC,YAAY,SAAS,oBAAoB,CAClF,OAAM,IAAI,YAAY,4BAA4B,eAAe,cAAc;GAGjF,MAAM,OAAO,MAAM,SAAS,MAAM;GAGlC,MAAMC,kBAA0C,EAAE;AAClD,YAAS,QAAQ,SAAS,OAAO,QAAQ;AACvC,oBAAgB,OAAO;KACvB;AAEF,UAAO;IACL;IACA,UAAU,SAAS;IACnB,YAAY,SAAS;IACrB;IACA,SAAS;IACV;WACM,OAAO;AACd,gBAAa,UAAU;AAGvB,OAAI,iBAAiB,YACnB,OAAM;AAIR,OAAI,iBAAiB,SAAS,MAAM,SAAS,aAC3C,OAAM,IAAI,YAAY,2BAA2B,QAAQ,KAAK,UAAU;AAI1E,OAAI,iBAAiB,MACnB,OAAM,IAAI,YAAY,iBAAiB,MAAM,WAAW,gBAAgB,QAAW,MAAM;AAG3F,SAAM,IAAI,YAAY,uBAAuB,eAAe;;;;;;;AAQlE,MAAa,iBAAiB,IAAI,eAAe;;;;;;;;;;;AC3FjD,eAAsB,eACpB,KACA,YAAoB,oBACQ;AAC5B,KAAI;EACF,MAAM,YAAY,IAAI,IAAI,IAAI;EAC9B,MAAM
,YAAY,GAAG,UAAU,SAAS,IAAI,UAAU,KAAK;EAG3D,MAAM,WAAW,MAAM,MAAM,WAAW;GACtC,SAAS,EAAE,cAAc,WAAW;GACpC,QAAQ,YAAY,QAAQ,IAAK;GAClC,CAAC;AAGF,MAAI,CAAC,SAAS,GACZ,QAAO,EAAE,SAAS,MAAM;EAO1B,MAAM,UAAU,cAHF,eADI,MAAM,SAAS,MAAM,EACC,UAAU,EAErC,UAAU,WAAW,UAAU,OACF;AAE1C,SAAO;GACL;GACA,QAAQ,UAAU,SAAY;GAC/B;SACK;AAEN,SAAO,EAAE,SAAS,MAAM;;;;;;AAO5B,SAAS,eAAe,SAAiB,WAAgC;CACvE,MAAMC,QAAqB;EAAE,UAAU,EAAE;EAAE,OAAO,EAAE;EAAE;CACtD,MAAM,QAAQ,QAAQ,MAAM,KAAK;CAGjC,MAAM,UAAU,UAAU,MAAM,QAAQ,CAAC,IAAI,aAAa,IAAI;CAE9D,IAAI,eAAe;CACnB,IAAI,kBAAkB;CACtB,IAAI,wBAAwB;AAE5B,MAAK,MAAM,WAAW,OAAO;EAC3B,MAAM,OAAO,QAAQ,MAAM;AAG3B,MAAI,CAAC,QAAQ,KAAK,WAAW,IAAI,CAC/B;EAIF,MAAM,aAAa,KAAK,QAAQ,IAAI;AACpC,MAAI,eAAe,GAAI;EAEvB,MAAM,YAAY,KAAK,MAAM,GAAG,WAAW,CAAC,MAAM,CAAC,aAAa;EAChE,MAAM,QAAQ,KAAK,MAAM,aAAa,EAAE,CAAC,MAAM;AAE/C,MAAI,cAAc,cAAc;AAC9B,kBAAe,MAAM,aAAa;AAElC,qBACE,iBAAiB,OAAO,iBAAiB,WAAW,QAAQ,SAAS,aAAa;AAGpF,OAAI,iBAAiB,OAAO,iBAAiB;AAC3C,4BAAwB;AAExB,UAAM,WAAW,EAAE;AACnB,UAAM,QAAQ,EAAE;;aAET,oBAAoB,CAAC,yBAAyB,iBAAiB,MACxE;OAAI,cAAc,cAAc,MAC9B,OAAM,SAAS,KAAK,MAAM;YACjB,cAAc,WAAW,MAClC,OAAM,MAAM,KAAK,MAAM;;;AAK7B,QAAO;;;;;AAMT,SAAS,cAAc,OAAoB,MAAuB;AAEhE,KAAI,MAAM,SAAS,WAAW,KAAK,MAAM,MAAM,WAAW,EACxD,QAAO;AAIT,MAAK,MAAM,WAAW,MAAM,MAC1B,KAAI,eAAe,MAAM,QAAQ,CAC/B,QAAO;AAKX,MAAK,MAAM,WAAW,MAAM,SAC1B,KAAI,eAAe,MAAM,QAAQ,CAC/B,QAAO;AAKX,QAAO;;;;;AAMT,SAAS,eAAe,MAAc,SAA0B;AAE9D,KAAI,CAAC,QAAS,QAAO;AAGrB,KAAI,QAAQ,SAAS,IAAI,CACvB,QAAO,KAAK,WAAW,QAAQ,MAAM,GAAG,GAAG,CAAC;AAI9C,KAAI,QAAQ,SAAS,IAAI,CACvB,QAAO,SAAS,QAAQ,MAAM,GAAG,GAAG;AAItC,KAAI,QAAQ,SAAS,IAAI,CAEvB,yBADc,IAAI,OAAO,IAAI,QAAQ,QAAQ,OAAO,KAAK,CAAC,QAAQ,OAAO,MAAM,CAAC,IAAI,EACvE,KAAK,KAAK;AAIzB,QAAO,KAAK,WAAW,QAAQ;;;;;;;;;;;;;;;;;;AChJjC,eAAsB,OAAO,KAAa,UAAyB,EAAE,EAAwB;CAC3F,MAAM,YAAY,KAAK,KAAK;AAG5B,KAAI,CAAC,WAAW,IAAI,CAClB,OAAM,IAAI,YAAY,wBAAwB,cAAc;CAI9D,MAAM,gBAAgB,aAAa,IAAI;AAGvC,KAAI,QAAQ,eAAe;EACzB,MAAM,eAAe,MAAM,eAAe,eAAe,QAAQ,UAAU;AAC3E,MAAI,CAAC,aAAa,QAChB,OAAM,IAAI,YACR,8BAA8B,aAAa,UAAU,gBACrD,iBACD;;CAML,MAAM,cAAc,OADJ,QAAQ,WAAW,gBACD,MAAM,eAAe;EACrD,SAAS,QAAQ;EACjB,WAAW,QAAQ;EACpB,CAAC;AAGF,OAAM,cAAc;CAGpB,IAAI,UAAU,wBACZ,eACA,YAAY,UACZ,YAAY,MACZ,QACD;CAGD,IAAIC;AACJ,KAAI,QAAQ,yBACV,cAAa,QAAQ,cAAc,EAAE;MAChC;EACL,MAAM,WAAW,yBAAyB;AAC1C,eAAa,QAAQ,aAAa,CAAC,GAAG,UAAU,GAAG,QAAQ,WAAW,GAAG;;AAI3E,cAAa,eAAe,WAAW;AAEvC,MAAK,MAAM,aAAa,WACtB,KAAI;EACF,MAAM,YAAY,MAAM,UAAU,QAAQ,QAAQ;AAClD,YAAU,aAAa,SAAS,UAAU;UACnC,OAAO;AAEd,UAAQ,MAAM,cAAc,UAAU,KAAK,YAAY,MAAM;AAE7D,YAAU,aAAa,SAAS,EAC9B,OAAO,QAAQ,QAAQ,QACnB,GAAG,QAAQ,QAAQ,MAAM,IAAI,UAAU,KAAK,IAAI,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM,KACtG,GAAG,UAAU,KAAK,IAAI,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM,IACjF,CAAC;;CAKN,MAAMC,qBAAkC;EACtC,KAAK;EACL,cAAc,QAAQ,QAAQ,gBAAgB,YAAY;EAC1D,QAAQ,cAAc,YAAY,SAAS;EAC3C,OAAO,QAAQ,QAAQ,SAAS;EAChC,aAAa,QAAQ,QAAQ,eAAe;EAC5C,OAAO,QAAQ,QAAQ;EACvB,SAAS,QAAQ,QAAQ;EACzB,SAAS,QAAQ,QAAQ,WAAW;EACpC,aAAa,QAAQ,QAAQ,eAAe;EAC5C,SAAS,QAAQ,QAAQ,WAAW;EACpC,WAAW,QAAQ,QAAQ,aAAa;EACxC,QAAQ,QAAQ,QAAQ;EACxB,aAAa,QAAQ,QAAQ;EAC7B,YAAY,QAAQ,QAAQ;EAC5B,UAAU,QAAQ,QAAQ;EAC1B,UAAU,QAAQ,QAAQ;EAC1B,aAAa,QAAQ,QAAQ,eAAe;EAC5C,UAAU,QAAQ,QAAQ,YAAY,EAAE;EACxC,QAAQ,QAAQ,QAAQ;EACxB,OAAO,QAAQ,QAAQ;EACvB,QAAQ,QAAQ,QAAQ;EACxB,4BAAW,IAAI,MAAM,EAAC,aAAa;EACnC,cAAc;EACd,OAAO,QAAQ,QAAQ;EACxB;AAGD,KAAI,QAAQ,OAAO,QAAQ,WAAW,QAAQ,QAAQ,SAAS,EAC7D,KAAI;EACF,MAAM,WAAW,MAAM,QAAQ,oBAAoB,QAAQ,KAAK,QAAQ,QAAQ;AAChF,SAAO,OAAO,oBAAoB,SAAS;UACpC,OAAO;AACd,UAAQ,MAAM,2BAA2B,MAAM;AAC/C,qBAAmB,QAAQ,mBAAmB,QAC1C,GAAG,mBAAmB,MAAM,SAAS,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM,KAC3F,QAAQ,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM;;AAKtE,KAAI,QAAQ,OAAO,QAAQ,QACzB,KAAI;AAEF,qBAAmB,YADD,MAAM,
QAAQ,oBAAoB,QAAQ,KAAK,QAAQ,QAAQ;UAE1E,OAAO;AACd,UAAQ,MAAM,0BAA0B,MAAM;AAC9C,qBAAmB,QAAQ,mBAAmB,QAC1C,GAAG,mBAAmB,MAAM,oBAAoB,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM,KACtG,mBAAmB,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM;;CAKjF,MAAM,eAAe,KAAK,KAAK,GAAG;AAOlC,QAL4B;EAC1B,GAAG;EACH;EACD;;;;;;;;;;;;;;;;AAmBH,eAAsB,WACpB,MACA,KACA,UAAyB,EAAE,EACL;CACtB,MAAM,YAAY,KAAK,KAAK;AAG5B,KAAI,CAAC,WAAW,IAAI,CAClB,OAAM,IAAI,YAAY,wBAAwB,cAAc;CAG9D,MAAM,gBAAgB,aAAa,IAAI;AAGvC,OAAM,cAAc;CAGpB,IAAI,UAAU,wBAAwB,eAAe,eAAe,MAAM,QAAQ;CAGlF,IAAID;AACJ,KAAI,QAAQ,yBACV,cAAa,QAAQ,cAAc,EAAE;MAChC;EACL,MAAM,WAAW,yBAAyB;AAC1C,eAAa,QAAQ,aAAa,CAAC,GAAG,UAAU,GAAG,QAAQ,WAAW,GAAG;;AAI3E,cAAa,eAAe,WAAW;AAEvC,MAAK,MAAM,aAAa,WACtB,KAAI;EACF,MAAM,YAAY,MAAM,UAAU,QAAQ,QAAQ;AAClD,YAAU,aAAa,SAAS,UAAU;UACnC,OAAO;AACd,UAAQ,MAAM,cAAc,UAAU,KAAK,YAAY,MAAM;AAC7D,YAAU,aAAa,SAAS,EAC9B,OAAO,QAAQ,QAAQ,QACnB,GAAG,QAAQ,QAAQ,MAAM,IAAI,UAAU,KAAK,IAAI,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM,KACtG,GAAG,UAAU,KAAK,IAAI,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM,IACjF,CAAC;;CAIN,MAAM,eAAe,KAAK,KAAK,GAAG;CAClC,MAAM,SAAS,cAAc,cAAc;AAiC3C,QA/B4B;EAC1B,KAAK;EACL,cAAc,QAAQ,QAAQ,gBAAgB;EAC9C;EACA,OAAO,QAAQ,QAAQ,SAAS;EAChC,aAAa,QAAQ,QAAQ,eAAe;EAC5C,OAAO,QAAQ,QAAQ;EACvB,SAAS,QAAQ,QAAQ;EACzB,SAAS,QAAQ,QAAQ,WAAW;EACpC,aAAa,QAAQ,QAAQ,eAAe;EAC5C,SAAS,QAAQ,QAAQ,WAAW;EACpC,WAAW,QAAQ,QAAQ,aAAa;EACxC,QAAQ,QAAQ,QAAQ;EACxB,aAAa,QAAQ,QAAQ;EAC7B,YAAY,QAAQ,QAAQ;EAC5B,UAAU,QAAQ,QAAQ;EAC1B,UAAU,QAAQ,QAAQ;EAC1B,aAAa,QAAQ,QAAQ,eAAe;EAC5C,UAAU,QAAQ,QAAQ,YAAY,EAAE;EACxC,QAAQ,QAAQ,QAAQ;EACxB,OAAO,QAAQ,QAAQ;EACvB,SAAS,QAAQ,QAAQ;EACzB,eAAe,QAAQ,QAAQ;EAC/B,UAAU,QAAQ,QAAQ;EAC1B,WAAW,QAAQ,QAAQ;EAC3B,QAAQ,QAAQ,QAAQ;EACxB,4BAAW,IAAI,MAAM,EAAC,aAAa;EACnC;EACA,OAAO,QAAQ,QAAQ;EACxB"}
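
The sources embedded in the map above document the new extractor pipeline: scrape() sorts extractors by descending priority, merges each Partial<ScrapedData> into a shared context, and appends options.extractors to the defaults unless replaceDefaultExtractors is set. A minimal custom-extractor sketch against that interface; the type re-exports from the package root are assumed, and the reading-time field is purely illustrative:

```ts
import { scrape } from 'scrapex';
import type { ExtractionContext, Extractor, ScrapedData } from 'scrapex'; // assumed re-exports

// Priority 40 runs after ContentExtractor (50), so wordCount is already populated.
class ReadingTimeExtractor implements Extractor {
  readonly name = 'reading-time';
  readonly priority = 40;

  async extract(context: ExtractionContext): Promise<Partial<ScrapedData>> {
    const words = context.results.wordCount ?? 0;
    // Fields under `custom` are merged by mergeResults() rather than overwritten.
    return { custom: { readingTimeMinutes: Math.ceil(words / 200) } };
  }
}

const result = await scrape('https://example.com/article', {
  extractors: [new ReadingTimeExtractor()], // appended to the default five
});
```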

package/dist/llm/index.cjs
@@ -0,0 +1,316 @@
|
|
|
1
|
+
const require_enhancer = require('../enhancer-oM4BhYYS.cjs');
|
|
2
|
+
|
|
3
|
+
//#region src/llm/anthropic.ts
|
|
4
|
+
const DEFAULT_MODEL$1 = "claude-3-5-haiku-20241022";
|
|
5
|
+
const DEFAULT_MAX_TOKENS$1 = 1024;
|
|
6
|
+
/**
|
|
7
|
+
* Anthropic Claude provider
|
|
8
|
+
*
|
|
9
|
+
* Requires @anthropic-ai/sdk as a peer dependency.
|
|
10
|
+
*
|
|
11
|
+
* @example
|
|
12
|
+
* ```ts
|
|
13
|
+
* const provider = new AnthropicProvider({ apiKey: 'sk-...' });
|
|
14
|
+
* const result = await scrape(url, { llm: provider, enhance: ['summarize'] });
|
|
15
|
+
* ```
|
|
16
|
+
*/
|
|
17
|
+
var AnthropicProvider = class {
|
|
18
|
+
name = "anthropic";
|
|
19
|
+
client;
|
|
20
|
+
model;
|
|
21
|
+
constructor(config = {}) {
|
|
22
|
+
const apiKey = config.apiKey ?? process.env.ANTHROPIC_API_KEY;
|
|
23
|
+
if (!apiKey) throw new require_enhancer.ScrapeError("Anthropic API key required. Set ANTHROPIC_API_KEY env var or pass apiKey in config.", "LLM_ERROR");
|
|
24
|
+
this.model = config.model ?? DEFAULT_MODEL$1;
|
|
25
|
+
try {
|
|
26
|
+
const { Anthropic } = require("@anthropic-ai/sdk");
|
|
27
|
+
this.client = new Anthropic({
|
|
28
|
+
apiKey,
|
|
29
|
+
baseURL: config.baseUrl
|
|
30
|
+
});
|
|
31
|
+
} catch {
|
|
32
|
+
throw new require_enhancer.ScrapeError("@anthropic-ai/sdk is required for Anthropic provider. Install with: npm install @anthropic-ai/sdk", "LLM_ERROR");
|
|
33
|
+
}
|
|
34
|
+
}
|
|
35
|
+
async complete(prompt, options = {}) {
|
|
36
|
+
try {
|
|
37
|
+
const content = (await this.client.messages.create({
|
|
38
|
+
model: this.model,
|
|
39
|
+
max_tokens: options.maxTokens ?? DEFAULT_MAX_TOKENS$1,
|
|
40
|
+
messages: [{
|
|
41
|
+
role: "user",
|
|
42
|
+
content: prompt
|
|
43
|
+
}],
|
|
44
|
+
system: options.systemPrompt,
|
|
45
|
+
temperature: options.temperature
|
|
46
|
+
})).content[0];
|
|
47
|
+
if (content?.type === "text" && content.text) return content.text;
|
|
48
|
+
throw new require_enhancer.ScrapeError("Unexpected or empty response from Anthropic", "LLM_ERROR");
|
|
49
|
+
} catch (error) {
|
|
50
|
+
if (error instanceof require_enhancer.ScrapeError) throw error;
|
|
51
|
+
throw new require_enhancer.ScrapeError(`Anthropic API error: ${error instanceof Error ? error.message : String(error)}`, "LLM_ERROR", void 0, error instanceof Error ? error : void 0);
|
|
52
|
+
}
|
|
53
|
+
}
|
|
54
|
+
async completeJSON(prompt, schema, options = {}) {
|
|
55
|
+
const jsonPrompt = `${prompt}
|
|
56
|
+
|
|
57
|
+
Respond ONLY with valid JSON matching this schema:
|
|
58
|
+
${JSON.stringify(zodToJsonSchema$1(schema), null, 2)}
|
|
59
|
+
|
|
60
|
+
Do not include any explanation or markdown formatting. Just the JSON object.`;
|
|
61
|
+
const response = await this.complete(jsonPrompt, {
|
|
62
|
+
...options,
|
|
63
|
+
systemPrompt: options.systemPrompt ?? "You are a helpful assistant that responds only with valid JSON."
|
|
64
|
+
});
|
|
65
|
+
try {
|
|
66
|
+
const jsonMatch = response.match(/\{[\s\S]*\}/);
|
|
67
|
+
if (!jsonMatch) throw new Error("No JSON object found in response");
|
|
68
|
+
const parsed = JSON.parse(jsonMatch[0]);
|
|
69
|
+
return schema.parse(parsed);
|
|
70
|
+
} catch (error) {
|
|
71
|
+
throw new require_enhancer.ScrapeError(`Failed to parse LLM response as JSON: ${error instanceof Error ? error.message : String(error)}`, "VALIDATION_ERROR", void 0, error instanceof Error ? error : void 0);
|
|
72
|
+
}
|
|
73
|
+
}
|
|
74
|
+
};
|
|
75
|
+
/**
|
|
76
|
+
* Convert a Zod schema to a simple JSON Schema representation
|
|
77
|
+
* (simplified version for prompt engineering)
|
|
78
|
+
*/
|
|
79
|
+
function zodToJsonSchema$1(schema) {
|
|
80
|
+
const def = schema._def;
|
|
81
|
+
switch (def.typeName) {
|
|
82
|
+
case "ZodObject": {
|
|
83
|
+
const shape = schema.shape;
|
|
84
|
+
const properties = {};
|
|
85
|
+
for (const [key, value] of Object.entries(shape)) properties[key] = zodToJsonSchema$1(value);
|
|
86
|
+
return {
|
|
87
|
+
type: "object",
|
|
88
|
+
properties
|
|
89
|
+
};
|
|
90
|
+
}
|
|
91
|
+
case "ZodArray": return {
|
|
92
|
+
type: "array",
|
|
93
|
+
items: zodToJsonSchema$1(def.type)
|
|
94
|
+
};
|
|
95
|
+
case "ZodString": return { type: "string" };
|
|
96
|
+
case "ZodNumber": return { type: "number" };
|
|
97
|
+
case "ZodBoolean": return { type: "boolean" };
|
|
98
|
+
case "ZodEnum": return {
|
|
99
|
+
type: "string",
|
|
100
|
+
enum: def.values
|
|
101
|
+
};
|
|
102
|
+
default: return { type: "string" };
|
|
103
|
+
}
|
|
104
|
+
}
|
|
105
|
+
|
|
106
|
+
//#endregion
|
|
107
|
+
//#region src/llm/openai.ts
|
|
108
|
+
const DEFAULT_MODEL = "gpt-4o-mini";
|
|
109
|
+
const DEFAULT_MAX_TOKENS = 1024;
|
|
110
|
+
const DEFAULT_BASE_URL = "https://api.openai.com/v1";
|
|
111
|
+
/**
|
|
112
|
+
* OpenAI-compatible provider
|
|
113
|
+
*
|
|
114
|
+
* Works with:
|
|
115
|
+
* - OpenAI API
|
|
116
|
+
* - Ollama (http://localhost:11434/v1)
|
|
117
|
+
* - LM Studio (http://localhost:1234/v1)
|
|
118
|
+
* - LocalAI
|
|
119
|
+
* - vLLM
|
|
120
|
+
* - Any OpenAI-compatible API
|
|
121
|
+
*
|
|
122
|
+
* Requires `openai` as a peer dependency.
|
|
123
|
+
*
|
|
124
|
+
* @example
|
|
125
|
+
* ```ts
|
|
126
|
+
* // OpenAI
|
|
127
|
+
* const provider = new OpenAIProvider({ apiKey: 'sk-...' });
|
|
128
|
+
*
|
|
129
|
+
* // Ollama
|
|
130
|
+
* const provider = new OpenAIProvider({
|
|
131
|
+
* baseUrl: 'http://localhost:11434/v1',
|
|
132
|
+
* model: 'llama3.2',
|
|
133
|
+
* apiKey: 'ollama' // Ollama doesn't require a real key
|
|
134
|
+
* });
|
|
135
|
+
*
|
|
136
|
+
* // LM Studio
|
|
137
|
+
* const provider = new OpenAIProvider({
|
|
138
|
+
* baseUrl: 'http://localhost:1234/v1',
|
|
139
|
+
* model: 'local-model',
|
|
140
|
+
* apiKey: 'lm-studio'
|
|
141
|
+
* });
|
|
142
|
+
* ```
|
|
143
|
+
*/
|
|
144
|
+
var OpenAIProvider = class {
|
|
145
|
+
name = "openai";
|
|
146
|
+
client;
|
|
147
|
+
model;
|
|
148
|
+
constructor(config = {}) {
|
|
149
|
+
const apiKey = config.apiKey ?? process.env.OPENAI_API_KEY;
|
|
150
|
+
const baseUrl = config.baseUrl ?? DEFAULT_BASE_URL;
|
|
151
|
+
if (!apiKey && baseUrl === DEFAULT_BASE_URL) throw new require_enhancer.ScrapeError("OpenAI API key required. Set OPENAI_API_KEY env var or pass apiKey in config.", "LLM_ERROR");
|
|
152
|
+
this.model = config.model ?? DEFAULT_MODEL;
|
|
153
|
+
try {
|
|
154
|
+
const { OpenAI } = require("openai");
|
|
155
|
+
this.client = new OpenAI({
|
|
156
|
+
apiKey: apiKey ?? "local",
|
|
157
|
+
baseURL: baseUrl
|
|
158
|
+
});
|
|
159
|
+
} catch {
|
|
160
|
+
throw new require_enhancer.ScrapeError("openai package is required for OpenAI provider. Install with: npm install openai", "LLM_ERROR");
|
|
161
|
+
}
|
|
162
|
+
}
|
|
163
|
+
async complete(prompt, options = {}) {
|
|
164
|
+
try {
|
|
165
|
+
const client = this.client;
|
|
166
|
+
const messages = [];
|
|
167
|
+
if (options.systemPrompt) messages.push({
|
|
168
|
+
role: "system",
|
|
169
|
+
content: options.systemPrompt
|
|
170
|
+
});
|
|
171
|
+
messages.push({
|
|
172
|
+
role: "user",
|
|
173
|
+
content: prompt
|
|
174
|
+
});
|
|
175
|
+
const content = (await client.chat.completions.create({
|
|
176
|
+
model: this.model,
|
|
177
|
+
max_tokens: options.maxTokens ?? DEFAULT_MAX_TOKENS,
|
|
178
|
+
messages,
|
|
179
|
+
temperature: options.temperature
|
|
180
|
+
})).choices[0]?.message?.content;
|
|
181
|
+
if (!content) throw new require_enhancer.ScrapeError("Empty response from OpenAI", "LLM_ERROR");
|
|
182
|
+
return content;
|
|
183
|
+
} catch (error) {
|
|
184
|
+
if (error instanceof require_enhancer.ScrapeError) throw error;
|
|
185
|
+
throw new require_enhancer.ScrapeError(`OpenAI API error: ${error instanceof Error ? error.message : String(error)}`, "LLM_ERROR", void 0, error instanceof Error ? error : void 0);
|
|
186
|
+
}
|
|
187
|
+
}
|
|
188
|
+
async completeJSON(prompt, schema, options = {}) {
|
|
189
|
+
const client = this.client;
|
|
190
|
+
try {
|
|
191
|
+
const messages = [{
|
|
192
|
+
role: "system",
|
|
193
|
+
content: options.systemPrompt ?? "You are a helpful assistant that extracts information from content."
|
|
194
|
+
}, {
|
|
195
|
+
role: "user",
|
|
196
|
+
content: prompt
|
|
197
|
+
}];
|
|
198
|
+
const content = (await client.chat.completions.create({
|
|
199
|
+
model: this.model,
|
|
200
|
+
max_tokens: options.maxTokens ?? DEFAULT_MAX_TOKENS,
|
|
201
|
+
messages,
|
|
202
|
+
temperature: options.temperature,
|
|
203
|
+
response_format: { type: "json_object" }
|
|
204
|
+
})).choices[0]?.message?.content;
|
|
205
|
+
if (!content) throw new require_enhancer.ScrapeError("Empty response from OpenAI", "LLM_ERROR");
|
|
206
|
+
const parsed = JSON.parse(content);
|
|
207
|
+
return schema.parse(parsed);
|
|
208
|
+
} catch (error) {
|
|
209
|
+
if (error instanceof require_enhancer.ScrapeError) throw error;
|
|
210
|
+
const jsonPrompt = `${prompt}
|
|
211
|
+
|
|
212
|
+
Respond ONLY with valid JSON matching this schema:
|
|
213
|
+
${JSON.stringify(zodToJsonSchema(schema), null, 2)}
|
|
214
|
+
|
|
215
|
+
Do not include any explanation or markdown formatting. Just the JSON object.`;
|
|
216
|
+
const response = await this.complete(jsonPrompt, {
|
|
217
|
+
...options,
|
|
218
|
+
systemPrompt: "You respond only with valid JSON."
|
|
219
|
+
});
|
|
220
|
+
try {
|
|
221
|
+
const jsonMatch = response.match(/\{[\s\S]*\}/);
|
|
222
|
+
if (!jsonMatch) throw new Error("No JSON object found in response");
|
|
223
|
+
const parsed = JSON.parse(jsonMatch[0]);
|
|
224
|
+
return schema.parse(parsed);
|
|
225
|
+
} catch (parseError) {
|
|
226
|
+
throw new require_enhancer.ScrapeError(`Failed to parse LLM response as JSON: ${parseError instanceof Error ? parseError.message : String(parseError)}`, "VALIDATION_ERROR", void 0, parseError instanceof Error ? parseError : void 0);
|
|
227
|
+
}
|
|
228
|
+
}
|
|
229
|
+
}
|
|
230
|
+
};
|
|
231
|
+
/**
|
|
232
|
+
* Convert a Zod schema to JSON Schema for structured outputs
|
|
233
|
+
*/
|
|
234
|
+
function zodToJsonSchema(schema) {
|
|
235
|
+
const def = schema._def;
|
|
236
|
+
switch (def.typeName) {
|
|
237
|
+
case "ZodObject": {
|
|
238
|
+
const shape = schema.shape;
|
|
239
|
+
const properties = {};
|
|
240
|
+
const required = [];
|
|
241
|
+
for (const [key, value] of Object.entries(shape)) {
|
|
242
|
+
properties[key] = zodToJsonSchema(value);
|
|
243
|
+
if (value._def.typeName !== "ZodOptional") required.push(key);
|
|
244
|
+
}
|
|
245
|
+
return {
|
|
246
|
+
type: "object",
|
|
247
|
+
properties,
|
|
248
|
+
required
|
|
249
|
+
};
|
|
250
|
+
}
|
|
251
|
+
case "ZodArray": return {
|
|
252
|
+
type: "array",
|
|
253
|
+
items: zodToJsonSchema(def.type)
|
|
254
|
+
};
|
|
255
|
+
case "ZodString": return { type: "string" };
|
|
256
|
+
case "ZodNumber": return { type: "number" };
|
|
257
|
+
case "ZodBoolean": return { type: "boolean" };
|
|
258
|
+
case "ZodEnum": return {
|
|
259
|
+
type: "string",
|
|
260
|
+
enum: def.values
|
|
261
|
+
};
|
|
262
|
+
case "ZodOptional": return zodToJsonSchema(def.innerType);
|
|
263
|
+
default: return { type: "string" };
|
|
264
|
+
}
|
|
265
|
+
}
|
|
266
|
+
/**
|
|
267
|
+
* Create an OpenAI provider with default settings
|
|
268
|
+
*/
|
|
269
|
+
function createOpenAI(config) {
|
|
270
|
+
return new OpenAIProvider(config);
|
|
271
|
+
}
|
|
272
|
+
/**
|
|
273
|
+
* Create an Ollama provider
|
|
274
|
+
*
|
|
275
|
+
* @example
|
|
276
|
+
* ```ts
|
|
277
|
+
* const provider = createOllama({ model: 'llama3.2' });
|
|
278
|
+
* ```
|
|
279
|
+
*/
|
|
280
|
+
function createOllama(config = { model: "llama3.2" }) {
|
|
281
|
+
return new OpenAIProvider({
|
|
282
|
+
baseUrl: `http://localhost:${config.port ?? 11434}/v1`,
|
|
283
|
+
model: config.model,
|
|
284
|
+
apiKey: "ollama"
|
|
285
|
+
});
|
|
286
|
+
}
|
|
287
|
+
/**
|
|
288
|
+
* Create an LM Studio provider
|
|
289
|
+
*
|
|
290
|
+
* @example
|
|
291
|
+
* ```ts
|
|
292
|
+
* const provider = createLMStudio({ model: 'local-model' });
|
|
293
|
+
* ```
|
|
294
|
+
*/
|
|
295
|
+
function createLMStudio(config = { model: "local-model" }) {
|
|
296
|
+
return new OpenAIProvider({
|
|
297
|
+
baseUrl: `http://localhost:${config.port ?? 1234}/v1`,
|
|
298
|
+
model: config.model,
|
|
299
|
+
apiKey: "lm-studio"
|
|
300
|
+
});
|
|
301
|
+
}
|
|
302
|
+
|
|
303
|
+
//#endregion
|
|
304
|
+
exports.AnthropicProvider = AnthropicProvider;
|
|
305
|
+
exports.ClassifySchema = require_enhancer.ClassifySchema;
|
|
306
|
+
exports.EntitiesSchema = require_enhancer.EntitiesSchema;
|
|
307
|
+
exports.OpenAIProvider = OpenAIProvider;
|
|
308
|
+
exports.SummarySchema = require_enhancer.SummarySchema;
|
|
309
|
+
exports.TagsSchema = require_enhancer.TagsSchema;
|
|
310
|
+
exports.ask = require_enhancer.ask;
|
|
311
|
+
exports.createLMStudio = createLMStudio;
|
|
312
|
+
exports.createOllama = createOllama;
|
|
313
|
+
exports.createOpenAI = createOpenAI;
|
|
314
|
+
exports.enhance = require_enhancer.enhance;
|
|
315
|
+
exports.extract = require_enhancer.extract;
|
|
316
|
+
//# sourceMappingURL=index.cjs.map
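Taken together, the new `dist/llm` bundle exposes two providers plus factory helpers. As a quick orientation, here is a minimal consumer sketch assembled from the `@example` blocks above; the `scrapex/llm` subpath is an assumption inferred from the `dist/llm/*` layout and has not been checked against the package's `exports` map:

```ts
// Hypothetical consumer of the new LLM entry point.
// Assumption: 'scrapex/llm' resolves to dist/llm/index.mjs via package.json "exports".
import { scrape } from 'scrapex';
import { createOllama } from 'scrapex/llm';

// Ollama speaks the OpenAI wire format, so createOllama() just configures
// OpenAIProvider with a localhost base URL and a placeholder API key.
const provider = createOllama({ model: 'llama3.2' });

// Option names taken from the AnthropicProvider @example above.
const result = await scrape('https://example.com/article', {
  llm: provider,
  enhance: ['summarize'],
});
```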
@@ -0,0 +1 @@
{"version":3,"file":"index.cjs","names":["DEFAULT_MODEL","DEFAULT_MAX_TOKENS","ScrapeError","zodToJsonSchema","properties: Record<string, object>","ScrapeError","messages: Array<{ role: 'system' | 'user'; content: string }>","properties: Record<string, object>","required: string[]"],"sources":["../../src/llm/anthropic.ts","../../src/llm/openai.ts"],"sourcesContent":["import type { z } from 'zod';\nimport { ScrapeError } from '@/core/errors.js';\nimport type { AnthropicConfig, CompletionOptions, LLMProvider } from './types.js';\n\nconst DEFAULT_MODEL = 'claude-3-5-haiku-20241022';\nconst DEFAULT_MAX_TOKENS = 1024;\n\n/**\n * Anthropic Claude provider\n *\n * Requires @anthropic-ai/sdk as a peer dependency.\n *\n * @example\n * ```ts\n * const provider = new AnthropicProvider({ apiKey: 'sk-...' });\n * const result = await scrape(url, { llm: provider, enhance: ['summarize'] });\n * ```\n */\nexport class AnthropicProvider implements LLMProvider {\n readonly name = 'anthropic';\n private client: unknown;\n private model: string;\n\n constructor(config: AnthropicConfig = {}) {\n const apiKey = config.apiKey ?? process.env.ANTHROPIC_API_KEY;\n if (!apiKey) {\n throw new ScrapeError(\n 'Anthropic API key required. Set ANTHROPIC_API_KEY env var or pass apiKey in config.',\n 'LLM_ERROR'\n );\n }\n\n this.model = config.model ?? DEFAULT_MODEL;\n\n // Dynamic import to avoid requiring the SDK if not used\n try {\n // eslint-disable-next-line @typescript-eslint/no-require-imports\n const { Anthropic } = require('@anthropic-ai/sdk') as typeof import('@anthropic-ai/sdk');\n this.client = new Anthropic({\n apiKey,\n baseURL: config.baseUrl,\n });\n } catch {\n throw new ScrapeError(\n '@anthropic-ai/sdk is required for Anthropic provider. Install with: npm install @anthropic-ai/sdk',\n 'LLM_ERROR'\n );\n }\n }\n\n async complete(prompt: string, options: CompletionOptions = {}): Promise<string> {\n try {\n const client = this.client as import('@anthropic-ai/sdk').Anthropic;\n const response = await client.messages.create({\n model: this.model,\n max_tokens: options.maxTokens ?? DEFAULT_MAX_TOKENS,\n messages: [{ role: 'user', content: prompt }],\n system: options.systemPrompt,\n temperature: options.temperature,\n });\n\n const content = response.content[0];\n if (content?.type === 'text' && content.text) {\n return content.text;\n }\n\n throw new ScrapeError('Unexpected or empty response from Anthropic', 'LLM_ERROR');\n } catch (error) {\n if (error instanceof ScrapeError) throw error;\n throw new ScrapeError(\n `Anthropic API error: ${error instanceof Error ? error.message : String(error)}`,\n 'LLM_ERROR',\n undefined,\n error instanceof Error ? error : undefined\n );\n }\n }\n\n async completeJSON<T>(\n prompt: string,\n schema: z.ZodType<T>,\n options: CompletionOptions = {}\n ): Promise<T> {\n const jsonPrompt = `${prompt}\n\nRespond ONLY with valid JSON matching this schema:\n${JSON.stringify(zodToJsonSchema(schema), null, 2)}\n\nDo not include any explanation or markdown formatting. Just the JSON object.`;\n\n const response = await this.complete(jsonPrompt, {\n ...options,\n systemPrompt:\n options.systemPrompt ?? 
'You are a helpful assistant that responds only with valid JSON.',\n });\n\n try {\n // Try to extract JSON from the response\n const jsonMatch = response.match(/\\{[\\s\\S]*\\}/);\n if (!jsonMatch) {\n throw new Error('No JSON object found in response');\n }\n\n const parsed = JSON.parse(jsonMatch[0]);\n return schema.parse(parsed);\n } catch (error) {\n throw new ScrapeError(\n `Failed to parse LLM response as JSON: ${error instanceof Error ? error.message : String(error)}`,\n 'VALIDATION_ERROR',\n undefined,\n error instanceof Error ? error : undefined\n );\n }\n }\n}\n\n/**\n * Convert a Zod schema to a simple JSON Schema representation\n * (simplified version for prompt engineering)\n */\nfunction zodToJsonSchema(schema: z.ZodType<unknown>): object {\n const def = (schema as z.ZodType<unknown> & { _def: { typeName: string } })._def;\n\n switch (def.typeName) {\n case 'ZodObject': {\n const shape = (schema as z.ZodObject<z.ZodRawShape>).shape;\n const properties: Record<string, object> = {};\n for (const [key, value] of Object.entries(shape)) {\n properties[key] = zodToJsonSchema(value as z.ZodType<unknown>);\n }\n return { type: 'object', properties };\n }\n case 'ZodArray': {\n const arrayDef = def as unknown as { type: z.ZodType<unknown> };\n return { type: 'array', items: zodToJsonSchema(arrayDef.type) };\n }\n case 'ZodString':\n return { type: 'string' };\n case 'ZodNumber':\n return { type: 'number' };\n case 'ZodBoolean':\n return { type: 'boolean' };\n case 'ZodEnum': {\n const enumDef = def as unknown as { values: string[] };\n return { type: 'string', enum: enumDef.values };\n }\n default:\n return { type: 'string' };\n }\n}\n","import type { z } from 'zod';\nimport { ScrapeError } from '@/core/errors.js';\nimport type { CompletionOptions, LLMProvider, OpenAICompatibleConfig } from './types.js';\n\nconst DEFAULT_MODEL = 'gpt-4o-mini';\nconst DEFAULT_MAX_TOKENS = 1024;\nconst DEFAULT_BASE_URL = 'https://api.openai.com/v1';\n\n/**\n * OpenAI-compatible provider\n *\n * Works with:\n * - OpenAI API\n * - Ollama (http://localhost:11434/v1)\n * - LM Studio (http://localhost:1234/v1)\n * - LocalAI\n * - vLLM\n * - Any OpenAI-compatible API\n *\n * Requires `openai` as a peer dependency.\n *\n * @example\n * ```ts\n * // OpenAI\n * const provider = new OpenAIProvider({ apiKey: 'sk-...' });\n *\n * // Ollama\n * const provider = new OpenAIProvider({\n * baseUrl: 'http://localhost:11434/v1',\n * model: 'llama3.2',\n * apiKey: 'ollama' // Ollama doesn't require a real key\n * });\n *\n * // LM Studio\n * const provider = new OpenAIProvider({\n * baseUrl: 'http://localhost:1234/v1',\n * model: 'local-model',\n * apiKey: 'lm-studio'\n * });\n * ```\n */\nexport class OpenAIProvider implements LLMProvider {\n readonly name = 'openai';\n private client: unknown;\n private model: string;\n\n constructor(config: OpenAICompatibleConfig = {}) {\n const apiKey = config.apiKey ?? process.env.OPENAI_API_KEY;\n const baseUrl = config.baseUrl ?? DEFAULT_BASE_URL;\n\n // Only require API key for OpenAI (not for local providers)\n if (!apiKey && baseUrl === DEFAULT_BASE_URL) {\n throw new ScrapeError(\n 'OpenAI API key required. Set OPENAI_API_KEY env var or pass apiKey in config.',\n 'LLM_ERROR'\n );\n }\n\n this.model = config.model ?? 
DEFAULT_MODEL;\n\n // Dynamic import to avoid requiring the SDK if not used\n try {\n // eslint-disable-next-line @typescript-eslint/no-require-imports\n const { OpenAI } = require('openai') as typeof import('openai');\n this.client = new OpenAI({\n apiKey: apiKey ?? 'local', // Use 'local' as placeholder for local providers\n baseURL: baseUrl,\n });\n } catch {\n throw new ScrapeError(\n 'openai package is required for OpenAI provider. Install with: npm install openai',\n 'LLM_ERROR'\n );\n }\n }\n\n async complete(prompt: string, options: CompletionOptions = {}): Promise<string> {\n try {\n const client = this.client as import('openai').OpenAI;\n const messages: Array<{ role: 'system' | 'user'; content: string }> = [];\n\n if (options.systemPrompt) {\n messages.push({ role: 'system', content: options.systemPrompt });\n }\n messages.push({ role: 'user', content: prompt });\n\n const response = await client.chat.completions.create({\n model: this.model,\n max_tokens: options.maxTokens ?? DEFAULT_MAX_TOKENS,\n messages,\n temperature: options.temperature,\n });\n\n const content = response.choices[0]?.message?.content;\n if (!content) {\n throw new ScrapeError('Empty response from OpenAI', 'LLM_ERROR');\n }\n\n return content;\n } catch (error) {\n if (error instanceof ScrapeError) throw error;\n throw new ScrapeError(\n `OpenAI API error: ${error instanceof Error ? error.message : String(error)}`,\n 'LLM_ERROR',\n undefined,\n error instanceof Error ? error : undefined\n );\n }\n }\n\n async completeJSON<T>(\n prompt: string,\n schema: z.ZodType<T>,\n options: CompletionOptions = {}\n ): Promise<T> {\n const client = this.client as import('openai').OpenAI;\n\n try {\n // Use JSON mode for structured outputs\n const messages: Array<{ role: 'system' | 'user'; content: string }> = [\n {\n role: 'system',\n content:\n options.systemPrompt ??\n 'You are a helpful assistant that extracts information from content.',\n },\n { role: 'user', content: prompt },\n ];\n\n const response = await client.chat.completions.create({\n model: this.model,\n max_tokens: options.maxTokens ?? DEFAULT_MAX_TOKENS,\n messages,\n temperature: options.temperature,\n response_format: { type: 'json_object' },\n });\n\n const content = response.choices[0]?.message?.content;\n if (!content) {\n throw new ScrapeError('Empty response from OpenAI', 'LLM_ERROR');\n }\n\n const parsed = JSON.parse(content);\n return schema.parse(parsed);\n } catch (error) {\n // Fallback to regular completion with JSON instruction\n if (error instanceof ScrapeError) throw error;\n\n // If structured output failed, try regular completion\n const jsonPrompt = `${prompt}\n\nRespond ONLY with valid JSON matching this schema:\n${JSON.stringify(zodToJsonSchema(schema), null, 2)}\n\nDo not include any explanation or markdown formatting. Just the JSON object.`;\n\n const response = await this.complete(jsonPrompt, {\n ...options,\n systemPrompt: 'You respond only with valid JSON.',\n });\n\n try {\n const jsonMatch = response.match(/\\{[\\s\\S]*\\}/);\n if (!jsonMatch) {\n throw new Error('No JSON object found in response');\n }\n\n const parsed = JSON.parse(jsonMatch[0]);\n return schema.parse(parsed);\n } catch (parseError) {\n throw new ScrapeError(\n `Failed to parse LLM response as JSON: ${parseError instanceof Error ? parseError.message : String(parseError)}`,\n 'VALIDATION_ERROR',\n undefined,\n parseError instanceof Error ? 
parseError : undefined\n );\n }\n }\n }\n}\n\n/**\n * Convert a Zod schema to JSON Schema for structured outputs\n */\nfunction zodToJsonSchema(schema: z.ZodType<unknown>): object {\n const def = (schema as z.ZodType<unknown> & { _def: { typeName: string } })._def;\n\n switch (def.typeName) {\n case 'ZodObject': {\n const shape = (schema as z.ZodObject<z.ZodRawShape>).shape;\n const properties: Record<string, object> = {};\n const required: string[] = [];\n\n for (const [key, value] of Object.entries(shape)) {\n properties[key] = zodToJsonSchema(value as z.ZodType<unknown>);\n // Assume all fields are required unless wrapped in ZodOptional\n const valueDef = (value as z.ZodType<unknown> & { _def: { typeName: string } })._def;\n if (valueDef.typeName !== 'ZodOptional') {\n required.push(key);\n }\n }\n return { type: 'object', properties, required };\n }\n case 'ZodArray': {\n const arrayDef = def as unknown as { type: z.ZodType<unknown> };\n return { type: 'array', items: zodToJsonSchema(arrayDef.type) };\n }\n case 'ZodString':\n return { type: 'string' };\n case 'ZodNumber':\n return { type: 'number' };\n case 'ZodBoolean':\n return { type: 'boolean' };\n case 'ZodEnum': {\n const enumDef = def as unknown as { values: string[] };\n return { type: 'string', enum: enumDef.values };\n }\n case 'ZodOptional': {\n const optionalDef = def as unknown as { innerType: z.ZodType<unknown> };\n return zodToJsonSchema(optionalDef.innerType);\n }\n default:\n return { type: 'string' };\n }\n}\n\n// Convenience factory functions\n\n/**\n * Create an OpenAI provider with default settings\n */\nexport function createOpenAI(config?: OpenAICompatibleConfig): OpenAIProvider {\n return new OpenAIProvider(config);\n}\n\n/**\n * Create an Ollama provider\n *\n * @example\n * ```ts\n * const provider = createOllama({ model: 'llama3.2' });\n * ```\n */\nexport function createOllama(\n config: { model: string; port?: number } = { model: 'llama3.2' }\n): OpenAIProvider {\n return new OpenAIProvider({\n baseUrl: `http://localhost:${config.port ?? 11434}/v1`,\n model: config.model,\n apiKey: 'ollama',\n });\n}\n\n/**\n * Create an LM Studio provider\n *\n * @example\n * ```ts\n * const provider = createLMStudio({ model: 'local-model' });\n * ```\n */\nexport function createLMStudio(\n config: { model: string; port?: number } = { model: 'local-model' }\n): OpenAIProvider {\n return new OpenAIProvider({\n baseUrl: `http://localhost:${config.port ?? 
1234}/v1`,\n model: config.model,\n apiKey: 'lm-studio',\n });\n}\n"],"mappings":";;;AAIA,MAAMA,kBAAgB;AACtB,MAAMC,uBAAqB;;;;;;;;;;;;AAa3B,IAAa,oBAAb,MAAsD;CACpD,AAAS,OAAO;CAChB,AAAQ;CACR,AAAQ;CAER,YAAY,SAA0B,EAAE,EAAE;EACxC,MAAM,SAAS,OAAO,UAAU,QAAQ,IAAI;AAC5C,MAAI,CAAC,OACH,OAAM,IAAIC,6BACR,uFACA,YACD;AAGH,OAAK,QAAQ,OAAO,SAASF;AAG7B,MAAI;GAEF,MAAM,EAAE,cAAc,QAAQ,oBAAoB;AAClD,QAAK,SAAS,IAAI,UAAU;IAC1B;IACA,SAAS,OAAO;IACjB,CAAC;UACI;AACN,SAAM,IAAIE,6BACR,qGACA,YACD;;;CAIL,MAAM,SAAS,QAAgB,UAA6B,EAAE,EAAmB;AAC/E,MAAI;GAUF,MAAM,WARW,MADF,KAAK,OACU,SAAS,OAAO;IAC5C,OAAO,KAAK;IACZ,YAAY,QAAQ,aAAaD;IACjC,UAAU,CAAC;KAAE,MAAM;KAAQ,SAAS;KAAQ,CAAC;IAC7C,QAAQ,QAAQ;IAChB,aAAa,QAAQ;IACtB,CAAC,EAEuB,QAAQ;AACjC,OAAI,SAAS,SAAS,UAAU,QAAQ,KACtC,QAAO,QAAQ;AAGjB,SAAM,IAAIC,6BAAY,+CAA+C,YAAY;WAC1E,OAAO;AACd,OAAI,iBAAiBA,6BAAa,OAAM;AACxC,SAAM,IAAIA,6BACR,wBAAwB,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM,IAC9E,aACA,QACA,iBAAiB,QAAQ,QAAQ,OAClC;;;CAIL,MAAM,aACJ,QACA,QACA,UAA6B,EAAE,EACnB;EACZ,MAAM,aAAa,GAAG,OAAO;;;EAG/B,KAAK,UAAUC,kBAAgB,OAAO,EAAE,MAAM,EAAE,CAAC;;;EAI/C,MAAM,WAAW,MAAM,KAAK,SAAS,YAAY;GAC/C,GAAG;GACH,cACE,QAAQ,gBAAgB;GAC3B,CAAC;AAEF,MAAI;GAEF,MAAM,YAAY,SAAS,MAAM,cAAc;AAC/C,OAAI,CAAC,UACH,OAAM,IAAI,MAAM,mCAAmC;GAGrD,MAAM,SAAS,KAAK,MAAM,UAAU,GAAG;AACvC,UAAO,OAAO,MAAM,OAAO;WACpB,OAAO;AACd,SAAM,IAAID,6BACR,yCAAyC,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM,IAC/F,oBACA,QACA,iBAAiB,QAAQ,QAAQ,OAClC;;;;;;;;AASP,SAASC,kBAAgB,QAAoC;CAC3D,MAAM,MAAO,OAA+D;AAE5E,SAAQ,IAAI,UAAZ;EACE,KAAK,aAAa;GAChB,MAAM,QAAS,OAAsC;GACrD,MAAMC,aAAqC,EAAE;AAC7C,QAAK,MAAM,CAAC,KAAK,UAAU,OAAO,QAAQ,MAAM,CAC9C,YAAW,OAAOD,kBAAgB,MAA4B;AAEhE,UAAO;IAAE,MAAM;IAAU;IAAY;;EAEvC,KAAK,WAEH,QAAO;GAAE,MAAM;GAAS,OAAOA,kBADd,IACuC,KAAK;GAAE;EAEjE,KAAK,YACH,QAAO,EAAE,MAAM,UAAU;EAC3B,KAAK,YACH,QAAO,EAAE,MAAM,UAAU;EAC3B,KAAK,aACH,QAAO,EAAE,MAAM,WAAW;EAC5B,KAAK,UAEH,QAAO;GAAE,MAAM;GAAU,MADT,IACuB;GAAQ;EAEjD,QACE,QAAO,EAAE,MAAM,UAAU;;;;;;AC/I/B,MAAM,gBAAgB;AACtB,MAAM,qBAAqB;AAC3B,MAAM,mBAAmB;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAmCzB,IAAa,iBAAb,MAAmD;CACjD,AAAS,OAAO;CAChB,AAAQ;CACR,AAAQ;CAER,YAAY,SAAiC,EAAE,EAAE;EAC/C,MAAM,SAAS,OAAO,UAAU,QAAQ,IAAI;EAC5C,MAAM,UAAU,OAAO,WAAW;AAGlC,MAAI,CAAC,UAAU,YAAY,iBACzB,OAAM,IAAIE,6BACR,iFACA,YACD;AAGH,OAAK,QAAQ,OAAO,SAAS;AAG7B,MAAI;GAEF,MAAM,EAAE,WAAW,QAAQ,SAAS;AACpC,QAAK,SAAS,IAAI,OAAO;IACvB,QAAQ,UAAU;IAClB,SAAS;IACV,CAAC;UACI;AACN,SAAM,IAAIA,6BACR,oFACA,YACD;;;CAIL,MAAM,SAAS,QAAgB,UAA6B,EAAE,EAAmB;AAC/E,MAAI;GACF,MAAM,SAAS,KAAK;GACpB,MAAMC,WAAgE,EAAE;AAExE,OAAI,QAAQ,aACV,UAAS,KAAK;IAAE,MAAM;IAAU,SAAS,QAAQ;IAAc,CAAC;AAElE,YAAS,KAAK;IAAE,MAAM;IAAQ,SAAS;IAAQ,CAAC;GAShD,MAAM,WAPW,MAAM,OAAO,KAAK,YAAY,OAAO;IACpD,OAAO,KAAK;IACZ,YAAY,QAAQ,aAAa;IACjC;IACA,aAAa,QAAQ;IACtB,CAAC,EAEuB,QAAQ,IAAI,SAAS;AAC9C,OAAI,CAAC,QACH,OAAM,IAAID,6BAAY,8BAA8B,YAAY;AAGlE,UAAO;WACA,OAAO;AACd,OAAI,iBAAiBA,6BAAa,OAAM;AACxC,SAAM,IAAIA,6BACR,qBAAqB,iBAAiB,QAAQ,MAAM,UAAU,OAAO,MAAM,IAC3E,aACA,QACA,iBAAiB,QAAQ,QAAQ,OAClC;;;CAIL,MAAM,aACJ,QACA,QACA,UAA6B,EAAE,EACnB;EACZ,MAAM,SAAS,KAAK;AAEpB,MAAI;GAEF,MAAMC,WAAgE,CACpE;IACE,MAAM;IACN,SACE,QAAQ,gBACR;IACH,EACD;IAAE,MAAM;IAAQ,SAAS;IAAQ,CAClC;GAUD,MAAM,WARW,MAAM,OAAO,KAAK,YAAY,OAAO;IACpD,OAAO,KAAK;IACZ,YAAY,QAAQ,aAAa;IACjC;IACA,aAAa,QAAQ;IACrB,iBAAiB,EAAE,MAAM,eAAe;IACzC,CAAC,EAEuB,QAAQ,IAAI,SAAS;AAC9C,OAAI,CAAC,QACH,OAAM,IAAID,6BAAY,8BAA8B,YAAY;GAGlE,MAAM,SAAS,KAAK,MAAM,QAAQ;AAClC,UAAO,OAAO,MAAM,OAAO;WACpB,OAAO;AAEd,OAAI,iBAAiBA,6BAAa,OAAM;GAGxC,MAAM,aAAa,GAAG,OAAO;;;EAGjC,KAAK,UAAU,gBAAgB,OAAO,EAAE,MAAM,EAAE,CAAC;;;GAI7C,MAAM,WAAW,MAAM,KAAK,SAAS,YAAY;IAC/C,GAAG;IACH,cAAc;IACf,CAAC;AAEF,OAAI;IACF,MAAM,YAAY,SAAS,MAAM,cAAc;AAC/C,QAAI,CA
AC,UACH,OAAM,IAAI,MAAM,mCAAmC;IAGrD,MAAM,SAAS,KAAK,MAAM,UAAU,GAAG;AACvC,WAAO,OAAO,MAAM,OAAO;YACpB,YAAY;AACnB,UAAM,IAAIA,6BACR,yCAAyC,sBAAsB,QAAQ,WAAW,UAAU,OAAO,WAAW,IAC9G,oBACA,QACA,sBAAsB,QAAQ,aAAa,OAC5C;;;;;;;;AAST,SAAS,gBAAgB,QAAoC;CAC3D,MAAM,MAAO,OAA+D;AAE5E,SAAQ,IAAI,UAAZ;EACE,KAAK,aAAa;GAChB,MAAM,QAAS,OAAsC;GACrD,MAAME,aAAqC,EAAE;GAC7C,MAAMC,WAAqB,EAAE;AAE7B,QAAK,MAAM,CAAC,KAAK,UAAU,OAAO,QAAQ,MAAM,EAAE;AAChD,eAAW,OAAO,gBAAgB,MAA4B;AAG9D,QADkB,MAA8D,KACnE,aAAa,cACxB,UAAS,KAAK,IAAI;;AAGtB,UAAO;IAAE,MAAM;IAAU;IAAY;IAAU;;EAEjD,KAAK,WAEH,QAAO;GAAE,MAAM;GAAS,OAAO,gBADd,IACuC,KAAK;GAAE;EAEjE,KAAK,YACH,QAAO,EAAE,MAAM,UAAU;EAC3B,KAAK,YACH,QAAO,EAAE,MAAM,UAAU;EAC3B,KAAK,aACH,QAAO,EAAE,MAAM,WAAW;EAC5B,KAAK,UAEH,QAAO;GAAE,MAAM;GAAU,MADT,IACuB;GAAQ;EAEjD,KAAK,cAEH,QAAO,gBADa,IACe,UAAU;EAE/C,QACE,QAAO,EAAE,MAAM,UAAU;;;;;;AAS/B,SAAgB,aAAa,QAAiD;AAC5E,QAAO,IAAI,eAAe,OAAO;;;;;;;;;;AAWnC,SAAgB,aACd,SAA2C,EAAE,OAAO,YAAY,EAChD;AAChB,QAAO,IAAI,eAAe;EACxB,SAAS,oBAAoB,OAAO,QAAQ,MAAM;EAClD,OAAO,OAAO;EACd,QAAQ;EACT,CAAC;;;;;;;;;;AAWJ,SAAgB,eACd,SAA2C,EAAE,OAAO,eAAe,EACnD;AAChB,QAAO,IAAI,eAAe;EACxB,SAAS,oBAAoB,OAAO,QAAQ,KAAK;EACjD,OAAO,OAAO;EACd,QAAQ;EACT,CAAC"}
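A note on the two schema helpers in this bundle: the Anthropic-side `zodToJsonSchema$1` emits only `type` and `properties`, while the OpenAI-side `zodToJsonSchema` also collects a `required` array (every key whose `_def.typeName` is not `"ZodOptional"`) and unwraps optionals to their inner type. A small sketch of that mapping, assuming the zod v3 internals both helpers read (the example schema is hypothetical, not one of the exported schemas):

```ts
import { z } from 'zod';

// Hypothetical schema; the real SummarySchema/TagsSchema shapes are not shown in this diff.
const Example = z.object({
  summary: z.string(),
  tags: z.array(z.string()),
  confidence: z.number().optional(),
});

// The OpenAI-side zodToJsonSchema above would produce roughly:
// {
//   type: "object",
//   properties: {
//     summary: { type: "string" },
//     tags: { type: "array", items: { type: "string" } },
//     confidence: { type: "number" }  // ZodOptional unwrapped via def.innerType
//   },
//   required: ["summary", "tags"]     // confidence skipped: _def.typeName === "ZodOptional"
// }
```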
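Both bundled classes satisfy the same duck-typed contract (`name`, `complete`, `completeJSON`), which the embedded sources import as `LLMProvider` from `src/llm/types.ts`. A minimal sketch of a custom provider against that shape, useful for example as a test stub; the inline interface mirrors what the two classes implement and may differ from the published typings in `dist/llm/index.d.mts`:

```ts
import type { z } from 'zod';

// Shape inferred from AnthropicProvider/OpenAIProvider above; the published
// LLMProvider interface is not shown in this diff and may differ.
interface LLMProviderLike {
  readonly name: string;
  complete(
    prompt: string,
    options?: { systemPrompt?: string; maxTokens?: number; temperature?: number }
  ): Promise<string>;
  completeJSON<T>(prompt: string, schema: z.ZodType<T>, options?: object): Promise<T>;
}

// Hypothetical stub provider for tests: returns canned output instead of calling an API.
const echoProvider: LLMProviderLike = {
  name: 'echo',
  async complete(prompt) {
    return `echo: ${prompt.slice(0, 40)}`;
  },
  async completeJSON(_prompt, schema) {
    // Run a fixed payload through the caller's schema, as the real providers do;
    // only valid for schemas that accept this shape.
    return schema.parse({ summary: 'stub', tags: [] });
  },
};
```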