@arabold/docs-mcp-server 1.12.4 → 1.13.0

This diff shows the changes between publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
@@ -1 +0,0 @@
- {"version":3,"file":"DocumentManagementService-BupnR1eC.js","sources":["../src/utils/logger.ts","../src/utils/errors.ts","../src/utils/url.ts","../src/utils/config.ts","../src/scraper/fetcher/FingerprintGenerator.ts","../src/scraper/fetcher/HttpFetcher.ts","../src/scraper/fetcher/FileFetcher.ts","../src/scraper/middleware/ContentProcessorPipeline.ts","../src/scraper/middleware/components/HtmlCheerioParserMiddleware.ts","../src/utils/dom.ts","../src/scraper/middleware/components/HtmlLinkExtractorMiddleware.ts","../src/scraper/middleware/components/HtmlMetadataExtractorMiddleware.ts","../src/scraper/types.ts","../src/scraper/middleware/components/HtmlPlaywrightMiddleware.ts","../src/scraper/middleware/components/HtmlSanitizerMiddleware.ts","../src/scraper/middleware/components/HtmlToMarkdownMiddleware.ts","../src/scraper/middleware/components/MarkdownLinkExtractorMiddleware.ts","../src/scraper/middleware/components/MarkdownMetadataExtractorMiddleware.ts","../src/pipeline/errors.ts","../src/scraper/strategies/BaseScraperStrategy.ts","../src/scraper/strategies/WebScraperStrategy.ts","../src/scraper/strategies/GitHubScraperStrategy.ts","../src/scraper/strategies/LocalFileStrategy.ts","../src/scraper/strategies/NpmScraperStrategy.ts","../src/scraper/strategies/PyPiScraperStrategy.ts","../src/scraper/ScraperRegistry.ts","../src/scraper/ScraperService.ts","../src/pipeline/PipelineWorker.ts","../src/pipeline/types.ts","../src/pipeline/PipelineManager.ts","../src/utils/string.ts","../src/splitter/errors.ts","../src/splitter/splitters/CodeContentSplitter.ts","../src/splitter/splitters/TableContentSplitter.ts","../src/splitter/splitters/TextContentSplitter.ts","../src/splitter/SemanticMarkdownSplitter.ts","../src/splitter/GreedySplitter.ts","../src/tools/errors.ts","../src/tools/ListLibrariesTool.ts","../src/tools/ScrapeTool.ts","../src/tools/SearchTool.ts","../src/utils/paths.ts","../src/store/DocumentRetrieverService.ts","../src/store/errors.ts","../src/store/applyMigrations.ts","../src/store/types.ts","../src/store/DocumentStore.ts","../src/store/DocumentManagementService.ts"],"sourcesContent":["/**\n * Defines the available log levels.\n */\nexport enum LogLevel {\n ERROR = 0,\n WARN = 1,\n INFO = 2,\n DEBUG = 3,\n}\n\nlet currentLogLevel: LogLevel = LogLevel.INFO; // Default level\n\n/**\n * Sets the current logging level for the application.\n * @param level - The desired log level.\n */\nexport function setLogLevel(level: LogLevel): void {\n currentLogLevel = level;\n}\n\n/**\n * Provides logging functionalities with level control.\n */\nexport const logger = {\n /**\n * Logs a debug message if the current log level is DEBUG or higher.\n * @param message - The message to log.\n */\n debug: (message: string) => {\n if (currentLogLevel >= LogLevel.DEBUG) {\n console.debug(message);\n }\n },\n /**\n * Logs an info message if the current log level is INFO or higher.\n * @param message - The message to log.\n */\n info: (message: string) => {\n if (currentLogLevel >= LogLevel.INFO) {\n console.log(message); // Using console.log for INFO\n }\n },\n /**\n * Logs a warning message if the current log level is WARN or higher.\n * @param message - The message to log.\n */\n warn: (message: string) => {\n if (currentLogLevel >= LogLevel.WARN) {\n console.warn(message);\n }\n },\n /**\n * Logs an error message if the current log level is ERROR or higher (always logs).\n * @param message - The message to log.\n */\n error: (message: string) => {\n if (currentLogLevel >= LogLevel.ERROR) {\n 
console.error(message);\n }\n },\n};\n","class ScraperError extends Error {\n constructor(\n message: string,\n public readonly isRetryable: boolean = false,\n public readonly cause?: Error,\n ) {\n super(message);\n this.name = this.constructor.name;\n if (cause?.stack) {\n this.stack = `${this.stack}\\nCaused by: ${cause.stack}`;\n }\n }\n}\n\nclass NetworkError extends ScraperError {\n constructor(\n message: string,\n public readonly statusCode?: number,\n cause?: Error,\n ) {\n super(message, true, cause);\n }\n}\n\nclass RateLimitError extends ScraperError {\n constructor(\n message: string,\n public readonly retryAfter?: number,\n ) {\n super(message, true);\n }\n}\n\nclass InvalidUrlError extends ScraperError {\n constructor(url: string, cause?: Error) {\n super(`Invalid URL: ${url}`, false, cause);\n }\n}\n\nclass ParsingError extends ScraperError {\n constructor(message: string, cause?: Error) {\n super(`Failed to parse content: ${message}`, false, cause);\n }\n}\n\nclass RedirectError extends ScraperError {\n constructor(\n public readonly originalUrl: string,\n public readonly redirectUrl: string,\n public readonly statusCode: number,\n ) {\n super(\n `Redirect detected from ${originalUrl} to ${redirectUrl} (status: ${statusCode})`,\n false,\n );\n }\n}\n\nexport {\n ScraperError,\n NetworkError,\n RateLimitError,\n InvalidUrlError,\n ParsingError,\n RedirectError,\n};\n","import psl from \"psl\";\nimport { InvalidUrlError } from \"./errors\";\n\ninterface UrlNormalizerOptions {\n ignoreCase?: boolean;\n removeHash?: boolean;\n removeTrailingSlash?: boolean;\n removeQuery?: boolean;\n removeIndex?: boolean;\n}\n\nconst defaultNormalizerOptions: UrlNormalizerOptions = {\n ignoreCase: true,\n removeHash: true,\n removeTrailingSlash: true,\n removeQuery: false,\n removeIndex: true,\n};\n\nexport function normalizeUrl(\n url: string,\n options: UrlNormalizerOptions = defaultNormalizerOptions,\n): string {\n try {\n const parsedUrl = new URL(url);\n const finalOptions = { ...defaultNormalizerOptions, ...options };\n\n // Create a new URL to ensure proper structure\n const normalized = new URL(parsedUrl.origin + parsedUrl.pathname);\n\n // Remove index files first, before handling trailing slashes\n if (finalOptions.removeIndex) {\n normalized.pathname = normalized.pathname.replace(\n /\\/index\\.(html|htm|asp|php|jsp)$/i,\n \"/\",\n );\n }\n\n // Handle trailing slash\n if (finalOptions.removeTrailingSlash && normalized.pathname.length > 1) {\n normalized.pathname = normalized.pathname.replace(/\\/+$/, \"\");\n }\n\n // Keep original parts we want to preserve\n const preservedHash = !finalOptions.removeHash ? parsedUrl.hash : \"\";\n const preservedSearch = !finalOptions.removeQuery ? parsedUrl.search : \"\";\n\n // Construct final URL string in correct order (query before hash)\n let result = normalized.origin + normalized.pathname;\n if (preservedSearch) {\n result += preservedSearch;\n }\n if (preservedHash) {\n result += preservedHash;\n }\n\n // Apply case normalization if configured\n if (finalOptions.ignoreCase) {\n result = result.toLowerCase();\n }\n\n return result;\n } catch {\n return url; // Return original URL if parsing fails\n }\n}\n\n/**\n * Validates if a string is a valid URL\n * @throws {InvalidUrlError} If the URL is invalid\n */\nexport function validateUrl(url: string): void {\n try {\n new URL(url);\n } catch (error) {\n throw new InvalidUrlError(url, error instanceof Error ? 
error : undefined);\n }\n}\n\n/**\n * Checks if two URLs have the exact same hostname\n */\nexport function hasSameHostname(urlA: URL, urlB: URL): boolean {\n return urlA.hostname.toLowerCase() === urlB.hostname.toLowerCase();\n}\n\n/**\n * Checks if two URLs are on the same domain (including subdomains)\n * Using the public suffix list to properly handle domains like .co.uk\n */\nexport function hasSameDomain(urlA: URL, urlB: URL): boolean {\n const domainA = psl.get(urlA.hostname.toLowerCase());\n const domainB = psl.get(urlB.hostname.toLowerCase());\n return domainA !== null && domainA === domainB;\n}\n\n/**\n * Checks if a target URL is under the same path as the base URL\n * Example: base = https://example.com/docs/\n * target = https://example.com/docs/getting-started\n * result = true\n */\nexport function isSubpath(baseUrl: URL, targetUrl: URL): boolean {\n // Normalize paths to ensure consistent comparison\n const basePath = baseUrl.pathname.endsWith(\"/\")\n ? baseUrl.pathname\n : `${baseUrl.pathname}/`;\n\n return targetUrl.pathname.startsWith(basePath);\n}\n\nexport type { UrlNormalizerOptions };\n","/**\n * Default configuration values for the scraping pipeline and server\n */\n\n/** Maximum number of pages to scrape in a single job */\nexport const DEFAULT_MAX_PAGES = 1000;\n\n/** Maximum navigation depth when crawling links */\nexport const DEFAULT_MAX_DEPTH = 3;\n\n/** Maximum number of concurrent page requests */\nexport const DEFAULT_MAX_CONCURRENCY = 3;\n\n/** Default protocol for the MCP server */\nexport const DEFAULT_PROTOCOL = \"stdio\";\n\n/** Default port for the HTTP protocol */\nexport const DEFAULT_HTTP_PORT = 6280;\n\n/** Default port for the Web UI */\nexport const DEFAULT_WEB_PORT = 6281;\n\n/**\n * Maximum number of retries for HTTP fetcher requests.\n */\nexport const FETCHER_MAX_RETRIES = 6;\n\n/**\n * Base delay in milliseconds for HTTP fetcher retry backoff.\n */\nexport const FETCHER_BASE_DELAY = 1000;\n\n/**\n * Default chunk size settings for splitters\n */\nexport const SPLITTER_MIN_CHUNK_SIZE = 500;\nexport const SPLITTER_PREFERRED_CHUNK_SIZE = 1500;\nexport const SPLITTER_MAX_CHUNK_SIZE = 5000;\n","import { HeaderGenerator, type HeaderGeneratorOptions } from \"header-generator\";\n\n/**\n * Generates realistic browser-like HTTP headers to help avoid bot detection.\n * Uses the `header-generator` library for header generation.\n */\nexport class FingerprintGenerator {\n private headerGenerator: HeaderGenerator;\n\n /**\n * Creates an instance of FingerprintGenerator.\n * @param options Optional configuration for the header generator.\n */\n constructor(options?: Partial<HeaderGeneratorOptions>) {\n // Default options for a broad range of realistic headers\n const defaultOptions: Partial<HeaderGeneratorOptions> = {\n browsers: [{ name: \"chrome\", minVersion: 100 }, \"firefox\", \"safari\"],\n devices: [\"desktop\", \"mobile\"],\n operatingSystems: [\"windows\", \"linux\", \"macos\", \"android\", \"ios\"],\n locales: [\"en-US\", \"en\"],\n httpVersion: \"2\",\n };\n\n this.headerGenerator = new HeaderGenerator({\n ...defaultOptions,\n ...options,\n });\n }\n\n /**\n * Generates a set of realistic HTTP headers.\n * @returns A set of realistic HTTP headers.\n */\n generateHeaders(): Record<string, string> {\n return this.headerGenerator.getHeaders();\n }\n}\n","import axios, { type AxiosError, type AxiosRequestConfig } from \"axios\";\nimport { FETCHER_BASE_DELAY, FETCHER_MAX_RETRIES } from \"../../utils/config\";\nimport { RedirectError, ScraperError 
} from \"../../utils/errors\";\nimport { logger } from \"../../utils/logger\";\nimport { FingerprintGenerator } from \"./FingerprintGenerator\";\nimport type { ContentFetcher, FetchOptions, RawContent } from \"./types\";\n\n/**\n * Fetches content from remote sources using HTTP/HTTPS.\n */\nexport class HttpFetcher implements ContentFetcher {\n private readonly retryableStatusCodes = [\n 408, // Request Timeout\n 429, // Too Many Requests\n 500, // Internal Server Error\n 502, // Bad Gateway\n 503, // Service Unavailable\n 504, // Gateway Timeout\n 525, // SSL Handshake Failed (Cloudflare specific)\n ];\n\n private fingerprintGenerator: FingerprintGenerator;\n\n constructor() {\n this.fingerprintGenerator = new FingerprintGenerator();\n }\n\n canFetch(source: string): boolean {\n return source.startsWith(\"http://\") || source.startsWith(\"https://\");\n }\n\n private async delay(ms: number): Promise<void> {\n return new Promise((resolve) => setTimeout(resolve, ms));\n }\n\n async fetch(source: string, options?: FetchOptions): Promise<RawContent> {\n const maxRetries = options?.maxRetries ?? FETCHER_MAX_RETRIES;\n const baseDelay = options?.retryDelay ?? FETCHER_BASE_DELAY;\n // Default to following redirects if not specified\n const followRedirects = options?.followRedirects ?? true;\n\n for (let attempt = 0; attempt <= maxRetries; attempt++) {\n try {\n const fingerprint = this.fingerprintGenerator.generateHeaders();\n const headers = {\n ...fingerprint,\n ...options?.headers, // User-provided headers override generated ones\n };\n\n const config: AxiosRequestConfig = {\n responseType: \"arraybuffer\", // For handling both text and binary\n headers,\n timeout: options?.timeout,\n signal: options?.signal, // Pass signal to axios\n // Axios follows redirects by default, we need to explicitly disable it if needed\n maxRedirects: followRedirects ? 5 : 0,\n };\n\n const response = await axios.get(source, config);\n\n return {\n content: response.data,\n mimeType: response.headers[\"content-type\"] || \"application/octet-stream\",\n source: source,\n encoding: response.headers[\"content-encoding\"],\n } satisfies RawContent;\n } catch (error: unknown) {\n const axiosError = error as AxiosError;\n const status = axiosError.response?.status;\n const code = axiosError.code;\n\n // Handle redirect errors (status codes 301, 302, 303, 307, 308)\n if (!followRedirects && status && status >= 300 && status < 400) {\n const location = axiosError.response?.headers?.location;\n if (location) {\n throw new RedirectError(source, location, status);\n }\n }\n\n if (\n attempt < maxRetries &&\n (status === undefined || this.retryableStatusCodes.includes(status))\n ) {\n const delay = baseDelay * 2 ** attempt;\n logger.warn(\n `Attempt ${attempt + 1}/${\n maxRetries + 1\n } failed for ${source} (Status: ${status}, Code: ${code}). Retrying in ${delay}ms...`,\n );\n await this.delay(delay);\n continue;\n }\n\n // Not a 5xx error or max retries reached\n throw new ScraperError(\n `Failed to fetch ${source} after ${\n attempt + 1\n } attempts: ${axiosError.message ?? \"Unknown error\"}`,\n true,\n error instanceof Error ? 
error : undefined,\n );\n }\n }\n throw new ScraperError(\n `Failed to fetch ${source} after ${maxRetries + 1} attempts`,\n true,\n );\n }\n}\n","import fs from \"node:fs/promises\";\nimport path from \"node:path\";\nimport { ScraperError } from \"../../utils/errors\";\nimport { logger } from \"../../utils/logger\";\nimport type { ContentFetcher, FetchOptions, RawContent } from \"./types\";\n\n/**\n * Fetches content from local file system.\n */\nexport class FileFetcher implements ContentFetcher {\n canFetch(source: string): boolean {\n return source.startsWith(\"file://\");\n }\n\n async fetch(source: string, options?: FetchOptions): Promise<RawContent> {\n const filePath = source.replace(/^file:\\/\\//, \"\");\n logger.info(`Fetching file: ${filePath}`);\n\n try {\n const content = await fs.readFile(filePath);\n const ext = path.extname(filePath).toLowerCase();\n const mimeType = this.getMimeType(ext);\n\n return {\n content,\n mimeType,\n source,\n encoding: \"utf-8\", // Assume UTF-8 for text files\n };\n } catch (error: unknown) {\n throw new ScraperError(\n `Failed to read file ${filePath}: ${\n (error as { message?: string }).message ?? \"Unknown error\"\n }`,\n false,\n error instanceof Error ? error : undefined,\n );\n }\n }\n\n private getMimeType(ext: string): string {\n switch (ext) {\n case \".html\":\n case \".htm\":\n return \"text/html\";\n case \".md\":\n return \"text/markdown\";\n case \".txt\":\n return \"text/plain\";\n default:\n return \"application/octet-stream\";\n }\n }\n}\n","import { logger } from \"../../utils/logger\";\nimport type { ContentProcessingContext, ContentProcessorMiddleware } from \"./types\";\n\n/**\n * Manages and executes a sequence of content processing middleware.\n */\nexport class ContentProcessingPipeline {\n private readonly middleware: ContentProcessorMiddleware[];\n\n /**\n * Creates an instance of ContentProcessingPipeline.\n * @param middleware An array of middleware instances to execute in order.\n */\n constructor(middleware: ContentProcessorMiddleware[]) {\n this.middleware = middleware;\n }\n\n /**\n * Executes the middleware pipeline with the given initial context.\n * @param initialContext The starting context for the pipeline.\n * @returns A promise that resolves with the final context after all middleware have executed.\n */\n async run(initialContext: ContentProcessingContext): Promise<ContentProcessingContext> {\n let index = -1;\n\n const dispatch = async (i: number): Promise<void> => {\n if (i <= index) {\n // next() called multiple times within the same middleware\n throw new Error(\"next() called multiple times\");\n }\n index = i;\n\n const mw: ContentProcessorMiddleware | undefined = this.middleware[i];\n if (!mw) {\n // End of the pipeline\n return;\n }\n\n // Bind the next function to the subsequent index\n const next = dispatch.bind(null, i + 1);\n\n try {\n await mw.process(initialContext, next);\n } catch (error) {\n // Add error to context and potentially stop pipeline or continue\n initialContext.errors.push(\n error instanceof Error ? error : new Error(String(error)),\n );\n // Depending on desired behavior, you might re-throw or just log\n logger.warn(`Error in middleware pipeline: ${error}`);\n // Decide if pipeline should stop on error. 
For now, let's continue.\n // If stopping is desired, uncomment the next line:\n // throw error;\n }\n };\n\n // Start the dispatch chain from the first middleware (index 0)\n await dispatch(0);\n\n // Return the final context after the chain completes\n return initialContext;\n }\n}\n","import * as cheerio from \"cheerio\";\nimport { logger } from \"../../../utils/logger\";\nimport type { ContentProcessingContext, ContentProcessorMiddleware } from \"../types\";\n\n/**\n * Middleware to parse HTML string/buffer content into a Cheerio object.\n * It populates the `context.dom` property.\n * Assumes the input HTML in `context.content` is the final version to be parsed\n * (e.g., after potential rendering by Playwright or modification by JS execution).\n */\nexport class HtmlCheerioParserMiddleware implements ContentProcessorMiddleware {\n async process(\n context: ContentProcessingContext,\n next: () => Promise<void>,\n ): Promise<void> {\n // Only process HTML content\n if (!context.contentType.startsWith(\"text/html\")) {\n await next();\n return;\n }\n\n // Ensure content is a string for Cheerio\n const htmlString =\n typeof context.content === \"string\"\n ? context.content\n : Buffer.from(context.content).toString(\"utf-8\");\n\n try {\n logger.debug(`Parsing HTML content with Cheerio from ${context.source}`);\n // Load the HTML string using Cheerio\n const $ = cheerio.load(htmlString);\n\n // Add the Cheerio API object to the context\n context.dom = $;\n\n // Proceed to the next middleware\n await next();\n } catch (error) {\n logger.error(`Failed to parse HTML with Cheerio for ${context.source}: ${error}`);\n context.errors.push(\n error instanceof Error\n ? error\n : new Error(`Cheerio HTML parsing failed: ${String(error)}`),\n );\n // Do not proceed further down the pipeline if parsing fails\n return;\n }\n }\n}\n","import { JSDOM, VirtualConsole } from \"jsdom\";\nimport type { ConstructorOptions } from \"jsdom\";\n\n/**\n * Creates a JSDOM instance with a pre-configured virtual console to suppress console noise.\n * This utility simplifies the setup of JSDOM by providing a standard configuration.\n *\n * @param html - The HTML content to parse.\n * @param options - Optional JSDOM configuration options. 
These will be merged with the default virtual console setup.\n * @returns A JSDOM instance.\n */\nexport function createJSDOM(html: string, options?: ConstructorOptions): JSDOM {\n const virtualConsole = new VirtualConsole();\n // Suppress console output from JSDOM by default\n virtualConsole.on(\"error\", () => {});\n virtualConsole.on(\"warn\", () => {});\n virtualConsole.on(\"info\", () => {});\n virtualConsole.on(\"debug\", () => {});\n virtualConsole.on(\"log\", () => {}); // Also suppress regular logs\n\n const defaultOptions: ConstructorOptions = {\n virtualConsole,\n };\n\n // Merge provided options with defaults, letting provided options override\n const finalOptions: ConstructorOptions = { ...defaultOptions, ...options };\n\n return new JSDOM(html, finalOptions);\n}\n","import { logger } from \"../../../utils/logger\"; // Added logger\nimport type { ContentProcessingContext, ContentProcessorMiddleware } from \"../types\";\n\n/**\n * Middleware to extract links (href attributes from <a> tags) from HTML content using Cheerio.\n * It expects the Cheerio API object to be available in `context.dom`.\n * This should run *after* parsing but *before* conversion to Markdown.\n */\nexport class HtmlLinkExtractorMiddleware implements ContentProcessorMiddleware {\n /**\n * Processes the context to extract links from the sanitized HTML body.\n * @param context The current processing context.\n * @param next Function to call the next middleware.\n */\n async process(\n context: ContentProcessingContext,\n next: () => Promise<void>,\n ): Promise<void> {\n // Check if we have a Cheerio object from a previous step\n const $ = context.dom;\n if (!$) {\n // Log a warning if running on HTML content without a DOM\n if (context.contentType.startsWith(\"text/html\")) {\n logger.warn(\n `Skipping ${this.constructor.name}: context.dom is missing for HTML content. Ensure HtmlCheerioParserMiddleware runs before this.`,\n );\n }\n // Otherwise, just proceed (might be non-HTML content)\n await next();\n return;\n }\n\n // Only process if we have a Cheerio object (implicitly means it's HTML)\n try {\n const linkElements = $(\"a[href]\"); // Use Cheerio selector\n logger.debug(`Found ${linkElements.length} potential links in ${context.source}`);\n\n const extractedLinks: string[] = [];\n linkElements.each((index, element) => {\n const href = $(element).attr(\"href\");\n if (href && href.trim() !== \"\") {\n try {\n const urlObj = new URL(href, context.source);\n // Explicitly check for valid protocols\n if (![\"http:\", \"https:\", \"file:\"].includes(urlObj.protocol)) {\n logger.debug(`Ignoring link with invalid protocol: ${href}`);\n return; // Continue to next element\n }\n extractedLinks.push(urlObj.href);\n } catch (e) {\n // Ignore URLs that cause the URL constructor to throw\n logger.debug(`Ignoring invalid URL syntax: ${href}`);\n }\n }\n });\n\n // Add extracted links to the context. Using a Set ensures uniqueness.\n context.links = [...new Set(extractedLinks)];\n logger.debug(\n `Extracted ${context.links.length} unique, valid links from ${context.source}`,\n );\n } catch (error) {\n logger.error(`Error extracting links from ${context.source}: ${error}`);\n context.errors.push(\n new Error(\n `Failed to extract links from HTML: ${error instanceof Error ? 
error.message : String(error)}`,\n ),\n );\n // Decide if pipeline should stop\n }\n\n // Call the next middleware in the chain\n await next();\n\n // No cleanup needed specifically for this middleware as it only reads from context\n }\n}\n","import { logger } from \"../../../utils/logger\";\nimport type { ContentProcessingContext, ContentProcessorMiddleware } from \"../types\";\n\n/**\n * Middleware to extract the title from HTML content using Cheerio.\n * Assumes context.dom (Cheerio API object) is populated by a preceding middleware\n * (e.g., HtmlCheerioParserMiddleware).\n */\nexport class HtmlMetadataExtractorMiddleware implements ContentProcessorMiddleware {\n /**\n * Processes the context to extract the HTML title.\n * @param context The current processing context.\n * @param next Function to call the next middleware.\n */\n async process(\n context: ContentProcessingContext,\n next: () => Promise<void>,\n ): Promise<void> {\n // Check if Cheerio DOM exists from previous middleware\n const $ = context.dom;\n if (!$) {\n // Log a warning if running on HTML content without a DOM\n if (context.contentType.startsWith(\"text/html\")) {\n logger.warn(\n `Skipping ${this.constructor.name}: context.dom is missing for HTML content. Ensure HtmlCheerioParserMiddleware runs before this.`,\n );\n }\n // Otherwise, just proceed (might be non-HTML content)\n await next();\n return;\n }\n\n // Only process if we have a Cheerio object (implicitly means it's HTML)\n try {\n // Extract title (using title tag, fallback to h1 if title is empty/missing)\n let title = $(\"title\").first().text().trim();\n\n if (!title) {\n // Fallback to the first H1 if title is empty\n title = $(\"h1\").first().text().trim();\n }\n\n // Default to \"Untitled\" if both are empty\n title = title || \"Untitled\";\n\n // Basic cleanup (replace multiple spaces with single space)\n title = title.replace(/\\s+/g, \" \").trim();\n\n context.metadata.title = title;\n logger.debug(`Extracted title: \"${title}\" from ${context.source}`);\n } catch (error) {\n logger.error(`Error extracting metadata from ${context.source}: ${error}`);\n context.errors.push(\n new Error(\n `Failed to extract metadata from HTML: ${error instanceof Error ? 
error.message : String(error)}`,\n ),\n );\n // Optionally decide whether to stop the pipeline here\n }\n\n // Call the next middleware in the chain\n await next();\n\n // No cleanup needed for Cheerio\n }\n}\n","import type { Document, ProgressCallback } from \"../types\";\n\n/**\n * Enum defining the available HTML processing strategies.\n */\nexport enum ScrapeMode {\n Fetch = \"fetch\",\n Playwright = \"playwright\",\n Auto = \"auto\",\n}\n\n/**\n * Strategy interface for implementing different scraping behaviors\n */\nexport interface ScraperStrategy {\n canHandle(url: string): boolean;\n scrape(\n options: ScraperOptions,\n progressCallback: ProgressCallback<ScraperProgress>,\n signal?: AbortSignal, // Add optional signal\n ): Promise<void>;\n}\n\n/**\n * Options for configuring the scraping process\n */\nexport interface ScraperOptions {\n url: string;\n library: string;\n version: string;\n maxPages?: number;\n maxDepth?: number;\n /**\n * Defines the allowed crawling boundary relative to the starting URL\n * - 'subpages': Only crawl URLs on the same hostname and within the same starting path (default)\n * - 'hostname': Crawl any URL on the same exact hostname, regardless of path\n * - 'domain': Crawl any URL on the same top-level domain, including subdomains\n */\n scope?: \"subpages\" | \"hostname\" | \"domain\";\n /**\n * Controls whether HTTP redirects (3xx responses) should be followed\n * - When true: Redirects are followed automatically (default)\n * - When false: A RedirectError is thrown when a 3xx response is received\n */\n followRedirects?: boolean;\n maxConcurrency?: number;\n ignoreErrors?: boolean;\n /** CSS selectors for elements to exclude during HTML processing */\n excludeSelectors?: string[];\n /**\n * Determines the HTML processing strategy.\n * - 'fetch': Use a simple DOM parser (faster, less JS support).\n * - 'playwright': Use a headless browser (slower, full JS support).\n * - 'auto': Automatically select the best strategy (currently defaults to 'playwright').\n * @default ScrapeMode.Auto\n */\n scrapeMode?: ScrapeMode;\n /** Optional AbortSignal for cancellation */\n signal?: AbortSignal;\n}\n\n/**\n * Result of scraping a single page. 
Used internally by HtmlScraper.\n */\nexport interface ScrapedPage {\n content: string;\n title: string;\n url: string;\n /** URLs extracted from page links, used for recursive scraping */\n links: string[];\n}\n\n/**\n * Progress information during scraping\n */\nexport interface ScraperProgress {\n pagesScraped: number;\n maxPages: number;\n currentUrl: string;\n depth: number;\n maxDepth: number;\n document?: Document;\n}\n","import { type Browser, type Page, chromium } from \"playwright\";\nimport { logger } from \"../../../utils/logger\";\nimport { ScrapeMode } from \"../../types\"; // Import ScrapeMode\nimport type { ContentProcessingContext, ContentProcessorMiddleware } from \"../types\";\n\n/**\n * Middleware to process HTML content using Playwright for rendering dynamic content,\n * *if* the scrapeMode option requires it ('playwright' or 'auto').\n * It updates `context.content` with the rendered HTML if Playwright runs.\n * Subsequent middleware (e.g., HtmlCheerioParserMiddleware) should handle parsing this content.\n */\nexport class HtmlPlaywrightMiddleware implements ContentProcessorMiddleware {\n private browser: Browser | null = null;\n\n /**\n * Initializes the Playwright browser instance.\n * Consider making this more robust (e.g., lazy initialization, singleton).\n */\n private async ensureBrowser(): Promise<Browser> {\n if (!this.browser || !this.browser.isConnected()) {\n const launchArgs = process.env.PLAYWRIGHT_LAUNCH_ARGS?.split(\" \") ?? [];\n logger.debug(\n `Launching new Playwright browser instance (Chromium) with args: ${launchArgs.join(\" \") || \"none\"}...`,\n );\n this.browser = await chromium.launch({ channel: \"chromium\", args: launchArgs });\n this.browser.on(\"disconnected\", () => {\n logger.debug(\"Playwright browser instance disconnected.\");\n this.browser = null;\n });\n }\n\n return this.browser;\n }\n\n /**\n * Closes the Playwright browser instance if it exists.\n * Should be called during application shutdown.\n */\n async closeBrowser(): Promise<void> {\n if (this.browser?.isConnected()) {\n logger.debug(\"Closing Playwright browser instance...\");\n await this.browser.close();\n this.browser = null;\n }\n }\n\n async process(\n context: ContentProcessingContext,\n next: () => Promise<void>,\n ): Promise<void> {\n // Only process HTML content\n if (!context.contentType.startsWith(\"text/html\")) {\n await next();\n return;\n }\n\n // Determine if Playwright should run based on scrapeMode\n const scrapeMode = context.options?.scrapeMode ?? 
ScrapeMode.Auto; // Default to Auto\n const shouldRunPlaywright =\n scrapeMode === ScrapeMode.Playwright || scrapeMode === ScrapeMode.Auto;\n\n if (!shouldRunPlaywright) {\n logger.debug(\n `Skipping Playwright rendering for ${context.source} as scrapeMode is '${scrapeMode}'.`,\n );\n await next();\n return;\n }\n\n // --- Playwright Execution Logic ---\n logger.debug(\n `Running Playwright rendering for ${context.source} (scrapeMode: '${scrapeMode}')`,\n );\n\n let page: Page | null = null;\n let renderedHtml: string | null = null;\n\n try {\n const browser = await this.ensureBrowser();\n page = await browser.newPage();\n logger.debug(`Playwright: Processing ${context.source}`);\n\n // Block unnecessary resources\n await page.route(\"**/*\", (route) => {\n if (route.request().url() === context.source) {\n return route.fulfill({\n status: 200,\n contentType: context.contentType,\n body: context.content,\n });\n }\n\n const resourceType = route.request().resourceType();\n if ([\"image\", \"stylesheet\", \"font\", \"media\"].includes(resourceType)) {\n return route.abort();\n }\n return route.continue();\n });\n\n // Load initial HTML content\n // Use 'domcontentloaded' as scripts might need the initial DOM structure\n // Use 'networkidle' if waiting for async data fetches is critical, but slower.\n await page.goto(context.source, {\n waitUntil: \"load\",\n });\n\n // Optionally, add a small delay or wait for a specific element if needed\n // await page.waitForTimeout(100); // Example: wait 100ms\n\n // Get the fully rendered HTML\n renderedHtml = await page.content();\n logger.debug(`Playwright: Successfully rendered content for ${context.source}`);\n } catch (error) {\n logger.error(`Playwright failed to render ${context.source}: ${error}`);\n context.errors.push(\n error instanceof Error\n ? error\n : new Error(`Playwright rendering failed: ${String(error)}`),\n );\n } finally {\n // Ensure page is closed even if subsequent steps fail\n if (page) {\n await page.unroute(\"**/*\");\n await page.close();\n }\n }\n // --- End Playwright Execution Logic ---\n\n // Update context content *only if* Playwright ran and succeeded\n if (renderedHtml !== null) {\n context.content = renderedHtml;\n logger.debug(\n `Playwright middleware updated content for ${context.source}. Proceeding.`,\n );\n } else {\n // Log if Playwright ran but failed to render\n logger.warn(\n `Playwright rendering resulted in null content for ${context.source}. Proceeding without content update.`,\n );\n }\n\n // Proceed to the next middleware regardless of Playwright success/failure\n await next();\n }\n}\n","import { logger } from \"../../../utils/logger\";\nimport type { ContentProcessingContext, ContentProcessorMiddleware } from \"../types\";\n\n/**\n * Options for HtmlSanitizerMiddleware.\n */\nexport interface HtmlSanitizerOptions {\n /** CSS selectors for elements to remove *in addition* to the defaults. 
*/\n excludeSelectors?: string[];\n}\n\n/**\n * Middleware to remove unwanted elements from parsed HTML content using Cheerio.\n * It expects the Cheerio API object (`context.dom`) to be populated by a preceding middleware\n * (e.g., HtmlCheerioParserMiddleware).\n * It modifies the `context.dom` object in place.\n */\nexport class HtmlSanitizerMiddleware implements ContentProcessorMiddleware {\n // Default selectors to remove\n private readonly defaultSelectorsToRemove = [\n \"nav\",\n \"footer\",\n \"script\",\n \"style\",\n \"noscript\",\n \"svg\",\n \"link\",\n \"meta\",\n \"iframe\",\n \"header\",\n \"button\",\n \"input\",\n \"textarea\",\n \"select\",\n // \"form\", // Keep commented\n \".ads\",\n \".advertisement\",\n \".banner\",\n \".cookie-banner\",\n \".cookie-consent\",\n \".hidden\",\n \".hide\",\n \".modal\",\n \".nav-bar\",\n \".overlay\",\n \".popup\",\n \".promo\",\n \".mw-editsection\",\n \".side-bar\",\n \".social-share\",\n \".sticky\",\n \"#ads\",\n \"#banner\",\n \"#cookieBanner\",\n \"#modal\",\n \"#nav\",\n \"#overlay\",\n \"#popup\",\n \"#sidebar\",\n \"#socialMediaBox\",\n \"#stickyHeader\",\n \"#ad-container\",\n \".ad-container\",\n \".login-form\",\n \".signup-form\",\n \".tooltip\",\n \".dropdown-menu\",\n // \".alert\", // Keep commented\n \".breadcrumb\",\n \".pagination\",\n // '[role=\"alert\"]', // Keep commented\n '[role=\"banner\"]',\n '[role=\"dialog\"]',\n '[role=\"alertdialog\"]',\n '[role=\"region\"][aria-label*=\"skip\" i]',\n '[aria-modal=\"true\"]',\n \".noprint\",\n ];\n\n async process(\n context: ContentProcessingContext,\n next: () => Promise<void>,\n ): Promise<void> {\n // Check if Cheerio DOM exists\n const $ = context.dom;\n if (!$) {\n if (context.contentType.startsWith(\"text/html\")) {\n logger.warn(\n `Skipping ${this.constructor.name}: context.dom is missing. Ensure HtmlCheerioParserMiddleware runs before this.`,\n );\n }\n await next();\n return;\n }\n\n try {\n // Remove unwanted elements using Cheerio\n const selectorsToRemove = [\n ...(context.options.excludeSelectors || []), // Use options from the context\n ...this.defaultSelectorsToRemove,\n ];\n logger.debug(\n `Removing elements matching ${selectorsToRemove.length} selectors for ${context.source}`,\n );\n let removedCount = 0;\n for (const selector of selectorsToRemove) {\n try {\n const elements = $(selector); // Use Cheerio selector\n const count = elements.length;\n if (count > 0) {\n elements.remove(); // Use Cheerio remove\n removedCount += count;\n }\n } catch (selectorError) {\n // Log invalid selectors but continue with others\n // Cheerio is generally more tolerant of invalid selectors than querySelectorAll\n logger.warn(\n `Potentially invalid selector \"${selector}\" during element removal: ${selectorError}`,\n );\n context.errors.push(\n new Error(`Invalid selector \"${selector}\": ${selectorError}`),\n );\n }\n }\n logger.debug(`Removed ${removedCount} elements for ${context.source}`);\n\n // The context.dom object ($) has been modified in place.\n } catch (error) {\n logger.error(`Error during HTML element removal for ${context.source}: ${error}`);\n context.errors.push(\n error instanceof Error\n ? error\n : new Error(`HTML element removal failed: ${String(error)}`),\n );\n // Decide if pipeline should stop? 
For now, continue.\n }\n\n // Proceed to the next middleware\n await next();\n }\n}\n","// @ts-ignore\nimport { gfm } from \"@joplin/turndown-plugin-gfm\";\nimport TurndownService from \"turndown\";\nimport { logger } from \"../../../utils/logger\"; // Added logger\nimport type { ContentProcessingContext, ContentProcessorMiddleware } from \"../types\";\n\n/**\n * Middleware to convert the final processed HTML content (from Cheerio object in context.dom)\n * into Markdown using Turndown, applying custom rules.\n */\nexport class HtmlToMarkdownMiddleware implements ContentProcessorMiddleware {\n private turndownService: TurndownService;\n\n constructor() {\n this.turndownService = new TurndownService({\n headingStyle: \"atx\",\n hr: \"---\",\n bulletListMarker: \"-\",\n codeBlockStyle: \"fenced\",\n emDelimiter: \"_\",\n strongDelimiter: \"**\",\n linkStyle: \"inlined\",\n });\n\n this.turndownService.use(gfm);\n\n this.addCustomRules();\n }\n\n private addCustomRules(): void {\n // Preserve code blocks and syntax (replicated from HtmlProcessor)\n this.turndownService.addRule(\"pre\", {\n filter: [\"pre\"],\n replacement: (content, node) => {\n const element = node as unknown as HTMLElement;\n let language = element.getAttribute(\"data-language\") || \"\";\n if (!language) {\n // Try to infer the language from the class name\n // This is a common pattern in syntax highlighters\n const highlightElement =\n element.closest(\n '[class*=\"highlight-source-\"], [class*=\"highlight-\"], [class*=\"language-\"]',\n ) ||\n element.querySelector(\n '[class*=\"highlight-source-\"], [class*=\"highlight-\"], [class*=\"language-\"]',\n );\n if (highlightElement) {\n const className = highlightElement.className;\n const match = className.match(\n /(?:highlight-source-|highlight-|language-)(\\w+)/,\n );\n if (match) language = match[1];\n }\n }\n\n const brElements = Array.from(element.querySelectorAll(\"br\"));\n for (const br of brElements) {\n br.replaceWith(\"\\n\");\n }\n const text = element.textContent || \"\";\n\n return `\\n\\`\\`\\`${language}\\n${text.replace(/^\\n+|\\n+$/g, \"\")}\\n\\`\\`\\`\\n`;\n },\n });\n this.turndownService.addRule(\"anchor\", {\n filter: [\"a\"],\n replacement: (content, node) => {\n const href = (node as HTMLElement).getAttribute(\"href\");\n if (!content || content === \"#\") {\n return \"\"; // Remove if content is # or empty\n }\n if (!href) {\n return content; // Preserve content if href is missing or empty\n }\n return `[${content}](${href})`; // Standard link conversion\n },\n });\n }\n\n /**\n * Processes the context to convert the sanitized HTML body node to Markdown.\n * @param context The current processing context.\n * @param next Function to call the next middleware.\n */\n async process(\n context: ContentProcessingContext,\n next: () => Promise<void>,\n ): Promise<void> {\n // Check if we have a Cheerio object from a previous step\n const $ = context.dom;\n if (!$) {\n // Log a warning if running on HTML content without a DOM\n if (context.contentType.startsWith(\"text/html\")) {\n logger.warn(\n `Skipping ${this.constructor.name}: context.dom is missing for HTML content. 
Ensure HtmlCheerioParserMiddleware ran correctly.`,\n );\n }\n // Otherwise, just proceed (might be non-HTML content or error state)\n await next();\n return;\n }\n\n // Only process if we have a Cheerio object (implicitly means it's HTML)\n try {\n logger.debug(`Converting HTML content to Markdown for ${context.source}`);\n // Provide Turndown with the HTML string content from the Cheerio object's body,\n // or the whole document if body is empty/unavailable.\n const htmlToConvert = $(\"body\").html() || $.html();\n const markdown = this.turndownService.turndown(htmlToConvert).trim();\n\n if (!markdown) {\n // If conversion results in empty markdown, log a warning but treat as valid empty markdown\n const warnMsg = `HTML to Markdown conversion resulted in empty content for ${context.source}.`;\n logger.warn(warnMsg);\n // Set content to empty string and update type, do not add error\n context.content = \"\";\n context.contentType = \"text/markdown\";\n } else {\n // Conversion successful and produced non-empty markdown\n context.content = markdown;\n context.contentType = \"text/markdown\"; // Update content type\n logger.debug(`Successfully converted HTML to Markdown for ${context.source}`);\n }\n } catch (error) {\n logger.error(`Error converting HTML to Markdown for ${context.source}: ${error}`);\n context.errors.push(\n new Error(\n `Failed to convert HTML to Markdown: ${error instanceof Error ? error.message : String(error)}`,\n ),\n );\n // Decide if pipeline should stop? For now, continue.\n }\n\n // Call the next middleware in the chain regardless of whether conversion happened\n await next();\n\n // No need to close/free Cheerio object explicitly\n // context.dom = undefined; // Optionally clear the dom property if no longer needed downstream\n }\n}\n","import type { ContentProcessingContext, ContentProcessorMiddleware } from \"../types\";\n\n/**\n * Placeholder middleware for extracting links from Markdown content.\n * Currently, it does not implement link extraction, matching the\n * original MarkdownProcessor's TODO status.\n */\nexport class MarkdownLinkExtractorMiddleware implements ContentProcessorMiddleware {\n /**\n * Processes the context. 
Currently a no-op regarding link extraction.\n * @param context The current processing context.\n * @param next Function to call the next middleware.\n */\n async process(\n context: ContentProcessingContext,\n next: () => Promise<void>,\n ): Promise<void> {\n if (context.contentType === \"text/markdown\") {\n // TODO: Implement Markdown link extraction (e.g., using regex or a Markdown parser)\n // For now, ensure context.links exists, defaulting to empty array if not set.\n if (!Array.isArray(context.links)) {\n context.links = [];\n }\n // No links are added here yet.\n }\n\n // Call the next middleware in the chain\n await next();\n\n // No cleanup needed\n }\n}\n","import type { ContentProcessingContext, ContentProcessorMiddleware } from \"../types\";\n\n/**\n * Middleware to extract the title (first H1 heading) from Markdown content.\n */\nexport class MarkdownMetadataExtractorMiddleware implements ContentProcessorMiddleware {\n /**\n * Processes the context to extract the title from Markdown.\n * @param context The current processing context.\n * @param next Function to call the next middleware.\n */\n async process(\n context: ContentProcessingContext,\n next: () => Promise<void>,\n ): Promise<void> {\n // Process both markdown and plain text for title extraction (or default)\n if (context.contentType === \"text/markdown\" || context.contentType === \"text/plain\") {\n try {\n // Ensure content is a string\n const textContent =\n typeof context.content === \"string\"\n ? context.content\n : Buffer.from(context.content).toString(\"utf-8\"); // Assume utf-8 if buffer\n // Update context content to string if it was a buffer\n if (typeof context.content !== \"string\") {\n context.content = textContent;\n }\n\n let title = \"Untitled\"; // Default title\n // Only look for H1 if it's actually markdown\n if (context.contentType === \"text/markdown\") {\n const match = textContent.match(/^#\\s+(.*)$/m);\n if (match?.[1]) {\n title = match[1].trim();\n }\n }\n // Set title (either extracted H1 or the default \"Untitled\")\n context.metadata.title = title;\n } catch (error) {\n context.errors.push(\n new Error(\n `Failed to extract metadata from Markdown: ${error instanceof Error ? 
error.message : String(error)}`,\n ),\n );\n // Decide if pipeline should stop\n }\n }\n\n // Call the next middleware in the chain\n await next();\n\n // No cleanup needed\n }\n}\n","export class PipelineError extends Error {\n constructor(\n message: string,\n public readonly cause?: Error,\n ) {\n super(message);\n this.name = this.constructor.name;\n if (cause?.stack) {\n this.stack = `${this.stack}\\nCaused by: ${cause.stack}`;\n }\n }\n}\n\nexport class DocumentProcessingError extends PipelineError {\n constructor(\n message: string,\n public readonly documentId: string,\n cause?: Error,\n ) {\n super(`Failed to process document ${documentId}: ${message}`, cause);\n }\n}\n\nexport class PipelineStateError extends PipelineError {}\n\n/**\n * Error indicating that an operation was cancelled.\n */\nexport class CancellationError extends PipelineError {\n constructor(message = \"Operation cancelled\") {\n super(message);\n }\n}\n","import { URL } from \"node:url\";\nimport { CancellationError } from \"../../pipeline/errors\";\nimport type { Document, ProgressCallback } from \"../../types\";\nimport { logger } from \"../../utils/logger\";\nimport { type UrlNormalizerOptions, normalizeUrl } from \"../../utils/url\";\nimport type { ScraperOptions, ScraperProgress, ScraperStrategy } from \"../types\";\n\n// Define defaults for optional options\nconst DEFAULT_MAX_PAGES = 100;\nconst DEFAULT_MAX_DEPTH = 3;\nconst DEFAULT_CONCURRENCY = 3;\n\nexport type QueueItem = {\n url: string;\n depth: number;\n};\n\nexport interface BaseScraperStrategyOptions {\n urlNormalizerOptions?: UrlNormalizerOptions;\n}\n\nexport abstract class BaseScraperStrategy implements ScraperStrategy {\n protected visited = new Set<string>();\n protected pageCount = 0;\n\n abstract canHandle(url: string): boolean;\n\n protected options: BaseScraperStrategyOptions;\n\n constructor(options: BaseScraperStrategyOptions = {}) {\n this.options = options;\n }\n\n /**\n * Process a single item from the queue.\n *\n * @returns A list of URLs to add to the queue\n */\n protected abstract processItem(\n item: QueueItem,\n options: ScraperOptions,\n progressCallback?: ProgressCallback<ScraperProgress>,\n signal?: AbortSignal, // Add signal\n ): Promise<{\n document?: Document;\n links?: string[];\n }>;\n\n // Removed getProcessor method as processing is now handled by strategies using middleware pipelines\n\n protected async processBatch(\n batch: QueueItem[],\n baseUrl: URL,\n options: ScraperOptions,\n progressCallback: ProgressCallback<ScraperProgress>,\n signal?: AbortSignal, // Add signal\n ): Promise<QueueItem[]> {\n const results = await Promise.all(\n batch.map(async (item) => {\n // Check signal before processing each item in the batch\n if (signal?.aborted) {\n throw new CancellationError(\"Scraping cancelled during batch processing\");\n }\n // Resolve default for maxDepth check\n const maxDepth = options.maxDepth ?? DEFAULT_MAX_DEPTH;\n if (item.depth > maxDepth) {\n return [];\n }\n\n try {\n // Pass signal to processItem\n const result = await this.processItem(item, options, undefined, signal);\n\n if (result.document) {\n this.pageCount++;\n // Resolve defaults for logging and progress callback\n const maxPages = options.maxPages ?? 
DEFAULT_MAX_PAGES;\n // maxDepth already resolved above\n logger.info(\n `🌐 Scraping page ${this.pageCount}/${maxPages} (depth ${item.depth}/${maxDepth}): ${item.url}`,\n );\n await progressCallback({\n pagesScraped: this.pageCount,\n maxPages: maxPages,\n currentUrl: item.url,\n depth: item.depth,\n maxDepth: maxDepth,\n document: result.document,\n });\n }\n\n const nextItems = result.links || [];\n return nextItems\n .map((value) => {\n try {\n const targetUrl = new URL(value, baseUrl);\n return {\n url: targetUrl.href,\n depth: item.depth + 1,\n } satisfies QueueItem;\n } catch (error) {\n // Invalid URL or path\n logger.warn(`❌ Invalid URL: ${value}`);\n }\n return null;\n })\n .filter((item) => item !== null);\n } catch (error) {\n if (options.ignoreErrors) {\n logger.error(`❌ Failed to process ${item.url}: ${error}`);\n return [];\n }\n throw error;\n }\n }),\n );\n\n // After all concurrent processing is done, deduplicate the results\n const allLinks = results.flat();\n const uniqueLinks: QueueItem[] = [];\n\n // Now perform deduplication once, after all parallel processing is complete\n for (const item of allLinks) {\n const normalizedUrl = normalizeUrl(item.url, this.options.urlNormalizerOptions);\n if (!this.visited.has(normalizedUrl)) {\n this.visited.add(normalizedUrl);\n uniqueLinks.push(item);\n }\n }\n\n return uniqueLinks;\n }\n\n async scrape(\n options: ScraperOptions,\n progressCallback: ProgressCallback<ScraperProgress>,\n signal?: AbortSignal, // Add signal\n ): Promise<void> {\n this.visited.clear();\n this.pageCount = 0;\n\n const baseUrl = new URL(options.url);\n const queue = [{ url: options.url, depth: 0 } satisfies QueueItem];\n\n // Track values we've seen (either queued or visited)\n this.visited.add(normalizeUrl(options.url, this.options.urlNormalizerOptions));\n\n // Resolve optional values to defaults using temporary variables\n const maxPages = options.maxPages ?? DEFAULT_MAX_PAGES;\n const maxConcurrency = options.maxConcurrency ?? 
DEFAULT_CONCURRENCY;\n\n while (queue.length > 0 && this.pageCount < maxPages) {\n // Use variable\n // Check for cancellation at the start of each loop iteration\n if (signal?.aborted) {\n logger.info(\"Scraping cancelled by signal.\");\n throw new CancellationError(\"Scraping cancelled by signal\");\n }\n\n const remainingPages = maxPages - this.pageCount; // Use variable\n if (remainingPages <= 0) {\n break;\n }\n\n const batchSize = Math.min(\n maxConcurrency, // Use variable\n remainingPages,\n queue.length,\n );\n\n const batch = queue.splice(0, batchSize);\n // Pass signal to processBatch\n const newUrls = await this.processBatch(\n batch,\n baseUrl,\n options,\n progressCallback,\n signal,\n );\n\n queue.push(...newUrls);\n }\n }\n}\n","import type { Document, ProgressCallback } from \"../../types\";\nimport { logger } from \"../../utils/logger\";\nimport type { UrlNormalizerOptions } from \"../../utils/url\";\nimport { hasSameDomain, hasSameHostname, isSubpath } from \"../../utils/url\";\nimport { HttpFetcher } from \"../fetcher\";\nimport type { RawContent } from \"../fetcher/types\";\nimport { ContentProcessingPipeline } from \"../middleware/ContentProcessorPipeline\";\n// Import new and updated middleware from index\nimport {\n HtmlCheerioParserMiddleware, // Use the new Cheerio parser\n HtmlLinkExtractorMiddleware,\n HtmlMetadataExtractorMiddleware,\n HtmlPlaywrightMiddleware, // Keep Playwright for rendering\n HtmlSanitizerMiddleware, // Keep Sanitizer (element remover)\n HtmlToMarkdownMiddleware,\n MarkdownLinkExtractorMiddleware,\n MarkdownMetadataExtractorMiddleware,\n} from \"../middleware/components\";\nimport type { ContentProcessorMiddleware } from \"../middleware/types\";\nimport type { ContentProcessingContext } from \"../middleware/types\";\nimport type { ScraperOptions, ScraperProgress } from \"../types\";\nimport { BaseScraperStrategy, type QueueItem } from \"./BaseScraperStrategy\";\n\nexport interface WebScraperStrategyOptions {\n urlNormalizerOptions?: UrlNormalizerOptions;\n shouldFollowLink?: (baseUrl: URL, targetUrl: URL) => boolean;\n}\n\nexport class WebScraperStrategy extends BaseScraperStrategy {\n private readonly httpFetcher = new HttpFetcher();\n private readonly shouldFollowLinkFn?: (baseUrl: URL, targetUrl: URL) => boolean;\n private readonly playwrightMiddleware: HtmlPlaywrightMiddleware; // Add member\n\n constructor(options: WebScraperStrategyOptions = {}) {\n super({ urlNormalizerOptions: options.urlNormalizerOptions });\n this.shouldFollowLinkFn = options.shouldFollowLink;\n this.playwrightMiddleware = new HtmlPlaywrightMiddleware(); // Instantiate here\n }\n\n canHandle(url: string): boolean {\n try {\n const parsedUrl = new URL(url);\n return parsedUrl.protocol === \"http:\" || parsedUrl.protocol === \"https:\";\n } catch {\n return false;\n }\n }\n\n /**\n * Determines if a target URL should be followed based on the scope setting.\n */\n private isInScope(\n baseUrl: URL,\n targetUrl: URL,\n scope: \"subpages\" | \"hostname\" | \"domain\",\n ): boolean {\n try {\n // First check if the URLs are on the same domain or hostname\n if (scope === \"domain\") {\n return hasSameDomain(baseUrl, targetUrl);\n }\n if (scope === \"hostname\") {\n return hasSameHostname(baseUrl, targetUrl);\n }\n // 'subpages' (default)\n return hasSameHostname(baseUrl, targetUrl) && isSubpath(baseUrl, targetUrl);\n } catch {\n return false;\n }\n }\n\n protected override async processItem(\n item: QueueItem,\n options: ScraperOptions,\n _progressCallback?: 
ProgressCallback<ScraperProgress>, // Base class passes it, but not used here\n signal?: AbortSignal, // Add signal\n ): Promise<{ document?: Document; links?: string[] }> {\n const { url } = item;\n\n try {\n // Define fetch options, passing both signal and followRedirects\n const fetchOptions = {\n signal,\n followRedirects: options.followRedirects,\n };\n\n // Pass options to fetcher\n const rawContent: RawContent = await this.httpFetcher.fetch(url, fetchOptions);\n\n // --- Start Middleware Pipeline ---\n const initialContext: ContentProcessingContext = {\n content: rawContent.content,\n contentType: rawContent.mimeType,\n source: rawContent.source, // Use the final source URL after redirects\n metadata: {},\n links: [],\n errors: [],\n options,\n fetcher: this.httpFetcher,\n };\n\n let pipeline: ContentProcessingPipeline;\n if (initialContext.contentType.startsWith(\"text/html\")) {\n // Construct the new HTML pipeline order\n const htmlPipelineSteps: ContentProcessorMiddleware[] = [\n this.playwrightMiddleware, // Use the instance member\n // TODO: Add HtmlJsExecutorMiddleware here if needed based on options\n new HtmlCheerioParserMiddleware(), // Always runs after content is finalized\n new HtmlMetadataExtractorMiddleware(),\n new HtmlLinkExtractorMiddleware(),\n new HtmlSanitizerMiddleware(), // Element remover\n new HtmlToMarkdownMiddleware(),\n ];\n pipeline = new ContentProcessingPipeline(htmlPipelineSteps);\n } else if (\n initialContext.contentType === \"text/markdown\" ||\n initialContext.contentType === \"text/plain\" // Treat plain text as markdown\n ) {\n pipeline = new ContentProcessingPipeline([\n new MarkdownMetadataExtractorMiddleware(),\n new MarkdownLinkExtractorMiddleware(), // Placeholder for now\n ]);\n } else {\n // Unsupported content type, treat as error or skip\n logger.warn(\n `Unsupported content type \"${initialContext.contentType}\" for URL ${url}. Skipping processing.`,\n );\n // Return empty result, allowing crawl to potentially continue if links were somehow extracted elsewhere\n return { document: undefined, links: [] };\n }\n\n const finalContext = await pipeline.run(initialContext);\n // --- End Middleware Pipeline ---\n\n // Log errors from pipeline\n for (const err of finalContext.errors) {\n logger.warn(`Processing error for ${url}: ${err.message}`);\n }\n\n // Check if content processing resulted in usable content\n if (typeof finalContext.content !== \"string\" || !finalContext.content.trim()) {\n logger.warn(`No processable content found for ${url} after pipeline execution.`);\n // Return empty but allow crawl to continue based on extracted links\n return { document: undefined, links: finalContext.links };\n }\n\n // Filter extracted links based on scope and custom filter\n const baseUrl = new URL(options.url); // Use the original base URL for scope calculation\n const filteredLinks = finalContext.links.filter((link) => {\n try {\n const targetUrl = new URL(link); // Links should be absolute now\n const scope = options.scope || \"subpages\";\n return (\n this.isInScope(baseUrl, targetUrl, scope) &&\n (!this.shouldFollowLinkFn || this.shouldFollowLinkFn(baseUrl, targetUrl))\n );\n } catch {\n return false; // Ignore invalid URLs\n }\n });\n\n return {\n document: {\n content: finalContext.content, // Final processed content (Markdown)\n metadata: {\n url: finalContext.source, // URL after redirects\n // Ensure title is a string, default to \"Untitled\"\n title:\n typeof finalContext.metadata.title === \"string\"\n ? 
finalContext.metadata.title\n : \"Untitled\",\n library: options.library,\n version: options.version,\n // Add other metadata from context if needed\n },\n } satisfies Document,\n links: filteredLinks, // Use the filtered links\n };\n } catch (error) {\n // Log fetch errors or pipeline execution errors (if run throws)\n logger.error(`Failed processing page ${url}: ${error}`);\n throw error;\n }\n }\n\n /**\n * Overrides the base scrape method to ensure the Playwright browser is closed\n * after the scraping process completes or errors out.\n */\n override async scrape(\n options: ScraperOptions,\n progressCallback: ProgressCallback<ScraperProgress>,\n signal?: AbortSignal,\n ): Promise<void> {\n try {\n // Call the base class scrape method\n await super.scrape(options, progressCallback, signal);\n } finally {\n // Ensure the browser instance is closed\n await this.playwrightMiddleware.closeBrowser();\n }\n }\n}\n","import type { ProgressCallback } from \"../../types\";\nimport type { ScraperOptions, ScraperProgress, ScraperStrategy } from \"../types\";\nimport { WebScraperStrategy } from \"./WebScraperStrategy\";\n\nexport class GitHubScraperStrategy implements ScraperStrategy {\n private defaultStrategy: WebScraperStrategy;\n\n canHandle(url: string): boolean {\n const { hostname } = new URL(url);\n return [\"github.com\", \"www.github.com\"].includes(hostname);\n }\n\n constructor() {\n const shouldFollowLink = (baseUrl: URL, targetUrl: URL) => {\n // Must be in same repository\n if (this.getRepoPath(baseUrl) !== this.getRepoPath(targetUrl)) {\n return false;\n }\n\n const path = targetUrl.pathname;\n\n // Root README (repository root)\n if (path === this.getRepoPath(targetUrl)) {\n return true;\n }\n\n // Wiki pages\n if (path.startsWith(`${this.getRepoPath(targetUrl)}/wiki`)) {\n return true;\n }\n\n // Markdown files under /blob/\n if (\n path.startsWith(`${this.getRepoPath(targetUrl)}/blob/`) &&\n path.endsWith(\".md\")\n ) {\n return true;\n }\n\n return false;\n };\n\n this.defaultStrategy = new WebScraperStrategy({\n urlNormalizerOptions: {\n ignoreCase: true,\n removeHash: true,\n removeTrailingSlash: true,\n removeQuery: true, // Remove query parameters like ?tab=readme-ov-file\n },\n shouldFollowLink,\n });\n }\n\n private getRepoPath(url: URL): string {\n // Extract /<org>/<repo> from github.com/<org>/<repo>/...\n const match = url.pathname.match(/^\\/[^/]+\\/[^/]+/);\n return match?.[0] || \"\";\n }\n\n async scrape(\n options: ScraperOptions,\n progressCallback: ProgressCallback<ScraperProgress>,\n signal?: AbortSignal,\n ): Promise<void> {\n // Validate it's a GitHub URL\n const url = new URL(options.url);\n if (!url.hostname.includes(\"github.com\")) {\n throw new Error(\"URL must be a GitHub URL\");\n }\n\n // Pass signal down to the delegated strategy\n await this.defaultStrategy.scrape(options, progressCallback, signal);\n }\n}\n","import fs from \"node:fs/promises\";\nimport path from \"node:path\";\nimport type { Document, ProgressCallback } from \"../../types\";\nimport { logger } from \"../../utils/logger\";\nimport { FileFetcher } from \"../fetcher\";\nimport type { RawContent } from \"../fetcher/types\";\nimport { ContentProcessingPipeline } from \"../middleware/ContentProcessorPipeline\";\n// Import new and updated middleware from index\nimport {\n HtmlCheerioParserMiddleware,\n HtmlMetadataExtractorMiddleware,\n HtmlSanitizerMiddleware,\n HtmlToMarkdownMiddleware,\n MarkdownMetadataExtractorMiddleware,\n} from \"../middleware/components\";\n// Note: Link 
extractors are not used for local file content\nimport type { ContentProcessingContext } from \"../middleware/types\";\nimport type { ScraperOptions, ScraperProgress } from \"../types\";\nimport { BaseScraperStrategy, type QueueItem } from \"./BaseScraperStrategy\";\n\nexport class LocalFileStrategy extends BaseScraperStrategy {\n private readonly fileFetcher = new FileFetcher();\n\n canHandle(url: string): boolean {\n return url.startsWith(\"file://\");\n }\n\n protected async processItem(\n item: QueueItem,\n options: ScraperOptions,\n _progressCallback?: ProgressCallback<ScraperProgress>, // Add unused param to match base\n _signal?: AbortSignal, // Add unused signal to match base\n ): Promise<{ document?: Document; links?: string[] }> {\n // Note: Cancellation signal is not actively checked here as file operations are typically fast.\n const filePath = item.url.replace(/^file:\\/\\//, \"\");\n const stats = await fs.stat(filePath);\n\n // If this is a directory, return contained files and subdirectories as new paths\n if (stats.isDirectory()) {\n const contents = await fs.readdir(filePath);\n return {\n links: contents.map((name) => `file://${path.join(filePath, name)}`),\n };\n }\n\n // Process the file\n logger.info(`📄 Processing file ${this.pageCount}/${options.maxPages}: ${filePath}`);\n\n const rawContent: RawContent = await this.fileFetcher.fetch(item.url);\n\n // --- Start Middleware Pipeline ---\n const initialContext: ContentProcessingContext = {\n content: rawContent.content,\n contentType: rawContent.mimeType,\n source: rawContent.source, // file:// URL\n metadata: {},\n links: [], // LocalFileStrategy doesn't extract links from file content itself\n errors: [],\n options: options, // Pass the full options object\n };\n\n let pipeline: ContentProcessingPipeline;\n if (initialContext.contentType.startsWith(\"text/html\")) {\n // Updated HTML pipeline for local files (no link extraction from content)\n pipeline = new ContentProcessingPipeline([\n new HtmlCheerioParserMiddleware(),\n new HtmlMetadataExtractorMiddleware(),\n // No HtmlLinkExtractorMiddleware needed for local files\n new HtmlSanitizerMiddleware(),\n new HtmlToMarkdownMiddleware(),\n ]);\n } else if (\n initialContext.contentType === \"text/markdown\" ||\n initialContext.contentType === \"text/plain\" || // Treat plain text as markdown\n initialContext.contentType.startsWith(\"text/\") // Added for compatibility\n ) {\n // Markdown pipeline remains simple\n pipeline = new ContentProcessingPipeline([\n new MarkdownMetadataExtractorMiddleware(),\n // No MarkdownLinkExtractorMiddleware needed for local files\n ]);\n } else {\n logger.warn(\n `Unsupported content type \"${initialContext.contentType}\" for file ${filePath}. Skipping processing.`,\n );\n return { document: undefined, links: [] }; // Return empty\n }\n\n const finalContext = await pipeline.run(initialContext);\n // --- End Middleware Pipeline ---\n\n // Log errors from pipeline\n for (const err of finalContext.errors) {\n logger.warn(`Processing error for ${filePath}: ${err.message}`);\n }\n\n // If pipeline ran successfully, always create a document, even if content is empty/whitespace.\n // Downstream consumers (e.g., indexing) can filter if needed.\n // Ensure content is a string before creating the document.\n const finalContentString =\n typeof finalContext.content === \"string\"\n ? 
finalContext.content\n : Buffer.from(finalContext.content).toString(\"utf-8\");\n\n return {\n document: {\n // Use the potentially empty string content\n content: finalContentString,\n metadata: {\n url: finalContext.source, // Use context source (file:// URL)\n // Ensure title is a string, default to \"Untitled\"\n title:\n typeof finalContext.metadata.title === \"string\"\n ? finalContext.metadata.title\n : \"Untitled\",\n library: options.library,\n version: options.version,\n },\n } satisfies Document,\n // No links returned from file content processing\n };\n }\n\n async scrape(\n options: ScraperOptions,\n progressCallback: ProgressCallback<ScraperProgress>,\n signal?: AbortSignal,\n ): Promise<void> {\n // Pass signal down to base class scrape method\n await super.scrape(options, progressCallback, signal); // Pass the received signal\n }\n}\n","import type { ProgressCallback } from \"../../types\";\nimport type { ScraperOptions, ScraperProgress, ScraperStrategy } from \"../types\";\nimport { WebScraperStrategy } from \"./WebScraperStrategy\";\n\nexport class NpmScraperStrategy implements ScraperStrategy {\n private defaultStrategy: WebScraperStrategy;\n\n canHandle(url: string): boolean {\n const { hostname } = new URL(url);\n return [\"npmjs.org\", \"npmjs.com\", \"www.npmjs.com\"].includes(hostname);\n }\n\n constructor() {\n this.defaultStrategy = new WebScraperStrategy({\n urlNormalizerOptions: {\n ignoreCase: true,\n removeHash: true,\n removeTrailingSlash: true,\n removeQuery: true, // Enable removeQuery for NPM packages\n },\n });\n }\n\n async scrape(\n options: ScraperOptions,\n progressCallback: ProgressCallback<ScraperProgress>,\n signal?: AbortSignal,\n ): Promise<void> {\n // Use default strategy with our configuration, passing the signal\n await this.defaultStrategy.scrape(options, progressCallback, signal);\n }\n}\n","import type { ProgressCallback } from \"../../types\";\nimport type { ScraperOptions, ScraperProgress, ScraperStrategy } from \"../types\";\nimport { WebScraperStrategy } from \"./WebScraperStrategy\";\n\nexport class PyPiScraperStrategy implements ScraperStrategy {\n private defaultStrategy: WebScraperStrategy;\n\n canHandle(url: string): boolean {\n const { hostname } = new URL(url);\n return [\"pypi.org\", \"www.pypi.org\"].includes(hostname);\n }\n\n constructor() {\n this.defaultStrategy = new WebScraperStrategy({\n urlNormalizerOptions: {\n ignoreCase: true,\n removeHash: true,\n removeTrailingSlash: true,\n removeQuery: true, // Enable removeQuery for PyPI packages\n },\n });\n }\n\n async scrape(\n options: ScraperOptions,\n progressCallback: ProgressCallback<ScraperProgress>,\n signal?: AbortSignal,\n ): Promise<void> {\n // Use default strategy with our configuration, passing the signal\n await this.defaultStrategy.scrape(options, progressCallback, signal);\n }\n}\n","import { ScraperError } from \"../utils/errors\";\nimport { validateUrl } from \"../utils/url\";\nimport { GitHubScraperStrategy } from \"./strategies/GitHubScraperStrategy\";\nimport { LocalFileStrategy } from \"./strategies/LocalFileStrategy\";\nimport { NpmScraperStrategy } from \"./strategies/NpmScraperStrategy\";\nimport { PyPiScraperStrategy } from \"./strategies/PyPiScraperStrategy\";\nimport { WebScraperStrategy } from \"./strategies/WebScraperStrategy\";\nimport type { ScraperStrategy } from \"./types\";\n\nexport class ScraperRegistry {\n private strategies: ScraperStrategy[];\n\n constructor() {\n this.strategies = [\n new NpmScraperStrategy(),\n new 
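// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the original sources]
// Both registry strategies above delegate to WebScraperStrategy with the same
// urlNormalizerOptions. A minimal sketch of what those four flags do when
// deduplicating crawl targets; the real normalizer lives in ../utils/url and
// may differ in detail:
function normalizeForDeduplication(raw: string): string {
  const url = new URL(raw);
  url.hash = ""; // removeHash: drop the #fragment
  url.search = ""; // removeQuery: drop ?query, e.g. ?tab=readme-ov-file
  let result = url.toString().toLowerCase(); // ignoreCase
  if (result.endsWith("/")) result = result.slice(0, -1); // removeTrailingSlash
  return result;
}

// normalizeForDeduplication("https://www.npmjs.com/Package/React?activeTab=readme#install")
//   => "https://www.npmjs.com/package/react"
// ---------------------------------------------------------------------------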
PyPiScraperStrategy(),\n new GitHubScraperStrategy(),\n new WebScraperStrategy(),\n new LocalFileStrategy(),\n ];\n }\n\n getStrategy(url: string): ScraperStrategy {\n validateUrl(url);\n const strategy = this.strategies.find((s) => s.canHandle(url));\n if (!strategy) {\n throw new ScraperError(`No strategy found for URL: ${url}`);\n }\n return strategy;\n }\n}\n","import type { ProgressCallback } from \"../types\";\nimport { ScraperError } from \"../utils/errors\";\nimport type { ScraperRegistry } from \"./ScraperRegistry\";\nimport type { ScraperOptions, ScraperProgress } from \"./types\";\n\n/**\n * Orchestrates document scraping operations using registered scraping strategies.\n * Automatically selects appropriate strategy based on URL patterns.\n */\nexport class ScraperService {\n private registry: ScraperRegistry;\n\n constructor(registry: ScraperRegistry) {\n this.registry = registry;\n }\n\n /**\n * Scrapes content from the provided URL using the appropriate strategy.\n * Reports progress via callback and handles errors.\n */\n async scrape(\n options: ScraperOptions,\n progressCallback: ProgressCallback<ScraperProgress>,\n signal?: AbortSignal, // Add optional signal parameter\n ): Promise<void> {\n // Find strategy for this URL\n const strategy = this.registry.getStrategy(options.url);\n if (!strategy) {\n throw new ScraperError(`No scraper strategy found for URL: ${options.url}`, false);\n }\n\n // Pass the signal down to the strategy\n await strategy.scrape(options, progressCallback, signal);\n }\n}\n","import type { ScraperService } from \"../scraper\";\nimport type { ScraperProgress } from \"../scraper/types\";\nimport type { DocumentManagementService } from \"../store\";\nimport { logger } from \"../utils/logger\";\nimport { CancellationError } from \"./errors\";\nimport type { PipelineJob, PipelineManagerCallbacks } from \"./types\";\n\n/**\n * Executes a single document processing job.\n * Handles scraping, storing documents, and reporting progress/errors via callbacks.\n */\nexport class PipelineWorker {\n // Dependencies are passed in, making the worker stateless regarding specific jobs\n private readonly store: DocumentManagementService;\n private readonly scraperService: ScraperService;\n\n // Constructor accepts dependencies needed for execution\n constructor(store: DocumentManagementService, scraperService: ScraperService) {\n this.store = store;\n this.scraperService = scraperService;\n }\n\n /**\n * Executes the given pipeline job.\n * @param job - The job to execute.\n * @param callbacks - Callbacks provided by the manager for reporting.\n */\n async executeJob(job: PipelineJob, callbacks: PipelineManagerCallbacks): Promise<void> {\n const { id: jobId, library, version, options, abortController } = job;\n const signal = abortController.signal;\n\n logger.debug(`[${jobId}] Worker starting job for ${library}@${version}`);\n\n try {\n // --- Core Job Logic ---\n await this.scraperService.scrape(\n options,\n async (progress: ScraperProgress) => {\n // Check for cancellation signal before processing each document\n if (signal.aborted) {\n throw new CancellationError(\"Job cancelled during scraping progress\");\n }\n\n // Update job object directly (manager holds the reference)\n job.progress = progress;\n // Report progress via manager's callback\n await callbacks.onJobProgress?.(job, progress);\n\n if (progress.document) {\n try {\n // TODO: Pass signal to store.addDocument if it supports it\n await this.store.addDocument(library, version, {\n pageContent: 
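// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the original sources]
// Strategy selection is first-match over the registration order above, so the
// registry-specific strategies shadow the generic WebScraperStrategy, and
// file:// URLs fall through to LocalFileStrategy. Expected routing, per each
// strategy's canHandle():
const registry = new ScraperRegistry();
registry.getStrategy("https://www.npmjs.com/package/react"); // NpmScraperStrategy
registry.getStrategy("https://github.com/arabold/docs-mcp-server"); // GitHubScraperStrategy
registry.getStrategy("https://example.com/docs/"); // WebScraperStrategy
registry.getStrategy("file:///home/user/docs/index.html"); // LocalFileStrategy
// ---------------------------------------------------------------------------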
progress.document.content,\n metadata: progress.document.metadata,\n });\n logger.debug(\n `[${jobId}] Stored document: ${progress.document.metadata.url}`,\n );\n } catch (docError) {\n logger.error(\n `[${jobId}] Failed to store document ${progress.document.metadata.url}: ${docError}`,\n );\n // Report document-specific errors via manager's callback\n await callbacks.onJobError?.(\n job,\n docError instanceof Error ? docError : new Error(String(docError)),\n progress.document,\n );\n // Decide if a single document error should fail the whole job\n // For now, we log and continue. To fail, re-throw here.\n }\n }\n },\n signal, // Pass signal to scraper service\n );\n // --- End Core Job Logic ---\n\n // Check signal one last time after scrape finishes\n if (signal.aborted) {\n throw new CancellationError(\"Job cancelled shortly after scraping finished\");\n }\n\n // If successful and not cancelled, the manager will handle status update\n logger.debug(`[${jobId}] Worker finished job successfully.`);\n } catch (error) {\n // Re-throw error to be caught by the manager in _runJob\n logger.warn(`[${jobId}] Worker encountered error: ${error}`);\n throw error;\n }\n // Note: The manager (_runJob) is responsible for updating final job status (COMPLETED/FAILED/CANCELLED)\n // and resolving/rejecting the completion promise based on the outcome here.\n }\n\n // --- Old methods removed ---\n // process()\n // stop()\n // setCallbacks()\n // handleScrapingProgress()\n}\n","import type { ScraperOptions, ScraperProgress } from \"../scraper/types\";\nimport type { Document } from \"../types\"; // Use local Document type\n\n/**\n * Represents the possible states of a pipeline job.\n */\nexport enum PipelineJobStatus {\n QUEUED = \"queued\",\n RUNNING = \"running\",\n COMPLETED = \"completed\",\n FAILED = \"failed\",\n CANCELLING = \"cancelling\",\n CANCELLED = \"cancelled\",\n}\n\n/**\n * Represents a single document processing job within the pipeline.\n */\nexport interface PipelineJob {\n /** Unique identifier for the job. */\n id: string;\n /** The library name associated with the job. */\n library: string;\n /** The library version associated with the job. */\n version: string;\n /** Options provided for the scraper. */\n options: ScraperOptions;\n /** Current status of the job. */\n status: PipelineJobStatus;\n /** Detailed progress information. */\n progress: ScraperProgress | null;\n /** Error object if the job failed. */\n error: Error | null;\n /** Timestamp when the job was created. */\n createdAt: Date;\n /** Timestamp when the job started running. */\n startedAt: Date | null;\n /** Timestamp when the job finished (completed, failed, or cancelled). */\n finishedAt: Date | null;\n /** AbortController to signal cancellation. */\n abortController: AbortController;\n /** Promise that resolves/rejects when the job finishes. */\n completionPromise: Promise<void>;\n /** Resolver function for the completion promise. */\n resolveCompletion: () => void;\n /** Rejector function for the completion promise. */\n rejectCompletion: (reason?: unknown) => void;\n}\n\n/**\n * Defines the structure for callback functions used with the PipelineManager.\n * Allows external components to hook into job lifecycle events.\n */\nexport interface PipelineManagerCallbacks {\n /** Callback triggered when a job's status changes. */\n onJobStatusChange?: (job: PipelineJob) => Promise<void>;\n /** Callback triggered when a job makes progress. 
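// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the original sources]
// The worker above cancels cooperatively: nothing is killed mid-flight, it
// simply checks `signal.aborted` on each progress tick and throws. A
// stripped-down sketch of the same idiom (the local CancellationErrorSketch
// stands in for the CancellationError imported from ./errors):
class CancellationErrorSketch extends Error {}

async function runCancellable(
  pages: string[],
  processPage: (page: string) => Promise<void>,
  signal: AbortSignal,
): Promise<void> {
  for (const page of pages) {
    if (signal.aborted) throw new CancellationErrorSketch("Job cancelled during scraping progress");
    await processPage(page);
  }
}

// const controller = new AbortController();
// runCancellable(urls, fetchAndStore, controller.signal);
// controller.abort(); // the next iteration observes the flag and throws
// ---------------------------------------------------------------------------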
*/\n onJobProgress?: (job: PipelineJob, progress: ScraperProgress) => Promise<void>;\n /** Callback triggered when a job encounters an error during processing (e.g., storing a doc). */\n onJobError?: (job: PipelineJob, error: Error, document?: Document) => Promise<void>;\n}\n","import { v4 as uuidv4 } from \"uuid\";\nimport { ScraperRegistry, ScraperService } from \"../scraper\";\nimport type { ScraperOptions } from \"../scraper/types\";\nimport type { DocumentManagementService } from \"../store\";\nimport { logger } from \"../utils/logger\";\nimport { PipelineWorker } from \"./PipelineWorker\"; // Import the worker\nimport { CancellationError, PipelineStateError } from \"./errors\";\nimport type { PipelineJob, PipelineManagerCallbacks } from \"./types\";\nimport { PipelineJobStatus } from \"./types\";\n\nconst DEFAULT_CONCURRENCY = 3;\n\n/**\n * Manages a queue of document processing jobs, controlling concurrency and tracking progress.\n */\nexport class PipelineManager {\n private jobMap: Map<string, PipelineJob> = new Map();\n private jobQueue: string[] = [];\n private activeWorkers: Set<string> = new Set();\n private isRunning = false;\n private concurrency: number;\n private callbacks: PipelineManagerCallbacks = {};\n private store: DocumentManagementService;\n private scraperService: ScraperService;\n\n constructor(\n store: DocumentManagementService,\n concurrency: number = DEFAULT_CONCURRENCY,\n ) {\n this.store = store;\n this.concurrency = concurrency;\n // ScraperService needs a registry. We create one internally for the manager.\n const registry = new ScraperRegistry();\n this.scraperService = new ScraperService(registry);\n }\n\n /**\n * Registers callback handlers for pipeline manager events.\n */\n setCallbacks(callbacks: PipelineManagerCallbacks): void {\n this.callbacks = callbacks;\n }\n\n /**\n * Starts the pipeline manager's worker processing.\n */\n async start(): Promise<void> {\n if (this.isRunning) {\n logger.warn(\"PipelineManager is already running.\");\n return;\n }\n this.isRunning = true;\n logger.debug(`PipelineManager started with concurrency ${this.concurrency}.`);\n this._processQueue(); // Start processing any existing jobs\n }\n\n /**\n * Stops the pipeline manager and attempts to gracefully shut down workers.\n * Currently, it just stops processing new jobs. Cancellation of active jobs\n * needs explicit `cancelJob` calls.\n */\n async stop(): Promise<void> {\n if (!this.isRunning) {\n logger.warn(\"PipelineManager is not running.\");\n return;\n }\n this.isRunning = false;\n logger.debug(\"PipelineManager stopping. 
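// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the original sources]
// PipelineJob carries a "deferred": a promise together with its externally
// captured resolve/reject, so waitForJobCompletion() can await
// job.completionPromise while the manager settles it from _runJob. The idiom
// in isolation (createDeferred is a hypothetical name):
function createDeferred(): {
  promise: Promise<void>;
  resolve: () => void;
  reject: (reason?: unknown) => void;
} {
  let resolve!: () => void;
  let reject!: (reason?: unknown) => void;
  const promise = new Promise<void>((res, rej) => {
    resolve = res;
    reject = rej;
  });
  return { promise, resolve, reject };
}
// ---------------------------------------------------------------------------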
No new jobs will be started.\");\n // Note: Does not automatically cancel active jobs.\n }\n\n /**\n * Enqueues a new document processing job.\n */\n async enqueueJob(\n library: string,\n version: string,\n options: ScraperOptions,\n ): Promise<string> {\n const jobId = uuidv4();\n const abortController = new AbortController();\n let resolveCompletion!: () => void;\n let rejectCompletion!: (reason?: unknown) => void;\n\n const completionPromise = new Promise<void>((resolve, reject) => {\n resolveCompletion = resolve;\n rejectCompletion = reject;\n });\n\n const job: PipelineJob = {\n id: jobId,\n library,\n version,\n options,\n status: PipelineJobStatus.QUEUED,\n progress: null,\n error: null,\n createdAt: new Date(),\n startedAt: null,\n finishedAt: null,\n abortController,\n completionPromise,\n resolveCompletion,\n rejectCompletion,\n };\n\n this.jobMap.set(jobId, job);\n this.jobQueue.push(jobId);\n logger.info(`📝 Job enqueued: ${jobId} for ${library}@${version}`);\n\n await this.callbacks.onJobStatusChange?.(job);\n\n // Trigger processing if manager is running\n if (this.isRunning) {\n this._processQueue();\n }\n\n return jobId;\n }\n\n /**\n * Retrieves the current state of a specific job.\n */\n async getJob(jobId: string): Promise<PipelineJob | undefined> {\n return this.jobMap.get(jobId);\n }\n\n /**\n * Retrieves the current state of all jobs (or a subset based on status).\n */\n async getJobs(status?: PipelineJobStatus): Promise<PipelineJob[]> {\n const allJobs = Array.from(this.jobMap.values());\n if (status) {\n return allJobs.filter((job) => job.status === status);\n }\n return allJobs;\n }\n\n /**\n * Returns a promise that resolves when the specified job completes, fails, or is cancelled.\n */\n async waitForJobCompletion(jobId: string): Promise<void> {\n const job = this.jobMap.get(jobId);\n if (!job) {\n throw new PipelineStateError(`Job not found: ${jobId}`);\n }\n await job.completionPromise;\n }\n\n /**\n * Attempts to cancel a queued or running job.\n */\n async cancelJob(jobId: string): Promise<void> {\n const job = this.jobMap.get(jobId);\n if (!job) {\n logger.warn(`Attempted to cancel non-existent job: ${jobId}`);\n return;\n }\n\n switch (job.status) {\n case PipelineJobStatus.QUEUED:\n // Remove from queue and mark as cancelled\n this.jobQueue = this.jobQueue.filter((id) => id !== jobId);\n job.status = PipelineJobStatus.CANCELLED;\n job.finishedAt = new Date();\n logger.info(`🚫 Job cancelled (was queued): ${jobId}`);\n await this.callbacks.onJobStatusChange?.(job);\n job.rejectCompletion(new PipelineStateError(\"Job cancelled before starting\"));\n break;\n\n case PipelineJobStatus.RUNNING:\n // Signal cancellation via AbortController\n job.status = PipelineJobStatus.CANCELLING;\n job.abortController.abort();\n logger.info(`🚫 Signalling cancellation for running job: ${jobId}`);\n await this.callbacks.onJobStatusChange?.(job);\n // The worker is responsible for transitioning to CANCELLED and rejecting\n break;\n\n case PipelineJobStatus.COMPLETED:\n case PipelineJobStatus.FAILED:\n case PipelineJobStatus.CANCELLED:\n case PipelineJobStatus.CANCELLING:\n logger.warn(\n `Job ${jobId} cannot be cancelled in its current state: ${job.status}`,\n );\n break;\n\n default:\n logger.error(`Unhandled job status for cancellation: ${job.status}`);\n break;\n }\n }\n\n // --- Private Methods ---\n\n /**\n * Processes the job queue, starting new workers if capacity allows.\n */\n private _processQueue(): void {\n if (!this.isRunning) return;\n\n while 
(this.activeWorkers.size < this.concurrency && this.jobQueue.length > 0) {\n const jobId = this.jobQueue.shift();\n if (!jobId) continue; // Should not happen, but safety check\n\n const job = this.jobMap.get(jobId);\n if (!job || job.status !== PipelineJobStatus.QUEUED) {\n logger.warn(`Skipping job ${jobId} in queue (not found or not queued).`);\n continue;\n }\n\n this.activeWorkers.add(jobId);\n job.status = PipelineJobStatus.RUNNING;\n job.startedAt = new Date();\n this.callbacks.onJobStatusChange?.(job); // Fire and forget status update\n\n // Start the actual job execution asynchronously\n this._runJob(job).catch((error) => {\n // Catch unexpected errors during job setup/execution not handled by _runJob itself\n logger.error(`Unhandled error during job ${jobId} execution: ${error}`);\n if (\n job.status !== PipelineJobStatus.FAILED &&\n job.status !== PipelineJobStatus.CANCELLED\n ) {\n job.status = PipelineJobStatus.FAILED;\n job.error = error instanceof Error ? error : new Error(String(error));\n job.finishedAt = new Date();\n this.callbacks.onJobStatusChange?.(job); // Fire and forget\n job.rejectCompletion(job.error);\n }\n this.activeWorkers.delete(jobId);\n this._processQueue(); // Check if another job can start\n });\n }\n }\n\n /**\n * Executes a single pipeline job by delegating to a PipelineWorker.\n * Handles final status updates and promise resolution/rejection.\n */\n private async _runJob(job: PipelineJob): Promise<void> {\n const { id: jobId, abortController } = job;\n const signal = abortController.signal; // Get signal for error checking\n\n // Instantiate a worker for this job.\n // Dependencies (store, scraperService) are held by the manager.\n const worker = new PipelineWorker(this.store, this.scraperService);\n\n try {\n // Delegate the actual work to the worker\n await worker.executeJob(job, this.callbacks);\n\n // If executeJob completes without throwing, and we weren't cancelled meanwhile...\n if (signal.aborted) {\n // Check signal again in case cancellation happened *during* the very last await in executeJob\n throw new CancellationError(\"Job cancelled just before completion\");\n }\n\n // Mark as completed\n job.status = PipelineJobStatus.COMPLETED;\n job.finishedAt = new Date();\n await this.callbacks.onJobStatusChange?.(job);\n job.resolveCompletion();\n } catch (error) {\n // Handle errors thrown by the worker, including CancellationError\n if (error instanceof CancellationError || signal.aborted) {\n // Explicitly check for CancellationError or if the signal was aborted\n job.status = PipelineJobStatus.CANCELLED;\n job.finishedAt = new Date();\n // Use the caught error if it's a CancellationError, otherwise create a new one\n job.error =\n error instanceof CancellationError\n ? error\n : new CancellationError(\"Job cancelled by signal\");\n logger.info(`🚫 Job execution cancelled: ${jobId}: ${job.error.message}`);\n await this.callbacks.onJobStatusChange?.(job);\n job.rejectCompletion(job.error);\n } else {\n // Handle other errors\n job.status = PipelineJobStatus.FAILED;\n job.error = error instanceof Error ? 
error : new Error(String(error));\n job.finishedAt = new Date();\n logger.error(`❌ Job failed: ${jobId}: ${job.error}`);\n await this.callbacks.onJobStatusChange?.(job);\n job.rejectCompletion(job.error);\n }\n } finally {\n // Ensure worker slot is freed and queue processing continues\n this.activeWorkers.delete(jobId);\n this._processQueue();\n }\n }\n}\n","/**\n * Thoroughly removes all types of whitespace characters from both ends of a string.\n * Handles spaces, tabs, line breaks, and carriage returns.\n */\nexport const fullTrim = (str: string): string => {\n return str.replace(/^[\\s\\r\\n\\t]+|[\\s\\r\\n\\t]+$/g, \"\");\n};\n","/**\n * Base error class for all splitter-related errors\n */\nexport class SplitterError extends Error {}\n\n/**\n * Thrown when content cannot be split further while maintaining its validity\n * (e.g., markdown tables require headers, code blocks require language and backticks)\n */\nexport class MinimumChunkSizeError extends SplitterError {\n constructor(size: number, maxSize: number) {\n super(\n `Cannot split content any further. Content requires minimum chunk size of ${size} bytes, but maximum allowed is ${maxSize} bytes.`,\n );\n }\n}\n\n/**\n * Generic error for content splitting failures\n */\nexport class ContentSplitterError extends SplitterError {}\n","import { MinimumChunkSizeError } from \"../errors\";\nimport type { ContentSplitter, ContentSplitterOptions } from \"./types\";\n\n/**\n * Splits code content while preserving language information and formatting.\n * Uses line boundaries for splitting and ensures each chunk is properly\n * wrapped with language-specific code block markers.\n */\nexport class CodeContentSplitter implements ContentSplitter {\n constructor(private options: ContentSplitterOptions) {}\n\n async split(content: string): Promise<string[]> {\n // Determine language and strip triple backticks from content\n const language = content.match(/^```(\\w+)\\n/)?.[1];\n const strippedContent = content.replace(/^```(\\w*)\\n/, \"\").replace(/```\\s*$/, \"\");\n\n const lines = strippedContent.split(\"\\n\");\n const chunks: string[] = [];\n let currentChunkLines: string[] = [];\n\n for (const line of lines) {\n // Check if a single line with code block markers exceeds chunkSize\n const singleLineSize = this.wrap(line, language).length;\n if (singleLineSize > this.options.chunkSize) {\n throw new MinimumChunkSizeError(singleLineSize, this.options.chunkSize);\n }\n\n currentChunkLines.push(line);\n const newChunkContent = this.wrap(currentChunkLines.join(\"\\n\"), language);\n const newChunkSize = newChunkContent.length;\n\n if (newChunkSize > this.options.chunkSize && currentChunkLines.length > 1) {\n // remove last item\n const lastLine = currentChunkLines.pop();\n // wrap content and create chunk\n chunks.push(this.wrap(currentChunkLines.join(\"\\n\"), language));\n currentChunkLines = [lastLine as string];\n }\n }\n\n if (currentChunkLines.length > 0) {\n chunks.push(this.wrap(currentChunkLines.join(\"\\n\"), language));\n }\n\n return chunks;\n }\n\n protected wrap(content: string, language?: string | null): string {\n return `\\`\\`\\`${language || \"\"}\\n${content.replace(/\\n+$/, \"\")}\\n\\`\\`\\``;\n }\n}\n","import { MinimumChunkSizeError } from \"../errors\";\nimport type { ContentSplitter, ContentSplitterOptions } from \"./types\";\n\n/**\n * Interface representing the structure of a parsed markdown table\n */\ninterface ParsedTable {\n headers: string[];\n separator: string;\n rows: string[];\n}\n\n/**\n * Splits table 
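// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the original sources]
// How CodeContentSplitter above behaves: it splits on line boundaries and
// re-wraps every chunk in a fence that repeats the original language tag, so
// each chunk remains a valid Markdown code block. chunkSize is deliberately
// tiny here to force a split.
const codeSplitter = new CodeContentSplitter({ chunkSize: 40 });
const codeChunks = await codeSplitter.split(
  "```ts\nconst a = 1;\nconst b = 2;\nconst c = 3;\n```",
);
// codeChunks[0] === "```ts\nconst a = 1;\nconst b = 2;\n```"
// codeChunks[1] === "```ts\nconst c = 3;\n```"
// A single line whose wrapped size already exceeds chunkSize cannot be split
// and raises MinimumChunkSizeError instead.
// ---------------------------------------------------------------------------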
content while preserving headers and table formatting.\n * Each chunk maintains the table structure with headers and separator row.\n */\nexport class TableContentSplitter implements ContentSplitter {\n constructor(private options: ContentSplitterOptions) {}\n\n /**\n * Splits table content into chunks while preserving table structure\n */\n async split(content: string): Promise<string[]> {\n const parsedTable = this.parseTable(content);\n if (!parsedTable) {\n return [content];\n }\n\n const { headers, rows } = parsedTable;\n\n const chunks: string[] = [];\n let currentRows: string[] = [];\n\n for (const row of rows) {\n // Check if a single row with headers exceeds chunkSize\n const singleRowSize = this.wrap(row, headers).length;\n if (singleRowSize > this.options.chunkSize) {\n throw new MinimumChunkSizeError(singleRowSize, this.options.chunkSize);\n }\n\n const newChunkContent = this.wrap([...currentRows, row].join(\"\\n\"), headers);\n const newChunkSize = newChunkContent.length;\n if (newChunkSize > this.options.chunkSize && currentRows.length > 0) {\n // Add current chunk, start new\n chunks.push(this.wrap(currentRows.join(\"\\n\"), headers));\n currentRows = [row];\n } else {\n currentRows.push(row);\n }\n }\n\n if (currentRows.length > 0) {\n chunks.push(this.wrap(currentRows.join(\"\\n\"), headers));\n }\n\n // No merging of table chunks\n return chunks;\n }\n\n protected wrap(content: string, headers: string[]): string {\n const headerRow = `| ${headers.join(\" | \")} |`;\n const separatorRow = `|${headers.map(() => \"---\").join(\"|\")}|`;\n return [headerRow, separatorRow, content].join(\"\\n\");\n }\n\n private parseTable(content: string): ParsedTable | null {\n const lines = content.trim().split(\"\\n\");\n if (lines.length < 3) return null; // Need at least headers, separator, and one data row\n\n const headers = this.parseRow(lines[0]);\n if (!headers) return null;\n\n const separator = lines[1];\n if (!this.isValidSeparator(separator)) return null;\n\n const rows = lines.slice(2).filter((row) => row.trim() !== \"\");\n\n return { headers, separator, rows };\n }\n\n /**\n * Parses a table row into cells\n */\n private parseRow(row: string): string[] | null {\n if (!row.includes(\"|\")) return null;\n return row\n .split(\"|\")\n .map((cell) => cell.trim())\n .filter((cell) => cell !== \"\");\n }\n\n /**\n * Validates the separator row of the table\n */\n private isValidSeparator(separator: string): boolean {\n return separator.includes(\"|\") && /^\\|?[\\s-|]+\\|?$/.test(separator);\n }\n}\n","import { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\nimport { fullTrim } from \"../../utils/string\";\nimport { MinimumChunkSizeError } from \"../errors\";\nimport type { ContentSplitter, ContentSplitterOptions } from \"./types\";\n\n/**\n * Splits text content using a hierarchical approach:\n * 1. Try splitting by paragraphs (double newlines)\n * 2. If chunks still too large, split by single newlines\n * 3. 
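// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the original sources]
// TableContentSplitter keeps every chunk a complete Markdown table: the
// header row and a "---" separator are re-emitted in front of each slice of
// data rows, so with chunkSize 80 this three-row table splits into two
// self-describing tables.
const tableSplitter = new TableContentSplitter({ chunkSize: 80 });
const tableChunks = await tableSplitter.split(
  "| Name | Role |\n|---|---|\n| Ada | Engineer |\n| Linus | Kernel |\n| Grace | Compiler |",
);
// tableChunks[0]:
// | Name | Role |
// |---|---|
// | Ada | Engineer |
// | Linus | Kernel |
// tableChunks[1]:
// | Name | Role |
// |---|---|
// | Grace | Compiler |
// ---------------------------------------------------------------------------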
Finally, use word boundaries via LangChain's splitter\n */\nexport class TextContentSplitter implements ContentSplitter {\n constructor(private options: ContentSplitterOptions) {}\n\n /**\n * Splits text content into chunks while trying to preserve semantic boundaries.\n * Prefers paragraph breaks, then line breaks, finally falling back to word boundaries.\n */\n async split(content: string): Promise<string[]> {\n const trimmedContent = fullTrim(content);\n\n if (trimmedContent.length <= this.options.chunkSize) {\n return [trimmedContent];\n }\n\n // Check for unsplittable content (e.g., a single word longer than chunkSize)\n const words = trimmedContent.split(/\\s+/);\n const longestWord = words.reduce((max, word) =>\n word.length > max.length ? word : max,\n );\n if (longestWord.length > this.options.chunkSize) {\n throw new MinimumChunkSizeError(longestWord.length, this.options.chunkSize);\n }\n\n // First try splitting by paragraphs (double newlines)\n const paragraphChunks = this.splitByParagraphs(trimmedContent);\n if (this.areChunksValid(paragraphChunks)) {\n // No merging for paragraph chunks; they are already semantically separated\n return paragraphChunks;\n }\n\n // If that doesn't work, try splitting by single newlines\n const lineChunks = this.splitByLines(trimmedContent);\n if (this.areChunksValid(lineChunks)) {\n return this.mergeChunks(lineChunks, \"\\n\");\n }\n\n // Finally, fall back to word-based splitting using LangChain\n const wordChunks = await this.splitByWords(trimmedContent);\n return this.mergeChunks(wordChunks, \" \");\n }\n\n /**\n * Checks if all chunks are within the maximum size limit\n */\n private areChunksValid(chunks: string[]): boolean {\n return chunks.every((chunk) => chunk.length <= this.options.chunkSize);\n }\n\n /**\n * Splits text into chunks by paragraph boundaries (double newlines)\n */\n private splitByParagraphs(text: string): string[] {\n const paragraphs = text\n .split(/\\n\\s*\\n/)\n .map((p) => fullTrim(p))\n .filter(Boolean);\n\n return paragraphs.filter((chunk) => chunk.length > 2);\n }\n\n /**\n * Splits text into chunks by line boundaries\n */\n private splitByLines(text: string): string[] {\n const lines = text\n .split(/\\n/)\n .map((line) => fullTrim(line))\n .filter(Boolean);\n\n return lines.filter((chunk) => chunk.length > 1);\n }\n\n /**\n * Uses LangChain's recursive splitter for word-based splitting as a last resort\n */\n private async splitByWords(text: string): Promise<string[]> {\n const splitter = new RecursiveCharacterTextSplitter({\n chunkSize: this.options.chunkSize,\n chunkOverlap: 0,\n });\n\n const chunks = await splitter.splitText(text);\n return chunks;\n }\n\n /**\n * Attempts to merge small chunks with previous chunks to minimize fragmentation.\n * Only merges if combined size is within maxChunkSize.\n */\n protected mergeChunks(chunks: string[], separator: string): string[] {\n const mergedChunks: string[] = [];\n let currentChunk: string | null = null;\n\n for (const chunk of chunks) {\n if (currentChunk === null) {\n currentChunk = chunk;\n continue;\n }\n\n const currentChunkSize = this.getChunkSize(currentChunk);\n const nextChunkSize = this.getChunkSize(chunk);\n\n if (currentChunkSize + nextChunkSize + separator.length <= this.options.chunkSize) {\n // Merge chunks\n currentChunk = `${currentChunk}${separator}${chunk}`;\n } else {\n // Add the current chunk to the result and start a new one\n mergedChunks.push(currentChunk);\n currentChunk = chunk;\n }\n }\n\n if (currentChunk) {\n 
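// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the original sources]
// The cascade above always prefers the most semantic boundary that yields
// fitting chunks: paragraphs first, then single lines, then LangChain's
// recursive word splitting as the last resort.
const textSplitter = new TextContentSplitter({ chunkSize: 50 });
const textChunks = await textSplitter.split(
  "First paragraph, short enough on its own.\n\nSecond paragraph, also fits within the limit.",
);
// => two chunks, split at the blank line; no line- or word-level splitting
//    is needed because each paragraph already fits within chunkSize.
// ---------------------------------------------------------------------------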
mergedChunks.push(currentChunk);\n }\n\n return mergedChunks;\n }\n\n protected getChunkSize(chunk: string): number {\n return chunk.length;\n }\n\n protected wrap(content: string): string {\n return content;\n }\n}\n","import { JSDOM } from \"jsdom\";\nimport { RecursiveCharacterTextSplitter } from \"langchain/text_splitter\";\nimport remarkGfm from \"remark-gfm\";\nimport remarkHtml from \"remark-html\";\nimport remarkParse from \"remark-parse\";\nimport TurndownService from \"turndown\";\nimport { unified } from \"unified\";\nimport { createJSDOM } from \"../utils/dom\";\nimport { logger } from \"../utils/logger\";\nimport { fullTrim } from \"../utils/string\";\nimport { ContentSplitterError, MinimumChunkSizeError } from \"./errors\";\nimport { CodeContentSplitter } from \"./splitters/CodeContentSplitter\";\nimport { TableContentSplitter } from \"./splitters/TableContentSplitter\";\nimport { TextContentSplitter } from \"./splitters/TextContentSplitter\";\nimport type { ContentChunk, DocumentSplitter, SectionContentType } from \"./types\";\n\n/**\n * Represents a section of content within a document,\n * typically defined by a heading\n */\ninterface DocumentSection {\n level: number;\n path: string[]; // Full path including parent headings\n content: {\n type: SectionContentType;\n text: string;\n }[];\n}\n\n/**\n * Splits markdown documents into semantic chunks while preserving\n * structure and distinguishing between different content types.\n *\n * The splitting process happens in two steps:\n * 1. Split document into sections based on headings (H1-H6)\n * 2. Split section content into smaller chunks based on preferredChunkSize\n */\nexport class SemanticMarkdownSplitter implements DocumentSplitter {\n private turndownService: TurndownService;\n public textSplitter: TextContentSplitter;\n public codeSplitter: CodeContentSplitter;\n public tableSplitter: TableContentSplitter;\n\n constructor(\n private preferredChunkSize: number,\n private maxChunkSize: number,\n ) {\n this.turndownService = new TurndownService({\n headingStyle: \"atx\",\n hr: \"---\",\n bulletListMarker: \"-\",\n codeBlockStyle: \"fenced\",\n emDelimiter: \"_\",\n strongDelimiter: \"**\",\n linkStyle: \"inlined\",\n });\n\n // Add table rule to preserve markdown table format\n this.turndownService.addRule(\"table\", {\n filter: [\"table\"],\n replacement: (content, node) => {\n const table = node as HTMLTableElement;\n const headers = Array.from(table.querySelectorAll(\"th\")).map(\n (th) => th.textContent?.trim() || \"\",\n );\n const rows = Array.from(table.querySelectorAll(\"tr\")).filter(\n (tr) => !tr.querySelector(\"th\"),\n );\n\n if (headers.length === 0 && rows.length === 0) return \"\";\n\n let markdown = \"\\n\";\n if (headers.length > 0) {\n markdown += `| ${headers.join(\" | \")} |\\n`;\n markdown += `|${headers.map(() => \"---\").join(\"|\")}|\\n`;\n }\n\n for (const row of rows) {\n const cells = Array.from(row.querySelectorAll(\"td\")).map(\n (td) => td.textContent?.trim() || \"\",\n );\n markdown += `| ${cells.join(\" | \")} |\\n`;\n }\n\n return markdown;\n },\n });\n\n // Text splitter uses preferred chunk size (keeps paragraphs together if possible)\n this.textSplitter = new TextContentSplitter({\n chunkSize: this.preferredChunkSize,\n });\n // Code/table splitters use the hard chunk size (avoid splitting unless necessary)\n this.codeSplitter = new CodeContentSplitter({\n chunkSize: this.maxChunkSize,\n });\n this.tableSplitter = new TableContentSplitter({\n chunkSize: this.maxChunkSize,\n 
});\n }\n\n /**\n * Main entry point for splitting markdown content\n */\n async splitText(markdown: string): Promise<ContentChunk[]> {\n const html = await this.markdownToHtml(markdown);\n const dom = await this.parseHtml(html);\n const sections = await this.splitIntoSections(dom);\n return this.splitSectionContent(sections);\n }\n\n /**\n * Step 1: Split document into sections based on H1-H6 headings,\n * as well as code blocks and tables.\n */\n private async splitIntoSections(dom: Document): Promise<DocumentSection[]> {\n const body = dom.querySelector(\"body\");\n if (!body) {\n throw new Error(\"Invalid HTML structure: no body element found\");\n }\n\n let currentSection = this.createRootSection();\n const sections: DocumentSection[] = [];\n const stack: DocumentSection[] = [currentSection];\n\n // Process each child of the body\n for (const element of Array.from(body.children)) {\n const headingMatch = element.tagName.match(/H([1-6])/);\n\n if (headingMatch) {\n // Create new section for H1-H6 heading\n const level = Number.parseInt(headingMatch[1], 10);\n const title = fullTrim(element.textContent || \"\");\n\n // Pop sections from stack until we find the parent level\n while (stack.length > 1 && stack[stack.length - 1].level >= level) {\n stack.pop();\n }\n\n // Start new section with the header\n currentSection = {\n level,\n path: [\n ...stack.slice(1).reduce((acc: string[], s) => {\n const lastPath = s.path[s.path.length - 1];\n if (lastPath) acc.push(lastPath);\n return acc;\n }, []),\n title,\n ],\n content: [\n {\n type: \"heading\",\n text: `${\"#\".repeat(level)} ${title}`,\n },\n ],\n };\n\n sections.push(currentSection);\n stack.push(currentSection);\n } else if (element.tagName === \"PRE\") {\n // Code blocks are kept as separate chunks\n const code = element.querySelector(\"code\");\n const language = code?.className.replace(\"language-\", \"\") || \"\";\n const content = code?.textContent || element.textContent || \"\";\n const markdown = `${\"```\"}${language}\\n${content}\\n${\"```\"}`;\n\n currentSection = {\n level: currentSection.level,\n path: currentSection.path,\n content: [\n {\n type: \"code\",\n text: markdown,\n },\n ],\n } satisfies DocumentSection;\n sections.push(currentSection);\n } else if (element.tagName === \"TABLE\") {\n // Tables are kept as separate chunks\n const markdown = fullTrim(this.turndownService.turndown(element.outerHTML));\n\n currentSection = {\n level: currentSection.level,\n path: currentSection.path,\n content: [\n {\n type: \"table\",\n text: markdown,\n },\n ],\n } satisfies DocumentSection;\n sections.push(currentSection);\n } else {\n const markdown = fullTrim(this.turndownService.turndown(element.innerHTML));\n if (markdown) {\n // Create a new section for the text content\n currentSection = {\n level: currentSection.level,\n path: currentSection.path,\n content: [\n {\n type: \"text\",\n text: markdown,\n },\n ],\n } satisfies DocumentSection;\n sections.push(currentSection);\n }\n }\n }\n\n return sections;\n }\n\n /**\n * Step 2: Split section content into smaller chunks\n */\n private async splitSectionContent(\n sections: DocumentSection[],\n ): Promise<ContentChunk[]> {\n const chunks: ContentChunk[] = [];\n\n for (const section of sections) {\n for (const content of section.content) {\n let splitContent: string[] = [];\n\n try {\n switch (content.type) {\n case \"heading\":\n case \"text\": {\n splitContent = await this.textSplitter.split(content.text);\n break;\n }\n case \"code\": {\n splitContent = await 
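// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the original sources]
// splitIntoSections keeps a stack of open headings, so every chunk records
// the full heading trail of its ancestors in section.path:
const semanticSplitter = new SemanticMarkdownSplitter(500, 1000);
const sectionChunks = await semanticSplitter.splitText(
  "# Guide\n\n## Install\n\nRun the installer.\n\n## Usage\n\nStart the server.",
);
// "Run the installer." carries section.path ["Guide", "Install"];
// "Start the server." carries ["Guide", "Usage"] — the Install scope was
// popped off the stack when the sibling H2 appeared.
// ---------------------------------------------------------------------------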
this.codeSplitter.split(content.text);\n break;\n }\n case \"table\": {\n splitContent = await this.tableSplitter.split(content.text);\n break;\n }\n }\n } catch (err) {\n // If it's a MinimumChunkSizeError, use RecursiveCharacterTextSplitter directly\n if (err instanceof MinimumChunkSizeError) {\n logger.warn(\n `⚠ Cannot split ${content.type} chunk normally, using RecursiveCharacterTextSplitter: ${err.message}`,\n );\n\n // Create a RecursiveCharacterTextSplitter with aggressive settings to ensure splitting\n const splitter = new RecursiveCharacterTextSplitter({\n chunkSize: this.maxChunkSize,\n chunkOverlap: Math.min(20, Math.floor(this.maxChunkSize * 0.1)),\n // Use more aggressive separators including empty string as last resort\n separators: [\n \"\\n\\n\",\n \"\\n\",\n \" \",\n \"\\t\",\n \".\",\n \",\",\n \";\",\n \":\",\n \"-\",\n \"(\",\n \")\",\n \"[\",\n \"]\",\n \"{\",\n \"}\",\n \"\",\n ],\n });\n\n const chunks = await splitter.splitText(content.text);\n if (chunks.length === 0) {\n // If still no chunks, use the most extreme approach: just truncate\n splitContent = [content.text.substring(0, this.maxChunkSize)];\n } else {\n splitContent = chunks;\n }\n } else {\n // Convert other error message to string, handling non-Error objects\n const errMessage = err instanceof Error ? err.message : String(err);\n throw new ContentSplitterError(\n `Failed to split ${content.type} content: ${errMessage}`,\n );\n }\n }\n\n // Create chunks from split content\n chunks.push(\n ...splitContent.map(\n (text): ContentChunk => ({\n types: [content.type],\n content: text,\n section: {\n level: section.level,\n path: section.path,\n },\n }),\n ),\n );\n }\n }\n\n return chunks;\n }\n\n /**\n * Helper to create the root section\n */\n private createRootSection(): DocumentSection {\n return {\n level: 0,\n path: [],\n content: [],\n };\n }\n\n /**\n * Convert markdown to HTML using remark\n */\n private async markdownToHtml(markdown: string): Promise<string> {\n const html = await unified()\n .use(remarkParse)\n .use(remarkGfm)\n .use(remarkHtml)\n .process(markdown);\n\n return `<!DOCTYPE html>\n <html>\n <body>\n ${String(html)}\n </body>\n </html>`;\n }\n\n /**\n * Parse HTML\n */\n private async parseHtml(html: string): Promise<Document> {\n // Use createJSDOM which includes default options like virtualConsole\n const { window } = createJSDOM(html);\n return window.document;\n }\n}\n","import type { ContentChunk, DocumentSplitter, SectionContentType } from \"./types\";\n\n/**\n * Takes small document chunks and greedily concatenates them into larger, more meaningful units\n * while preserving document structure and semantic boundaries.\n *\n * This approach improves embedding quality by:\n * - Maintaining context by keeping related content together\n * - Respecting natural document breaks at major section boundaries (H1/H2)\n * - Ensuring chunks are large enough to capture meaningful relationships\n * - Preventing chunks from becoming too large for effective embedding\n */\nexport class GreedySplitter implements DocumentSplitter {\n private baseSplitter: DocumentSplitter;\n private minChunkSize: number;\n private preferredChunkSize: number;\n\n /**\n * Combines a base document splitter with size constraints to produce optimally-sized chunks.\n * The base splitter handles the initial semantic splitting, while this class handles\n * the concatenation strategy.\n */\n constructor(\n baseSplitter: DocumentSplitter,\n minChunkSize: number,\n preferredChunkSize: number,\n ) {\n this.baseSplitter = 
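// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the original sources]
// The MinimumChunkSizeError fallback above relies on
// RecursiveCharacterTextSplitter accepting progressively weaker separators,
// ending with "" so a split is always possible. In isolation:
import { RecursiveCharacterTextSplitter } from "langchain/text_splitter";

const fallbackSplitter = new RecursiveCharacterTextSplitter({
  chunkSize: 100,
  chunkOverlap: 10,
  separators: ["\n\n", "\n", " ", ""], // "" splits between characters as a last resort
});
const pieces = await fallbackSplitter.splitText("x".repeat(350)); // one unbroken 350-char token
// => pieces of at most 100 characters (overlapping by up to 10), where the
//    paragraph/line/word separators alone would have produced nothing.
// ---------------------------------------------------------------------------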
baseSplitter;\n this.minChunkSize = minChunkSize;\n this.preferredChunkSize = preferredChunkSize;\n }\n\n /**\n * Uses a greedy concatenation strategy to build optimally-sized chunks. Small chunks\n * are combined until they reach the minimum size, but splits are preserved at major\n * section boundaries to maintain document structure. This balances the need for\n * context with semantic coherence.\n */\n async splitText(markdown: string): Promise<ContentChunk[]> {\n const initialChunks = await this.baseSplitter.splitText(markdown);\n const concatenatedChunks: ContentChunk[] = [];\n let currentChunk: ContentChunk | null = null;\n\n for (const nextChunk of initialChunks) {\n if (currentChunk) {\n if (this.wouldExceedMaxSize(currentChunk, nextChunk)) {\n concatenatedChunks.push(currentChunk);\n currentChunk = this.cloneChunk(nextChunk);\n continue;\n }\n if (\n currentChunk.content.length >= this.minChunkSize &&\n this.startsNewMajorSection(nextChunk)\n ) {\n concatenatedChunks.push(currentChunk);\n currentChunk = this.cloneChunk(nextChunk);\n continue;\n }\n currentChunk.content += `\\n${nextChunk.content}`;\n currentChunk.section = this.mergeSectionInfo(currentChunk, nextChunk);\n currentChunk.types = this.mergeTypes(currentChunk.types, nextChunk.types);\n } else {\n currentChunk = this.cloneChunk(nextChunk);\n }\n }\n\n if (currentChunk) {\n concatenatedChunks.push(currentChunk);\n }\n\n return concatenatedChunks;\n }\n\n private cloneChunk(chunk: ContentChunk): ContentChunk {\n return {\n types: [...chunk.types],\n content: chunk.content,\n section: {\n level: chunk.section.level,\n path: [...chunk.section.path],\n },\n };\n }\n\n /**\n * H1 and H2 headings represent major conceptual breaks in the document.\n * Preserving these splits helps maintain the document's logical structure.\n */\n private startsNewMajorSection(chunk: ContentChunk): boolean {\n return chunk.section.level === 1 || chunk.section.level === 2;\n }\n\n /**\n * Size limit check to ensure chunks remain within embedding model constraints.\n * Essential for maintaining consistent embedding quality and avoiding truncation.\n */\n private wouldExceedMaxSize(\n currentChunk: ContentChunk | null,\n nextChunk: ContentChunk,\n ): boolean {\n if (!currentChunk) {\n return false;\n }\n return (\n currentChunk.content.length + nextChunk.content.length > this.preferredChunkSize\n );\n }\n\n /**\n * Checks if one path is a prefix of another path, indicating a parent-child relationship\n */\n private isPathIncluded(parentPath: string[], childPath: string[]): boolean {\n if (parentPath.length >= childPath.length) return false;\n return parentPath.every((part, i) => part === childPath[i]);\n }\n\n /**\n * Merges section metadata when concatenating chunks, following these rules:\n * 1. Level: Always uses the lowest (most general) level between chunks\n * 2. 
Path selection:\n * - For parent-child relationships (one path includes the other), uses the child's path\n * - For siblings/unrelated sections, uses the common parent path\n * - If no common path exists, uses the root path ([])\n */\n private mergeSectionInfo(\n currentChunk: ContentChunk,\n nextChunk: ContentChunk,\n ): ContentChunk[\"section\"] {\n // Always use the lowest level\n const level = Math.min(currentChunk.section.level, nextChunk.section.level);\n\n // If sections are exactly equal, preserve all metadata\n if (\n currentChunk.section.level === nextChunk.section.level &&\n currentChunk.section.path.length === nextChunk.section.path.length &&\n currentChunk.section.path.every((p, i) => p === nextChunk.section.path[i])\n ) {\n return currentChunk.section;\n }\n\n // Check if one path includes the other\n if (this.isPathIncluded(currentChunk.section.path, nextChunk.section.path)) {\n return {\n path: nextChunk.section.path,\n level,\n };\n }\n\n if (this.isPathIncluded(nextChunk.section.path, currentChunk.section.path)) {\n return {\n path: currentChunk.section.path,\n level,\n };\n }\n\n // Find common parent path\n const commonPath = this.findCommonPrefix(\n currentChunk.section.path,\n nextChunk.section.path,\n );\n\n return {\n path: commonPath,\n level,\n };\n }\n\n private mergeTypes(\n currentTypes: SectionContentType[],\n nextTypes: SectionContentType[],\n ): SectionContentType[] {\n return [...new Set([...currentTypes, ...nextTypes])];\n }\n\n /**\n * Returns longest common prefix between two paths\n */\n private findCommonPrefix(path1: string[], path2: string[]): string[] {\n const common: string[] = [];\n for (let i = 0; i < Math.min(path1.length, path2.length); i++) {\n if (path1[i] === path2[i]) {\n common.push(path1[i]);\n } else {\n break;\n }\n }\n return common;\n }\n}\n","import semver from \"semver\";\nimport type { LibraryVersionDetails } from \"../store/types\"; // Import LibraryVersionDetails\n\nclass ToolError extends Error {\n constructor(\n message: string,\n public readonly toolName: string,\n ) {\n super(message);\n this.name = this.constructor.name;\n }\n}\n\nclass VersionNotFoundError extends ToolError {\n constructor(\n public readonly library: string,\n public readonly requestedVersion: string,\n public readonly availableVersions: LibraryVersionDetails[], // Use LibraryVersionDetails\n ) {\n super(\n `Version ${requestedVersion} not found for ${library}. 
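// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the original sources]
// How the path rules above play out when two chunks are concatenated:
//   parent/child:  ["Guide"] + ["Guide", "Install"]          → ["Guide", "Install"]
//   siblings:      ["Guide", "Install"] + ["Guide", "Usage"] → ["Guide"]
//   unrelated:     ["Guide"] + ["FAQ"]                       → []
// A standalone reproduction of the common-prefix rule used for siblings:
function commonPrefix(a: string[], b: string[]): string[] {
  const shared: string[] = [];
  for (let i = 0; i < Math.min(a.length, b.length) && a[i] === b[i]; i++) {
    shared.push(a[i]);
  }
  return shared;
}
// commonPrefix(["Guide", "Install"], ["Guide", "Usage"]); // => ["Guide"]
// ---------------------------------------------------------------------------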
Available versions: ${availableVersions.map((v) => v.version).join(\", \")}`,\n \"SearchTool\",\n );\n }\n\n getLatestVersion() {\n return this.availableVersions.sort((a, b) => semver.compare(b.version, a.version))[0];\n }\n}\n\n/**\n * Error thrown when a requested library cannot be found in the store.\n * Includes suggestions for similar library names if available.\n */\nclass LibraryNotFoundError extends ToolError {\n constructor(\n public readonly requestedLibrary: string,\n public readonly suggestions: string[] = [],\n ) {\n let message = `Library '${requestedLibrary}' not found.`;\n if (suggestions.length > 0) {\n message += ` Did you mean one of these: ${suggestions.join(\", \")}?`;\n }\n // Assuming this error might originate from various tools, but SearchTool is a primary candidate.\n // We might need to adjust the toolName if it's thrown elsewhere.\n super(message, \"SearchTool\");\n }\n}\n\nexport { LibraryNotFoundError, ToolError, VersionNotFoundError };\n","import type { DocumentManagementService } from \"../store/DocumentManagementService\";\nimport type { LibraryVersionDetails } from \"../store/types\";\n\n// Define the structure for the tool's output, using the detailed version info\nexport interface LibraryInfo {\n name: string;\n versions: LibraryVersionDetails[]; // Use the detailed interface\n}\n\nexport interface ListLibrariesResult {\n libraries: LibraryInfo[];\n}\n\n/**\n * Tool for listing all available libraries and their indexed versions in the store.\n */\nexport class ListLibrariesTool {\n private docService: DocumentManagementService;\n\n constructor(docService: DocumentManagementService) {\n this.docService = docService;\n }\n\n async execute(options?: Record<string, never>): Promise<ListLibrariesResult> {\n // docService.listLibraries() now returns the detailed structure directly\n const rawLibraries = await this.docService.listLibraries();\n\n // The structure returned by listLibraries already matches LibraryInfo[]\n // No complex mapping is needed here anymore, just ensure the names match\n const libraries: LibraryInfo[] = rawLibraries.map(({ library, versions }) => ({\n name: library,\n versions: versions, // Directly assign the detailed versions array\n }));\n\n return { libraries };\n }\n}\n","import * as semver from \"semver\";\nimport type { PipelineManager } from \"../pipeline/PipelineManager\";\nimport { ScrapeMode } from \"../scraper/types\";\nimport type { DocumentManagementService } from \"../store/DocumentManagementService\";\nimport type { ProgressResponse } from \"../types\";\nimport {\n DEFAULT_MAX_CONCURRENCY,\n DEFAULT_MAX_DEPTH,\n DEFAULT_MAX_PAGES,\n} from \"../utils/config\";\nimport { logger } from \"../utils/logger\";\n\nexport interface ScrapeToolOptions {\n library: string;\n version?: string | null; // Make version optional\n url: string;\n options?: {\n maxPages?: number;\n maxDepth?: number;\n /**\n * Defines the allowed crawling boundary relative to the starting URL\n * - 'subpages': Only crawl URLs on the same hostname and within the same starting path (default)\n * - 'hostname': Crawl any URL on the same hostname, regardless of path\n * - 'domain': Crawl any URL on the same top-level domain, including subdomains\n */\n scope?: \"subpages\" | \"hostname\" | \"domain\";\n /**\n * Controls whether HTTP redirects (3xx responses) should be followed\n * - When true: Redirects are followed automatically (default)\n * - When false: A RedirectError is thrown when a 3xx response is received\n */\n followRedirects?: boolean;\n 
maxConcurrency?: number; // Note: Concurrency is now set when PipelineManager is created\n ignoreErrors?: boolean;\n /**\n * Determines the HTML processing strategy.\n * - 'fetch': Use a simple DOM parser (faster, less JS support).\n * - 'playwright': Use a headless browser (slower, full JS support).\n * - 'auto': Automatically select the best strategy (currently defaults to 'playwright').\n * @default ScrapeMode.Auto\n */\n scrapeMode?: ScrapeMode;\n };\n /** If false, returns jobId immediately without waiting. Defaults to true. */\n waitForCompletion?: boolean;\n}\n\nexport interface ScrapeResult {\n /** Indicates the number of pages scraped if waitForCompletion was true and the job succeeded. May be 0 or inaccurate if job failed or waitForCompletion was false. */\n pagesScraped: number;\n}\n\n/** Return type for ScrapeTool.execute */\nexport type ScrapeExecuteResult = ScrapeResult | { jobId: string };\n\n/**\n * Tool for enqueuing documentation scraping jobs via the PipelineManager.\n */\nexport class ScrapeTool {\n private docService: DocumentManagementService;\n private manager: PipelineManager; // Add manager property\n\n constructor(docService: DocumentManagementService, manager: PipelineManager) {\n // Add manager to constructor\n this.docService = docService;\n this.manager = manager; // Store manager instance\n }\n\n async execute(options: ScrapeToolOptions): Promise<ScrapeExecuteResult> {\n const {\n library,\n version,\n url,\n options: scraperOptions,\n waitForCompletion = true,\n } = options;\n\n // Store initialization and manager start should happen externally\n\n let internalVersion: string;\n const partialVersionRegex = /^\\d+(\\.\\d+)?$/; // Matches '1' or '1.2'\n\n if (version === null || version === undefined) {\n internalVersion = \"\";\n } else {\n const validFullVersion = semver.valid(version);\n if (validFullVersion) {\n internalVersion = validFullVersion;\n } else if (partialVersionRegex.test(version)) {\n const coercedVersion = semver.coerce(version);\n if (coercedVersion) {\n internalVersion = coercedVersion.version;\n } else {\n throw new Error(\n `Invalid version format for scraping: '${version}'. Use 'X.Y.Z', 'X.Y.Z-prerelease', 'X.Y', 'X', or omit.`,\n );\n }\n } else {\n throw new Error(\n `Invalid version format for scraping: '${version}'. Use 'X.Y.Z', 'X.Y.Z-prerelease', 'X.Y', 'X', or omit.`,\n );\n }\n }\n\n internalVersion = internalVersion.toLowerCase();\n\n // Remove any existing documents for this library/version\n await this.docService.removeAllDocuments(library, internalVersion);\n logger.info(\n `💾 Cleared store for ${library}@${internalVersion || \"[no version]\"} before scraping.`,\n );\n\n // Use the injected manager instance\n const manager = this.manager;\n\n // Remove internal progress tracking and callbacks\n // let pagesScraped = 0;\n // let lastReportedPages = 0;\n // const reportProgress = ...\n // manager.setCallbacks(...)\n\n // Enqueue the job using the injected manager\n const jobId = await manager.enqueueJob(library, internalVersion, {\n url: url,\n library: library,\n version: internalVersion,\n scope: scraperOptions?.scope ?? \"subpages\",\n followRedirects: scraperOptions?.followRedirects ?? true,\n maxPages: scraperOptions?.maxPages ?? DEFAULT_MAX_PAGES,\n maxDepth: scraperOptions?.maxDepth ?? DEFAULT_MAX_DEPTH,\n maxConcurrency: scraperOptions?.maxConcurrency ?? DEFAULT_MAX_CONCURRENCY,\n ignoreErrors: scraperOptions?.ignoreErrors ?? true,\n scrapeMode: scraperOptions?.scrapeMode ?? 
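// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the original sources]
// How the version normalization above treats typical inputs, using the same
// semver calls as the method:
import * as semver from "semver";

semver.valid("1.2.3"); // "1.2.3"         → full version, used as-is
semver.valid("1.2.3-beta.1"); // "1.2.3-beta.1" → prereleases are full versions
semver.valid("1.2"); // null            → falls through to the partial branch
semver.coerce("1.2")?.version; // "1.2.0"
semver.coerce("1")?.version; // "1.0.0"
// "latest" matches neither branch and is rejected with an Error, while
// null/undefined mean "unversioned" and normalize to "".
// ---------------------------------------------------------------------------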
ScrapeMode.Auto, // Pass scrapeMode enum\n });\n\n // Conditionally wait for completion\n if (waitForCompletion) {\n try {\n await manager.waitForJobCompletion(jobId);\n // Fetch final job state to get status and potentially final page count\n const finalJob = await manager.getJob(jobId);\n const finalPagesScraped = finalJob?.progress?.pagesScraped ?? 0; // Get count from final job state\n logger.debug(\n `Job ${jobId} finished with status ${finalJob?.status}. Pages scraped: ${finalPagesScraped}`,\n );\n return {\n pagesScraped: finalPagesScraped,\n };\n } catch (error) {\n logger.error(`Job ${jobId} failed or was cancelled: ${error}`);\n throw error; // Re-throw so the caller knows it failed\n }\n // No finally block needed to stop manager, as it's managed externally\n }\n\n // If not waiting, return the job ID immediately\n return { jobId };\n }\n}\n","import type { DocumentManagementService } from \"../store\";\nimport type { LibraryVersionDetails, StoreSearchResult } from \"../store/types\"; // Import LibraryVersionDetails\nimport { logger } from \"../utils/logger\";\nimport { VersionNotFoundError } from \"./errors\";\n\nexport interface SearchToolOptions {\n library: string;\n version?: string;\n query: string;\n limit?: number;\n exactMatch?: boolean;\n}\n\nexport interface SearchToolResultError {\n message: string;\n availableVersions?: LibraryVersionDetails[]; // Use LibraryVersionDetails\n suggestions?: string[]; // Specific to LibraryNotFoundError\n}\n\nexport interface SearchToolResult {\n results: StoreSearchResult[];\n}\n\n/**\n * Tool for searching indexed documentation.\n * Supports exact version matches and version range patterns.\n * Returns available versions when requested version is not found.\n */\nexport class SearchTool {\n private docService: DocumentManagementService;\n\n constructor(docService: DocumentManagementService) {\n this.docService = docService;\n }\n\n async execute(options: SearchToolOptions): Promise<SearchToolResult> {\n const { library, version, query, limit = 5, exactMatch = false } = options;\n\n // When exactMatch is true, version must be specified and not 'latest'\n if (exactMatch && (!version || version === \"latest\")) {\n // Get available *detailed* versions for error message\n await this.docService.validateLibraryExists(library);\n // Fetch detailed versions using listLibraries and find the specific library\n const allLibraries = await this.docService.listLibraries();\n const libraryInfo = allLibraries.find((lib) => lib.library === library);\n const detailedVersions = libraryInfo ? libraryInfo.versions : [];\n throw new VersionNotFoundError(\n library,\n \"latest\", // Or perhaps the original 'version' if it wasn't 'latest'? Check logic.\n detailedVersions,\n );\n }\n\n // Default to 'latest' only when exactMatch is false\n const resolvedVersion = version || \"latest\";\n\n logger.info(\n `🔍 Searching ${library}@${resolvedVersion} for: ${query}${exactMatch ? \" (exact match)\" : \"\"}`,\n );\n\n try {\n // 1. Validate library exists first\n await this.docService.validateLibraryExists(library);\n\n // 2. 
Proceed with version finding and searching\n let versionToSearch: string | null | undefined = resolvedVersion;\n\n if (!exactMatch) {\n // If not exact match, find the best version (which might be null)\n const versionResult = await this.docService.findBestVersion(library, version);\n // Use the bestMatch from the result, which could be null\n versionToSearch = versionResult.bestMatch;\n\n // If findBestVersion returned null (no matching semver) AND unversioned docs exist,\n // should we search unversioned? The current logic passes null to searchStore,\n // which gets normalized to \"\" (unversioned). This seems reasonable.\n // If findBestVersion threw VersionNotFoundError, it's caught below.\n }\n // If exactMatch is true, versionToSearch remains the originally provided version.\n\n // Note: versionToSearch can be string | null | undefined here.\n // searchStore handles null/undefined by normalizing to \"\".\n const results = await this.docService.searchStore(\n library,\n versionToSearch,\n query,\n limit,\n );\n logger.info(`✅ Found ${results.length} matching results`);\n\n return { results };\n } catch (error) {\n logger.error(\n `❌ Search failed: ${error instanceof Error ? error.message : \"Unknown error\"}`,\n );\n throw error;\n }\n }\n}\n","import fs from \"node:fs\";\nimport path from \"node:path\";\nimport { fileURLToPath } from \"node:url\";\n\nlet projectRoot: string | null = null;\n\n/**\n * Finds the project root directory by searching upwards from the current file\n * for a directory containing 'package.json'. Caches the result.\n *\n * @returns {string} The absolute path to the project root.\n * @throws {Error} If package.json cannot be found.\n */\nexport function getProjectRoot(): string {\n // Return cached result if available\n if (projectRoot) {\n return projectRoot;\n }\n\n // Start from the directory of the current module\n const currentFilePath = fileURLToPath(import.meta.url);\n let currentDir = path.dirname(currentFilePath);\n\n // eslint-disable-next-line no-constant-condition\n while (true) {\n const packageJsonPath = path.join(currentDir, \"package.json\");\n if (fs.existsSync(packageJsonPath)) {\n projectRoot = currentDir; // Cache the result\n return projectRoot;\n }\n\n const parentDir = path.dirname(currentDir);\n // Check if we have reached the filesystem root\n if (parentDir === currentDir) {\n throw new Error(\"Could not find project root containing package.json.\");\n }\n currentDir = parentDir;\n }\n}\n","import type { Document } from \"@langchain/core/documents\";\nimport type { DocumentStore } from \"./DocumentStore\";\nimport type { StoreSearchResult } from \"./types\";\n\nconst CHILD_LIMIT = 5;\nconst SIBLING_LIMIT = 2;\n\nexport class DocumentRetrieverService {\n private documentStore: DocumentStore;\n\n constructor(documentStore: DocumentStore) {\n this.documentStore = documentStore;\n }\n\n /**\n * Collects all related chunk IDs for a given initial hit.\n * Returns an object with url, hitId, relatedIds (Set), and score.\n */\n private async getRelatedChunkIds(\n library: string,\n version: string,\n doc: Document,\n siblingLimit = SIBLING_LIMIT,\n childLimit = CHILD_LIMIT,\n ): Promise<{\n url: string;\n hitId: string;\n relatedIds: Set<string>;\n score: number;\n }> {\n const id = doc.id as string;\n const url = doc.metadata.url as string;\n const score = doc.metadata.score as number;\n const relatedIds = new Set<string>();\n relatedIds.add(id);\n\n // Parent\n const parent = await this.documentStore.findParentChunk(library, version, id);\n if 
(parent) {\n relatedIds.add(parent.id as string);\n }\n\n // Preceding Siblings\n const precedingSiblings = await this.documentStore.findPrecedingSiblingChunks(\n library,\n version,\n id,\n siblingLimit,\n );\n for (const sib of precedingSiblings) {\n relatedIds.add(sib.id as string);\n }\n\n // Child Chunks\n const childChunks = await this.documentStore.findChildChunks(\n library,\n version,\n id,\n childLimit,\n );\n for (const child of childChunks) {\n relatedIds.add(child.id as string);\n }\n\n // Subsequent Siblings\n const subsequentSiblings = await this.documentStore.findSubsequentSiblingChunks(\n library,\n version,\n id,\n siblingLimit,\n );\n for (const sib of subsequentSiblings) {\n relatedIds.add(sib.id as string);\n }\n\n return { url, hitId: id, relatedIds, score };\n }\n\n /**\n * Groups related chunk info by URL, deduplicates IDs, and finds max score per URL.\n */\n private groupAndPrepareFetch(\n relatedInfos: Array<{\n url: string;\n hitId: string;\n relatedIds: Set<string>;\n score: number;\n }>,\n ): Map<string, { uniqueChunkIds: Set<string>; maxScore: number }> {\n const urlMap = new Map<string, { uniqueChunkIds: Set<string>; maxScore: number }>();\n for (const info of relatedInfos) {\n let entry = urlMap.get(info.url);\n if (!entry) {\n entry = { uniqueChunkIds: new Set(), maxScore: info.score };\n urlMap.set(info.url, entry);\n }\n for (const id of info.relatedIds) {\n entry.uniqueChunkIds.add(id);\n }\n if (info.score > entry.maxScore) {\n entry.maxScore = info.score;\n }\n }\n return urlMap;\n }\n\n /**\n * Finalizes the merged result for a URL group by fetching, sorting, and joining content.\n */\n private async finalizeResult(\n library: string,\n version: string,\n url: string,\n uniqueChunkIds: Set<string>,\n maxScore: number,\n ): Promise<StoreSearchResult> {\n const ids = Array.from(uniqueChunkIds);\n const docs = await this.documentStore.findChunksByIds(library, version, ids);\n // Already sorted by sort_order in findChunksByIds\n const content = docs.map((d) => d.pageContent).join(\"\\n\\n\");\n // TODO: Apply code block merging here if/when implemented\n return {\n url,\n content,\n score: maxScore,\n };\n }\n\n /**\n * Searches for documents and expands the context around the matches.\n * @param library The library name.\n * @param version The library version (optional, defaults to searching documents without a version).\n * @param query The search query.\n * @param limit The optional limit for the initial search results.\n * @returns An array of StoreSearchResult objects with the aggregated content of the retrieved chunks.\n */\n async search(\n library: string,\n version: string | null | undefined,\n query: string,\n limit?: number,\n ): Promise<StoreSearchResult[]> {\n // Normalize version: null/undefined becomes empty string, then lowercase\n const normalizedVersion = (version ?? \"\").toLowerCase();\n\n const initialResults = await this.documentStore.findByContent(\n library,\n normalizedVersion,\n query,\n limit ?? 
10,\n );\n\n // Step 1: Expand context for each initial hit (collect related chunk IDs)\n const relatedInfos = await Promise.all(\n initialResults.map((doc) =>\n this.getRelatedChunkIds(library, normalizedVersion, doc),\n ),\n );\n\n // Step 2: Group by URL, deduplicate, and find max score\n const urlMap = this.groupAndPrepareFetch(relatedInfos);\n\n // Step 3: For each URL group, fetch, sort, and format the merged result\n const results: StoreSearchResult[] = [];\n for (const [url, { uniqueChunkIds, maxScore }] of urlMap.entries()) {\n const result = await this.finalizeResult(\n library,\n normalizedVersion,\n url,\n uniqueChunkIds,\n maxScore,\n );\n results.push(result);\n }\n\n return results;\n }\n}\n","class StoreError extends Error {\n constructor(\n message: string,\n public readonly cause?: unknown,\n ) {\n super(cause ? `${message} caused by ${cause}` : message);\n this.name = this.constructor.name;\n\n const causeError =\n cause instanceof Error ? cause : cause ? new Error(String(cause)) : undefined;\n if (causeError?.stack) {\n this.stack = causeError.stack;\n }\n }\n}\n\nclass DimensionError extends StoreError {\n constructor(\n public readonly modelName: string,\n public readonly modelDimension: number,\n public readonly dbDimension: number,\n ) {\n super(\n `Model \"${modelName}\" produces ${modelDimension}-dimensional vectors, ` +\n `which exceeds the database's fixed dimension of ${dbDimension}. ` +\n `Please use a model with dimension ≤ ${dbDimension}.`,\n );\n }\n}\n\nclass ConnectionError extends StoreError {}\n\nclass DocumentNotFoundError extends StoreError {\n constructor(public readonly id: string) {\n super(`Document ${id} not found`);\n }\n}\n\nexport { StoreError, ConnectionError, DocumentNotFoundError, DimensionError };\n","import fs from \"node:fs\";\nimport path from \"node:path\";\nimport type { Database } from \"better-sqlite3\";\nimport { logger } from \"../utils/logger\";\nimport { getProjectRoot } from \"../utils/paths\";\nimport { StoreError } from \"./errors\";\n\n// Construct the absolute path to the migrations directory using the project root\nconst MIGRATIONS_DIR = path.join(getProjectRoot(), \"db\", \"migrations\");\nconst MIGRATIONS_TABLE = \"_schema_migrations\";\n\n/**\n * Ensures the migration tracking table exists in the database.\n * @param db The database instance.\n */\nfunction ensureMigrationsTable(db: Database): void {\n db.exec(`\n CREATE TABLE IF NOT EXISTS ${MIGRATIONS_TABLE} (\n id TEXT PRIMARY KEY,\n applied_at DATETIME DEFAULT CURRENT_TIMESTAMP\n );\n `);\n}\n\n/**\n * Retrieves the set of already applied migration IDs (filenames) from the tracking table.\n * @param db The database instance.\n * @returns A Set containing the IDs of applied migrations.\n */\nfunction getAppliedMigrations(db: Database): Set<string> {\n const stmt = db.prepare(`SELECT id FROM ${MIGRATIONS_TABLE}`);\n const rows = stmt.all() as Array<{ id: string }>;\n return new Set(rows.map((row) => row.id));\n}\n\n/**\n * Applies pending database migrations found in the migrations directory.\n * Migrations are expected to be .sql files with sequential prefixes (e.g., 001-, 002-).\n * It tracks applied migrations in the _schema_migrations table.\n *\n * @param db The better-sqlite3 database instance.\n * @throws {StoreError} If any migration fails.\n */\nexport function applyMigrations(db: Database): void {\n try {\n logger.debug(\"Applying database migrations...\");\n ensureMigrationsTable(db);\n const appliedMigrations = getAppliedMigrations(db);\n\n if 
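To make the three aggregation steps in DocumentRetrieverService.search concrete, an illustrative trace (the URLs, IDs, and scores are invented for the example):

// Two initial hits land on the same page:
const relatedInfos = [
  { url: "https://example.com/docs/a", hitId: "1", relatedIds: new Set(["1", "2"]), score: 0.8 },
  { url: "https://example.com/docs/a", hitId: "3", relatedIds: new Set(["2", "3"]), score: 0.6 },
];
// groupAndPrepareFetch merges them into one URL group, deduplicating chunk "2":
//   Map { "https://example.com/docs/a" => { uniqueChunkIds: Set {"1","2","3"}, maxScore: 0.8 } }
// finalizeResult then fetches chunks 1-3 in sort_order and joins their
// content with blank lines into a single StoreSearchResult for that URL.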
(!fs.existsSync(MIGRATIONS_DIR)) {\n throw new StoreError(\"Migrations directory not found\");\n }\n\n const migrationFiles = fs\n .readdirSync(MIGRATIONS_DIR)\n .filter((file) => file.endsWith(\".sql\"))\n .sort(); // Sort alphabetically, relying on naming convention (001-, 002-)\n\n let appliedCount = 0;\n for (const filename of migrationFiles) {\n if (!appliedMigrations.has(filename)) {\n logger.debug(`Applying migration: ${filename}`);\n const filePath = path.join(MIGRATIONS_DIR, filename);\n const sql = fs.readFileSync(filePath, \"utf8\");\n\n // Run migration within a transaction\n const transaction = db.transaction(() => {\n db.exec(sql);\n const insertStmt = db.prepare(\n `INSERT INTO ${MIGRATIONS_TABLE} (id) VALUES (?)`,\n );\n insertStmt.run(filename);\n });\n\n try {\n transaction();\n logger.debug(`Successfully applied migration: ${filename}`);\n appliedCount++;\n } catch (error) {\n logger.error(`Failed to apply migration: ${filename} - ${error}`);\n // Let the transaction implicitly rollback on error\n throw new StoreError(`Migration failed: ${filename} - ${error}`);\n }\n }\n }\n\n if (appliedCount > 0) {\n logger.debug(`Applied ${appliedCount} new migration(s).`);\n } else {\n logger.debug(\"Database schema is up to date.\");\n }\n } catch (error) {\n // Ensure StoreError is thrown for consistent handling\n if (error instanceof StoreError) {\n throw error;\n }\n throw new StoreError(\"Failed during migration process\", error);\n }\n}\n","import type { DocumentMetadata } from \"../types\";\n\n/** Default vector dimension used across the application */\nexport const VECTOR_DIMENSION = 1536;\n\n/**\n * Database document record type matching the documents table schema\n */\nexport interface DbDocument {\n id: string;\n library: string;\n version: string;\n url: string;\n content: string;\n metadata: string; // JSON string of DocumentMetadata\n embedding: string | null; // JSON string of number[]\n sort_order: number;\n score: number | null;\n}\n\n/**\n * Utility type for handling SQLite query results that may be undefined\n */\nexport type DbQueryResult<T> = T | undefined;\n\n/**\n * Maps raw database document to the Document type used by the application\n */\nexport function mapDbDocumentToDocument(doc: DbDocument) {\n return {\n id: doc.id,\n pageContent: doc.content,\n metadata: JSON.parse(doc.metadata) as DocumentMetadata,\n };\n}\n\n/**\n * Search result type returned by the DocumentRetrieverService\n */\nexport interface StoreSearchResult {\n url: string;\n content: string;\n score: number | null;\n}\n\n/**\n * Represents a library and its indexed versions.\n */\nexport interface LibraryVersion {\n version: string;\n}\n\n/**\n * Detailed information about a specific indexed library version.\n */\nexport interface LibraryVersionDetails {\n version: string;\n documentCount: number;\n uniqueUrlCount: number;\n indexedAt: string | null; // ISO 8601 format from MIN(indexed_at)\n}\n\n/**\n * Result type for findBestVersion, indicating the best semver match\n * and whether unversioned documents exist.\n */\nexport interface FindVersionResult {\n bestMatch: string | null;\n hasUnversioned: boolean;\n}\n","import type { Document } from \"@langchain/core/documents\";\nimport type { Embeddings } from \"@langchain/core/embeddings\";\nimport Database, { type Database as DatabaseType } from \"better-sqlite3\";\nimport semver from \"semver\";\nimport * as sqliteVec from \"sqlite-vec\";\nimport type { DocumentMetadata } from \"../types\";\nimport { applyMigrations } from 
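A sketch of the runtime behavior of applyMigrations, assuming a migrations folder with sequentially prefixed files (the file names, db handle, and migrationSql value below are illustrative):

// db/migrations/001-initial-schema.sql   <- already recorded in _schema_migrations, skipped
// db/migrations/002-add-indexed-at.sql   <- pending, runs next
//
// Each pending file executes together with its bookkeeping row inside one
// better-sqlite3 transaction, so a failing script leaves no partial state:
const apply = db.transaction((sql: string, filename: string) => {
  db.exec(sql); // the migration script itself
  db.prepare("INSERT INTO _schema_migrations (id) VALUES (?)").run(filename);
});
apply(migrationSql, "002-add-indexed-at.sql"); // a throw rolls back both statements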
\"./applyMigrations\";\nimport { ConnectionError, DimensionError, StoreError } from \"./errors\";\nimport { VECTOR_DIMENSION } from \"./types\";\nimport {\n type DbDocument,\n type DbQueryResult,\n type LibraryVersionDetails,\n mapDbDocumentToDocument,\n} from \"./types\";\n\ninterface RawSearchResult extends DbDocument {\n vec_score?: number;\n fts_score?: number;\n}\n\ninterface RankedResult extends RawSearchResult {\n vec_rank?: number;\n fts_rank?: number;\n rrf_score: number;\n}\n\n/**\n * Manages document storage and retrieval using SQLite with vector and full-text search capabilities.\n * Provides direct access to SQLite with prepared statements to store and query document\n * embeddings along with their metadata. Supports versioned storage of documents for different\n * libraries, enabling version-specific document retrieval and searches.\n */\nexport class DocumentStore {\n private readonly db: DatabaseType;\n private embeddings!: Embeddings;\n private readonly dbDimension: number = VECTOR_DIMENSION;\n private modelDimension!: number;\n private statements!: {\n getById: Database.Statement;\n insertDocument: Database.Statement;\n insertEmbedding: Database.Statement;\n deleteDocuments: Database.Statement;\n queryVersions: Database.Statement;\n checkExists: Database.Statement;\n queryLibraryVersions: Database.Statement<[]>; // Updated type\n getChildChunks: Database.Statement;\n getPrecedingSiblings: Database.Statement;\n getSubsequentSiblings: Database.Statement;\n getParentChunk: Database.Statement;\n };\n\n /**\n * Calculates Reciprocal Rank Fusion score for a result\n */\n private calculateRRF(vecRank?: number, ftsRank?: number, k = 60): number {\n let rrf = 0;\n if (vecRank !== undefined) {\n rrf += 1 / (k + vecRank);\n }\n if (ftsRank !== undefined) {\n rrf += 1 / (k + ftsRank);\n }\n return rrf;\n }\n\n /**\n * Assigns ranks to search results based on their scores\n */\n private assignRanks(results: RawSearchResult[]): RankedResult[] {\n // Create maps to store ranks\n const vecRanks = new Map<number, number>();\n const ftsRanks = new Map<number, number>();\n\n // Sort by vector scores and assign ranks\n results\n .filter((r) => r.vec_score !== undefined)\n .sort((a, b) => (a.vec_score ?? 0) - (b.vec_score ?? 0))\n .forEach((result, index) => {\n vecRanks.set(Number(result.id), index + 1);\n });\n\n // Sort by BM25 scores and assign ranks\n results\n .filter((r) => r.fts_score !== undefined)\n .sort((a, b) => (a.fts_score ?? 0) - (b.fts_score ?? 
0))\n .forEach((result, index) => {\n ftsRanks.set(Number(result.id), index + 1);\n });\n\n // Combine results with ranks and calculate RRF\n return results.map((result) => ({\n ...result,\n vec_rank: vecRanks.get(Number(result.id)),\n fts_rank: ftsRanks.get(Number(result.id)),\n rrf_score: this.calculateRRF(\n vecRanks.get(Number(result.id)),\n ftsRanks.get(Number(result.id)),\n ),\n }));\n }\n\n constructor(dbPath: string) {\n if (!dbPath) {\n throw new StoreError(\"Missing required database path\");\n }\n\n // Only establish database connection in constructor\n this.db = new Database(dbPath);\n }\n\n /**\n * Sets up prepared statements for database queries\n */\n private prepareStatements(): void {\n const statements = {\n getById: this.db.prepare(\"SELECT * FROM documents WHERE id = ?\"),\n insertDocument: this.db.prepare(\n \"INSERT INTO documents (library, version, url, content, metadata, sort_order, indexed_at) VALUES (?, ?, ?, ?, ?, ?, ?)\", // Added indexed_at\n ),\n insertEmbedding: this.db.prepare<[number, string]>(\n \"INSERT INTO documents_vec (rowid, library, version, embedding) VALUES (?, ?, ?, ?)\",\n ),\n deleteDocuments: this.db.prepare(\n \"DELETE FROM documents WHERE library = ? AND version = ?\",\n ),\n queryVersions: this.db.prepare(\n \"SELECT DISTINCT version FROM documents WHERE library = ? ORDER BY version\",\n ),\n checkExists: this.db.prepare(\n \"SELECT id FROM documents WHERE library = ? AND version = ? LIMIT 1\",\n ),\n queryLibraryVersions: this.db.prepare(\n `SELECT\n library,\n version,\n COUNT(*) as documentCount,\n COUNT(DISTINCT url) as uniqueUrlCount,\n MIN(indexed_at) as indexedAt\n FROM documents\n GROUP BY library, version\n ORDER BY library, version`,\n ),\n getChildChunks: this.db.prepare(`\n SELECT * FROM documents\n WHERE library = ? \n AND version = ? \n AND url = ?\n AND json_array_length(json_extract(metadata, '$.path')) = ?\n AND json_extract(metadata, '$.path') LIKE ? || '%'\n AND sort_order > (SELECT sort_order FROM documents WHERE id = ?)\n ORDER BY sort_order\n LIMIT ?\n `),\n getPrecedingSiblings: this.db.prepare(`\n SELECT * FROM documents \n WHERE library = ? \n AND version = ? \n AND url = ?\n AND sort_order < (SELECT sort_order FROM documents WHERE id = ?)\n AND json_extract(metadata, '$.path') = ?\n ORDER BY sort_order DESC\n LIMIT ?\n `),\n getSubsequentSiblings: this.db.prepare(`\n SELECT * FROM documents \n WHERE library = ? \n AND version = ? \n AND url = ?\n AND sort_order > (SELECT sort_order FROM documents WHERE id = ?)\n AND json_extract(metadata, '$.path') = ?\n ORDER BY sort_order\n LIMIT ?\n `),\n getParentChunk: this.db.prepare(`\n SELECT * FROM documents \n WHERE library = ? \n AND version = ? 
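A worked example of the Reciprocal Rank Fusion computed above, with the default k = 60:

// Document A: rank 1 in vector search, rank 3 in BM25:
//   rrf(A) = 1/(60 + 1) + 1/(60 + 3) ≈ 0.0164 + 0.0159 ≈ 0.0323
// Document B: rank 1 in BM25 only:
//   rrf(B) = 1/(60 + 1) ≈ 0.0164
// Appearing in both result lists therefore outranks topping a single list.
const rrf = (vecRank?: number, ftsRank?: number, k = 60) =>
  (vecRank !== undefined ? 1 / (k + vecRank) : 0) +
  (ftsRank !== undefined ? 1 / (k + ftsRank) : 0);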
\n AND url = ?\n AND json_extract(metadata, '$.path') = ?\n AND sort_order < (SELECT sort_order FROM documents WHERE id = ?)\n ORDER BY sort_order DESC\n LIMIT 1\n `),\n };\n this.statements = statements;\n }\n\n /**\n * Pads a vector to the fixed database dimension by appending zeros.\n * Throws an error if the input vector is longer than the database dimension.\n */\n private padVector(vector: number[]): number[] {\n if (vector.length > this.dbDimension) {\n throw new Error(\n `Vector dimension ${vector.length} exceeds database dimension ${this.dbDimension}`,\n );\n }\n if (vector.length === this.dbDimension) {\n return vector;\n }\n return [...vector, ...new Array(this.dbDimension - vector.length).fill(0)];\n }\n\n /**\n * Initializes embeddings client using environment variables for configuration.\n *\n * The embedding model is configured using DOCS_MCP_EMBEDDING_MODEL environment variable.\n * Format: \"provider:model_name\" (e.g., \"google:text-embedding-004\") or just \"model_name\"\n * for OpenAI (default).\n *\n * Supported providers and their required environment variables:\n * - openai: OPENAI_API_KEY (and optionally OPENAI_API_BASE, OPENAI_ORG_ID)\n * - google: GOOGLE_APPLICATION_CREDENTIALS (path to service account JSON)\n * - aws: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION (or BEDROCK_AWS_REGION)\n * - microsoft: Azure OpenAI credentials (AZURE_OPENAI_API_*)\n */\n private async initializeEmbeddings(): Promise<void> {\n const modelSpec = process.env.DOCS_MCP_EMBEDDING_MODEL || \"text-embedding-3-small\";\n\n // Import dynamically to avoid circular dependencies\n const { createEmbeddingModel } = await import(\"./embeddings/EmbeddingFactory\");\n this.embeddings = createEmbeddingModel(modelSpec);\n\n // Determine the model's actual dimension by embedding a test string\n const testVector = await this.embeddings.embedQuery(\"test\");\n this.modelDimension = testVector.length;\n\n if (this.modelDimension > this.dbDimension) {\n throw new DimensionError(modelSpec, this.modelDimension, this.dbDimension);\n }\n }\n\n /**\n * Escapes a query string for use with SQLite FTS5 MATCH operator.\n * Wraps the query in double quotes and escapes internal double quotes.\n */\n private escapeFtsQuery(query: string): string {\n // Escape internal double quotes by doubling them\n const escapedQuotes = query.replace(/\"/g, '\"\"');\n // Wrap the entire string in double quotes\n return `\"${escapedQuotes}\"`;\n }\n\n /**\n * Initializes database connection and ensures readiness\n */\n async initialize(): Promise<void> {\n try {\n // 1. Load extensions first (moved before migrations)\n sqliteVec.load(this.db);\n\n // 2. Apply migrations (after extensions are loaded)\n applyMigrations(this.db);\n\n // 3. Initialize prepared statements\n this.prepareStatements();\n\n // 4. 
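Standalone sketches of the two helpers above; VECTOR_DIMENSION is 1536 as defined in ./types, and the 768-dimensional input is an illustrative model size:

const DB_DIMENSION = 1536;

// padVector: shorter model outputs are right-padded with zeros so that,
// e.g., a 768-dimensional embedding fits the fixed 1536-wide column.
function pad(vector: number[]): number[] {
  if (vector.length > DB_DIMENSION) throw new Error("vector exceeds database dimension");
  return [...vector, ...new Array(DB_DIMENSION - vector.length).fill(0)];
}
pad(new Array(768).fill(0.1)).length; // 1536

// escapeFtsQuery: FTS5 phrase quoting, doubling any embedded quotes.
const escapeFts = (query: string) => `"${query.replace(/"/g, '""')}"`;
escapeFts('say "hello"'); // '"say ""hello"""'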
Initialize embeddings client (await to catch errors)\n await this.initializeEmbeddings();\n } catch (error) {\n // Re-throw StoreError directly, wrap others in ConnectionError\n if (error instanceof StoreError) {\n throw error;\n }\n throw new ConnectionError(\"Failed to initialize database connection\", error);\n }\n }\n\n /**\n * Gracefully closes database connections\n */\n async shutdown(): Promise<void> {\n this.db.close();\n }\n\n /**\n * Retrieves all unique versions for a specific library\n */\n async queryUniqueVersions(library: string): Promise<string[]> {\n try {\n const rows = this.statements.queryVersions.all(library.toLowerCase()) as Array<\n Pick<DbDocument, \"version\">\n >;\n return rows.map((row) => row.version);\n } catch (error) {\n throw new ConnectionError(\"Failed to query versions\", error);\n }\n }\n\n /**\n * Verifies existence of documents for a specific library version\n */\n async checkDocumentExists(library: string, version: string): Promise<boolean> {\n try {\n const result = this.statements.checkExists.get(\n library.toLowerCase(),\n version.toLowerCase(),\n );\n return result !== undefined;\n } catch (error) {\n throw new ConnectionError(\"Failed to check document existence\", error);\n }\n }\n\n /**\n * Retrieves a mapping of all libraries to their available versions with details.\n */\n async queryLibraryVersions(): Promise<Map<string, LibraryVersionDetails[]>> {\n try {\n // Define the expected row structure from the GROUP BY query\n interface LibraryVersionRow {\n library: string;\n version: string;\n documentCount: number;\n uniqueUrlCount: number;\n indexedAt: string | null; // SQLite MIN might return string or null\n }\n\n const rows = this.statements.queryLibraryVersions.all() as LibraryVersionRow[];\n const libraryMap = new Map<string, LibraryVersionDetails[]>();\n\n for (const row of rows) {\n // Process all rows, including those where version is \"\" (unversioned)\n const library = row.library;\n if (!libraryMap.has(library)) {\n libraryMap.set(library, []);\n }\n\n // Format indexedAt to ISO string if available\n const indexedAtISO = row.indexedAt ? 
new Date(row.indexedAt).toISOString() : null;\n\n libraryMap.get(library)?.push({\n version: row.version,\n documentCount: row.documentCount,\n uniqueUrlCount: row.uniqueUrlCount,\n indexedAt: indexedAtISO,\n });\n }\n\n // Sort versions within each library: unversioned first, then semantically\n for (const versions of libraryMap.values()) {\n versions.sort((a, b) => {\n if (a.version === \"\" && b.version !== \"\") {\n return -1; // a (unversioned) comes first\n }\n if (a.version !== \"\" && b.version === \"\") {\n return 1; // b (unversioned) comes first\n }\n if (a.version === \"\" && b.version === \"\") {\n return 0; // Should not happen with GROUP BY, but handle anyway\n }\n // Both are non-empty, use semver compare\n return semver.compare(a.version, b.version);\n });\n }\n\n return libraryMap;\n } catch (error) {\n throw new ConnectionError(\"Failed to query library versions\", error);\n }\n }\n\n /**\n * Stores documents with library and version metadata, generating embeddings\n * for vector similarity search\n */\n async addDocuments(\n library: string,\n version: string,\n documents: Document[],\n ): Promise<void> {\n try {\n // Generate embeddings in batch\n const texts = documents.map((doc) => {\n const header = `<title>${doc.metadata.title}</title>\\n<url>${doc.metadata.url}</url>\\n<path>${doc.metadata.path.join(\" / \")}</path>\\n`;\n return `${header}${doc.pageContent}`;\n });\n const rawEmbeddings = await this.embeddings.embedDocuments(texts);\n const paddedEmbeddings = rawEmbeddings.map((vector) => this.padVector(vector));\n\n // Insert documents in a transaction\n const transaction = this.db.transaction((docs: typeof documents) => {\n for (let i = 0; i < docs.length; i++) {\n const doc = docs[i];\n const url = doc.metadata.url as string;\n if (!url || typeof url !== \"string\" || !url.trim()) {\n throw new StoreError(\"Document metadata must include a valid URL\");\n }\n\n // Insert into main documents table\n const result = this.statements.insertDocument.run(\n library.toLowerCase(),\n version.toLowerCase(),\n url,\n doc.pageContent,\n JSON.stringify(doc.metadata),\n i,\n new Date().toISOString(), // Pass current timestamp for indexed_at\n );\n const rowId = result.lastInsertRowid;\n\n // Insert into vector table\n this.statements.insertEmbedding.run(\n BigInt(rowId),\n library.toLowerCase(),\n version.toLowerCase(),\n JSON.stringify(paddedEmbeddings[i]),\n );\n }\n });\n\n transaction(documents);\n } catch (error) {\n throw new ConnectionError(\"Failed to add documents to store\", error);\n }\n }\n\n /**\n * Removes documents matching specified library and version\n * @returns Number of documents deleted\n */\n async deleteDocuments(library: string, version: string): Promise<number> {\n try {\n const result = this.statements.deleteDocuments.run(\n library.toLowerCase(),\n version.toLowerCase(),\n );\n return result.changes;\n } catch (error) {\n throw new ConnectionError(\"Failed to delete documents\", error);\n }\n }\n\n /**\n * Retrieves a document by its ID.\n * @param id The ID of the document.\n * @returns The document, or null if not found.\n */\n async getById(id: string): Promise<Document | null> {\n try {\n const row = this.statements.getById.get(id) as DbQueryResult<DbDocument>;\n if (!row) {\n return null;\n }\n\n return mapDbDocumentToDocument(row);\n } catch (error) {\n throw new ConnectionError(`Failed to get document by ID ${id}`, error);\n }\n }\n\n /**\n * Finds documents matching a text query using hybrid search.\n * Combines vector similarity search 
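For illustration, the comparator above orders a sample version list like this (sample values assumed):

import semver from "semver";

const sample = ["2.0.0", "", "1.10.0", "1.2.0"];
sample.sort((a, b) => {
  if (a === "" && b !== "") return -1; // unversioned first
  if (a !== "" && b === "") return 1;
  if (a === "" && b === "") return 0;
  return semver.compare(a, b); // semantic, not lexicographic
});
// -> ["", "1.2.0", "1.10.0", "2.0.0"]  ("1.10.0" correctly sorts after "1.2.0")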
with full-text search using Reciprocal Rank Fusion.\n */\n async findByContent(\n library: string,\n version: string,\n query: string,\n limit: number,\n ): Promise<Document[]> {\n try {\n const rawEmbedding = await this.embeddings.embedQuery(query);\n const embedding = this.padVector(rawEmbedding);\n const ftsQuery = this.escapeFtsQuery(query); // Escape the query for FTS\n\n const stmt = this.db.prepare(`\n WITH vec_scores AS (\n SELECT\n rowid as id,\n distance as vec_score\n FROM documents_vec\n WHERE library = ?\n AND version = ?\n AND embedding MATCH ?\n ORDER BY vec_score\n LIMIT ?\n ),\n fts_scores AS (\n SELECT\n f.rowid as id,\n bm25(documents_fts, 10.0, 1.0, 5.0, 1.0) as fts_score\n FROM documents_fts f\n JOIN documents d ON f.rowid = d.rowid\n WHERE d.library = ?\n AND d.version = ?\n AND documents_fts MATCH ?\n ORDER BY fts_score\n LIMIT ?\n )\n SELECT\n d.id,\n d.content,\n d.metadata,\n COALESCE(1 / (1 + v.vec_score), 0) as vec_score,\n COALESCE(1 / (1 + f.fts_score), 0) as fts_score\n FROM documents d\n LEFT JOIN vec_scores v ON d.id = v.id\n LEFT JOIN fts_scores f ON d.id = f.id\n WHERE v.id IS NOT NULL OR f.id IS NOT NULL\n `);\n\n const rawResults = stmt.all(\n library.toLowerCase(),\n version.toLowerCase(),\n JSON.stringify(embedding),\n limit,\n library.toLowerCase(),\n version.toLowerCase(),\n ftsQuery, // Use the escaped query\n limit,\n ) as RawSearchResult[];\n\n // Apply RRF ranking\n const rankedResults = this.assignRanks(rawResults);\n\n // Sort by RRF score and take top results\n const topResults = rankedResults\n .sort((a, b) => b.rrf_score - a.rrf_score)\n .slice(0, limit);\n\n return topResults.map((row) => ({\n ...mapDbDocumentToDocument(row),\n metadata: {\n ...JSON.parse(row.metadata),\n score: row.rrf_score,\n vec_rank: row.vec_rank,\n fts_rank: row.fts_rank,\n },\n }));\n } catch (error) {\n throw new ConnectionError(\n `Failed to find documents by content with query \"${query}\"`,\n error,\n );\n }\n }\n\n /**\n * Finds child chunks of a given document based on path hierarchy.\n */\n async findChildChunks(\n library: string,\n version: string,\n id: string,\n limit: number,\n ): Promise<Document[]> {\n try {\n const parent = await this.getById(id);\n if (!parent) {\n return [];\n }\n\n const parentPath = (parent.metadata as DocumentMetadata).path ?? 
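Note the score normalization in the SELECT above: both raw measures (vector distance and BM25, where lower is better) are mapped into a "higher is better" form before the rows reach assignRanks. A minimal sketch of the same mapping:

// COALESCE(1 / (1 + raw), 0) in the SQL corresponds to:
const normalize = (raw: number | undefined): number =>
  raw === undefined ? 0 : 1 / (1 + raw);

normalize(0);         // 1.0 (identical vectors)
normalize(1.5);       // 0.4
normalize(undefined); // 0   (no hit in that search arm)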
[];\n const parentUrl = (parent.metadata as DocumentMetadata).url;\n\n const result = this.statements.getChildChunks.all(\n library.toLowerCase(),\n version.toLowerCase(),\n parentUrl,\n parentPath.length + 1,\n JSON.stringify(parentPath),\n id,\n limit,\n ) as Array<DbDocument>;\n\n return result.map((row) => mapDbDocumentToDocument(row));\n } catch (error) {\n throw new ConnectionError(`Failed to find child chunks for ID ${id}`, error);\n }\n }\n\n /**\n * Finds preceding sibling chunks of a given document.\n */\n async findPrecedingSiblingChunks(\n library: string,\n version: string,\n id: string,\n limit: number,\n ): Promise<Document[]> {\n try {\n const reference = await this.getById(id);\n if (!reference) {\n return [];\n }\n\n const refMetadata = reference.metadata as DocumentMetadata;\n\n const result = this.statements.getPrecedingSiblings.all(\n library.toLowerCase(),\n version.toLowerCase(),\n refMetadata.url,\n id,\n JSON.stringify(refMetadata.path),\n limit,\n ) as Array<DbDocument>;\n\n return result.reverse().map((row) => mapDbDocumentToDocument(row));\n } catch (error) {\n throw new ConnectionError(\n `Failed to find preceding sibling chunks for ID ${id}`,\n error,\n );\n }\n }\n\n /**\n * Finds subsequent sibling chunks of a given document.\n */\n async findSubsequentSiblingChunks(\n library: string,\n version: string,\n id: string,\n limit: number,\n ): Promise<Document[]> {\n try {\n const reference = await this.getById(id);\n if (!reference) {\n return [];\n }\n\n const refMetadata = reference.metadata;\n\n const result = this.statements.getSubsequentSiblings.all(\n library.toLowerCase(),\n version.toLowerCase(),\n refMetadata.url,\n id,\n JSON.stringify(refMetadata.path),\n limit,\n ) as Array<DbDocument>;\n\n return result.map((row) => mapDbDocumentToDocument(row));\n } catch (error) {\n throw new ConnectionError(\n `Failed to find subsequent sibling chunks for ID ${id}`,\n error,\n );\n }\n }\n\n /**\n * Finds the parent chunk of a given document.\n */\n async findParentChunk(\n library: string,\n version: string,\n id: string,\n ): Promise<Document | null> {\n try {\n const child = await this.getById(id);\n if (!child) {\n return null;\n }\n\n const childMetadata = child.metadata as DocumentMetadata;\n const path = childMetadata.path ?? [];\n const parentPath = path.slice(0, -1);\n\n if (parentPath.length === 0) {\n return null;\n }\n\n const result = this.statements.getParentChunk.get(\n library.toLowerCase(),\n version.toLowerCase(),\n childMetadata.url,\n JSON.stringify(parentPath),\n id,\n ) as DbQueryResult<DbDocument>;\n\n if (!result) {\n return null;\n }\n\n return mapDbDocumentToDocument(result);\n } catch (error) {\n throw new ConnectionError(`Failed to find parent chunk for ID ${id}`, error);\n }\n }\n\n /**\n * Fetches multiple documents by their IDs in a single call.\n * Returns an array of Document objects, sorted by their sort_order.\n */\n async findChunksByIds(\n library: string,\n version: string,\n ids: string[],\n ): Promise<Document[]> {\n if (!ids.length) return [];\n try {\n // Use parameterized query for variable number of IDs\n const placeholders = ids.map(() => \"?\").join(\",\");\n const stmt = this.db.prepare(\n `SELECT * FROM documents WHERE library = ? AND version = ? 
AND id IN (${placeholders}) ORDER BY sort_order`,\n );\n const rows = stmt.all(\n library.toLowerCase(),\n version.toLowerCase(),\n ...ids,\n ) as DbDocument[];\n return rows.map((row) => mapDbDocumentToDocument(row));\n } catch (error) {\n throw new ConnectionError(\"Failed to fetch documents by IDs\", error);\n }\n }\n}\n","import fs from \"node:fs\";\nimport path from \"node:path\";\nimport type { Document } from \"@langchain/core/documents\";\nimport envPaths from \"env-paths\";\nimport Fuse from \"fuse.js\";\nimport semver from \"semver\";\nimport { GreedySplitter, SemanticMarkdownSplitter } from \"../splitter\";\nimport type { ContentChunk, DocumentSplitter } from \"../splitter/types\";\nimport { LibraryNotFoundError, VersionNotFoundError } from \"../tools\";\nimport {\n SPLITTER_MAX_CHUNK_SIZE,\n SPLITTER_MIN_CHUNK_SIZE,\n SPLITTER_PREFERRED_CHUNK_SIZE,\n} from \"../utils/config\";\nimport { logger } from \"../utils/logger\";\nimport { getProjectRoot } from \"../utils/paths\";\nimport { DocumentRetrieverService } from \"./DocumentRetrieverService\";\nimport { DocumentStore } from \"./DocumentStore\";\nimport { StoreError } from \"./errors\";\nimport type {\n FindVersionResult,\n LibraryVersion,\n LibraryVersionDetails,\n StoreSearchResult,\n} from \"./types\";\n\n/**\n * Provides semantic search capabilities across different versions of library documentation.\n */\nexport class DocumentManagementService {\n private readonly store: DocumentStore;\n private readonly documentRetriever: DocumentRetrieverService;\n private readonly splitter: DocumentSplitter;\n\n /**\n * Normalizes a version string, converting null or undefined to an empty string\n * and converting to lowercase.\n */\n private normalizeVersion(version?: string | null): string {\n return (version ?? \"\").toLowerCase();\n }\n\n constructor() {\n let dbPath: string;\n let dbDir: string;\n\n // 1. Check Environment Variable\n const envStorePath = process.env.DOCS_MCP_STORE_PATH;\n if (envStorePath) {\n dbDir = envStorePath;\n dbPath = path.join(dbDir, \"documents.db\");\n logger.debug(`💾 Using database directory from DOCS_MCP_STORE_PATH: ${dbDir}`);\n } else {\n // 2. Check Old Local Path\n const projectRoot = getProjectRoot();\n const oldDbDir = path.join(projectRoot, \".store\");\n const oldDbPath = path.join(oldDbDir, \"documents.db\");\n const oldDbExists = fs.existsSync(oldDbPath); // Check file existence specifically\n\n if (oldDbExists) {\n dbPath = oldDbPath;\n dbDir = oldDbDir;\n logger.debug(`💾 Using legacy database path: ${dbPath}`);\n } else {\n // 3. 
Use Standard Path\n const standardPaths = envPaths(\"docs-mcp-server\", { suffix: \"\" });\n dbDir = standardPaths.data;\n dbPath = path.join(dbDir, \"documents.db\");\n logger.debug(`💾 Using standard database directory: ${dbDir}`);\n }\n }\n\n // Ensure the chosen directory exists\n try {\n fs.mkdirSync(dbDir, { recursive: true });\n } catch (error) {\n // Log potential error during directory creation but proceed\n // The DocumentStore constructor might handle DB file creation errors\n logger.error(`⚠️ Failed to create database directory ${dbDir}: ${error}`);\n }\n\n this.store = new DocumentStore(dbPath);\n this.documentRetriever = new DocumentRetrieverService(this.store);\n\n const semanticSplitter = new SemanticMarkdownSplitter(\n SPLITTER_PREFERRED_CHUNK_SIZE,\n SPLITTER_MAX_CHUNK_SIZE,\n );\n const greedySplitter = new GreedySplitter(\n semanticSplitter,\n SPLITTER_MIN_CHUNK_SIZE,\n SPLITTER_PREFERRED_CHUNK_SIZE,\n );\n\n this.splitter = greedySplitter;\n }\n\n /**\n * Initializes the underlying document store.\n */\n async initialize(): Promise<void> {\n await this.store.initialize();\n }\n\n /**\n * Shuts down the underlying document store.\n */\n\n async shutdown(): Promise<void> {\n logger.info(\"🔌 Shutting down store manager\");\n await this.store.shutdown();\n }\n\n /**\n * Validates if a library exists in the store (either versioned or unversioned).\n * Throws LibraryNotFoundError with suggestions if the library is not found.\n * @param library The name of the library to validate.\n * @throws {LibraryNotFoundError} If the library does not exist.\n */\n async validateLibraryExists(library: string): Promise<void> {\n logger.info(`🔎 Validating existence of library: ${library}`);\n const normalizedLibrary = library.toLowerCase(); // Ensure consistent casing\n\n // Check for both versioned and unversioned documents\n const versions = await this.listVersions(normalizedLibrary);\n const hasUnversioned = await this.exists(normalizedLibrary, \"\"); // Check explicitly for unversioned\n\n if (versions.length === 0 && !hasUnversioned) {\n logger.warn(`⚠️ Library '${library}' not found.`);\n\n // Library doesn't exist, fetch all libraries to provide suggestions\n const allLibraries = await this.listLibraries();\n const libraryNames = allLibraries.map((lib) => lib.library);\n\n let suggestions: string[] = [];\n if (libraryNames.length > 0) {\n const fuse = new Fuse(libraryNames, {\n // Configure fuse.js options if needed (e.g., threshold)\n // isCaseSensitive: false, // Handled by normalizing library names\n // includeScore: true,\n threshold: 0.4, // Adjust threshold for desired fuzziness (0=exact, 1=match anything)\n });\n const results = fuse.search(normalizedLibrary);\n // Take top 3 suggestions\n suggestions = results.slice(0, 3).map((result) => result.item);\n logger.info(`🔍 Found suggestions: ${suggestions.join(\", \")}`);\n }\n\n throw new LibraryNotFoundError(library, suggestions);\n }\n\n logger.info(`✅ Library '${library}' confirmed to exist.`);\n }\n\n /**\n * Returns a list of all available semantic versions for a library.\n */\n async listVersions(library: string): Promise<LibraryVersion[]> {\n const versions = await this.store.queryUniqueVersions(library);\n return versions.filter((v) => semver.valid(v)).map((version) => ({ version }));\n }\n\n /**\n * Checks if documents exist for a given library and optional version.\n * If version is omitted, checks for documents without a specific version.\n */\n async exists(library: string, version?: string | null): Promise<boolean> 
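A condensed sketch of the fuzzy suggestion lookup above, assuming a few indexed library names; the exact matches returned depend on fuse.js scoring:

import Fuse from "fuse.js";

const indexed = ["react", "redux", "remix"]; // illustrative index contents
const fuse = new Fuse(indexed, { threshold: 0.4 });
const suggestions = fuse.search("raect").slice(0, 3).map((r) => r.item);
// -> likely ["react"], which is attached to the LibraryNotFoundError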
{\n const normalizedVersion = this.normalizeVersion(version);\n return this.store.checkDocumentExists(library, normalizedVersion);\n }\n\n /**\n * Finds the most appropriate version of documentation based on the requested version.\n * When no target version is specified, returns the latest version.\n *\n * Version matching behavior:\n * - Exact versions (e.g., \"18.0.0\"): Matches that version or any earlier version\n * - X-Range patterns (e.g., \"5.x\", \"5.2.x\"): Matches within the specified range\n * - \"latest\" or no version: Returns the latest available version\n *\n * For documentation, we prefer matching older versions over no match at all,\n * since older docs are often still relevant and useful.\n * Also checks if unversioned documents exist for the library.\n */\n async findBestVersion(\n library: string,\n targetVersion?: string,\n ): Promise<FindVersionResult> {\n logger.info(\n `🔍 Finding best version for ${library}${targetVersion ? `@${targetVersion}` : \"\"}`,\n );\n\n // Check if unversioned documents exist *before* filtering for valid semver\n const hasUnversioned = await this.store.checkDocumentExists(library, \"\");\n const validSemverVersions = await this.listVersions(library);\n\n if (validSemverVersions.length === 0) {\n if (hasUnversioned) {\n logger.info(`ℹ️ Unversioned documents exist for ${library}`);\n return { bestMatch: null, hasUnversioned: true };\n }\n // Throw error only if NO versions (semver or unversioned) exist\n logger.warn(`⚠️ No valid versions found for ${library}`);\n // Fetch detailed versions to pass to the error constructor\n const allLibraryDetails = await this.store.queryLibraryVersions();\n const libraryDetails = allLibraryDetails.get(library) ?? [];\n throw new VersionNotFoundError(library, targetVersion ?? \"\", libraryDetails);\n }\n\n const versionStrings = validSemverVersions.map((v) => v.version);\n let bestMatch: string | null = null;\n\n if (!targetVersion || targetVersion === \"latest\") {\n bestMatch = semver.maxSatisfying(versionStrings, \"*\");\n } else {\n const versionRegex = /^(\\d+)(?:\\.(?:x(?:\\.x)?|\\d+(?:\\.(?:x|\\d+))?))?$|^$/;\n if (!versionRegex.test(targetVersion)) {\n logger.warn(`⚠️ Invalid target version format: ${targetVersion}`);\n // Don't throw yet, maybe unversioned exists\n } else {\n // Restore the previous logic with fallback\n let range = targetVersion;\n if (!semver.validRange(targetVersion)) {\n // If it's not a valid range (like '1.2' or '1'), treat it like a tilde range\n range = `~${targetVersion}`;\n } else if (semver.valid(targetVersion)) {\n // If it's an exact version, allow matching it OR any older version\n range = `${range} || <=${targetVersion}`;\n }\n // If it was already a valid range (like '1.x'), use it directly\n bestMatch = semver.maxSatisfying(versionStrings, range);\n }\n }\n\n if (bestMatch) {\n logger.info(\n `✅ Found best match version ${bestMatch} for ${library}@${targetVersion}`,\n );\n } else {\n logger.warn(`⚠️ No matching semver version found for ${library}@${targetVersion}`);\n }\n\n // If no semver match found, but unversioned exists, return that info.\n // If a semver match was found, return it along with unversioned status.\n // If no semver match AND no unversioned, throw error.\n if (!bestMatch && !hasUnversioned) {\n // Fetch detailed versions to pass to the error constructor\n const allLibraryDetails = await this.store.queryLibraryVersions();\n const libraryDetails = allLibraryDetails.get(library) ?? [];\n throw new VersionNotFoundError(library, targetVersion ?? 
\"\", libraryDetails);\n }\n\n return { bestMatch, hasUnversioned };\n }\n\n /**\n * Removes all documents for a specific library and optional version.\n * If version is omitted, removes documents without a specific version.\n */\n async removeAllDocuments(library: string, version?: string | null): Promise<void> {\n const normalizedVersion = this.normalizeVersion(version);\n logger.info(\n `🗑️ Removing all documents from ${library}@${normalizedVersion || \"[no version]\"} store`,\n );\n const count = await this.store.deleteDocuments(library, normalizedVersion);\n logger.info(`📊 Deleted ${count} documents`);\n }\n\n /**\n * Adds a document to the store, splitting it into smaller chunks for better search results.\n * Uses SemanticMarkdownSplitter to maintain markdown structure and content types during splitting.\n * Preserves hierarchical structure of documents and distinguishes between text and code segments.\n * If version is omitted, the document is added without a specific version.\n */\n async addDocument(\n library: string,\n version: string | null | undefined,\n document: Document,\n ): Promise<void> {\n const normalizedVersion = this.normalizeVersion(version);\n const url = document.metadata.url as string;\n if (!url || typeof url !== \"string\" || !url.trim()) {\n throw new StoreError(\"Document metadata must include a valid URL\");\n }\n\n logger.info(`📚 Adding document: ${document.metadata.title}`);\n\n if (!document.pageContent.trim()) {\n throw new Error(\"Document content cannot be empty\");\n }\n\n // Split document into semantic chunks\n const chunks = await this.splitter.splitText(document.pageContent);\n\n // Convert semantic chunks to documents\n const splitDocs = chunks.map((chunk: ContentChunk) => ({\n pageContent: chunk.content,\n metadata: {\n ...document.metadata,\n level: chunk.section.level,\n path: chunk.section.path,\n },\n }));\n logger.info(`📄 Split document into ${splitDocs.length} chunks`);\n\n // Add split documents to store\n await this.store.addDocuments(library, normalizedVersion, splitDocs);\n }\n\n /**\n * Searches for documentation content across versions.\n * Uses hybrid search (vector + FTS).\n * If version is omitted, searches documents without a specific version.\n */\n async searchStore(\n library: string,\n version: string | null | undefined,\n query: string,\n limit = 5,\n ): Promise<StoreSearchResult[]> {\n const normalizedVersion = this.normalizeVersion(version);\n return this.documentRetriever.search(library, normalizedVersion, query, limit);\n }\n\n async listLibraries(): Promise<\n Array<{ library: string; versions: LibraryVersionDetails[] }>\n > {\n // queryLibraryVersions now returns the detailed map directly\n const libraryMap = await this.store.queryLibraryVersions();\n\n // Transform the map into the desired array structure\n return Array.from(libraryMap.entries()).map(([library, versions]) => ({\n library,\n versions, // The versions array already contains LibraryVersionDetails\n }));\n 
}\n}\n"],"names":["LogLevel","DEFAULT_MAX_PAGES","DEFAULT_MAX_DEPTH","ScrapeMode","DEFAULT_CONCURRENCY","URL","item","path","PipelineJobStatus","uuidv4","chunks","semver","fs","projectRoot"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;AAGY,IAAA,6BAAAA,cAAL;AACLA,YAAAA,UAAA,WAAQ,CAAR,IAAA;AACAA,YAAAA,UAAA,UAAO,CAAP,IAAA;AACAA,YAAAA,UAAA,UAAO,CAAP,IAAA;AACAA,YAAAA,UAAA,WAAQ,CAAR,IAAA;AAJUA,SAAAA;AAAA,GAAA,YAAA,CAAA,CAAA;AAOZ,IAAI,kBAA4B;AAMzB,SAAS,YAAY,OAAuB;AAC/B,oBAAA;AACpB;AAKO,MAAM,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA,EAKpB,OAAO,CAAC,YAAoB;AAC1B,QAAI,mBAAmB,GAAgB;AACrC,cAAQ,MAAM,OAAO;AAAA,IAAA;AAAA,EAEzB;AAAA;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,CAAC,YAAoB;AACzB,QAAI,mBAAmB,GAAe;AACpC,cAAQ,IAAI,OAAO;AAAA,IAAA;AAAA,EAEvB;AAAA;AAAA;AAAA;AAAA;AAAA,EAKA,MAAM,CAAC,YAAoB;AACzB,QAAI,mBAAmB,GAAe;AACpC,cAAQ,KAAK,OAAO;AAAA,IAAA;AAAA,EAExB;AAAA;AAAA;AAAA;AAAA;AAAA,EAKA,OAAO,CAAC,YAAoB;AAC1B,QAAI,mBAAmB,GAAgB;AACrC,cAAQ,MAAM,OAAO;AAAA,IAAA;AAAA,EACvB;AAEJ;AC5DA,MAAM,qBAAqB,MAAM;AAAA,EAC/B,YACE,SACgB,cAAuB,OACvB,OAChB;AACA,UAAM,OAAO;AAHG,SAAA,cAAA;AACA,SAAA,QAAA;AAGX,SAAA,OAAO,KAAK,YAAY;AAC7B,QAAI,OAAO,OAAO;AACX,WAAA,QAAQ,GAAG,KAAK,KAAK;AAAA,aAAgB,MAAM,KAAK;AAAA,IAAA;AAAA,EACvD;AAEJ;AAqBA,MAAM,wBAAwB,aAAa;AAAA,EACzC,YAAY,KAAa,OAAe;AACtC,UAAM,gBAAgB,GAAG,IAAI,OAAO,KAAK;AAAA,EAAA;AAE7C;AAQA,MAAM,sBAAsB,aAAa;AAAA,EACvC,YACkB,aACA,aACA,YAChB;AACA;AAAA,MACE,0BAA0B,WAAW,OAAO,WAAW,aAAa,UAAU;AAAA,MAC9E;AAAA,IACF;AAPgB,SAAA,cAAA;AACA,SAAA,cAAA;AACA,SAAA,aAAA;AAAA,EAAA;AAOpB;AC7CA,MAAM,2BAAiD;AAAA,EACrD,YAAY;AAAA,EACZ,YAAY;AAAA,EACZ,qBAAqB;AAAA,EACrB,aAAa;AAAA,EACb,aAAa;AACf;AAEgB,SAAA,aACd,KACA,UAAgC,0BACxB;AACJ,MAAA;AACI,UAAA,YAAY,IAAI,IAAI,GAAG;AAC7B,UAAM,eAAe,EAAE,GAAG,0BAA0B,GAAG,QAAQ;AAG/D,UAAM,aAAa,IAAI,IAAI,UAAU,SAAS,UAAU,QAAQ;AAGhE,QAAI,aAAa,aAAa;AACjB,iBAAA,WAAW,WAAW,SAAS;AAAA,QACxC;AAAA,QACA;AAAA,MACF;AAAA,IAAA;AAIF,QAAI,aAAa,uBAAuB,WAAW,SAAS,SAAS,GAAG;AACtE,iBAAW,WAAW,WAAW,SAAS,QAAQ,QAAQ,EAAE;AAAA,IAAA;AAI9D,UAAM,gBAAgB,CAAC,aAAa,aAAa,UAAU,OAAO;AAClE,UAAM,kBAAkB,CAAC,aAAa,cAAc,UAAU,SAAS;AAGnE,QAAA,SAAS,WAAW,SAAS,WAAW;AAC5C,QAAI,iBAAiB;AACT,gBAAA;AAAA,IAAA;AAEZ,QAAI,eAAe;AACP,gBAAA;AAAA,IAAA;AAIZ,QAAI,aAAa,YAAY;AAC3B,eAAS,OAAO,YAAY;AAAA,IAAA;AAGvB,WAAA;AAAA,EAAA,QACD;AACC,WAAA;AAAA,EAAA;AAEX;AAMO,SAAS,YAAY,KAAmB;AACzC,MAAA;AACF,QAAI,IAAI,GAAG;AAAA,WACJ,OAAO;AACd,UAAM,IAAI,gBAAgB,KAAK,iBAAiB,QAAQ,QAAQ,MAAS;AAAA,EAAA;AAE7E;AAKgB,SAAA,gBAAgB,MAAW,MAAoB;AAC7D,SAAO,KAAK,SAAS,YAAkB,MAAA,KAAK,SAAS,YAAY;AACnE;AAMgB,SAAA,cAAc,MAAW,MAAoB;AAC3D,QAAM,UAAU,IAAI,IAAI,KAAK,SAAS,aAAa;AACnD,QAAM,UAAU,IAAI,IAAI,KAAK,SAAS,aAAa;AAC5C,SAAA,YAAY,QAAQ,YAAY;AACzC;AAQgB,SAAA,UAAU,SAAc,WAAyB;AAEzD,QAAA,WAAW,QAAQ,SAAS,SAAS,GAAG,IAC1C,QAAQ,WACR,GAAG,QAAQ,QAAQ;AAEhB,SAAA,UAAU,SAAS,WAAW,QAAQ;AAC/C;ACxGO,MAAMC,sBAAoB;AAG1B,MAAMC,sBAAoB;AAG1B,MAAM,0BAA0B;AAGhC,MAAM,mBAAmB;AAGzB,MAAM,oBAAoB;AAG1B,MAAM,mBAAmB;AAKzB,MAAM,sBAAsB;AAK5B,MAAM,qBAAqB;AAK3B,MAAM,0BAA0B;AAChC,MAAM,gCAAgC;AACtC,MAAM,0BAA0B;AC/BhC,MAAM,qBAAqB;AAAA,EACxB;AAAA;AAAA;AAAA;AAAA;AAAA,EAMR,YAAY,SAA2C;AAErD,UAAM,iBAAkD;AAAA,MACtD,UAAU,CAAC,EAAE,MAAM,UAAU,YAAY,IAAO,GAAA,WAAW,QAAQ;AAAA,MACnE,SAAS,CAAC,WAAW,QAAQ;AAAA,MAC7B,kBAAkB,CAAC,WAAW,SAAS,SAAS,WAAW,KAAK;AAAA,MAChE,SAAS,CAAC,SAAS,IAAI;AAAA,MACvB,aAAa;AAAA,IACf;AAEK,SAAA,kBAAkB,IAAI,gBAAgB;AAAA,MACzC,GAAG;AAAA,MACH,GAAG;AAAA,IAAA,CACJ;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOH,kBAA0C;AACjC,WAAA,KAAK,gBAAgB,WAAW;AAAA,EAAA;AAE3C;AC1BO,MAAM,YAAsC;AAAA,EAChC,uBAAuB;AAAA,IACtC;AAAA;AAAA,IACA;AAAA;AAAA,IACA;AAAA;AAAA,IACA;AAAA;AAAA,IACA;AAAA;AAAA,IACA;AAAA;AAAA,IACA;AAAA;AAAA,EACF;AAAA,EAEQ;AAAA,EAER,cAAc;AACP,SAAA,uBAAuB,IAAI,qBAAqB;AAAA,EAAA;AA
AA,EAGvD,SAAS,QAAyB;AAChC,WAAO,OAAO,WAAW,SAAS,KAAK,OAAO,WAAW,UAAU;AAAA,EAAA;AAAA,EAGrE,MAAc,MAAM,IAA2B;AAC7C,WAAO,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,EAAE,CAAC;AAAA,EAAA;AAAA,EAGzD,MAAM,MAAM,QAAgB,SAA6C;AACjE,UAAA,aAAa,SAAS,cAAc;AACpC,UAAA,YAAY,SAAS,cAAc;AAEnC,UAAA,kBAAkB,SAAS,mBAAmB;AAEpD,aAAS,UAAU,GAAG,WAAW,YAAY,WAAW;AAClD,UAAA;AACI,cAAA,cAAc,KAAK,qBAAqB,gBAAgB;AAC9D,cAAM,UAAU;AAAA,UACd,GAAG;AAAA,UACH,GAAG,SAAS;AAAA;AAAA,QACd;AAEA,cAAM,SAA6B;AAAA,UACjC,cAAc;AAAA;AAAA,UACd;AAAA,UACA,SAAS,SAAS;AAAA,UAClB,QAAQ,SAAS;AAAA;AAAA;AAAA,UAEjB,cAAc,kBAAkB,IAAI;AAAA,QACtC;AAEA,cAAM,WAAW,MAAM,MAAM,IAAI,QAAQ,MAAM;AAExC,eAAA;AAAA,UACL,SAAS,SAAS;AAAA,UAClB,UAAU,SAAS,QAAQ,cAAc,KAAK;AAAA,UAC9C;AAAA,UACA,UAAU,SAAS,QAAQ,kBAAkB;AAAA,QAC/C;AAAA,eACO,OAAgB;AACvB,cAAM,aAAa;AACb,cAAA,SAAS,WAAW,UAAU;AACpC,cAAM,OAAO,WAAW;AAGxB,YAAI,CAAC,mBAAmB,UAAU,UAAU,OAAO,SAAS,KAAK;AACzD,gBAAA,WAAW,WAAW,UAAU,SAAS;AAC/C,cAAI,UAAU;AACZ,kBAAM,IAAI,cAAc,QAAQ,UAAU,MAAM;AAAA,UAAA;AAAA,QAClD;AAIA,YAAA,UAAU,eACT,WAAW,UAAa,KAAK,qBAAqB,SAAS,MAAM,IAClE;AACM,gBAAA,QAAQ,YAAY,KAAK;AACxB,iBAAA;AAAA,YACL,WAAW,UAAU,CAAC,IACpB,aAAa,CACf,eAAe,MAAM,aAAa,MAAM,WAAW,IAAI,kBAAkB,KAAK;AAAA,UAChF;AACM,gBAAA,KAAK,MAAM,KAAK;AACtB;AAAA,QAAA;AAIF,cAAM,IAAI;AAAA,UACR,mBAAmB,MAAM,UACvB,UAAU,CACZ,cAAc,WAAW,WAAW,eAAe;AAAA,UACnD;AAAA,UACA,iBAAiB,QAAQ,QAAQ;AAAA,QACnC;AAAA,MAAA;AAAA,IACF;AAEF,UAAM,IAAI;AAAA,MACR,mBAAmB,MAAM,UAAU,aAAa,CAAC;AAAA,MACjD;AAAA,IACF;AAAA,EAAA;AAEJ;ACnGO,MAAM,YAAsC;AAAA,EACjD,SAAS,QAAyB;AACzB,WAAA,OAAO,WAAW,SAAS;AAAA,EAAA;AAAA,EAGpC,MAAM,MAAM,QAAgB,SAA6C;AACvE,UAAM,WAAW,OAAO,QAAQ,cAAc,EAAE;AACzC,WAAA,KAAK,kBAAkB,QAAQ,EAAE;AAEpC,QAAA;AACF,YAAM,UAAU,MAAM,GAAG,SAAS,QAAQ;AAC1C,YAAM,MAAM,KAAK,QAAQ,QAAQ,EAAE,YAAY;AACzC,YAAA,WAAW,KAAK,YAAY,GAAG;AAE9B,aAAA;AAAA,QACL;AAAA,QACA;AAAA,QACA;AAAA,QACA,UAAU;AAAA;AAAA,MACZ;AAAA,aACO,OAAgB;AACvB,YAAM,IAAI;AAAA,QACR,uBAAuB,QAAQ,KAC5B,MAA+B,WAAW,eAC7C;AAAA,QACA;AAAA,QACA,iBAAiB,QAAQ,QAAQ;AAAA,MACnC;AAAA,IAAA;AAAA,EACF;AAAA,EAGM,YAAY,KAAqB;AACvC,YAAQ,KAAK;AAAA,MACX,KAAK;AAAA,MACL,KAAK;AACI,eAAA;AAAA,MACT,KAAK;AACI,eAAA;AAAA,MACT,KAAK;AACI,eAAA;AAAA,MACT;AACS,eAAA;AAAA,IAAA;AAAA,EACX;AAEJ;AC/CO,MAAM,0BAA0B;AAAA,EACpB;AAAA;AAAA;AAAA;AAAA;AAAA,EAMjB,YAAY,YAA0C;AACpD,SAAK,aAAa;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQpB,MAAM,IAAI,gBAA6E;AACrF,QAAI,QAAQ;AAEN,UAAA,WAAW,OAAO,MAA6B;AACnD,UAAI,KAAK,OAAO;AAER,cAAA,IAAI,MAAM,8BAA8B;AAAA,MAAA;AAExC,cAAA;AAEF,YAAA,KAA6C,KAAK,WAAW,CAAC;AACpE,UAAI,CAAC,IAAI;AAEP;AAAA,MAAA;AAIF,YAAM,OAAO,SAAS,KAAK,MAAM,IAAI,CAAC;AAElC,UAAA;AACI,cAAA,GAAG,QAAQ,gBAAgB,IAAI;AAAA,eAC9B,OAAO;AAEd,uBAAe,OAAO;AAAA,UACpB,iBAAiB,QAAQ,QAAQ,IAAI,MAAM,OAAO,KAAK,CAAC;AAAA,QAC1D;AAEO,eAAA,KAAK,iCAAiC,KAAK,EAAE;AAAA,MAAA;AAAA,IAKxD;AAGA,UAAM,SAAS,CAAC;AAGT,WAAA;AAAA,EAAA;AAEX;ACpDO,MAAM,4BAAkE;AAAA,EAC7E,MAAM,QACJ,SACA,MACe;AAEf,QAAI,CAAC,QAAQ,YAAY,WAAW,WAAW,GAAG;AAChD,YAAM,KAAK;AACX;AAAA,IAAA;AAIF,UAAM,aACJ,OAAO,QAAQ,YAAY,WACvB,QAAQ,UACR,OAAO,KAAK,QAAQ,OAAO,EAAE,SAAS,OAAO;AAE/C,QAAA;AACF,aAAO,MAAM,0CAA0C,QAAQ,MAAM,EAAE;AAEjE,YAAA,IAAI,QAAQ,KAAK,UAAU;AAGjC,cAAQ,MAAM;AAGd,YAAM,KAAK;AAAA,aACJ,OAAO;AACd,aAAO,MAAM,yCAAyC,QAAQ,MAAM,KAAK,KAAK,EAAE;AAChF,cAAQ,OAAO;AAAA,QACb,iBAAiB,QACb,QACA,IAAI,MAAM,gCAAgC,OAAO,KAAK,CAAC,EAAE;AAAA,MAC/D;AAEA;AAAA,IAAA;AAAA,EACF;AAEJ;ACrCgB,SAAA,YAAY,MAAc,SAAqC;AACvE,QAAA,iBAAiB,IAAI,eAAe;AAE3B,iBAAA,GAAG,SAAS,MAAM;AAAA,EAAA,CAAE;AACpB,iBAAA,GAAG,QAAQ,MAAM;AAAA,EAAA,CAAE;AACnB,iBAAA,GAAG,QAAQ,MAAM;AAAA,EAAA,CAAE;AACnB,iBAAA,GAAG,SAAS,MAAM;AAAA,EAAA,CAAE;AACpB,iBAAA,GAAG,OAAO,MAAM;AAAA,EAAA,CAAE;AAEjC,QAAM,iBAAqC;AAAA,IACzC;AAAA,EACF;AAGA,QAAM,eAAmC,EAAE,GAAG,gBAAgB,GAAG,QAAQ;AAElE,SA
AA,IAAI,MAAM,MAAM,YAAY;AACrC;ACpBO,MAAM,4BAAkE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAM7E,MAAM,QACJ,SACA,MACe;AAEf,UAAM,IAAI,QAAQ;AAClB,QAAI,CAAC,GAAG;AAEN,UAAI,QAAQ,YAAY,WAAW,WAAW,GAAG;AACxC,eAAA;AAAA,UACL,YAAY,KAAK,YAAY,IAAI;AAAA,QACnC;AAAA,MAAA;AAGF,YAAM,KAAK;AACX;AAAA,IAAA;AAIE,QAAA;AACI,YAAA,eAAe,EAAE,SAAS;AAChC,aAAO,MAAM,SAAS,aAAa,MAAM,uBAAuB,QAAQ,MAAM,EAAE;AAEhF,YAAM,iBAA2B,CAAC;AACrB,mBAAA,KAAK,CAAC,OAAO,YAAY;AACpC,cAAM,OAAO,EAAE,OAAO,EAAE,KAAK,MAAM;AACnC,YAAI,QAAQ,KAAK,KAAK,MAAM,IAAI;AAC1B,cAAA;AACF,kBAAM,SAAS,IAAI,IAAI,MAAM,QAAQ,MAAM;AAEvC,gBAAA,CAAC,CAAC,SAAS,UAAU,OAAO,EAAE,SAAS,OAAO,QAAQ,GAAG;AACpD,qBAAA,MAAM,wCAAwC,IAAI,EAAE;AAC3D;AAAA,YAAA;AAEa,2BAAA,KAAK,OAAO,IAAI;AAAA,mBACxB,GAAG;AAEH,mBAAA,MAAM,gCAAgC,IAAI,EAAE;AAAA,UAAA;AAAA,QACrD;AAAA,MACF,CACD;AAGD,cAAQ,QAAQ,CAAC,GAAG,IAAI,IAAI,cAAc,CAAC;AACpC,aAAA;AAAA,QACL,aAAa,QAAQ,MAAM,MAAM,6BAA6B,QAAQ,MAAM;AAAA,MAC9E;AAAA,aACO,OAAO;AACd,aAAO,MAAM,+BAA+B,QAAQ,MAAM,KAAK,KAAK,EAAE;AACtE,cAAQ,OAAO;AAAA,QACb,IAAI;AAAA,UACF,sCAAsC,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK,CAAC;AAAA,QAAA;AAAA,MAEhG;AAAA,IAAA;AAKF,UAAM,KAAK;AAAA,EAAA;AAIf;ACpEO,MAAM,gCAAsE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAMjF,MAAM,QACJ,SACA,MACe;AAEf,UAAM,IAAI,QAAQ;AAClB,QAAI,CAAC,GAAG;AAEN,UAAI,QAAQ,YAAY,WAAW,WAAW,GAAG;AACxC,eAAA;AAAA,UACL,YAAY,KAAK,YAAY,IAAI;AAAA,QACnC;AAAA,MAAA;AAGF,YAAM,KAAK;AACX;AAAA,IAAA;AAIE,QAAA;AAEE,UAAA,QAAQ,EAAE,OAAO,EAAE,QAAQ,OAAO,KAAK;AAE3C,UAAI,CAAC,OAAO;AAEV,gBAAQ,EAAE,IAAI,EAAE,QAAQ,OAAO,KAAK;AAAA,MAAA;AAItC,cAAQ,SAAS;AAGjB,cAAQ,MAAM,QAAQ,QAAQ,GAAG,EAAE,KAAK;AAExC,cAAQ,SAAS,QAAQ;AACzB,aAAO,MAAM,qBAAqB,KAAK,UAAU,QAAQ,MAAM,EAAE;AAAA,aAC1D,OAAO;AACd,aAAO,MAAM,kCAAkC,QAAQ,MAAM,KAAK,KAAK,EAAE;AACzE,cAAQ,OAAO;AAAA,QACb,IAAI;AAAA,UACF,yCAAyC,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK,CAAC;AAAA,QAAA;AAAA,MAEnG;AAAA,IAAA;AAKF,UAAM,KAAK;AAAA,EAAA;AAIf;AC5DY,IAAA,+BAAAC,gBAAL;AACLA,cAAA,OAAQ,IAAA;AACRA,cAAA,YAAa,IAAA;AACbA,cAAA,MAAO,IAAA;AAHGA,SAAAA;AAAA,GAAA,cAAA,CAAA,CAAA;ACML,MAAM,yBAA+D;AAAA,EAClE,UAA0B;AAAA;AAAA;AAAA;AAAA;AAAA,EAMlC,MAAc,gBAAkC;AAC9C,QAAI,CAAC,KAAK,WAAW,CAAC,KAAK,QAAQ,eAAe;AAChD,YAAM,aAAa,QAAQ,IAAI,wBAAwB,MAAM,GAAG,KAAK,CAAC;AAC/D,aAAA;AAAA,QACL,mEAAmE,WAAW,KAAK,GAAG,KAAK,MAAM;AAAA,MACnG;AACK,WAAA,UAAU,MAAM,SAAS,OAAO,EAAE,SAAS,YAAY,MAAM,YAAY;AACzE,WAAA,QAAQ,GAAG,gBAAgB,MAAM;AACpC,eAAO,MAAM,2CAA2C;AACxD,aAAK,UAAU;AAAA,MAAA,CAChB;AAAA,IAAA;AAGH,WAAO,KAAK;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOd,MAAM,eAA8B;AAC9B,QAAA,KAAK,SAAS,eAAe;AAC/B,aAAO,MAAM,wCAAwC;AAC/C,YAAA,KAAK,QAAQ,MAAM;AACzB,WAAK,UAAU;AAAA,IAAA;AAAA,EACjB;AAAA,EAGF,MAAM,QACJ,SACA,MACe;AAEf,QAAI,CAAC,QAAQ,YAAY,WAAW,WAAW,GAAG;AAChD,YAAM,KAAK;AACX;AAAA,IAAA;AAIF,UAAM,aAAa,QAAQ,SAAS,cAAc,WAAW;AAC7D,UAAM,sBACJ,eAAe,WAAW,cAAc,eAAe,WAAW;AAEpE,QAAI,CAAC,qBAAqB;AACjB,aAAA;AAAA,QACL,qCAAqC,QAAQ,MAAM,sBAAsB,UAAU;AAAA,MACrF;AACA,YAAM,KAAK;AACX;AAAA,IAAA;AAIK,WAAA;AAAA,MACL,oCAAoC,QAAQ,MAAM,kBAAkB,UAAU;AAAA,IAChF;AAEA,QAAI,OAAoB;AACxB,QAAI,eAA8B;AAE9B,QAAA;AACI,YAAA,UAAU,MAAM,KAAK,cAAc;AAClC,aAAA,MAAM,QAAQ,QAAQ;AAC7B,aAAO,MAAM,0BAA0B,QAAQ,MAAM,EAAE;AAGvD,YAAM,KAAK,MAAM,QAAQ,CAAC,UAAU;AAClC,YAAI,MAAM,QAAQ,EAAE,IAAI,MAAM,QAAQ,QAAQ;AAC5C,iBAAO,MAAM,QAAQ;AAAA,YACnB,QAAQ;AAAA,YACR,aAAa,QAAQ;AAAA,YACrB,MAAM,QAAQ;AAAA,UAAA,CACf;AAAA,QAAA;AAGH,cAAM,eAAe,MAAM,QAAQ,EAAE,aAAa;AAC9C,YAAA,CAAC,SAAS,cAAc,QAAQ,OAAO,EAAE,SAAS,YAAY,GAAG;AACnE,iBAAO,MAAM,MAAM;AAAA,QAAA;AAErB,eAAO,MAAM,SAAS;AAAA,MAAA,CACvB;AAKK,YAAA,KAAK,KAAK,QAAQ,QAAQ;AAAA,QAC9B,WAAW;AAAA,MAAA,CACZ;AAMc,qBAAA,MAAM,KAAK,QAAQ;AAClC,aAAO,MAAM,iDAAiD,QAAQ,MAAM,EAAE;AAAA,aACvE,OAAO;AACd,aAAO,MAAM,+BAA+B,QAAQ,MAAM,KAAK,KAAK,EAAE;AACtE,cAAQ,OAAO;AAAA,QACb,iBAAiB,QACb,QACA,
IAAI,MAAM,gCAAgC,OAAO,KAAK,CAAC,EAAE;AAAA,MAC/D;AAAA,IAAA,UACA;AAEA,UAAI,MAAM;AACF,cAAA,KAAK,QAAQ,MAAM;AACzB,cAAM,KAAK,MAAM;AAAA,MAAA;AAAA,IACnB;AAKF,QAAI,iBAAiB,MAAM;AACzB,cAAQ,UAAU;AACX,aAAA;AAAA,QACL,6CAA6C,QAAQ,MAAM;AAAA,MAC7D;AAAA,IAAA,OACK;AAEE,aAAA;AAAA,QACL,qDAAqD,QAAQ,MAAM;AAAA,MACrE;AAAA,IAAA;AAIF,UAAM,KAAK;AAAA,EAAA;AAEf;AC/HO,MAAM,wBAA8D;AAAA;AAAA,EAExD,2BAA2B;AAAA,IAC1C;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA;AAAA,IAEA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA;AAAA,IAEA;AAAA,IACA;AAAA;AAAA,IAEA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAAA,EAEA,MAAM,QACJ,SACA,MACe;AAEf,UAAM,IAAI,QAAQ;AAClB,QAAI,CAAC,GAAG;AACN,UAAI,QAAQ,YAAY,WAAW,WAAW,GAAG;AACxC,eAAA;AAAA,UACL,YAAY,KAAK,YAAY,IAAI;AAAA,QACnC;AAAA,MAAA;AAEF,YAAM,KAAK;AACX;AAAA,IAAA;AAGE,QAAA;AAEF,YAAM,oBAAoB;AAAA,QACxB,GAAI,QAAQ,QAAQ,oBAAoB,CAAC;AAAA;AAAA,QACzC,GAAG,KAAK;AAAA,MACV;AACO,aAAA;AAAA,QACL,8BAA8B,kBAAkB,MAAM,kBAAkB,QAAQ,MAAM;AAAA,MACxF;AACA,UAAI,eAAe;AACnB,iBAAW,YAAY,mBAAmB;AACpC,YAAA;AACI,gBAAA,WAAW,EAAE,QAAQ;AAC3B,gBAAM,QAAQ,SAAS;AACvB,cAAI,QAAQ,GAAG;AACb,qBAAS,OAAO;AACA,4BAAA;AAAA,UAAA;AAAA,iBAEX,eAAe;AAGf,iBAAA;AAAA,YACL,iCAAiC,QAAQ,6BAA6B,aAAa;AAAA,UACrF;AACA,kBAAQ,OAAO;AAAA,YACb,IAAI,MAAM,qBAAqB,QAAQ,MAAM,aAAa,EAAE;AAAA,UAC9D;AAAA,QAAA;AAAA,MACF;AAEF,aAAO,MAAM,WAAW,YAAY,iBAAiB,QAAQ,MAAM,EAAE;AAAA,aAG9D,OAAO;AACd,aAAO,MAAM,yCAAyC,QAAQ,MAAM,KAAK,KAAK,EAAE;AAChF,cAAQ,OAAO;AAAA,QACb,iBAAiB,QACb,QACA,IAAI,MAAM,gCAAgC,OAAO,KAAK,CAAC,EAAE;AAAA,MAC/D;AAAA,IAAA;AAKF,UAAM,KAAK;AAAA,EAAA;AAEf;AClIO,MAAM,yBAA+D;AAAA,EAClE;AAAA,EAER,cAAc;AACP,SAAA,kBAAkB,IAAI,gBAAgB;AAAA,MACzC,cAAc;AAAA,MACd,IAAI;AAAA,MACJ,kBAAkB;AAAA,MAClB,gBAAgB;AAAA,MAChB,aAAa;AAAA,MACb,iBAAiB;AAAA,MACjB,WAAW;AAAA,IAAA,CACZ;AAEI,SAAA,gBAAgB,IAAI,GAAG;AAE5B,SAAK,eAAe;AAAA,EAAA;AAAA,EAGd,iBAAuB;AAExB,SAAA,gBAAgB,QAAQ,OAAO;AAAA,MAClC,QAAQ,CAAC,KAAK;AAAA,MACd,aAAa,CAAC,SAAS,SAAS;AAC9B,cAAM,UAAU;AAChB,YAAI,WAAW,QAAQ,aAAa,eAAe,KAAK;AACxD,YAAI,CAAC,UAAU;AAGb,gBAAM,mBACJ,QAAQ;AAAA,YACN;AAAA,eAEF,QAAQ;AAAA,YACN;AAAA,UACF;AACF,cAAI,kBAAkB;AACpB,kBAAM,YAAY,iBAAiB;AACnC,kBAAM,QAAQ,UAAU;AAAA,cACtB;AAAA,YACF;AACI,gBAAA,MAAkB,YAAA,MAAM,CAAC;AAAA,UAAA;AAAA,QAC/B;AAGF,cAAM,aAAa,MAAM,KAAK,QAAQ,iBAAiB,IAAI,CAAC;AAC5D,mBAAW,MAAM,YAAY;AAC3B,aAAG,YAAY,IAAI;AAAA,QAAA;AAEf,cAAA,OAAO,QAAQ,eAAe;AAE7B,eAAA;AAAA,QAAW,QAAQ;AAAA,EAAK,KAAK,QAAQ,cAAc,EAAE,CAAC;AAAA;AAAA;AAAA,MAAA;AAAA,IAC/D,CACD;AACI,SAAA,gBAAgB,QAAQ,UAAU;AAAA,MACrC,QAAQ,CAAC,GAAG;AAAA,MACZ,aAAa,CAAC,SAAS,SAAS;AACxB,cAAA,OAAQ,KAAqB,aAAa,MAAM;AAClD,YAAA,CAAC,WAAW,YAAY,KAAK;AACxB,iBAAA;AAAA,QAAA;AAET,YAAI,CAAC,MAAM;AACF,iBAAA;AAAA,QAAA;AAEF,eAAA,IAAI,OAAO,KAAK,IAAI;AAAA,MAAA;AAAA,IAC7B,CACD;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQH,MAAM,QACJ,SACA,MACe;AAEf,UAAM,IAAI,QAAQ;AAClB,QAAI,CAAC,GAAG;AAEN,UAAI,QAAQ,YAAY,WAAW,WAAW,GAAG;AACxC,eAAA;AAAA,UACL,YAAY,KAAK,YAAY,IAAI;AAAA,QACnC;AAAA,MAAA;AAGF,YAAM,KAAK;AACX;AAAA,IAAA;AAIE,QAAA;AACF,aAAO,MAAM,2CAA2C,QAAQ,MAAM,EAAE;AAGxE,YAAM,gBAAgB,EAAE,MAAM,EAAE,KAAK,KAAK,EAAE,KAAK;AACjD,YAAM,WAAW,KAAK,gBAAgB,SAAS,aAAa,EAAE,KAAK;AAEnE,UAAI,CAAC,UAAU;AAEP,cAAA,UAAU,6DAA6D,QAAQ,MAAM;AAC3F,eAAO,KAAK,OAAO;AAEnB,gBAAQ,UAAU;AAClB,gBAAQ,cAAc;AAAA,MAAA,OACjB;AAEL,gBAAQ,UAAU;AAClB,gBAAQ,cAAc;AACtB,eAAO,MAAM,+CAA+C,QAA
Q,MAAM,EAAE;AAAA,MAAA;AAAA,aAEvE,OAAO;AACd,aAAO,MAAM,yCAAyC,QAAQ,MAAM,KAAK,KAAK,EAAE;AAChF,cAAQ,OAAO;AAAA,QACb,IAAI;AAAA,UACF,uCAAuC,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK,CAAC;AAAA,QAAA;AAAA,MAEjG;AAAA,IAAA;AAKF,UAAM,KAAK;AAAA,EAAA;AAKf;ACpIO,MAAM,gCAAsE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAMjF,MAAM,QACJ,SACA,MACe;AACX,QAAA,QAAQ,gBAAgB,iBAAiB;AAG3C,UAAI,CAAC,MAAM,QAAQ,QAAQ,KAAK,GAAG;AACjC,gBAAQ,QAAQ,CAAC;AAAA,MAAA;AAAA,IACnB;AAKF,UAAM,KAAK;AAAA,EAAA;AAIf;AC1BO,MAAM,oCAA0E;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAMrF,MAAM,QACJ,SACA,MACe;AAEf,QAAI,QAAQ,gBAAgB,mBAAmB,QAAQ,gBAAgB,cAAc;AAC/E,UAAA;AAEF,cAAM,cACJ,OAAO,QAAQ,YAAY,WACvB,QAAQ,UACR,OAAO,KAAK,QAAQ,OAAO,EAAE,SAAS,OAAO;AAE/C,YAAA,OAAO,QAAQ,YAAY,UAAU;AACvC,kBAAQ,UAAU;AAAA,QAAA;AAGpB,YAAI,QAAQ;AAER,YAAA,QAAQ,gBAAgB,iBAAiB;AACrC,gBAAA,QAAQ,YAAY,MAAM,aAAa;AACzC,cAAA,QAAQ,CAAC,GAAG;AACN,oBAAA,MAAM,CAAC,EAAE,KAAK;AAAA,UAAA;AAAA,QACxB;AAGF,gBAAQ,SAAS,QAAQ;AAAA,eAClB,OAAO;AACd,gBAAQ,OAAO;AAAA,UACb,IAAI;AAAA,YACF,6CAA6C,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK,CAAC;AAAA,UAAA;AAAA,QAEvG;AAAA,MAAA;AAAA,IAEF;AAIF,UAAM,KAAK;AAAA,EAAA;AAIf;ACrDO,MAAM,sBAAsB,MAAM;AAAA,EACvC,YACE,SACgB,OAChB;AACA,UAAM,OAAO;AAFG,SAAA,QAAA;AAGX,SAAA,OAAO,KAAK,YAAY;AAC7B,QAAI,OAAO,OAAO;AACX,WAAA,QAAQ,GAAG,KAAK,KAAK;AAAA,aAAgB,MAAM,KAAK;AAAA,IAAA;AAAA,EACvD;AAEJ;AAYO,MAAM,2BAA2B,cAAc;AAAC;AAKhD,MAAM,0BAA0B,cAAc;AAAA,EACnD,YAAY,UAAU,uBAAuB;AAC3C,UAAM,OAAO;AAAA,EAAA;AAEjB;ACxBA,MAAM,oBAAoB;AAC1B,MAAM,oBAAoB;AAC1B,MAAMC,wBAAsB;AAWrB,MAAe,oBAA+C;AAAA,EACzD,8BAAc,IAAY;AAAA,EAC1B,YAAY;AAAA,EAIZ;AAAA,EAEV,YAAY,UAAsC,IAAI;AACpD,SAAK,UAAU;AAAA,EAAA;AAAA;AAAA,EAoBjB,MAAgB,aACd,OACA,SACA,SACA,kBACA,QACsB;AAChB,UAAA,UAAU,MAAM,QAAQ;AAAA,MAC5B,MAAM,IAAI,OAAO,SAAS;AAExB,YAAI,QAAQ,SAAS;AACb,gBAAA,IAAI,kBAAkB,4CAA4C;AAAA,QAAA;AAGpE,cAAA,WAAW,QAAQ,YAAY;AACjC,YAAA,KAAK,QAAQ,UAAU;AACzB,iBAAO,CAAC;AAAA,QAAA;AAGN,YAAA;AAEF,gBAAM,SAAS,MAAM,KAAK,YAAY,MAAM,SAAS,QAAW,MAAM;AAEtE,cAAI,OAAO,UAAU;AACd,iBAAA;AAEC,kBAAA,WAAW,QAAQ,YAAY;AAE9B,mBAAA;AAAA,cACL,oBAAoB,KAAK,SAAS,IAAI,QAAQ,WAAW,KAAK,KAAK,IAAI,QAAQ,MAAM,KAAK,GAAG;AAAA,YAC/F;AACA,kBAAM,iBAAiB;AAAA,cACrB,cAAc,KAAK;AAAA,cACnB;AAAA,cACA,YAAY,KAAK;AAAA,cACjB,OAAO,KAAK;AAAA,cACZ;AAAA,cACA,UAAU,OAAO;AAAA,YAAA,CAClB;AAAA,UAAA;AAGG,gBAAA,YAAY,OAAO,SAAS,CAAC;AAC5B,iBAAA,UACJ,IAAI,CAAC,UAAU;AACV,gBAAA;AACF,oBAAM,YAAY,IAAIC,MAAI,OAAO,OAAO;AACjC,qBAAA;AAAA,gBACL,KAAK,UAAU;AAAA,gBACf,OAAO,KAAK,QAAQ;AAAA,cACtB;AAAA,qBACO,OAAO;AAEP,qBAAA,KAAK,kBAAkB,KAAK,EAAE;AAAA,YAAA;AAEhC,mBAAA;AAAA,UACR,CAAA,EACA,OAAO,CAACC,UAASA,UAAS,IAAI;AAAA,iBAC1B,OAAO;AACd,cAAI,QAAQ,cAAc;AACxB,mBAAO,MAAM,uBAAuB,KAAK,GAAG,KAAK,KAAK,EAAE;AACxD,mBAAO,CAAC;AAAA,UAAA;AAEJ,gBAAA;AAAA,QAAA;AAAA,MAET,CAAA;AAAA,IACH;AAGM,UAAA,WAAW,QAAQ,KAAK;AAC9B,UAAM,cAA2B,CAAC;AAGlC,eAAW,QAAQ,UAAU;AAC3B,YAAM,gBAAgB,aAAa,KAAK,KAAK,KAAK,QAAQ,oBAAoB;AAC9E,UAAI,CAAC,KAAK,QAAQ,IAAI,aAAa,GAAG;AAC/B,aAAA,QAAQ,IAAI,aAAa;AAC9B,oBAAY,KAAK,IAAI;AAAA,MAAA;AAAA,IACvB;AAGK,WAAA;AAAA,EAAA;AAAA,EAGT,MAAM,OACJ,SACA,kBACA,QACe;AACf,SAAK,QAAQ,MAAM;AACnB,SAAK,YAAY;AAEjB,UAAM,UAAU,IAAID,MAAI,QAAQ,GAAG;AAC7B,UAAA,QAAQ,CAAC,EAAE,KAAK,QAAQ,KAAK,OAAO,GAAuB;AAG5D,SAAA,QAAQ,IAAI,aAAa,QAAQ,KAAK,KAAK,QAAQ,oBAAoB,CAAC;AAGvE,UAAA,WAAW,QAAQ,YAAY;AAC/B,UAAA,iBAAiB,QAAQ,kBAAkBD;AAEjD,WAAO,MAAM,SAAS,KAAK,KAAK,YAAY,UAAU;AAGpD,UAAI,QAAQ,SAAS;AACnB,eAAO,KAAK,+BAA+B;AACrC,cAAA,IAAI,kBAAkB,8BAA8B;AAAA,MAAA;AAGtD,YAAA,iBAAiB,WAAW,KAAK;AACvC,UAAI,kBAAkB,GAAG;AACvB;AAAA,MAAA;AAGF,YAAM,YAAY,KAAK;AAAA,QACrB;AAAA;AAAA,QACA;AAAA,QACA,MAAM;AAAA,MACR;AAEA,YAAM,QAAQ,MAAM,OAAO,GAAG,SAAS;AAEjC,YAAA,UAAU,MAAM,KAAK;AAAA,QACzB;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MACF;AAEM,YAAA,KAAK,GA
AG,OAAO;AAAA,IAAA;AAAA,EACvB;AAEJ;AC3JO,MAAM,2BAA2B,oBAAoB;AAAA,EACzC,cAAc,IAAI,YAAY;AAAA,EAC9B;AAAA,EACA;AAAA;AAAA,EAEjB,YAAY,UAAqC,IAAI;AACnD,UAAM,EAAE,sBAAsB,QAAQ,qBAAA,CAAsB;AAC5D,SAAK,qBAAqB,QAAQ;AAC7B,SAAA,uBAAuB,IAAI,yBAAyB;AAAA,EAAA;AAAA,EAG3D,UAAU,KAAsB;AAC1B,QAAA;AACI,YAAA,YAAY,IAAI,IAAI,GAAG;AAC7B,aAAO,UAAU,aAAa,WAAW,UAAU,aAAa;AAAA,IAAA,QAC1D;AACC,aAAA;AAAA,IAAA;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAMM,UACN,SACA,WACA,OACS;AACL,QAAA;AAEF,UAAI,UAAU,UAAU;AACf,eAAA,cAAc,SAAS,SAAS;AAAA,MAAA;AAEzC,UAAI,UAAU,YAAY;AACjB,eAAA,gBAAgB,SAAS,SAAS;AAAA,MAAA;AAG3C,aAAO,gBAAgB,SAAS,SAAS,KAAK,UAAU,SAAS,SAAS;AAAA,IAAA,QACpE;AACC,aAAA;AAAA,IAAA;AAAA,EACT;AAAA,EAGF,MAAyB,YACvB,MACA,SACA,mBACA,QACoD;AAC9C,UAAA,EAAE,QAAQ;AAEZ,QAAA;AAEF,YAAM,eAAe;AAAA,QACnB;AAAA,QACA,iBAAiB,QAAQ;AAAA,MAC3B;AAGA,YAAM,aAAyB,MAAM,KAAK,YAAY,MAAM,KAAK,YAAY;AAG7E,YAAM,iBAA2C;AAAA,QAC/C,SAAS,WAAW;AAAA,QACpB,aAAa,WAAW;AAAA,QACxB,QAAQ,WAAW;AAAA;AAAA,QACnB,UAAU,CAAC;AAAA,QACX,OAAO,CAAC;AAAA,QACR,QAAQ,CAAC;AAAA,QACT;AAAA,QACA,SAAS,KAAK;AAAA,MAChB;AAEI,UAAA;AACJ,UAAI,eAAe,YAAY,WAAW,WAAW,GAAG;AAEtD,cAAM,oBAAkD;AAAA,UACtD,KAAK;AAAA;AAAA;AAAA,UAEL,IAAI,4BAA4B;AAAA;AAAA,UAChC,IAAI,gCAAgC;AAAA,UACpC,IAAI,4BAA4B;AAAA,UAChC,IAAI,wBAAwB;AAAA;AAAA,UAC5B,IAAI,yBAAyB;AAAA,QAC/B;AACW,mBAAA,IAAI,0BAA0B,iBAAiB;AAAA,MAAA,WAE1D,eAAe,gBAAgB,mBAC/B,eAAe,gBAAgB,cAC/B;AACA,mBAAW,IAAI,0BAA0B;AAAA,UACvC,IAAI,oCAAoC;AAAA,UACxC,IAAI,gCAAgC;AAAA;AAAA,QAAA,CACrC;AAAA,MAAA,OACI;AAEE,eAAA;AAAA,UACL,6BAA6B,eAAe,WAAW,aAAa,GAAG;AAAA,QACzE;AAEA,eAAO,EAAE,UAAU,QAAW,OAAO,CAAA,EAAG;AAAA,MAAA;AAG1C,YAAM,eAAe,MAAM,SAAS,IAAI,cAAc;AAI3C,iBAAA,OAAO,aAAa,QAAQ;AACrC,eAAO,KAAK,wBAAwB,GAAG,KAAK,IAAI,OAAO,EAAE;AAAA,MAAA;AAIvD,UAAA,OAAO,aAAa,YAAY,YAAY,CAAC,aAAa,QAAQ,QAAQ;AACrE,eAAA,KAAK,oCAAoC,GAAG,4BAA4B;AAE/E,eAAO,EAAE,UAAU,QAAW,OAAO,aAAa,MAAM;AAAA,MAAA;AAI1D,YAAM,UAAU,IAAI,IAAI,QAAQ,GAAG;AACnC,YAAM,gBAAgB,aAAa,MAAM,OAAO,CAAC,SAAS;AACpD,YAAA;AACI,gBAAA,YAAY,IAAI,IAAI,IAAI;AACxB,gBAAA,QAAQ,QAAQ,SAAS;AAC/B,iBACE,KAAK,UAAU,SAAS,WAAW,KAAK,MACvC,CAAC,KAAK,sBAAsB,KAAK,mBAAmB,SAAS,SAAS;AAAA,QAAA,QAEnE;AACC,iBAAA;AAAA,QAAA;AAAA,MACT,CACD;AAEM,aAAA;AAAA,QACL,UAAU;AAAA,UACR,SAAS,aAAa;AAAA;AAAA,UACtB,UAAU;AAAA,YACR,KAAK,aAAa;AAAA;AAAA;AAAA,YAElB,OACE,OAAO,aAAa,SAAS,UAAU,WACnC,aAAa,SAAS,QACtB;AAAA,YACN,SAAS,QAAQ;AAAA,YACjB,SAAS,QAAQ;AAAA;AAAA,UAAA;AAAA,QAGrB;AAAA,QACA,OAAO;AAAA;AAAA,MACT;AAAA,aACO,OAAO;AAEd,aAAO,MAAM,0BAA0B,GAAG,KAAK,KAAK,EAAE;AAChD,YAAA;AAAA,IAAA;AAAA,EACR;AAAA;AAAA;AAAA;AAAA;AAAA,EAOF,MAAe,OACb,SACA,kBACA,QACe;AACX,QAAA;AAEF,YAAM,MAAM,OAAO,SAAS,kBAAkB,MAAM;AAAA,IAAA,UACpD;AAEM,YAAA,KAAK,qBAAqB,aAAa;AAAA,IAAA;AAAA,EAC/C;AAEJ;ACtMO,MAAM,sBAAiD;AAAA,EACpD;AAAA,EAER,UAAU,KAAsB;AAC9B,UAAM,EAAE,SAAA,IAAa,IAAI,IAAI,GAAG;AAChC,WAAO,CAAC,cAAc,gBAAgB,EAAE,SAAS,QAAQ;AAAA,EAAA;AAAA,EAG3D,cAAc;AACN,UAAA,mBAAmB,CAAC,SAAc,cAAmB;AAEzD,UAAI,KAAK,YAAY,OAAO,MAAM,KAAK,YAAY,SAAS,GAAG;AACtD,eAAA;AAAA,MAAA;AAGT,YAAMG,QAAO,UAAU;AAGvB,UAAIA,UAAS,KAAK,YAAY,SAAS,GAAG;AACjC,eAAA;AAAA,MAAA;AAIL,UAAAA,MAAK,WAAW,GAAG,KAAK,YAAY,SAAS,CAAC,OAAO,GAAG;AACnD,eAAA;AAAA,MAAA;AAIT,UACEA,MAAK,WAAW,GAAG,KAAK,YAAY,SAAS,CAAC,QAAQ,KACtDA,MAAK,SAAS,KAAK,GACnB;AACO,eAAA;AAAA,MAAA;AAGF,aAAA;AAAA,IACT;AAEK,SAAA,kBAAkB,IAAI,mBAAmB;AAAA,MAC5C,sBAAsB;AAAA,QACpB,YAAY;AAAA,QACZ,YAAY;AAAA,QACZ,qBAAqB;AAAA,QACrB,aAAa;AAAA;AAAA,MACf;AAAA,MACA;AAAA,IAAA,CACD;AAAA,EAAA;AAAA,EAGK,YAAY,KAAkB;AAEpC,UAAM,QAAQ,IAAI,SAAS,MAAM,iBAAiB;AAC3C,WAAA,QAAQ,CAAC,KAAK;AAAA,EAAA;AAAA,EAGvB,MAAM,OACJ,SACA,kBACA,QACe;AAEf,UAAM,MAAM,IAAI,IAAI,QAAQ,GAAG;AAC/B,QAAI,CAAC,IAAI,SAAS,SAAS,YAAY,GAAG;AAClC,YAAA,IAAI,MAAM,0BAA0B;AAAA,IAAA;AAI5C,UAAM,KAAK,gBAA
gB,OAAO,SAAS,kBAAkB,MAAM;AAAA,EAAA;AAEvE;ACrDO,MAAM,0BAA0B,oBAAoB;AAAA,EACxC,cAAc,IAAI,YAAY;AAAA,EAE/C,UAAU,KAAsB;AACvB,WAAA,IAAI,WAAW,SAAS;AAAA,EAAA;AAAA,EAGjC,MAAgB,YACd,MACA,SACA,mBACA,SACoD;AAEpD,UAAM,WAAW,KAAK,IAAI,QAAQ,cAAc,EAAE;AAClD,UAAM,QAAQ,MAAM,GAAG,KAAK,QAAQ;AAGhC,QAAA,MAAM,eAAe;AACvB,YAAM,WAAW,MAAM,GAAG,QAAQ,QAAQ;AACnC,aAAA;AAAA,QACL,OAAO,SAAS,IAAI,CAAC,SAAS,UAAU,KAAK,KAAK,UAAU,IAAI,CAAC,EAAE;AAAA,MACrE;AAAA,IAAA;AAIK,WAAA,KAAK,sBAAsB,KAAK,SAAS,IAAI,QAAQ,QAAQ,KAAK,QAAQ,EAAE;AAEnF,UAAM,aAAyB,MAAM,KAAK,YAAY,MAAM,KAAK,GAAG;AAGpE,UAAM,iBAA2C;AAAA,MAC/C,SAAS,WAAW;AAAA,MACpB,aAAa,WAAW;AAAA,MACxB,QAAQ,WAAW;AAAA;AAAA,MACnB,UAAU,CAAC;AAAA,MACX,OAAO,CAAC;AAAA;AAAA,MACR,QAAQ,CAAC;AAAA,MACT;AAAA;AAAA,IACF;AAEI,QAAA;AACJ,QAAI,eAAe,YAAY,WAAW,WAAW,GAAG;AAEtD,iBAAW,IAAI,0BAA0B;AAAA,QACvC,IAAI,4BAA4B;AAAA,QAChC,IAAI,gCAAgC;AAAA;AAAA,QAEpC,IAAI,wBAAwB;AAAA,QAC5B,IAAI,yBAAyB;AAAA,MAAA,CAC9B;AAAA,IAAA,WAED,eAAe,gBAAgB,mBAC/B,eAAe,gBAAgB;AAAA,IAC/B,eAAe,YAAY,WAAW,OAAO,GAC7C;AAEA,iBAAW,IAAI,0BAA0B;AAAA,QACvC,IAAI,oCAAoC;AAAA;AAAA,MAAA,CAEzC;AAAA,IAAA,OACI;AACE,aAAA;AAAA,QACL,6BAA6B,eAAe,WAAW,cAAc,QAAQ;AAAA,MAC/E;AACA,aAAO,EAAE,UAAU,QAAW,OAAO,CAAA,EAAG;AAAA,IAAA;AAG1C,UAAM,eAAe,MAAM,SAAS,IAAI,cAAc;AAI3C,eAAA,OAAO,aAAa,QAAQ;AACrC,aAAO,KAAK,wBAAwB,QAAQ,KAAK,IAAI,OAAO,EAAE;AAAA,IAAA;AAMhE,UAAM,qBACJ,OAAO,aAAa,YAAY,WAC5B,aAAa,UACb,OAAO,KAAK,aAAa,OAAO,EAAE,SAAS,OAAO;AAEjD,WAAA;AAAA,MACL,UAAU;AAAA;AAAA,QAER,SAAS;AAAA,QACT,UAAU;AAAA,UACR,KAAK,aAAa;AAAA;AAAA;AAAA,UAElB,OACE,OAAO,aAAa,SAAS,UAAU,WACnC,aAAa,SAAS,QACtB;AAAA,UACN,SAAS,QAAQ;AAAA,UACjB,SAAS,QAAQ;AAAA,QAAA;AAAA,MACnB;AAAA;AAAA,IAGJ;AAAA,EAAA;AAAA,EAGF,MAAM,OACJ,SACA,kBACA,QACe;AAEf,UAAM,MAAM,OAAO,SAAS,kBAAkB,MAAM;AAAA,EAAA;AAExD;AC/HO,MAAM,mBAA8C;AAAA,EACjD;AAAA,EAER,UAAU,KAAsB;AAC9B,UAAM,EAAE,SAAA,IAAa,IAAI,IAAI,GAAG;AAChC,WAAO,CAAC,aAAa,aAAa,eAAe,EAAE,SAAS,QAAQ;AAAA,EAAA;AAAA,EAGtE,cAAc;AACP,SAAA,kBAAkB,IAAI,mBAAmB;AAAA,MAC5C,sBAAsB;AAAA,QACpB,YAAY;AAAA,QACZ,YAAY;AAAA,QACZ,qBAAqB;AAAA,QACrB,aAAa;AAAA;AAAA,MAAA;AAAA,IACf,CACD;AAAA,EAAA;AAAA,EAGH,MAAM,OACJ,SACA,kBACA,QACe;AAEf,UAAM,KAAK,gBAAgB,OAAO,SAAS,kBAAkB,MAAM;AAAA,EAAA;AAEvE;AC3BO,MAAM,oBAA+C;AAAA,EAClD;AAAA,EAER,UAAU,KAAsB;AAC9B,UAAM,EAAE,SAAA,IAAa,IAAI,IAAI,GAAG;AAChC,WAAO,CAAC,YAAY,cAAc,EAAE,SAAS,QAAQ;AAAA,EAAA;AAAA,EAGvD,cAAc;AACP,SAAA,kBAAkB,IAAI,mBAAmB;AAAA,MAC5C,sBAAsB;AAAA,QACpB,YAAY;AAAA,QACZ,YAAY;AAAA,QACZ,qBAAqB;AAAA,QACrB,aAAa;AAAA;AAAA,MAAA;AAAA,IACf,CACD;AAAA,EAAA;AAAA,EAGH,MAAM,OACJ,SACA,kBACA,QACe;AAEf,UAAM,KAAK,gBAAgB,OAAO,SAAS,kBAAkB,MAAM;AAAA,EAAA;AAEvE;ACtBO,MAAM,gBAAgB;AAAA,EACnB;AAAA,EAER,cAAc;AACZ,SAAK,aAAa;AAAA,MAChB,IAAI,mBAAmB;AAAA,MACvB,IAAI,oBAAoB;AAAA,MACxB,IAAI,sBAAsB;AAAA,MAC1B,IAAI,mBAAmB;AAAA,MACvB,IAAI,kBAAkB;AAAA,IACxB;AAAA,EAAA;AAAA,EAGF,YAAY,KAA8B;AACxC,gBAAY,GAAG;AACT,UAAA,WAAW,KAAK,WAAW,KAAK,CAAC,MAAM,EAAE,UAAU,GAAG,CAAC;AAC7D,QAAI,CAAC,UAAU;AACb,YAAM,IAAI,aAAa,8BAA8B,GAAG,EAAE;AAAA,IAAA;AAErD,WAAA;AAAA,EAAA;AAEX;ACrBO,MAAM,eAAe;AAAA,EAClB;AAAA,EAER,YAAY,UAA2B;AACrC,SAAK,WAAW;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOlB,MAAM,OACJ,SACA,kBACA,QACe;AAEf,UAAM,WAAW,KAAK,SAAS,YAAY,QAAQ,GAAG;AACtD,QAAI,CAAC,UAAU;AACb,YAAM,IAAI,aAAa,sCAAsC,QAAQ,GAAG,IAAI,KAAK;AAAA,IAAA;AAInF,UAAM,SAAS,OAAO,SAAS,kBAAkB,MAAM;AAAA,EAAA;AAE3D;ACvBO,MAAM,eAAe;AAAA;AAAA,EAET;AAAA,EACA;AAAA;AAAA,EAGjB,YAAY,OAAkC,gBAAgC;AAC5E,SAAK,QAAQ;AACb,SAAK,iBAAiB;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQxB,MAAM,WAAW,KAAkB,WAAoD;AACrF,UAAM,EAAE,IAAI,OAAO,SAAS,SAAS,SAAS,oBAAoB;AAClE,UAAM,SAAS,gBAAgB;AAE/B,WAAO,MAAM,IAAI,KAAK,6BAA6B,OAAO,IAAI,OAAO,EAAE;AAEnE,QAAA;AAEF,YAAM,KAAK,eAAe;AAAA,QACxB;AAAA,QACA
,OAAO,aAA8B;AAEnC,cAAI,OAAO,SAAS;AACZ,kBAAA,IAAI,kBAAkB,wCAAwC;AAAA,UAAA;AAItE,cAAI,WAAW;AAET,gBAAA,UAAU,gBAAgB,KAAK,QAAQ;AAE7C,cAAI,SAAS,UAAU;AACjB,gBAAA;AAEF,oBAAM,KAAK,MAAM,YAAY,SAAS,SAAS;AAAA,gBAC7C,aAAa,SAAS,SAAS;AAAA,gBAC/B,UAAU,SAAS,SAAS;AAAA,cAAA,CAC7B;AACM,qBAAA;AAAA,gBACL,IAAI,KAAK,sBAAsB,SAAS,SAAS,SAAS,GAAG;AAAA,cAC/D;AAAA,qBACO,UAAU;AACV,qBAAA;AAAA,gBACL,IAAI,KAAK,8BAA8B,SAAS,SAAS,SAAS,GAAG,KAAK,QAAQ;AAAA,cACpF;AAEA,oBAAM,UAAU;AAAA,gBACd;AAAA,gBACA,oBAAoB,QAAQ,WAAW,IAAI,MAAM,OAAO,QAAQ,CAAC;AAAA,gBACjE,SAAS;AAAA,cACX;AAAA,YAAA;AAAA,UAGF;AAAA,QAEJ;AAAA,QACA;AAAA;AAAA,MACF;AAIA,UAAI,OAAO,SAAS;AACZ,cAAA,IAAI,kBAAkB,+CAA+C;AAAA,MAAA;AAItE,aAAA,MAAM,IAAI,KAAK,qCAAqC;AAAA,aACpD,OAAO;AAEd,aAAO,KAAK,IAAI,KAAK,+BAA+B,KAAK,EAAE;AACrD,YAAA;AAAA,IAAA;AAAA,EACR;AAAA;AAAA;AAAA;AAAA;AAAA;AAUJ;AC5FY,IAAA,sCAAAC,uBAAL;AACLA,qBAAA,QAAS,IAAA;AACTA,qBAAA,SAAU,IAAA;AACVA,qBAAA,WAAY,IAAA;AACZA,qBAAA,QAAS,IAAA;AACTA,qBAAA,YAAa,IAAA;AACbA,qBAAA,WAAY,IAAA;AANFA,SAAAA;AAAA,GAAA,qBAAA,CAAA,CAAA;ACIZ,MAAM,sBAAsB;AAKrB,MAAM,gBAAgB;AAAA,EACnB,6BAAuC,IAAI;AAAA,EAC3C,WAAqB,CAAC;AAAA,EACtB,oCAAiC,IAAI;AAAA,EACrC,YAAY;AAAA,EACZ;AAAA,EACA,YAAsC,CAAC;AAAA,EACvC;AAAA,EACA;AAAA,EAER,YACE,OACA,cAAsB,qBACtB;AACA,SAAK,QAAQ;AACb,SAAK,cAAc;AAEb,UAAA,WAAW,IAAI,gBAAgB;AAChC,SAAA,iBAAiB,IAAI,eAAe,QAAQ;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAMnD,aAAa,WAA2C;AACtD,SAAK,YAAY;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAMnB,MAAM,QAAuB;AAC3B,QAAI,KAAK,WAAW;AAClB,aAAO,KAAK,qCAAqC;AACjD;AAAA,IAAA;AAEF,SAAK,YAAY;AACjB,WAAO,MAAM,4CAA4C,KAAK,WAAW,GAAG;AAC5E,SAAK,cAAc;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQrB,MAAM,OAAsB;AACtB,QAAA,CAAC,KAAK,WAAW;AACnB,aAAO,KAAK,iCAAiC;AAC7C;AAAA,IAAA;AAEF,SAAK,YAAY;AACjB,WAAO,MAAM,wDAAwD;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAOvE,MAAM,WACJ,SACA,SACA,SACiB;AACjB,UAAM,QAAQC,GAAO;AACf,UAAA,kBAAkB,IAAI,gBAAgB;AACxC,QAAA;AACA,QAAA;AAEJ,UAAM,oBAAoB,IAAI,QAAc,CAAC,SAAS,WAAW;AAC3C,0BAAA;AACD,yBAAA;AAAA,IAAA,CACpB;AAED,UAAM,MAAmB;AAAA,MACvB,IAAI;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA,QAAQ,kBAAkB;AAAA,MAC1B,UAAU;AAAA,MACV,OAAO;AAAA,MACP,+BAAe,KAAK;AAAA,MACpB,WAAW;AAAA,MACX,YAAY;AAAA,MACZ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAEK,SAAA,OAAO,IAAI,OAAO,GAAG;AACrB,SAAA,SAAS,KAAK,KAAK;AACxB,WAAO,KAAK,oBAAoB,KAAK,QAAQ,OAAO,IAAI,OAAO,EAAE;AAE3D,UAAA,KAAK,UAAU,oBAAoB,GAAG;AAG5C,QAAI,KAAK,WAAW;AAClB,WAAK,cAAc;AAAA,IAAA;AAGd,WAAA;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAMT,MAAM,OAAO,OAAiD;AACrD,WAAA,KAAK,OAAO,IAAI,KAAK;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAM9B,MAAM,QAAQ,QAAoD;AAChE,UAAM,UAAU,MAAM,KAAK,KAAK,OAAO,QAAQ;AAC/C,QAAI,QAAQ;AACV,aAAO,QAAQ,OAAO,CAAC,QAAQ,IAAI,WAAW,MAAM;AAAA,IAAA;AAE/C,WAAA;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAMT,MAAM,qBAAqB,OAA8B;AACvD,UAAM,MAAM,KAAK,OAAO,IAAI,KAAK;AACjC,QAAI,CAAC,KAAK;AACR,YAAM,IAAI,mBAAmB,kBAAkB,KAAK,EAAE;AAAA,IAAA;AAExD,UAAM,IAAI;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAMZ,MAAM,UAAU,OAA8B;AAC5C,UAAM,MAAM,KAAK,OAAO,IAAI,KAAK;AACjC,QAAI,CAAC,KAAK;AACD,aAAA,KAAK,yCAAyC,KAAK,EAAE;AAC5D;AAAA,IAAA;AAGF,YAAQ,IAAI,QAAQ;AAAA,MAClB,KAAK,kBAAkB;AAErB,aAAK,WAAW,KAAK,SAAS,OAAO,CAAC,OAAO,OAAO,KAAK;AACzD,YAAI,SAAS,kBAAkB;AAC3B,YAAA,iCAAiB,KAAK;AACnB,eAAA,KAAK,kCAAkC,KAAK,EAAE;AAC/C,cAAA,KAAK,UAAU,oBAAoB,GAAG;AAC5C,YAAI,iBAAiB,IAAI,mBAAmB,+BAA+B,CAAC;AAC5E;AAAA,MAEF,KAAK,kBAAkB;AAErB,YAAI,SAAS,kBAAkB;AAC/B,YAAI,gBAAgB,MAAM;AACnB,eAAA,KAAK,+CAA+C,KAAK,EAAE;AAC5D,cAAA,KAAK,UAAU,oBAAoB,GAAG;AAE5C;AAAA,MAEF,KAAK,kBAAkB;AAAA,MACvB,KAAK,kBAAkB;AAAA,MACvB,KAAK,kBAAkB;AAAA,MACvB,KAAK,kBAAkB;AACd,eAAA;AAAA,UACL,OAAO,KAAK,8CAA8C,IAAI,MAAM;AAAA,QACtE;AACA;AAAA,MAEF;AACE,eAAO,MAAM,0CAA0C,IAAI,MAAM,EAAE;AACnE;AAAA,IAAA;AAAA,EACJ;AAAA;AAAA;AAAA;AAAA;AAAA,EAQ
M,gBAAsB;AACxB,QAAA,CAAC,KAAK,UAAW;AAEd,WAAA,KAAK,cAAc,OAAO,KAAK,eAAe,KAAK,SAAS,SAAS,GAAG;AACvE,YAAA,QAAQ,KAAK,SAAS,MAAM;AAClC,UAAI,CAAC,MAAO;AAEZ,YAAM,MAAM,KAAK,OAAO,IAAI,KAAK;AACjC,UAAI,CAAC,OAAO,IAAI,WAAW,kBAAkB,QAAQ;AAC5C,eAAA,KAAK,gBAAgB,KAAK,sCAAsC;AACvE;AAAA,MAAA;AAGG,WAAA,cAAc,IAAI,KAAK;AAC5B,UAAI,SAAS,kBAAkB;AAC3B,UAAA,gCAAgB,KAAK;AACpB,WAAA,UAAU,oBAAoB,GAAG;AAGtC,WAAK,QAAQ,GAAG,EAAE,MAAM,CAAC,UAAU;AAEjC,eAAO,MAAM,8BAA8B,KAAK,eAAe,KAAK,EAAE;AACtE,YACE,IAAI,WAAW,kBAAkB,UACjC,IAAI,WAAW,kBAAkB,WACjC;AACA,cAAI,SAAS,kBAAkB;AAC3B,cAAA,QAAQ,iBAAiB,QAAQ,QAAQ,IAAI,MAAM,OAAO,KAAK,CAAC;AAChE,cAAA,iCAAiB,KAAK;AACrB,eAAA,UAAU,oBAAoB,GAAG;AAClC,cAAA,iBAAiB,IAAI,KAAK;AAAA,QAAA;AAE3B,aAAA,cAAc,OAAO,KAAK;AAC/B,aAAK,cAAc;AAAA,MAAA,CACpB;AAAA,IAAA;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA,EAOF,MAAc,QAAQ,KAAiC;AACrD,UAAM,EAAE,IAAI,OAAO,gBAAoB,IAAA;AACvC,UAAM,SAAS,gBAAgB;AAI/B,UAAM,SAAS,IAAI,eAAe,KAAK,OAAO,KAAK,cAAc;AAE7D,QAAA;AAEF,YAAM,OAAO,WAAW,KAAK,KAAK,SAAS;AAG3C,UAAI,OAAO,SAAS;AAEZ,cAAA,IAAI,kBAAkB,sCAAsC;AAAA,MAAA;AAIpE,UAAI,SAAS,kBAAkB;AAC3B,UAAA,iCAAiB,KAAK;AACpB,YAAA,KAAK,UAAU,oBAAoB,GAAG;AAC5C,UAAI,kBAAkB;AAAA,aACf,OAAO;AAEV,UAAA,iBAAiB,qBAAqB,OAAO,SAAS;AAExD,YAAI,SAAS,kBAAkB;AAC3B,YAAA,iCAAiB,KAAK;AAE1B,YAAI,QACF,iBAAiB,oBACb,QACA,IAAI,kBAAkB,yBAAyB;AACrD,eAAO,KAAK,+BAA+B,KAAK,KAAK,IAAI,MAAM,OAAO,EAAE;AAClE,cAAA,KAAK,UAAU,oBAAoB,GAAG;AACxC,YAAA,iBAAiB,IAAI,KAAK;AAAA,MAAA,OACzB;AAEL,YAAI,SAAS,kBAAkB;AAC3B,YAAA,QAAQ,iBAAiB,QAAQ,QAAQ,IAAI,MAAM,OAAO,KAAK,CAAC;AAChE,YAAA,iCAAiB,KAAK;AAC1B,eAAO,MAAM,iBAAiB,KAAK,KAAK,IAAI,KAAK,EAAE;AAC7C,cAAA,KAAK,UAAU,oBAAoB,GAAG;AACxC,YAAA,iBAAiB,IAAI,KAAK;AAAA,MAAA;AAAA,IAChC,UACA;AAEK,WAAA,cAAc,OAAO,KAAK;AAC/B,WAAK,cAAc;AAAA,IAAA;AAAA,EACrB;AAEJ;ACjSa,MAAA,WAAW,CAAC,QAAwB;AACxC,SAAA,IAAI,QAAQ,8BAA8B,EAAE;AACrD;ACHO,MAAM,sBAAsB,MAAM;AAAC;AAMnC,MAAM,8BAA8B,cAAc;AAAA,EACvD,YAAY,MAAc,SAAiB;AACzC;AAAA,MACE,4EAA4E,IAAI,kCAAkC,OAAO;AAAA,IAC3H;AAAA,EAAA;AAEJ;AAKO,MAAM,6BAA6B,cAAc;AAAC;ACZlD,MAAM,oBAA+C;AAAA,EAC1D,YAAoB,SAAiC;AAAjC,SAAA,UAAA;AAAA,EAAA;AAAA,EAEpB,MAAM,MAAM,SAAoC;AAE9C,UAAM,WAAW,QAAQ,MAAM,aAAa,IAAI,CAAC;AAC3C,UAAA,kBAAkB,QAAQ,QAAQ,eAAe,EAAE,EAAE,QAAQ,WAAW,EAAE;AAE1E,UAAA,QAAQ,gBAAgB,MAAM,IAAI;AACxC,UAAM,SAAmB,CAAC;AAC1B,QAAI,oBAA8B,CAAC;AAEnC,eAAW,QAAQ,OAAO;AAExB,YAAM,iBAAiB,KAAK,KAAK,MAAM,QAAQ,EAAE;AAC7C,UAAA,iBAAiB,KAAK,QAAQ,WAAW;AAC3C,cAAM,IAAI,sBAAsB,gBAAgB,KAAK,QAAQ,SAAS;AAAA,MAAA;AAGxE,wBAAkB,KAAK,IAAI;AAC3B,YAAM,kBAAkB,KAAK,KAAK,kBAAkB,KAAK,IAAI,GAAG,QAAQ;AACxE,YAAM,eAAe,gBAAgB;AAErC,UAAI,eAAe,KAAK,QAAQ,aAAa,kBAAkB,SAAS,GAAG;AAEnE,cAAA,WAAW,kBAAkB,IAAI;AAEhC,eAAA,KAAK,KAAK,KAAK,kBAAkB,KAAK,IAAI,GAAG,QAAQ,CAAC;AAC7D,4BAAoB,CAAC,QAAkB;AAAA,MAAA;AAAA,IACzC;AAGE,QAAA,kBAAkB,SAAS,GAAG;AACzB,aAAA,KAAK,KAAK,KAAK,kBAAkB,KAAK,IAAI,GAAG,QAAQ,CAAC;AAAA,IAAA;AAGxD,WAAA;AAAA,EAAA;AAAA,EAGC,KAAK,SAAiB,UAAkC;AACzD,WAAA,SAAS,YAAY,EAAE;AAAA,EAAK,QAAQ,QAAQ,QAAQ,EAAE,CAAC;AAAA;AAAA,EAAA;AAElE;AClCO,MAAM,qBAAgD;AAAA,EAC3D,YAAoB,SAAiC;AAAjC,SAAA,UAAA;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAKpB,MAAM,MAAM,SAAoC;AACxC,UAAA,cAAc,KAAK,WAAW,OAAO;AAC3C,QAAI,CAAC,aAAa;AAChB,aAAO,CAAC,OAAO;AAAA,IAAA;AAGX,UAAA,EAAE,SAAS,KAAA,IAAS;AAE1B,UAAM,SAAmB,CAAC;AAC1B,QAAI,cAAwB,CAAC;AAE7B,eAAW,OAAO,MAAM;AAEtB,YAAM,gBAAgB,KAAK,KAAK,KAAK,OAAO,EAAE;AAC1C,UAAA,gBAAgB,KAAK,QAAQ,WAAW;AAC1C,cAAM,IAAI,sBAAsB,eAAe,KAAK,QAAQ,SAAS;AAAA,MAAA;AAGjE,YAAA,kBAAkB,KAAK,KAAK,CAAC,GAAG,aAAa,GAAG,EAAE,KAAK,IAAI,GAAG,OAAO;AAC3E,YAAM,eAAe,gBAAgB;AACrC,UAAI,eAAe,KAAK,QAAQ,aAAa,YAAY,SAAS,GAAG;AAE5D,eAAA,KAAK,KAAK,KAAK,YAAY,KAAK,IAAI,GAAG,OAAO,CAAC;AACtD,sBAAc,CAAC,GAAG;AAAA,MAAA,OACb;AACL,oBAAY,KAAK,GAAG;AAAA,MAAA;AAAA,IACtB;A
AGE,QAAA,YAAY,SAAS,GAAG;AACnB,aAAA,KAAK,KAAK,KAAK,YAAY,KAAK,IAAI,GAAG,OAAO,CAAC;AAAA,IAAA;AAIjD,WAAA;AAAA,EAAA;AAAA,EAGC,KAAK,SAAiB,SAA2B;AACzD,UAAM,YAAY,KAAK,QAAQ,KAAK,KAAK,CAAC;AACpC,UAAA,eAAe,IAAI,QAAQ,IAAI,MAAM,KAAK,EAAE,KAAK,GAAG,CAAC;AAC3D,WAAO,CAAC,WAAW,cAAc,OAAO,EAAE,KAAK,IAAI;AAAA,EAAA;AAAA,EAG7C,WAAW,SAAqC;AACtD,UAAM,QAAQ,QAAQ,KAAK,EAAE,MAAM,IAAI;AACnC,QAAA,MAAM,SAAS,EAAU,QAAA;AAE7B,UAAM,UAAU,KAAK,SAAS,MAAM,CAAC,CAAC;AAClC,QAAA,CAAC,QAAgB,QAAA;AAEf,UAAA,YAAY,MAAM,CAAC;AACzB,QAAI,CAAC,KAAK,iBAAiB,SAAS,EAAU,QAAA;AAExC,UAAA,OAAO,MAAM,MAAM,CAAC,EAAE,OAAO,CAAC,QAAQ,IAAI,KAAK,MAAM,EAAE;AAEtD,WAAA,EAAE,SAAS,WAAW,KAAK;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAM5B,SAAS,KAA8B;AAC7C,QAAI,CAAC,IAAI,SAAS,GAAG,EAAU,QAAA;AAC/B,WAAO,IACJ,MAAM,GAAG,EACT,IAAI,CAAC,SAAS,KAAK,KAAA,CAAM,EACzB,OAAO,CAAC,SAAS,SAAS,EAAE;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAMzB,iBAAiB,WAA4B;AACnD,WAAO,UAAU,SAAS,GAAG,KAAK,kBAAkB,KAAK,SAAS;AAAA,EAAA;AAEtE;ACtFO,MAAM,oBAA+C;AAAA,EAC1D,YAAoB,SAAiC;AAAjC,SAAA,UAAA;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAMpB,MAAM,MAAM,SAAoC;AACxC,UAAA,iBAAiB,SAAS,OAAO;AAEvC,QAAI,eAAe,UAAU,KAAK,QAAQ,WAAW;AACnD,aAAO,CAAC,cAAc;AAAA,IAAA;AAIlB,UAAA,QAAQ,eAAe,MAAM,KAAK;AACxC,UAAM,cAAc,MAAM;AAAA,MAAO,CAAC,KAAK,SACrC,KAAK,SAAS,IAAI,SAAS,OAAO;AAAA,IACpC;AACA,QAAI,YAAY,SAAS,KAAK,QAAQ,WAAW;AAC/C,YAAM,IAAI,sBAAsB,YAAY,QAAQ,KAAK,QAAQ,SAAS;AAAA,IAAA;AAItE,UAAA,kBAAkB,KAAK,kBAAkB,cAAc;AACzD,QAAA,KAAK,eAAe,eAAe,GAAG;AAEjC,aAAA;AAAA,IAAA;AAIH,UAAA,aAAa,KAAK,aAAa,cAAc;AAC/C,QAAA,KAAK,eAAe,UAAU,GAAG;AAC5B,aAAA,KAAK,YAAY,YAAY,IAAI;AAAA,IAAA;AAI1C,UAAM,aAAa,MAAM,KAAK,aAAa,cAAc;AAClD,WAAA,KAAK,YAAY,YAAY,GAAG;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAMjC,eAAe,QAA2B;AACzC,WAAA,OAAO,MAAM,CAAC,UAAU,MAAM,UAAU,KAAK,QAAQ,SAAS;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAM/D,kBAAkB,MAAwB;AAChD,UAAM,aAAa,KAChB,MAAM,SAAS,EACf,IAAI,CAAC,MAAM,SAAS,CAAC,CAAC,EACtB,OAAO,OAAO;AAEjB,WAAO,WAAW,OAAO,CAAC,UAAU,MAAM,SAAS,CAAC;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAM9C,aAAa,MAAwB;AAC3C,UAAM,QAAQ,KACX,MAAM,IAAI,EACV,IAAI,CAAC,SAAS,SAAS,IAAI,CAAC,EAC5B,OAAO,OAAO;AAEjB,WAAO,MAAM,OAAO,CAAC,UAAU,MAAM,SAAS,CAAC;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAMjD,MAAc,aAAa,MAAiC;AACpD,UAAA,WAAW,IAAI,+BAA+B;AAAA,MAClD,WAAW,KAAK,QAAQ;AAAA,MACxB,cAAc;AAAA,IAAA,CACf;AAED,UAAM,SAAS,MAAM,SAAS,UAAU,IAAI;AACrC,WAAA;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOC,YAAY,QAAkB,WAA6B;AACnE,UAAM,eAAyB,CAAC;AAChC,QAAI,eAA8B;AAElC,eAAW,SAAS,QAAQ;AAC1B,UAAI,iBAAiB,MAAM;AACV,uBAAA;AACf;AAAA,MAAA;AAGI,YAAA,mBAAmB,KAAK,aAAa,YAAY;AACjD,YAAA,gBAAgB,KAAK,aAAa,KAAK;AAE7C,UAAI,mBAAmB,gBAAgB,UAAU,UAAU,KAAK,QAAQ,WAAW;AAEjF,uBAAe,GAAG,YAAY,GAAG,SAAS,GAAG,KAAK;AAAA,MAAA,OAC7C;AAEL,qBAAa,KAAK,YAAY;AACf,uBAAA;AAAA,MAAA;AAAA,IACjB;AAGF,QAAI,cAAc;AAChB,mBAAa,KAAK,YAAY;AAAA,IAAA;AAGzB,WAAA;AAAA,EAAA;AAAA,EAGC,aAAa,OAAuB;AAC5C,WAAO,MAAM;AAAA,EAAA;AAAA,EAGL,KAAK,SAAyB;AAC/B,WAAA;AAAA,EAAA;AAEX;ACpGO,MAAM,yBAAqD;AAAA,EAMhE,YACU,oBACA,cACR;AAFQ,SAAA,qBAAA;AACA,SAAA,eAAA;AAEH,SAAA,kBAAkB,IAAI,gBAAgB;AAAA,MACzC,cAAc;AAAA,MACd,IAAI;AAAA,MACJ,kBAAkB;AAAA,MAClB,gBAAgB;AAAA,MAChB,aAAa;AAAA,MACb,iBAAiB;AAAA,MACjB,WAAW;AAAA,IAAA,CACZ;AAGI,SAAA,gBAAgB,QAAQ,SAAS;AAAA,MACpC,QAAQ,CAAC,OAAO;AAAA,MAChB,aAAa,CAAC,SAAS,SAAS;AAC9B,cAAM,QAAQ;AACd,cAAM,UAAU,MAAM,KAAK,MAAM,iBAAiB,IAAI,CAAC,EAAE;AAAA,UACvD,CAAC,OAAO,GAAG,aAAa,UAAU;AAAA,QACpC;AACA,cAAM,OAAO,MAAM,KAAK,MAAM,iBAAiB,IAAI,CAAC,EAAE;AAAA,UACpD,CAAC,OAAO,CAAC,GAAG,cAAc,IAAI;AAAA,QAChC;AAEA,YAAI,QAAQ,WAAW,KAAK,KAAK,WAAW,EAAU,QAAA;AAEtD,YAAI,WAAW;AACX,YAAA,QAAQ,SAAS,GAAG;AACtB,sBAAY,KAAK,QAAQ,KAAK,KAAK,CAAC;AAAA;AACxB,sBAAA,IAAI,QAAQ,IAAI,MAAM,KAAK,EAAE,KAAK,GAAG,CAAC;AAAA;AAAA,QAAA;AAGpD,mBAAW,OAAO,MAAM;AACtB,gBA
AM,QAAQ,MAAM,KAAK,IAAI,iBAAiB,IAAI,CAAC,EAAE;AAAA,YACnD,CAAC,OAAO,GAAG,aAAa,UAAU;AAAA,UACpC;AACA,sBAAY,KAAK,MAAM,KAAK,KAAK,CAAC;AAAA;AAAA,QAAA;AAG7B,eAAA;AAAA,MAAA;AAAA,IACT,CACD;AAGI,SAAA,eAAe,IAAI,oBAAoB;AAAA,MAC1C,WAAW,KAAK;AAAA,IAAA,CACjB;AAEI,SAAA,eAAe,IAAI,oBAAoB;AAAA,MAC1C,WAAW,KAAK;AAAA,IAAA,CACjB;AACI,SAAA,gBAAgB,IAAI,qBAAqB;AAAA,MAC5C,WAAW,KAAK;AAAA,IAAA,CACjB;AAAA,EAAA;AAAA,EA5DK;AAAA,EACD;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA,EA+DP,MAAM,UAAU,UAA2C;AACzD,UAAM,OAAO,MAAM,KAAK,eAAe,QAAQ;AAC/C,UAAM,MAAM,MAAM,KAAK,UAAU,IAAI;AACrC,UAAM,WAAW,MAAM,KAAK,kBAAkB,GAAG;AAC1C,WAAA,KAAK,oBAAoB,QAAQ;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO1C,MAAc,kBAAkB,KAA2C;AACnE,UAAA,OAAO,IAAI,cAAc,MAAM;AACrC,QAAI,CAAC,MAAM;AACH,YAAA,IAAI,MAAM,+CAA+C;AAAA,IAAA;AAG7D,QAAA,iBAAiB,KAAK,kBAAkB;AAC5C,UAAM,WAA8B,CAAC;AAC/B,UAAA,QAA2B,CAAC,cAAc;AAGhD,eAAW,WAAW,MAAM,KAAK,KAAK,QAAQ,GAAG;AAC/C,YAAM,eAAe,QAAQ,QAAQ,MAAM,UAAU;AAErD,UAAI,cAAc;AAEhB,cAAM,QAAQ,OAAO,SAAS,aAAa,CAAC,GAAG,EAAE;AACjD,cAAM,QAAQ,SAAS,QAAQ,eAAe,EAAE;AAGzC,eAAA,MAAM,SAAS,KAAK,MAAM,MAAM,SAAS,CAAC,EAAE,SAAS,OAAO;AACjE,gBAAM,IAAI;AAAA,QAAA;AAIK,yBAAA;AAAA,UACf;AAAA,UACA,MAAM;AAAA,YACJ,GAAG,MAAM,MAAM,CAAC,EAAE,OAAO,CAAC,KAAe,MAAM;AAC7C,oBAAM,WAAW,EAAE,KAAK,EAAE,KAAK,SAAS,CAAC;AACrC,kBAAA,SAAc,KAAA,KAAK,QAAQ;AACxB,qBAAA;AAAA,YACT,GAAG,EAAE;AAAA,YACL;AAAA,UACF;AAAA,UACA,SAAS;AAAA,YACP;AAAA,cACE,MAAM;AAAA,cACN,MAAM,GAAG,IAAI,OAAO,KAAK,CAAC,IAAI,KAAK;AAAA,YAAA;AAAA,UACrC;AAAA,QAEJ;AAEA,iBAAS,KAAK,cAAc;AAC5B,cAAM,KAAK,cAAc;AAAA,MAAA,WAChB,QAAQ,YAAY,OAAO;AAE9B,cAAA,OAAO,QAAQ,cAAc,MAAM;AACzC,cAAM,WAAW,MAAM,UAAU,QAAQ,aAAa,EAAE,KAAK;AAC7D,cAAM,UAAU,MAAM,eAAe,QAAQ,eAAe;AAC5D,cAAM,WAAW,GAAG,KAAK,GAAG,QAAQ;AAAA,EAAK,OAAO;AAAA,EAAK,KAAK;AAEzC,yBAAA;AAAA,UACf,OAAO,eAAe;AAAA,UACtB,MAAM,eAAe;AAAA,UACrB,SAAS;AAAA,YACP;AAAA,cACE,MAAM;AAAA,cACN,MAAM;AAAA,YAAA;AAAA,UACR;AAAA,QAEJ;AACA,iBAAS,KAAK,cAAc;AAAA,MAAA,WACnB,QAAQ,YAAY,SAAS;AAEtC,cAAM,WAAW,SAAS,KAAK,gBAAgB,SAAS,QAAQ,SAAS,CAAC;AAEzD,yBAAA;AAAA,UACf,OAAO,eAAe;AAAA,UACtB,MAAM,eAAe;AAAA,UACrB,SAAS;AAAA,YACP;AAAA,cACE,MAAM;AAAA,cACN,MAAM;AAAA,YAAA;AAAA,UACR;AAAA,QAEJ;AACA,iBAAS,KAAK,cAAc;AAAA,MAAA,OACvB;AACL,cAAM,WAAW,SAAS,KAAK,gBAAgB,SAAS,QAAQ,SAAS,CAAC;AAC1E,YAAI,UAAU;AAEK,2BAAA;AAAA,YACf,OAAO,eAAe;AAAA,YACtB,MAAM,eAAe;AAAA,YACrB,SAAS;AAAA,cACP;AAAA,gBACE,MAAM;AAAA,gBACN,MAAM;AAAA,cAAA;AAAA,YACR;AAAA,UAEJ;AACA,mBAAS,KAAK,cAAc;AAAA,QAAA;AAAA,MAC9B;AAAA,IACF;AAGK,WAAA;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAMT,MAAc,oBACZ,UACyB;AACzB,UAAM,SAAyB,CAAC;AAEhC,eAAW,WAAW,UAAU;AACnB,iBAAA,WAAW,QAAQ,SAAS;AACrC,YAAI,eAAyB,CAAC;AAE1B,YAAA;AACF,kBAAQ,QAAQ,MAAM;AAAA,YACpB,KAAK;AAAA,YACL,KAAK,QAAQ;AACX,6BAAe,MAAM,KAAK,aAAa,MAAM,QAAQ,IAAI;AACzD;AAAA,YAAA;AAAA,YAEF,KAAK,QAAQ;AACX,6BAAe,MAAM,KAAK,aAAa,MAAM,QAAQ,IAAI;AACzD;AAAA,YAAA;AAAA,YAEF,KAAK,SAAS;AACZ,6BAAe,MAAM,KAAK,cAAc,MAAM,QAAQ,IAAI;AAC1D;AAAA,YAAA;AAAA,UACF;AAAA,iBAEK,KAAK;AAEZ,cAAI,eAAe,uBAAuB;AACjC,mBAAA;AAAA,cACL,kBAAkB,QAAQ,IAAI,0DAA0D,IAAI,OAAO;AAAA,YACrG;AAGM,kBAAA,WAAW,IAAI,+BAA+B;AAAA,cAClD,WAAW,KAAK;AAAA,cAChB,cAAc,KAAK,IAAI,IAAI,KAAK,MAAM,KAAK,eAAe,GAAG,CAAC;AAAA;AAAA,cAE9D,YAAY;AAAA,gBACV;AAAA,gBACA;AAAA,gBACA;AAAA,gBACA;AAAA,gBACA;AAAA,gBACA;AAAA,gBACA;AAAA,gBACA;AAAA,gBACA;AAAA,gBACA;AAAA,gBACA;AAAA,gBACA;AAAA,gBACA;AAAA,gBACA;AAAA,gBACA;AAAA,gBACA;AAAA,cAAA;AAAA,YACF,CACD;AAED,kBAAMC,UAAS,MAAM,SAAS,UAAU,QAAQ,IAAI;AAChDA,gBAAAA,QAAO,WAAW,GAAG;AAEvB,6BAAe,CAAC,QAAQ,KAAK,UAAU,GAAG,KAAK,YAAY,CAAC;AAAA,YAAA,OACvD;AACUA,6BAAAA;AAAAA,YAAA;AAAA,UACjB,OACK;AAEL,kBAAM,aAAa,eAAe,QAAQ,IAAI,UAAU,OAAO,GAAG;AAClE,kBAAM,IAAI;AAAA,cACR,mBAAmB,QAAQ,IAAI,aAAa,UAAU;AAAA,YACxD;AAAA,UAAA;AAAA,Q
ACF;AAIK,eAAA;AAAA,UACL,GAAG,aAAa;AAAA,YACd,CAAC,UAAwB;AAAA,cACvB,OAAO,CAAC,QAAQ,IAAI;AAAA,cACpB,SAAS;AAAA,cACT,SAAS;AAAA,gBACP,OAAO,QAAQ;AAAA,gBACf,MAAM,QAAQ;AAAA,cAAA;AAAA,YAElB;AAAA,UAAA;AAAA,QAEJ;AAAA,MAAA;AAAA,IACF;AAGK,WAAA;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAMD,oBAAqC;AACpC,WAAA;AAAA,MACL,OAAO;AAAA,MACP,MAAM,CAAC;AAAA,MACP,SAAS,CAAA;AAAA,IACX;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAMF,MAAc,eAAe,UAAmC;AAC9D,UAAM,OAAO,MAAM,UAChB,IAAI,WAAW,EACf,IAAI,SAAS,EACb,IAAI,UAAU,EACd,QAAQ,QAAQ;AAEZ,WAAA;AAAA;AAAA;AAAA,YAGC,OAAO,IAAI,CAAC;AAAA;AAAA;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAQtB,MAAc,UAAU,MAAiC;AAEvD,UAAM,EAAE,OAAA,IAAW,YAAY,IAAI;AACnC,WAAO,OAAO;AAAA,EAAA;AAElB;AC/UO,MAAM,eAA2C;AAAA,EAC9C;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOR,YACE,cACA,cACA,oBACA;AACA,SAAK,eAAe;AACpB,SAAK,eAAe;AACpB,SAAK,qBAAqB;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAS5B,MAAM,UAAU,UAA2C;AACzD,UAAM,gBAAgB,MAAM,KAAK,aAAa,UAAU,QAAQ;AAChE,UAAM,qBAAqC,CAAC;AAC5C,QAAI,eAAoC;AAExC,eAAW,aAAa,eAAe;AACrC,UAAI,cAAc;AAChB,YAAI,KAAK,mBAAmB,cAAc,SAAS,GAAG;AACpD,6BAAmB,KAAK,YAAY;AACrB,yBAAA,KAAK,WAAW,SAAS;AACxC;AAAA,QAAA;AAGA,YAAA,aAAa,QAAQ,UAAU,KAAK,gBACpC,KAAK,sBAAsB,SAAS,GACpC;AACA,6BAAmB,KAAK,YAAY;AACrB,yBAAA,KAAK,WAAW,SAAS;AACxC;AAAA,QAAA;AAEF,qBAAa,WAAW;AAAA,EAAK,UAAU,OAAO;AAC9C,qBAAa,UAAU,KAAK,iBAAiB,cAAc,SAAS;AACpE,qBAAa,QAAQ,KAAK,WAAW,aAAa,OAAO,UAAU,KAAK;AAAA,MAAA,OACnE;AACU,uBAAA,KAAK,WAAW,SAAS;AAAA,MAAA;AAAA,IAC1C;AAGF,QAAI,cAAc;AAChB,yBAAmB,KAAK,YAAY;AAAA,IAAA;AAG/B,WAAA;AAAA,EAAA;AAAA,EAGD,WAAW,OAAmC;AAC7C,WAAA;AAAA,MACL,OAAO,CAAC,GAAG,MAAM,KAAK;AAAA,MACtB,SAAS,MAAM;AAAA,MACf,SAAS;AAAA,QACP,OAAO,MAAM,QAAQ;AAAA,QACrB,MAAM,CAAC,GAAG,MAAM,QAAQ,IAAI;AAAA,MAAA;AAAA,IAEhC;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOM,sBAAsB,OAA8B;AAC1D,WAAO,MAAM,QAAQ,UAAU,KAAK,MAAM,QAAQ,UAAU;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOtD,mBACN,cACA,WACS;AACT,QAAI,CAAC,cAAc;AACV,aAAA;AAAA,IAAA;AAET,WACE,aAAa,QAAQ,SAAS,UAAU,QAAQ,SAAS,KAAK;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAO1D,eAAe,YAAsB,WAA8B;AACzE,QAAI,WAAW,UAAU,UAAU,OAAe,QAAA;AAC3C,WAAA,WAAW,MAAM,CAAC,MAAM,MAAM,SAAS,UAAU,CAAC,CAAC;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWpD,iBACN,cACA,WACyB;AAEnB,UAAA,QAAQ,KAAK,IAAI,aAAa,QAAQ,OAAO,UAAU,QAAQ,KAAK;AAIxE,QAAA,aAAa,QAAQ,UAAU,UAAU,QAAQ,SACjD,aAAa,QAAQ,KAAK,WAAW,UAAU,QAAQ,KAAK,UAC5D,aAAa,QAAQ,KAAK,MAAM,CAAC,GAAG,MAAM,MAAM,UAAU,QAAQ,KAAK,CAAC,CAAC,GACzE;AACA,aAAO,aAAa;AAAA,IAAA;AAIlB,QAAA,KAAK,eAAe,aAAa,QAAQ,MAAM,UAAU,QAAQ,IAAI,GAAG;AACnE,aAAA;AAAA,QACL,MAAM,UAAU,QAAQ;AAAA,QACxB;AAAA,MACF;AAAA,IAAA;AAGE,QAAA,KAAK,eAAe,UAAU,QAAQ,MAAM,aAAa,QAAQ,IAAI,GAAG;AACnE,aAAA;AAAA,QACL,MAAM,aAAa,QAAQ;AAAA,QAC3B;AAAA,MACF;AAAA,IAAA;AAIF,UAAM,aAAa,KAAK;AAAA,MACtB,aAAa,QAAQ;AAAA,MACrB,UAAU,QAAQ;AAAA,IACpB;AAEO,WAAA;AAAA,MACL,MAAM;AAAA,MACN;AAAA,IACF;AAAA,EAAA;AAAA,EAGM,WACN,cACA,WACsB;AACf,WAAA,CAAC,GAAG,oBAAI,IAAI,CAAC,GAAG,cAAc,GAAG,SAAS,CAAC,CAAC;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAM7C,iBAAiB,OAAiB,OAA2B;AACnE,UAAM,SAAmB,CAAC;AACjB,aAAA,IAAI,GAAG,IAAI,KAAK,IAAI,MAAM,QAAQ,MAAM,MAAM,GAAG,KAAK;AAC7D,UAAI,MAAM,CAAC,MAAM,MAAM,CAAC,GAAG;AAClB,eAAA,KAAK,MAAM,CAAC,CAAC;AAAA,MAAA,OACf;AACL;AAAA,MAAA;AAAA,IACF;AAEK,WAAA;AAAA,EAAA;AAEX;ACzLA,MAAM,kBAAkB,MAAM;AAAA,EAC5B,YACE,SACgB,UAChB;AACA,UAAM,OAAO;AAFG,SAAA,WAAA;AAGX,SAAA,OAAO,KAAK,YAAY;AAAA,EAAA;AAEjC;AAEA,MAAM,6BAA6B,UAAU;AAAA,EAC3C,YACkB,SACA,kBACA,mBAChB;AACA;AAAA,MACE,WAAW,gBAAgB,kBAAkB,OAAO,yBAAyB,kBAAkB,IAAI,CAAC,MAAM,EAAE,OAAO,EAAE,KAAK,IAAI,CAAC;AAAA,MAC/H;AAAA,IACF;AAPgB,SAAA,UAAA;AACA,SAAA,mBAAA;AACA,SAAA,oBAAA;AAAA,EAAA;AAAA,EAQlB,mBAAmB;AACjB,WAAO,KAAK,kBAAkB,KAAK,CAAC,GAAG,MAAMC,gBAAO,QAAQ,EAAE,SAAS,EAA
E,OAAO,CAAC,EAAE,CAAC;AAAA,EAAA;AAExF;AAMA,MAAM,6BAA6B,UAAU;AAAA,EAC3C,YACkB,kBACA,cAAwB,IACxC;AACI,QAAA,UAAU,YAAY,gBAAgB;AACtC,QAAA,YAAY,SAAS,GAAG;AAC1B,iBAAW,+BAA+B,YAAY,KAAK,IAAI,CAAC;AAAA,IAAA;AAIlE,UAAM,SAAS,YAAY;AATX,SAAA,mBAAA;AACA,SAAA,cAAA;AAAA,EAAA;AAUpB;AC/BO,MAAM,kBAAkB;AAAA,EACrB;AAAA,EAER,YAAY,YAAuC;AACjD,SAAK,aAAa;AAAA,EAAA;AAAA,EAGpB,MAAM,QAAQ,SAA+D;AAE3E,UAAM,eAAe,MAAM,KAAK,WAAW,cAAc;AAIzD,UAAM,YAA2B,aAAa,IAAI,CAAC,EAAE,SAAS,gBAAgB;AAAA,MAC5E,MAAM;AAAA,MACN;AAAA;AAAA,IAAA,EACA;AAEF,WAAO,EAAE,UAAU;AAAA,EAAA;AAEvB;ACsBO,MAAM,WAAW;AAAA,EACd;AAAA,EACA;AAAA;AAAA,EAER,YAAY,YAAuC,SAA0B;AAE3E,SAAK,aAAa;AAClB,SAAK,UAAU;AAAA,EAAA;AAAA,EAGjB,MAAM,QAAQ,SAA0D;AAChE,UAAA;AAAA,MACJ;AAAA,MACA;AAAA,MACA;AAAA,MACA,SAAS;AAAA,MACT,oBAAoB;AAAA,IAAA,IAClB;AAIA,QAAA;AACJ,UAAM,sBAAsB;AAExB,QAAA,YAAY,QAAQ,YAAY,QAAW;AAC3B,wBAAA;AAAA,IAAA,OACb;AACC,YAAA,mBAAmB,OAAO,MAAM,OAAO;AAC7C,UAAI,kBAAkB;AACF,0BAAA;AAAA,MACT,WAAA,oBAAoB,KAAK,OAAO,GAAG;AACtC,cAAA,iBAAiB,OAAO,OAAO,OAAO;AAC5C,YAAI,gBAAgB;AAClB,4BAAkB,eAAe;AAAA,QAAA,OAC5B;AACL,gBAAM,IAAI;AAAA,YACR,yCAAyC,OAAO;AAAA,UAClD;AAAA,QAAA;AAAA,MACF,OACK;AACL,cAAM,IAAI;AAAA,UACR,yCAAyC,OAAO;AAAA,QAClD;AAAA,MAAA;AAAA,IACF;AAGF,sBAAkB,gBAAgB,YAAY;AAG9C,UAAM,KAAK,WAAW,mBAAmB,SAAS,eAAe;AAC1D,WAAA;AAAA,MACL,wBAAwB,OAAO,IAAI,mBAAmB,cAAc;AAAA,IACtE;AAGA,UAAM,UAAU,KAAK;AASrB,UAAM,QAAQ,MAAM,QAAQ,WAAW,SAAS,iBAAiB;AAAA,MAC/D;AAAA,MACA;AAAA,MACA,SAAS;AAAA,MACT,OAAO,gBAAgB,SAAS;AAAA,MAChC,iBAAiB,gBAAgB,mBAAmB;AAAA,MACpD,UAAU,gBAAgB,YAAYV;AAAAA,MACtC,UAAU,gBAAgB,YAAYC;AAAAA,MACtC,gBAAgB,gBAAgB,kBAAkB;AAAA,MAClD,cAAc,gBAAgB,gBAAgB;AAAA,MAC9C,YAAY,gBAAgB,cAAc,WAAW;AAAA;AAAA,IAAA,CACtD;AAGD,QAAI,mBAAmB;AACjB,UAAA;AACI,cAAA,QAAQ,qBAAqB,KAAK;AAExC,cAAM,WAAW,MAAM,QAAQ,OAAO,KAAK;AACrC,cAAA,oBAAoB,UAAU,UAAU,gBAAgB;AACvD,eAAA;AAAA,UACL,OAAO,KAAK,yBAAyB,UAAU,MAAM,oBAAoB,iBAAiB;AAAA,QAC5F;AACO,eAAA;AAAA,UACL,cAAc;AAAA,QAChB;AAAA,eACO,OAAO;AACd,eAAO,MAAM,OAAO,KAAK,6BAA6B,KAAK,EAAE;AACvD,cAAA;AAAA,MAAA;AAAA,IACR;AAKF,WAAO,EAAE,MAAM;AAAA,EAAA;AAEnB;AClIO,MAAM,WAAW;AAAA,EACd;AAAA,EAER,YAAY,YAAuC;AACjD,SAAK,aAAa;AAAA,EAAA;AAAA,EAGpB,MAAM,QAAQ,SAAuD;AAC7D,UAAA,EAAE,SAAS,SAAS,OAAO,QAAQ,GAAG,aAAa,UAAU;AAGnE,QAAI,eAAe,CAAC,WAAW,YAAY,WAAW;AAE9C,YAAA,KAAK,WAAW,sBAAsB,OAAO;AAEnD,YAAM,eAAe,MAAM,KAAK,WAAW,cAAc;AACzD,YAAM,cAAc,aAAa,KAAK,CAAC,QAAQ,IAAI,YAAY,OAAO;AACtE,YAAM,mBAAmB,cAAc,YAAY,WAAW,CAAC;AAC/D,YAAM,IAAI;AAAA,QACR;AAAA,QACA;AAAA;AAAA,QACA;AAAA,MACF;AAAA,IAAA;AAIF,UAAM,kBAAkB,WAAW;AAE5B,WAAA;AAAA,MACL,gBAAgB,OAAO,IAAI,eAAe,SAAS,KAAK,GAAG,aAAa,mBAAmB,EAAE;AAAA,IAC/F;AAEI,QAAA;AAEI,YAAA,KAAK,WAAW,sBAAsB,OAAO;AAGnD,UAAI,kBAA6C;AAEjD,UAAI,CAAC,YAAY;AAEf,cAAM,gBAAgB,MAAM,KAAK,WAAW,gBAAgB,SAAS,OAAO;AAE5E,0BAAkB,cAAc;AAAA,MAAA;AAW5B,YAAA,UAAU,MAAM,KAAK,WAAW;AAAA,QACpC;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MACF;AACA,aAAO,KAAK,WAAW,QAAQ,MAAM,mBAAmB;AAExD,aAAO,EAAE,QAAQ;AAAA,aACV,OAAO;AACP,aAAA;AAAA,QACL,oBAAoB,iBAAiB,QAAQ,MAAM,UAAU,eAAe;AAAA,MAC9E;AACM,YAAA;AAAA,IAAA;AAAA,EACR;AAEJ;AC9FA,IAAI,cAA6B;AAS1B,SAAS,iBAAyB;AAEvC,MAAI,aAAa;AACR,WAAA;AAAA,EAAA;AAIH,QAAA,kBAAkB,cAAc,YAAY,GAAG;AACjD,MAAA,aAAa,KAAK,QAAQ,eAAe;AAG7C,SAAO,MAAM;AACX,UAAM,kBAAkB,KAAK,KAAK,YAAY,cAAc;AACxD,QAAAU,KAAG,WAAW,eAAe,GAAG;AACpB,oBAAA;AACP,aAAA;AAAA,IAAA;AAGH,UAAA,YAAY,KAAK,QAAQ,UAAU;AAEzC,QAAI,cAAc,YAAY;AACtB,YAAA,IAAI,MAAM,sDAAsD;AAAA,IAAA;AAE3D,iBAAA;AAAA,EAAA;AAEjB;AClCA,MAAM,cAAc;AACpB,MAAM,gBAAgB;AAEf,MAAM,yBAAyB;AAAA,EAC5B;AAAA,EAER,YAAY,eAA8B;AACxC,SAAK,gBAAgB;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOvB,MAAc,mBACZ,SACA,SACA,KACA,eAAe,eACf,aAAa,aAMZ;AACD,UAAM,KAAK,IAAI;AACT,UAAA,MAAM,IAAI,SAAS;AACnB,UAAA,QAAQ,IAAI,SAAS;AACr
B,UAAA,iCAAiB,IAAY;AACnC,eAAW,IAAI,EAAE;AAGjB,UAAM,SAAS,MAAM,KAAK,cAAc,gBAAgB,SAAS,SAAS,EAAE;AAC5E,QAAI,QAAQ;AACC,iBAAA,IAAI,OAAO,EAAY;AAAA,IAAA;AAI9B,UAAA,oBAAoB,MAAM,KAAK,cAAc;AAAA,MACjD;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AACA,eAAW,OAAO,mBAAmB;AACxB,iBAAA,IAAI,IAAI,EAAY;AAAA,IAAA;AAI3B,UAAA,cAAc,MAAM,KAAK,cAAc;AAAA,MAC3C;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AACA,eAAW,SAAS,aAAa;AACpB,iBAAA,IAAI,MAAM,EAAY;AAAA,IAAA;AAI7B,UAAA,qBAAqB,MAAM,KAAK,cAAc;AAAA,MAClD;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AACA,eAAW,OAAO,oBAAoB;AACzB,iBAAA,IAAI,IAAI,EAAY;AAAA,IAAA;AAGjC,WAAO,EAAE,KAAK,OAAO,IAAI,YAAY,MAAM;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAMrC,qBACN,cAMgE;AAC1D,UAAA,6BAAa,IAA+D;AAClF,eAAW,QAAQ,cAAc;AAC/B,UAAI,QAAQ,OAAO,IAAI,KAAK,GAAG;AAC/B,UAAI,CAAC,OAAO;AACV,gBAAQ,EAAE,gBAAgB,oBAAI,IAAO,GAAA,UAAU,KAAK,MAAM;AACnD,eAAA,IAAI,KAAK,KAAK,KAAK;AAAA,MAAA;AAEjB,iBAAA,MAAM,KAAK,YAAY;AAC1B,cAAA,eAAe,IAAI,EAAE;AAAA,MAAA;AAEzB,UAAA,KAAK,QAAQ,MAAM,UAAU;AAC/B,cAAM,WAAW,KAAK;AAAA,MAAA;AAAA,IACxB;AAEK,WAAA;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAMT,MAAc,eACZ,SACA,SACA,KACA,gBACA,UAC4B;AACtB,UAAA,MAAM,MAAM,KAAK,cAAc;AACrC,UAAM,OAAO,MAAM,KAAK,cAAc,gBAAgB,SAAS,SAAS,GAAG;AAErE,UAAA,UAAU,KAAK,IAAI,CAAC,MAAM,EAAE,WAAW,EAAE,KAAK,MAAM;AAEnD,WAAA;AAAA,MACL;AAAA,MACA;AAAA,MACA,OAAO;AAAA,IACT;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAaF,MAAM,OACJ,SACA,SACA,OACA,OAC8B;AAExB,UAAA,qBAAqB,WAAW,IAAI,YAAY;AAEhD,UAAA,iBAAiB,MAAM,KAAK,cAAc;AAAA,MAC9C;AAAA,MACA;AAAA,MACA;AAAA,MACA,SAAS;AAAA,IACX;AAGM,UAAA,eAAe,MAAM,QAAQ;AAAA,MACjC,eAAe;AAAA,QAAI,CAAC,QAClB,KAAK,mBAAmB,SAAS,mBAAmB,GAAG;AAAA,MAAA;AAAA,IAE3D;AAGM,UAAA,SAAS,KAAK,qBAAqB,YAAY;AAGrD,UAAM,UAA+B,CAAC;AAC3B,eAAA,CAAC,KAAK,EAAE,gBAAgB,UAAU,KAAK,OAAO,WAAW;AAC5D,YAAA,SAAS,MAAM,KAAK;AAAA,QACxB;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MACF;AACA,cAAQ,KAAK,MAAM;AAAA,IAAA;AAGd,WAAA;AAAA,EAAA;AAEX;ACnLA,MAAM,mBAAmB,MAAM;AAAA,EAC7B,YACE,SACgB,OAChB;AACA,UAAM,QAAQ,GAAG,OAAO,cAAc,KAAK,KAAK,OAAO;AAFvC,SAAA,QAAA;AAGX,SAAA,OAAO,KAAK,YAAY;AAEvB,UAAA,aACJ,iBAAiB,QAAQ,QAAQ,QAAQ,IAAI,MAAM,OAAO,KAAK,CAAC,IAAI;AACtE,QAAI,YAAY,OAAO;AACrB,WAAK,QAAQ,WAAW;AAAA,IAAA;AAAA,EAC1B;AAEJ;AAEA,MAAM,uBAAuB,WAAW;AAAA,EACtC,YACkB,WACA,gBACA,aAChB;AACA;AAAA,MACE,UAAU,SAAS,cAAc,cAAc,yEACM,WAAW,yCACvB,WAAW;AAAA,IACtD;AARgB,SAAA,YAAA;AACA,SAAA,iBAAA;AACA,SAAA,cAAA;AAAA,EAAA;AAQpB;AAEA,MAAM,wBAAwB,WAAW;AAAC;ACtB1C,MAAM,iBAAiB,KAAK,KAAK,eAAe,GAAG,MAAM,YAAY;AACrE,MAAM,mBAAmB;AAMzB,SAAS,sBAAsB,IAAoB;AACjD,KAAG,KAAK;AAAA,iCACuB,gBAAgB;AAAA;AAAA;AAAA;AAAA,GAI9C;AACH;AAOA,SAAS,qBAAqB,IAA2B;AACvD,QAAM,OAAO,GAAG,QAAQ,kBAAkB,gBAAgB,EAAE;AACtD,QAAA,OAAO,KAAK,IAAI;AACf,SAAA,IAAI,IAAI,KAAK,IAAI,CAAC,QAAQ,IAAI,EAAE,CAAC;AAC1C;AAUO,SAAS,gBAAgB,IAAoB;AAC9C,MAAA;AACF,WAAO,MAAM,iCAAiC;AAC9C,0BAAsB,EAAE;AAClB,UAAA,oBAAoB,qBAAqB,EAAE;AAEjD,QAAI,CAACA,KAAG,WAAW,cAAc,GAAG;AAC5B,YAAA,IAAI,WAAW,gCAAgC;AAAA,IAAA;AAGvD,UAAM,iBAAiBA,KACpB,YAAY,cAAc,EAC1B,OAAO,CAAC,SAAS,KAAK,SAAS,MAAM,CAAC,EACtC,KAAK;AAER,QAAI,eAAe;AACnB,eAAW,YAAY,gBAAgB;AACrC,UAAI,CAAC,kBAAkB,IAAI,QAAQ,GAAG;AAC7B,eAAA,MAAM,uBAAuB,QAAQ,EAAE;AAC9C,cAAM,WAAW,KAAK,KAAK,gBAAgB,QAAQ;AACnD,cAAM,MAAMA,KAAG,aAAa,UAAU,MAAM;AAGtC,cAAA,cAAc,GAAG,YAAY,MAAM;AACvC,aAAG,KAAK,GAAG;AACX,gBAAM,aAAa,GAAG;AAAA,YACpB,eAAe,gBAAgB;AAAA,UACjC;AACA,qBAAW,IAAI,QAAQ;AAAA,QAAA,CACxB;AAEG,YAAA;AACU,sBAAA;AACL,iBAAA,MAAM,mCAAmC,QAAQ,EAAE;AAC1D;AAAA,iBACO,OAAO;AACd,iBAAO,MAAM,8BAA8B,QAAQ,MAAM,KAAK,EAAE;AAEhE,gBAAM,IAAI,WAAW,qBAAqB,QAAQ,MAAM,KAAK,EAAE;AAAA,QAAA;AAAA,MACjE;AAAA,IACF;AAGF,QAAI,eAAe,GAAG;AACb,aAAA,MAAM,WAAW,YAAY,oBAAoB;AAAA,IAAA,OACnD;AACL,aAAO,MAAM,gCAAgC;AAAA,IAAA;AAAA
,WAExC,OAAO;AAEd,QAAI,iBAAiB,YAAY;AACzB,YAAA;AAAA,IAAA;AAEF,UAAA,IAAI,WAAW,mCAAmC,KAAK;AAAA,EAAA;AAEjE;AC/FO,MAAM,mBAAmB;AAyBzB,SAAS,wBAAwB,KAAiB;AAChD,SAAA;AAAA,IACL,IAAI,IAAI;AAAA,IACR,aAAa,IAAI;AAAA,IACjB,UAAU,KAAK,MAAM,IAAI,QAAQ;AAAA,EACnC;AACF;ACDO,MAAM,cAAc;AAAA,EACR;AAAA,EACT;AAAA,EACS,cAAsB;AAAA,EAC/B;AAAA,EACA;AAAA;AAAA;AAAA;AAAA,EAiBA,aAAa,SAAkB,SAAkB,IAAI,IAAY;AACvE,QAAI,MAAM;AACV,QAAI,YAAY,QAAW;AACzB,aAAO,KAAK,IAAI;AAAA,IAAA;AAElB,QAAI,YAAY,QAAW;AACzB,aAAO,KAAK,IAAI;AAAA,IAAA;AAEX,WAAA;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAMD,YAAY,SAA4C;AAExD,UAAA,+BAAe,IAAoB;AACnC,UAAA,+BAAe,IAAoB;AAItC,YAAA,OAAO,CAAC,MAAM,EAAE,cAAc,MAAS,EACvC,KAAK,CAAC,GAAG,OAAO,EAAE,aAAa,MAAM,EAAE,aAAa,EAAE,EACtD,QAAQ,CAAC,QAAQ,UAAU;AAC1B,eAAS,IAAI,OAAO,OAAO,EAAE,GAAG,QAAQ,CAAC;AAAA,IAAA,CAC1C;AAIA,YAAA,OAAO,CAAC,MAAM,EAAE,cAAc,MAAS,EACvC,KAAK,CAAC,GAAG,OAAO,EAAE,aAAa,MAAM,EAAE,aAAa,EAAE,EACtD,QAAQ,CAAC,QAAQ,UAAU;AAC1B,eAAS,IAAI,OAAO,OAAO,EAAE,GAAG,QAAQ,CAAC;AAAA,IAAA,CAC1C;AAGI,WAAA,QAAQ,IAAI,CAAC,YAAY;AAAA,MAC9B,GAAG;AAAA,MACH,UAAU,SAAS,IAAI,OAAO,OAAO,EAAE,CAAC;AAAA,MACxC,UAAU,SAAS,IAAI,OAAO,OAAO,EAAE,CAAC;AAAA,MACxC,WAAW,KAAK;AAAA,QACd,SAAS,IAAI,OAAO,OAAO,EAAE,CAAC;AAAA,QAC9B,SAAS,IAAI,OAAO,OAAO,EAAE,CAAC;AAAA,MAAA;AAAA,IAChC,EACA;AAAA,EAAA;AAAA,EAGJ,YAAY,QAAgB;AAC1B,QAAI,CAAC,QAAQ;AACL,YAAA,IAAI,WAAW,gCAAgC;AAAA,IAAA;AAIlD,SAAA,KAAK,IAAI,SAAS,MAAM;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAMvB,oBAA0B;AAChC,UAAM,aAAa;AAAA,MACjB,SAAS,KAAK,GAAG,QAAQ,sCAAsC;AAAA,MAC/D,gBAAgB,KAAK,GAAG;AAAA,QACtB;AAAA;AAAA,MACF;AAAA,MACA,iBAAiB,KAAK,GAAG;AAAA,QACvB;AAAA,MACF;AAAA,MACA,iBAAiB,KAAK,GAAG;AAAA,QACvB;AAAA,MACF;AAAA,MACA,eAAe,KAAK,GAAG;AAAA,QACrB;AAAA,MACF;AAAA,MACA,aAAa,KAAK,GAAG;AAAA,QACnB;AAAA,MACF;AAAA,MACA,sBAAsB,KAAK,GAAG;AAAA,QAC5B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MASF;AAAA,MACA,gBAAgB,KAAK,GAAG,QAAQ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,OAU/B;AAAA,MACD,sBAAsB,KAAK,GAAG,QAAQ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,OASrC;AAAA,MACD,uBAAuB,KAAK,GAAG,QAAQ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,OAStC;AAAA,MACD,gBAAgB,KAAK,GAAG,QAAQ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,OAS/B;AAAA,IACH;AACA,SAAK,aAAa;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOZ,UAAU,QAA4B;AACxC,QAAA,OAAO,SAAS,KAAK,aAAa;AACpC,YAAM,IAAI;AAAA,QACR,oBAAoB,OAAO,MAAM,+BAA+B,KAAK,WAAW;AAAA,MAClF;AAAA,IAAA;AAEE,QAAA,OAAO,WAAW,KAAK,aAAa;AAC/B,aAAA;AAAA,IAAA;AAET,WAAO,CAAC,GAAG,QAAQ,GAAG,IAAI,MAAM,KAAK,cAAc,OAAO,MAAM,EAAE,KAAK,CAAC,CAAC;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAgB3E,MAAc,uBAAsC;AAC5C,UAAA,YAAY,QAAQ,IAAI,4BAA4B;AAG1D,UAAM,EAAE,qBAAA,IAAyB,MAAM,OAAO,gCAA+B;AACxE,SAAA,aAAa,qBAAqB,SAAS;AAGhD,UAAM,aAAa,MAAM,KAAK,WAAW,WAAW,MAAM;AAC1D,SAAK,iBAAiB,WAAW;AAE7B,QAAA,KAAK,iBAAiB,KAAK,aAAa;AAC1C,YAAM,IAAI,eAAe,WAAW,KAAK,gBAAgB,KAAK,WAAW;AAAA,IAAA;AAAA,EAC3E;AAAA;AAAA;AAAA;AAAA;AAAA,EAOM,eAAe,OAAuB;AAE5C,UAAM,gBAAgB,MAAM,QAAQ,MAAM,IAAI;AAE9C,WAAO,IAAI,aAAa;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAM1B,MAAM,aAA4B;AAC5B,QAAA;AAEQ,gBAAA,KAAK,KAAK,EAAE;AAGtB,sBAAgB,KAAK,EAAE;AAGvB,WAAK,kBAAkB;AAGvB,YAAM,KAAK,qBAAqB;AAAA,aACzB,OAAO;AAEd,UAAI,iBAAiB,YAAY;AACzB,cAAA;AAAA,MAAA;AAEF,YAAA,IAAI,gBAAgB,4CAA4C,KAAK;AAAA,IAAA;AAAA,EAC7E;AAAA;AAAA;AAAA;AAAA,EAMF,MAAM,WAA0B;AAC9B,SAAK,GAAG,MAAM;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAMhB,MAAM,oBAAoB,SAAoC;AACxD,QAAA;AACF,YAAM,OAAO,KAAK,WAAW,cAAc,IAAI,QAAQ,aAAa;AAGpE,aAAO,KAAK,IAAI,CAAC,QAAQ,IAAI,OAAO;AAAA,aAC7B,OAAO;AACR,YAAA,IAAI,gBAAgB,4BAA4B,KAAK;AAAA,IAAA;AAAA,EAC7D;AAAA;AAAA;AAAA;AAAA,EAMF,MAAM,oBAAoB,SAAiB,SAAmC;AACxE,QAAA;AACI,YAAA,SAAS,KAAK,WAAW,YAAY;AAAA,QACzC,QAAQ,YAAY;
AAAA,QACpB,QAAQ,YAAY;AAAA,MACtB;AACA,aAAO,WAAW;AAAA,aACX,OAAO;AACR,YAAA,IAAI,gBAAgB,sCAAsC,KAAK;AAAA,IAAA;AAAA,EACvE;AAAA;AAAA;AAAA;AAAA,EAMF,MAAM,uBAAsE;AACtE,QAAA;AAUF,YAAM,OAAO,KAAK,WAAW,qBAAqB,IAAI;AAChD,YAAA,iCAAiB,IAAqC;AAE5D,iBAAW,OAAO,MAAM;AAEtB,cAAM,UAAU,IAAI;AACpB,YAAI,CAAC,WAAW,IAAI,OAAO,GAAG;AACjB,qBAAA,IAAI,SAAS,EAAE;AAAA,QAAA;AAItB,cAAA,eAAe,IAAI,YAAY,IAAI,KAAK,IAAI,SAAS,EAAE,YAAA,IAAgB;AAElE,mBAAA,IAAI,OAAO,GAAG,KAAK;AAAA,UAC5B,SAAS,IAAI;AAAA,UACb,eAAe,IAAI;AAAA,UACnB,gBAAgB,IAAI;AAAA,UACpB,WAAW;AAAA,QAAA,CACZ;AAAA,MAAA;AAIQ,iBAAA,YAAY,WAAW,UAAU;AACjC,iBAAA,KAAK,CAAC,GAAG,MAAM;AACtB,cAAI,EAAE,YAAY,MAAM,EAAE,YAAY,IAAI;AACjC,mBAAA;AAAA,UAAA;AAET,cAAI,EAAE,YAAY,MAAM,EAAE,YAAY,IAAI;AACjC,mBAAA;AAAA,UAAA;AAET,cAAI,EAAE,YAAY,MAAM,EAAE,YAAY,IAAI;AACjC,mBAAA;AAAA,UAAA;AAGT,iBAAOD,gBAAO,QAAQ,EAAE,SAAS,EAAE,OAAO;AAAA,QAAA,CAC3C;AAAA,MAAA;AAGI,aAAA;AAAA,aACA,OAAO;AACR,YAAA,IAAI,gBAAgB,oCAAoC,KAAK;AAAA,IAAA;AAAA,EACrE;AAAA;AAAA;AAAA;AAAA;AAAA,EAOF,MAAM,aACJ,SACA,SACA,WACe;AACX,QAAA;AAEF,YAAM,QAAQ,UAAU,IAAI,CAAC,QAAQ;AACnC,cAAM,SAAS,UAAU,IAAI,SAAS,KAAK;AAAA,OAAkB,IAAI,SAAS,GAAG;AAAA,QAAiB,IAAI,SAAS,KAAK,KAAK,KAAK,CAAC;AAAA;AAC3H,eAAO,GAAG,MAAM,GAAG,IAAI,WAAW;AAAA,MAAA,CACnC;AACD,YAAM,gBAAgB,MAAM,KAAK,WAAW,eAAe,KAAK;AAC1D,YAAA,mBAAmB,cAAc,IAAI,CAAC,WAAW,KAAK,UAAU,MAAM,CAAC;AAG7E,YAAM,cAAc,KAAK,GAAG,YAAY,CAAC,SAA2B;AAClE,iBAAS,IAAI,GAAG,IAAI,KAAK,QAAQ,KAAK;AAC9B,gBAAA,MAAM,KAAK,CAAC;AACZ,gBAAA,MAAM,IAAI,SAAS;AACrB,cAAA,CAAC,OAAO,OAAO,QAAQ,YAAY,CAAC,IAAI,QAAQ;AAC5C,kBAAA,IAAI,WAAW,4CAA4C;AAAA,UAAA;AAI7D,gBAAA,SAAS,KAAK,WAAW,eAAe;AAAA,YAC5C,QAAQ,YAAY;AAAA,YACpB,QAAQ,YAAY;AAAA,YACpB;AAAA,YACA,IAAI;AAAA,YACJ,KAAK,UAAU,IAAI,QAAQ;AAAA,YAC3B;AAAA,aACA,oBAAI,KAAK,GAAE,YAAY;AAAA;AAAA,UACzB;AACA,gBAAM,QAAQ,OAAO;AAGrB,eAAK,WAAW,gBAAgB;AAAA,YAC9B,OAAO,KAAK;AAAA,YACZ,QAAQ,YAAY;AAAA,YACpB,QAAQ,YAAY;AAAA,YACpB,KAAK,UAAU,iBAAiB,CAAC,CAAC;AAAA,UACpC;AAAA,QAAA;AAAA,MACF,CACD;AAED,kBAAY,SAAS;AAAA,aACd,OAAO;AACR,YAAA,IAAI,gBAAgB,oCAAoC,KAAK;AAAA,IAAA;AAAA,EACrE;AAAA;AAAA;AAAA;AAAA;AAAA,EAOF,MAAM,gBAAgB,SAAiB,SAAkC;AACnE,QAAA;AACI,YAAA,SAAS,KAAK,WAAW,gBAAgB;AAAA,QAC7C,QAAQ,YAAY;AAAA,QACpB,QAAQ,YAAY;AAAA,MACtB;AACA,aAAO,OAAO;AAAA,aACP,OAAO;AACR,YAAA,IAAI,gBAAgB,8BAA8B,KAAK;AAAA,IAAA;AAAA,EAC/D;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQF,MAAM,QAAQ,IAAsC;AAC9C,QAAA;AACF,YAAM,MAAM,KAAK,WAAW,QAAQ,IAAI,EAAE;AAC1C,UAAI,CAAC,KAAK;AACD,eAAA;AAAA,MAAA;AAGT,aAAO,wBAAwB,GAAG;AAAA,aAC3B,OAAO;AACd,YAAM,IAAI,gBAAgB,gCAAgC,EAAE,IAAI,KAAK;AAAA,IAAA;AAAA,EACvE;AAAA;AAAA;AAAA;AAAA;AAAA,EAOF,MAAM,cACJ,SACA,SACA,OACA,OACqB;AACjB,QAAA;AACF,YAAM,eAAe,MAAM,KAAK,WAAW,WAAW,KAAK;AACrD,YAAA,YAAY,KAAK,UAAU,YAAY;AACvC,YAAA,WAAW,KAAK,eAAe,KAAK;AAEpC,YAAA,OAAO,KAAK,GAAG,QAAQ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,OAkC5B;AAED,YAAM,aAAa,KAAK;AAAA,QACtB,QAAQ,YAAY;AAAA,QACpB,QAAQ,YAAY;AAAA,QACpB,KAAK,UAAU,SAAS;AAAA,QACxB;AAAA,QACA,QAAQ,YAAY;AAAA,QACpB,QAAQ,YAAY;AAAA,QACpB;AAAA;AAAA,QACA;AAAA,MACF;AAGM,YAAA,gBAAgB,KAAK,YAAY,UAAU;AAGjD,YAAM,aAAa,cAChB,KAAK,CAAC,GAAG,MAAM,EAAE,YAAY,EAAE,SAAS,EACxC,MAAM,GAAG,KAAK;AAEV,aAAA,WAAW,IAAI,CAAC,SAAS;AAAA,QAC9B,GAAG,wBAAwB,GAAG;AAAA,QAC9B,UAAU;AAAA,UACR,GAAG,KAAK,MAAM,IAAI,QAAQ;AAAA,UAC1B,OAAO,IAAI;AAAA,UACX,UAAU,IAAI;AAAA,UACd,UAAU,IAAI;AAAA,QAAA;AAAA,MAChB,EACA;AAAA,aACK,OAAO;AACd,YAAM,IAAI;AAAA,QACR,mDAAmD,KAAK;AAAA,QACxD;AAAA,MACF;AAAA,IAAA;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAMF,MAAM,gBACJ,SACA,SACA,IACA,OACqB;AACjB,QAAA;AACF,YAAM,SAAS,MAAM,KAAK,QAAQ,EAAE;AACpC,UAAI,CAAC,QAAQ;AACX,eAAO,CA
AC;AAAA,MAAA;AAGV,YAAM,aAAc,OAAO,SAA8B,QAAQ,CAAC;AAC5D,YAAA,YAAa,OAAO,SAA8B;AAElD,YAAA,SAAS,KAAK,WAAW,eAAe;AAAA,QAC5C,QAAQ,YAAY;AAAA,QACpB,QAAQ,YAAY;AAAA,QACpB;AAAA,QACA,WAAW,SAAS;AAAA,QACpB,KAAK,UAAU,UAAU;AAAA,QACzB;AAAA,QACA;AAAA,MACF;AAEA,aAAO,OAAO,IAAI,CAAC,QAAQ,wBAAwB,GAAG,CAAC;AAAA,aAChD,OAAO;AACd,YAAM,IAAI,gBAAgB,sCAAsC,EAAE,IAAI,KAAK;AAAA,IAAA;AAAA,EAC7E;AAAA;AAAA;AAAA;AAAA,EAMF,MAAM,2BACJ,SACA,SACA,IACA,OACqB;AACjB,QAAA;AACF,YAAM,YAAY,MAAM,KAAK,QAAQ,EAAE;AACvC,UAAI,CAAC,WAAW;AACd,eAAO,CAAC;AAAA,MAAA;AAGV,YAAM,cAAc,UAAU;AAExB,YAAA,SAAS,KAAK,WAAW,qBAAqB;AAAA,QAClD,QAAQ,YAAY;AAAA,QACpB,QAAQ,YAAY;AAAA,QACpB,YAAY;AAAA,QACZ;AAAA,QACA,KAAK,UAAU,YAAY,IAAI;AAAA,QAC/B;AAAA,MACF;AAEO,aAAA,OAAO,UAAU,IAAI,CAAC,QAAQ,wBAAwB,GAAG,CAAC;AAAA,aAC1D,OAAO;AACd,YAAM,IAAI;AAAA,QACR,kDAAkD,EAAE;AAAA,QACpD;AAAA,MACF;AAAA,IAAA;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAMF,MAAM,4BACJ,SACA,SACA,IACA,OACqB;AACjB,QAAA;AACF,YAAM,YAAY,MAAM,KAAK,QAAQ,EAAE;AACvC,UAAI,CAAC,WAAW;AACd,eAAO,CAAC;AAAA,MAAA;AAGV,YAAM,cAAc,UAAU;AAExB,YAAA,SAAS,KAAK,WAAW,sBAAsB;AAAA,QACnD,QAAQ,YAAY;AAAA,QACpB,QAAQ,YAAY;AAAA,QACpB,YAAY;AAAA,QACZ;AAAA,QACA,KAAK,UAAU,YAAY,IAAI;AAAA,QAC/B;AAAA,MACF;AAEA,aAAO,OAAO,IAAI,CAAC,QAAQ,wBAAwB,GAAG,CAAC;AAAA,aAChD,OAAO;AACd,YAAM,IAAI;AAAA,QACR,mDAAmD,EAAE;AAAA,QACrD;AAAA,MACF;AAAA,IAAA;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAMF,MAAM,gBACJ,SACA,SACA,IAC0B;AACtB,QAAA;AACF,YAAM,QAAQ,MAAM,KAAK,QAAQ,EAAE;AACnC,UAAI,CAAC,OAAO;AACH,eAAA;AAAA,MAAA;AAGT,YAAM,gBAAgB,MAAM;AACtB,YAAAJ,QAAO,cAAc,QAAQ,CAAC;AACpC,YAAM,aAAaA,MAAK,MAAM,GAAG,EAAE;AAE/B,UAAA,WAAW,WAAW,GAAG;AACpB,eAAA;AAAA,MAAA;AAGH,YAAA,SAAS,KAAK,WAAW,eAAe;AAAA,QAC5C,QAAQ,YAAY;AAAA,QACpB,QAAQ,YAAY;AAAA,QACpB,cAAc;AAAA,QACd,KAAK,UAAU,UAAU;AAAA,QACzB;AAAA,MACF;AAEA,UAAI,CAAC,QAAQ;AACJ,eAAA;AAAA,MAAA;AAGT,aAAO,wBAAwB,MAAM;AAAA,aAC9B,OAAO;AACd,YAAM,IAAI,gBAAgB,sCAAsC,EAAE,IAAI,KAAK;AAAA,IAAA;AAAA,EAC7E;AAAA;AAAA;AAAA;AAAA;AAAA,EAOF,MAAM,gBACJ,SACA,SACA,KACqB;AACrB,QAAI,CAAC,IAAI,OAAQ,QAAO,CAAC;AACrB,QAAA;AAEF,YAAM,eAAe,IAAI,IAAI,MAAM,GAAG,EAAE,KAAK,GAAG;AAC1C,YAAA,OAAO,KAAK,GAAG;AAAA,QACnB,wEAAwE,YAAY;AAAA,MACtF;AACA,YAAM,OAAO,KAAK;AAAA,QAChB,QAAQ,YAAY;AAAA,QACpB,QAAQ,YAAY;AAAA,QACpB,GAAG;AAAA,MACL;AACA,aAAO,KAAK,IAAI,CAAC,QAAQ,wBAAwB,GAAG,CAAC;AAAA,aAC9C,OAAO;AACR,YAAA,IAAI,gBAAgB,oCAAoC,KAAK;AAAA,IAAA;AAAA,EACrE;AAEJ;ACxqBO,MAAM,0BAA0B;AAAA,EACpB;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA,EAMT,iBAAiB,SAAiC;AAChD,YAAA,WAAW,IAAI,YAAY;AAAA,EAAA;AAAA,EAGrC,cAAc;AACR,QAAA;AACA,QAAA;AAGE,UAAA,eAAe,QAAQ,IAAI;AACjC,QAAI,cAAc;AACR,cAAA;AACC,eAAA,KAAK,KAAK,OAAO,cAAc;AACjC,aAAA,MAAM,yDAAyD,KAAK,EAAE;AAAA,IAAA,OACxE;AAEL,YAAMM,eAAc,eAAe;AACnC,YAAM,WAAW,KAAK,KAAKA,cAAa,QAAQ;AAChD,YAAM,YAAY,KAAK,KAAK,UAAU,cAAc;AAC9C,YAAA,cAAcD,KAAG,WAAW,SAAS;AAE3C,UAAI,aAAa;AACN,iBAAA;AACD,gBAAA;AACD,eAAA,MAAM,kCAAkC,MAAM,EAAE;AAAA,MAAA,OAClD;AAEL,cAAM,gBAAgB,SAAS,mBAAmB,EAAE,QAAQ,IAAI;AAChE,gBAAQ,cAAc;AACb,iBAAA,KAAK,KAAK,OAAO,cAAc;AACjC,eAAA,MAAM,yCAAyC,KAAK,EAAE;AAAA,MAAA;AAAA,IAC/D;AAIE,QAAA;AACFA,WAAG,UAAU,OAAO,EAAE,WAAW,MAAM;AAAA,aAChC,OAAO;AAGd,aAAO,MAAM,0CAA0C,KAAK,KAAK,KAAK,EAAE;AAAA,IAAA;AAGrE,SAAA,QAAQ,IAAI,cAAc,MAAM;AACrC,SAAK,oBAAoB,IAAI,yBAAyB,KAAK,KAAK;AAEhE,UAAM,mBAAmB,IAAI;AAAA,MAC3B;AAAA,MACA;AAAA,IACF;AACA,UAAM,iBAAiB,IAAI;AAAA,MACzB;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAEA,SAAK,WAAW;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAMlB,MAAM,aAA4B;AAC1B,UAAA,KAAK,MAAM,WAAW;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAO9B,MAAM,WAA0B;AAC9B,WAAO,KAAK,gCAAgC;AACtC,UAAA,KAAK,MAAM,SAAS;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAS5B,MAAM,sBAAsB,SAAgC;AACnD,WAAA,KAAK,uCAAuC,OAAO,EAAE;AACtD,UAAA,oBAAoB,QAAQ,YAAY;AAG9C,UAAM,WA
AW,MAAM,KAAK,aAAa,iBAAiB;AAC1D,UAAM,iBAAiB,MAAM,KAAK,OAAO,mBAAmB,EAAE;AAE9D,QAAI,SAAS,WAAW,KAAK,CAAC,gBAAgB;AACrC,aAAA,KAAK,eAAe,OAAO,cAAc;AAG1C,YAAA,eAAe,MAAM,KAAK,cAAc;AAC9C,YAAM,eAAe,aAAa,IAAI,CAAC,QAAQ,IAAI,OAAO;AAE1D,UAAI,cAAwB,CAAC;AACzB,UAAA,aAAa,SAAS,GAAG;AACrB,cAAA,OAAO,IAAI,KAAK,cAAc;AAAA;AAAA;AAAA;AAAA,UAIlC,WAAW;AAAA;AAAA,QAAA,CACZ;AACK,cAAA,UAAU,KAAK,OAAO,iBAAiB;AAE/B,sBAAA,QAAQ,MAAM,GAAG,CAAC,EAAE,IAAI,CAAC,WAAW,OAAO,IAAI;AAC7D,eAAO,KAAK,yBAAyB,YAAY,KAAK,IAAI,CAAC,EAAE;AAAA,MAAA;AAGzD,YAAA,IAAI,qBAAqB,SAAS,WAAW;AAAA,IAAA;AAG9C,WAAA,KAAK,cAAc,OAAO,uBAAuB;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA,EAM1D,MAAM,aAAa,SAA4C;AAC7D,UAAM,WAAW,MAAM,KAAK,MAAM,oBAAoB,OAAO;AAC7D,WAAO,SAAS,OAAO,CAAC,MAAMD,gBAAO,MAAM,CAAC,CAAC,EAAE,IAAI,CAAC,aAAa,EAAE,QAAU,EAAA;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAO/E,MAAM,OAAO,SAAiB,SAA2C;AACjE,UAAA,oBAAoB,KAAK,iBAAiB,OAAO;AACvD,WAAO,KAAK,MAAM,oBAAoB,SAAS,iBAAiB;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAgBlE,MAAM,gBACJ,SACA,eAC4B;AACrB,WAAA;AAAA,MACL,+BAA+B,OAAO,GAAG,gBAAgB,IAAI,aAAa,KAAK,EAAE;AAAA,IACnF;AAGA,UAAM,iBAAiB,MAAM,KAAK,MAAM,oBAAoB,SAAS,EAAE;AACvE,UAAM,sBAAsB,MAAM,KAAK,aAAa,OAAO;AAEvD,QAAA,oBAAoB,WAAW,GAAG;AACpC,UAAI,gBAAgB;AACX,eAAA,KAAK,sCAAsC,OAAO,EAAE;AAC3D,eAAO,EAAE,WAAW,MAAM,gBAAgB,KAAK;AAAA,MAAA;AAG1C,aAAA,KAAK,kCAAkC,OAAO,EAAE;AAEvD,YAAM,oBAAoB,MAAM,KAAK,MAAM,qBAAqB;AAChE,YAAM,iBAAiB,kBAAkB,IAAI,OAAO,KAAK,CAAC;AAC1D,YAAM,IAAI,qBAAqB,SAAS,iBAAiB,IAAI,cAAc;AAAA,IAAA;AAG7E,UAAM,iBAAiB,oBAAoB,IAAI,CAAC,MAAM,EAAE,OAAO;AAC/D,QAAI,YAA2B;AAE3B,QAAA,CAAC,iBAAiB,kBAAkB,UAAU;AACpC,kBAAAA,gBAAO,cAAc,gBAAgB,GAAG;AAAA,IAAA,OAC/C;AACL,YAAM,eAAe;AACrB,UAAI,CAAC,aAAa,KAAK,aAAa,GAAG;AAC9B,eAAA,KAAK,qCAAqC,aAAa,EAAE;AAAA,MAAA,OAE3D;AAEL,YAAI,QAAQ;AACZ,YAAI,CAACA,gBAAO,WAAW,aAAa,GAAG;AAErC,kBAAQ,IAAI,aAAa;AAAA,QAChB,WAAAA,gBAAO,MAAM,aAAa,GAAG;AAE9B,kBAAA,GAAG,KAAK,SAAS,aAAa;AAAA,QAAA;AAG5B,oBAAAA,gBAAO,cAAc,gBAAgB,KAAK;AAAA,MAAA;AAAA,IACxD;AAGF,QAAI,WAAW;AACN,aAAA;AAAA,QACL,8BAA8B,SAAS,QAAQ,OAAO,IAAI,aAAa;AAAA,MACzE;AAAA,IAAA,OACK;AACL,aAAO,KAAK,2CAA2C,OAAO,IAAI,aAAa,EAAE;AAAA,IAAA;AAM/E,QAAA,CAAC,aAAa,CAAC,gBAAgB;AAEjC,YAAM,oBAAoB,MAAM,KAAK,MAAM,qBAAqB;AAChE,YAAM,iBAAiB,kBAAkB,IAAI,OAAO,KAAK,CAAC;AAC1D,YAAM,IAAI,qBAAqB,SAAS,iBAAiB,IAAI,cAAc;AAAA,IAAA;AAGtE,WAAA,EAAE,WAAW,eAAe;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOrC,MAAM,mBAAmB,SAAiB,SAAwC;AAC1E,UAAA,oBAAoB,KAAK,iBAAiB,OAAO;AAChD,WAAA;AAAA,MACL,mCAAmC,OAAO,IAAI,qBAAqB,cAAc;AAAA,IACnF;AACA,UAAM,QAAQ,MAAM,KAAK,MAAM,gBAAgB,SAAS,iBAAiB;AAClE,WAAA,KAAK,cAAc,KAAK,YAAY;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAS7C,MAAM,YACJ,SACA,SACA,UACe;AACT,UAAA,oBAAoB,KAAK,iBAAiB,OAAO;AACjD,UAAA,MAAM,SAAS,SAAS;AAC1B,QAAA,CAAC,OAAO,OAAO,QAAQ,YAAY,CAAC,IAAI,QAAQ;AAC5C,YAAA,IAAI,WAAW,4CAA4C;AAAA,IAAA;AAGnE,WAAO,KAAK,uBAAuB,SAAS,SAAS,KAAK,EAAE;AAE5D,QAAI,CAAC,SAAS,YAAY,QAAQ;AAC1B,YAAA,IAAI,MAAM,kCAAkC;AAAA,IAAA;AAIpD,UAAM,SAAS,MAAM,KAAK,SAAS,UAAU,SAAS,WAAW;AAGjE,UAAM,YAAY,OAAO,IAAI,CAAC,WAAyB;AAAA,MACrD,aAAa,MAAM;AAAA,MACnB,UAAU;AAAA,QACR,GAAG,SAAS;AAAA,QACZ,OAAO,MAAM,QAAQ;AAAA,QACrB,MAAM,MAAM,QAAQ;AAAA,MAAA;AAAA,IACtB,EACA;AACF,WAAO,KAAK,0BAA0B,UAAU,MAAM,SAAS;AAG/D,UAAM,KAAK,MAAM,aAAa,SAAS,mBAAmB,SAAS;AAAA,EAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQrE,MAAM,YACJ,SACA,SACA,OACA,QAAQ,GACsB;AACxB,UAAA,oBAAoB,KAAK,iBAAiB,OAAO;AACvD,WAAO,KAAK,kBAAkB,OAAO,SAAS,mBAAmB,OAAO,KAAK;AAAA,EAAA;AAAA,EAG/E,MAAM,gBAEJ;AAEA,UAAM,aAAa,MAAM,KAAK,MAAM,qBAAqB;AAGlD,WAAA,MAAM,KAAK,WAAW,QAAQ,CAAC,EAAE,IAAI,CAAC,CAAC,SAAS,QAAQ,OAAO;AAAA,MACpE;AAAA,MACA;AAAA;AAAA,IAAA,EACA;AAAA,EAAA;AAEN;"}