@heripo/document-processor 0.1.4 → 0.1.5
- package/dist/index.cjs +189 -30
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +51 -8
- package/dist/index.d.ts +51 -8
- package/dist/index.js +189 -30
- package/dist/index.js.map +1 -1
- package/package.json +9 -9
package/dist/index.cjs.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"sources":["../src/index.ts","../../shared/src/utils/batch-processor.ts","../../shared/src/utils/spawn-utils.ts","../../shared/src/utils/llm-caller.ts","../../shared/src/utils/llm-token-usage-aggregator.ts","../src/utils/ref-resolver.ts","../src/utils/id-generator.ts","../src/utils/text-cleaner.ts","../src/utils/markdown-converter.ts","../src/converters/chapter-converter.ts","../src/extractors/toc-extract-error.ts","../src/extractors/toc-validator.ts","../src/extractors/toc-finder.ts","../src/extractors/toc-extractor.ts","../src/core/base-llm-component.ts","../src/core/text-llm-component.ts","../src/extractors/vision-toc-extractor.ts","../src/core/vision-llm-component.ts","../src/parsers/caption-parser.ts","../src/parsers/page-range-parse-error.ts","../src/parsers/page-range-parser.ts","../src/validators/base-validator.ts","../src/validators/toc-content-validator.ts","../src/validators/caption-validator.ts","../src/document-processor.ts"],"sourcesContent":["/**\n * @heripo/document-processor\n *\n * Document preprocessing package that converts DoclingDocument to ProcessedDocument.\n *\n * ## Key Features\n *\n * - TOC extraction and structuring (LLM-based)\n * - Page range mapping (Vision LLM)\n * - Text cleaning and sentence merging (lightweight LLM)\n * - Caption parsing (lightweight LLM)\n * - Chapter tree construction\n * - Image/table conversion\n *\n * @packageDocumentation\n */\n\nexport { DocumentProcessor } from './document-processor';\nexport { BaseLLMComponent, TextLLMComponent, VisionLLMComponent } from './core';\nexport type {\n BaseLLMComponentOptions,\n VisionLLMComponentOptions,\n ImageContent,\n} from './core';\nexport type { DocumentProcessorOptions } from './document-processor';\nexport type { TocEntry, TocAreaResult, PageSizeGroup } from './types';\nexport {\n CaptionParser,\n CaptionParseError,\n PageRangeParser,\n PagePattern,\n PageRangeParseError,\n} from './parsers';\nexport type { CaptionParserOptions } from './parsers';\nexport {\n TocFinder,\n TocExtractor,\n TocExtractError,\n TocNotFoundError,\n TocParseError,\n TOC_KEYWORDS,\n CONTINUATION_MARKERS,\n PAGE_NUMBER_PATTERN,\n TocEntrySchema,\n TocResponseSchema,\n VisionTocExtractor,\n VisionTocExtractionSchema,\n} from './extractors';\nexport type {\n TocFinderOptions,\n TocExtractorOptions,\n TocResponse,\n VisionTocExtractorOptions,\n VisionTocExtractionResult,\n} from './extractors';\nexport {\n BaseValidator,\n TocContentValidator,\n TocContentValidationSchema,\n CaptionValidator,\n CaptionValidationError,\n} from './validators';\nexport type {\n BaseValidatorOptions,\n TocContentValidatorOptions,\n TocContentValidationResult,\n CaptionValidatorOptions,\n} from './validators';\nexport { ChapterConverter } from './converters';\n","/**\n * BatchProcessor - Batch processing utility\n *\n * Provides functionality to split large arrays into batches for parallel processing.\n */\nexport class BatchProcessor {\n /**\n * Splits an array into batches of specified size.\n *\n * @param items - Array to split\n * @param batchSize - Size of each batch\n * @returns Array of batches\n *\n * @example\n * ```typescript\n * const items = [1, 2, 3, 4, 5];\n * const batches = BatchProcessor.createBatches(items, 2);\n * // [[1, 2], [3, 4], [5]]\n * ```\n */\n static createBatches<T>(items: T[], batchSize: number): T[][] {\n const batches: T[][] = [];\n for (let i = 0; i < items.length; i += batchSize) {\n batches.push(items.slice(i, i + batchSize));\n }\n return batches;\n }\n\n /**\n * Splits an array into batches 
and executes async function in parallel.\n *\n * @param items - Array to process\n * @param batchSize - Size of each batch\n * @param processFn - Async function to process each batch\n * @returns Flattened array of processed results\n *\n * @example\n * ```typescript\n * const texts = ['a', 'b', 'c', 'd', 'e'];\n * const results = await BatchProcessor.processBatch(\n * texts,\n * 2,\n * async (batch) => {\n * return batch.map(t => t.toUpperCase());\n * }\n * );\n * // ['A', 'B', 'C', 'D', 'E']\n * ```\n */\n static async processBatch<T, R>(\n items: T[],\n batchSize: number,\n processFn: (batch: T[]) => Promise<R[]>,\n ): Promise<R[]> {\n const batches = this.createBatches(items, batchSize);\n const results = await Promise.all(batches.map((batch) => processFn(batch)));\n return results.flat();\n }\n\n /**\n * Splits an array into batches and executes sync function in parallel.\n *\n * @param items - Array to process\n * @param batchSize - Size of each batch\n * @param processFn - Sync function to process each batch\n * @returns Flattened array of processed results\n *\n * @example\n * ```typescript\n * const numbers = [1, 2, 3, 4, 5];\n * const results = BatchProcessor.processBatchSync(\n * numbers,\n * 2,\n * (batch) => batch.map(n => n * 2)\n * );\n * // [2, 4, 6, 8, 10]\n * ```\n */\n static processBatchSync<T, R>(\n items: T[],\n batchSize: number,\n processFn: (batch: T[]) => R[],\n ): R[] {\n const batches = this.createBatches(items, batchSize);\n const results = batches.map((batch) => processFn(batch));\n return results.flat();\n }\n}\n","import type { SpawnOptions } from 'node:child_process';\n\nimport { spawn } from 'node:child_process';\n\n/**\n * Result of a spawn operation\n */\nexport interface SpawnResult {\n stdout: string;\n stderr: string;\n code: number;\n}\n\n/**\n * Extended spawn options with output capture control\n */\nexport interface SpawnAsyncOptions extends SpawnOptions {\n /**\n * Whether to capture stdout (default: true)\n */\n captureStdout?: boolean;\n\n /**\n * Whether to capture stderr (default: true)\n */\n captureStderr?: boolean;\n}\n\n/**\n * Execute a command asynchronously and return the result\n *\n * Eliminates the repetitive Promise wrapper pattern used throughout\n * DoclingEnvironment for spawn operations.\n *\n * @param command - The command to execute\n * @param args - Arguments to pass to the command\n * @param options - Spawn options with optional output capture control\n * @returns Promise resolving to stdout, stderr, and exit code\n *\n * @example\n * ```typescript\n * // Simple usage\n * const result = await spawnAsync('python3', ['--version']);\n * console.log(result.stdout); // \"Python 3.12.0\"\n *\n * // With options\n * const result = await spawnAsync('pip', ['install', 'package'], {\n * cwd: '/path/to/venv',\n * captureStderr: true,\n * });\n * ```\n */\nexport function spawnAsync(\n command: string,\n args: string[],\n options: SpawnAsyncOptions = {},\n): Promise<SpawnResult> {\n const {\n captureStdout = true,\n captureStderr = true,\n ...spawnOptions\n } = options;\n\n return new Promise((resolve, reject) => {\n const proc = spawn(command, args, spawnOptions);\n\n let stdout = '';\n let stderr = '';\n\n if (captureStdout && proc.stdout) {\n proc.stdout.on('data', (data) => {\n stdout += data.toString();\n });\n }\n\n if (captureStderr && proc.stderr) {\n proc.stderr.on('data', (data) => {\n stderr += data.toString();\n });\n }\n\n proc.on('close', (code) => {\n resolve({ stdout, stderr, code: code ?? 
0 });\n });\n\n proc.on('error', reject);\n });\n}\n","import type { z } from 'zod';\n\nimport { type LanguageModel, Output, generateText } from 'ai';\n\n/**\n * Configuration for LLM API call with retry and fallback support\n */\nexport interface LLMCallConfig<TSchema extends z.ZodType> {\n /**\n * Zod schema for response validation\n */\n schema: TSchema;\n\n /**\n * System prompt for LLM\n */\n systemPrompt: string;\n\n /**\n * User prompt for LLM\n */\n userPrompt: string;\n\n /**\n * Primary model for the call (required)\n */\n primaryModel: LanguageModel;\n\n /**\n * Fallback model for retry after primary model exhausts maxRetries (optional)\n */\n fallbackModel?: LanguageModel;\n\n /**\n * Maximum retry count per model (default: 3)\n */\n maxRetries: number;\n\n /**\n * Temperature for generation (optional, 0-1)\n */\n temperature?: number;\n\n /**\n * Abort signal for cancellation support\n */\n abortSignal?: AbortSignal;\n\n /**\n * Component name for tracking (e.g., 'TocExtractor', 'PageRangeParser')\n */\n component: string;\n\n /**\n * Phase name for tracking (e.g., 'extraction', 'validation', 'sampling')\n */\n phase: string;\n}\n\n/**\n * Configuration for LLM vision call with message format\n */\nexport interface LLMVisionCallConfig<TSchema extends z.ZodType> {\n /**\n * Zod schema for response validation\n */\n schema: TSchema;\n\n /**\n * Messages array for vision LLM (instead of systemPrompt/userPrompt)\n */\n messages: Array<{ role: 'user' | 'assistant'; content: any[] | string }>;\n\n /**\n * Primary model for the call (required)\n */\n primaryModel: LanguageModel;\n\n /**\n * Fallback model for retry after primary model exhausts maxRetries (optional)\n */\n fallbackModel?: LanguageModel;\n\n /**\n * Maximum retry count per model (default: 3)\n */\n maxRetries: number;\n\n /**\n * Temperature for generation (optional, 0-1)\n */\n temperature?: number;\n\n /**\n * Abort signal for cancellation support\n */\n abortSignal?: AbortSignal;\n\n /**\n * Component name for tracking (e.g., 'TocExtractor', 'PageRangeParser')\n */\n component: string;\n\n /**\n * Phase name for tracking (e.g., 'extraction', 'validation', 'sampling')\n */\n phase: string;\n}\n\n/**\n * Token usage information with model tracking\n */\nexport interface ExtendedTokenUsage {\n component: string;\n phase: string;\n model: 'primary' | 'fallback';\n modelName: string;\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n}\n\n/**\n * Result of LLM call including usage information\n */\nexport interface LLMCallResult<T> {\n output: T;\n usage: ExtendedTokenUsage;\n usedFallback: boolean;\n}\n\n/**\n * Base execution configuration for LLM calls\n */\ninterface ExecutionConfig {\n primaryModel: LanguageModel;\n fallbackModel?: LanguageModel;\n abortSignal?: AbortSignal;\n component: string;\n phase: string;\n}\n\n/**\n * LLMCaller - Centralized LLM API caller with retry and fallback support\n *\n * Wraps AI SDK's generateText with enhanced retry strategy:\n * 1. Try primary model with maxRetries\n * 2. If all attempts fail and fallbackModel provided, try fallback with maxRetries\n * 3. 
Return usage data with model type indicator\n *\n * @example\n * ```typescript\n * const result = await LLMCaller.call({\n * schema: MyZodSchema,\n * systemPrompt: 'You are a helpful assistant',\n * userPrompt: 'Extract the TOC from this markdown',\n * primaryModel: openai('gpt-5'),\n * fallbackModel: anthropic('claude-opus-4-5'),\n * maxRetries: 3,\n * component: 'TocExtractor',\n * phase: 'extraction',\n * });\n *\n * console.log(result.output); // Parsed result\n * console.log(result.usage); // Token usage with model info\n * console.log(result.usedFallback); // Whether fallback was used\n * ```\n */\nexport class LLMCaller {\n /**\n * Extract model name from LanguageModel object\n *\n * Attempts to get model ID from various possible fields in the LanguageModel object.\n */\n private static extractModelName(model: LanguageModel): string {\n const modelObj = model as Record<string, unknown>;\n\n // Try common field names\n if (typeof modelObj.modelId === 'string') return modelObj.modelId;\n if (typeof modelObj.id === 'string') return modelObj.id;\n if (typeof modelObj.model === 'string') return modelObj.model;\n if (typeof modelObj.name === 'string') return modelObj.name;\n\n // Fallback: return object representation\n return String(model);\n }\n\n /**\n * Build usage information from response\n */\n private static buildUsage(\n config: ExecutionConfig,\n modelName: string,\n response: {\n usage?: {\n inputTokens?: number;\n outputTokens?: number;\n totalTokens?: number;\n };\n },\n usedFallback: boolean,\n ): ExtendedTokenUsage {\n return {\n component: config.component,\n phase: config.phase,\n model: usedFallback ? 'fallback' : 'primary',\n modelName,\n inputTokens: response.usage?.inputTokens ?? 0,\n outputTokens: response.usage?.outputTokens ?? 0,\n totalTokens: response.usage?.totalTokens ?? 0,\n };\n }\n\n /**\n * Execute LLM call with fallback support\n *\n * Common execution logic for both text and vision calls.\n */\n private static async executeWithFallback<TOutput>(\n config: ExecutionConfig,\n generateFn: (model: LanguageModel) => Promise<{\n output: TOutput;\n usage?: {\n inputTokens?: number;\n outputTokens?: number;\n totalTokens?: number;\n };\n }>,\n ): Promise<LLMCallResult<TOutput>> {\n const primaryModelName = this.extractModelName(config.primaryModel);\n\n // Attempt 1: Try primary model\n try {\n const response = await generateFn(config.primaryModel);\n\n return {\n output: response.output,\n usage: this.buildUsage(config, primaryModelName, response, false),\n usedFallback: false,\n };\n } catch (primaryError) {\n // If aborted, don't try fallback - re-throw immediately\n if (config.abortSignal?.aborted) {\n throw primaryError;\n }\n\n // If no fallback model, throw immediately\n if (!config.fallbackModel) {\n throw primaryError;\n }\n\n // Attempt 2: Try fallback model\n const fallbackModelName = this.extractModelName(config.fallbackModel);\n const response = await generateFn(config.fallbackModel);\n\n return {\n output: response.output,\n usage: this.buildUsage(config, fallbackModelName, response, true),\n usedFallback: true,\n };\n }\n }\n\n /**\n * Call LLM with retry and fallback support\n *\n * Retry Strategy:\n * 1. Try primary model up to maxRetries times\n * 2. If all fail and fallbackModel provided, try fallback up to maxRetries times\n * 3. 
Throw error if all attempts exhausted\n *\n * @template TOutput - Output type from schema validation\n * @param config - LLM call configuration\n * @returns Result with parsed object and usage information\n * @throws Error if all retry attempts fail\n */\n static async call<TOutput = unknown>(\n config: LLMCallConfig<z.ZodType<TOutput>>,\n ): Promise<LLMCallResult<TOutput>> {\n return this.executeWithFallback(config, (model) =>\n generateText({\n model,\n output: Output.object({\n schema: config.schema,\n }),\n system: config.systemPrompt,\n prompt: config.userPrompt,\n temperature: config.temperature,\n maxRetries: config.maxRetries,\n abortSignal: config.abortSignal,\n }),\n );\n }\n\n /**\n * Call LLM for vision tasks with message format support\n *\n * Same retry and fallback logic as call(), but using message format instead of system/user prompts.\n *\n * @template TOutput - Output type from schema validation\n * @param config - LLM vision call configuration\n * @returns Result with parsed object and usage information\n * @throws Error if all retry attempts fail\n */\n static async callVision<TOutput = unknown>(\n config: LLMVisionCallConfig<z.ZodType<TOutput>>,\n ): Promise<LLMCallResult<TOutput>> {\n return this.executeWithFallback(config, (model) =>\n generateText({\n model,\n output: Output.object({\n schema: config.schema,\n }),\n messages: config.messages,\n temperature: config.temperature,\n maxRetries: config.maxRetries,\n abortSignal: config.abortSignal,\n }),\n );\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\n\nimport type { ExtendedTokenUsage } from './llm-caller';\n\n/**\n * Token usage totals\n */\nexport interface TokenUsage {\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n}\n\n/**\n * Format token usage as a human-readable string\n *\n * @param usage - Token usage object with input, output, and total counts\n * @returns Formatted string like \"1500 input, 300 output, 1800 total\"\n */\nfunction formatTokens(usage: TokenUsage): string {\n return `${usage.inputTokens} input, ${usage.outputTokens} output, ${usage.totalTokens} total`;\n}\n\n/**\n * Aggregated token usage for a specific component\n */\ninterface ComponentAggregate {\n component: string;\n phases: Record<\n string,\n {\n primary?: {\n modelName: string;\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n fallback?: {\n modelName: string;\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n total: {\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n }\n >;\n total: {\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n}\n\n/**\n * LLMTokenUsageAggregator - Aggregates token usage across all LLM calls\n *\n * Unlike LLMTokenUsageTracker which logs immediately after each component,\n * this aggregator collects usage data from all components and logs a comprehensive\n * summary at the end of document processing.\n *\n * Tracks usage by:\n * - Component (TocExtractor, PageRangeParser, etc.)\n * - Phase (extraction, validation, sampling, etc.)\n * - Model (primary vs fallback)\n *\n * @example\n * ```typescript\n * const aggregator = new LLMTokenUsageAggregator();\n *\n * // Track usage from each LLM call\n * aggregator.track({\n * component: 'TocExtractor',\n * phase: 'extraction',\n * model: 'primary',\n * modelName: 'gpt-5',\n * inputTokens: 1500,\n * outputTokens: 300,\n * totalTokens: 1800,\n * });\n *\n * aggregator.track({\n * component: 'PageRangeParser',\n * phase: 
'sampling',\n * model: 'fallback',\n * modelName: 'claude-opus-4-5',\n * inputTokens: 2000,\n * outputTokens: 100,\n * totalTokens: 2100,\n * });\n *\n * // Log comprehensive summary\n * aggregator.logSummary(logger);\n * // Outputs:\n * // [DocumentProcessor] Token usage summary:\n * // TocExtractor:\n * // - extraction (primary: gpt-5): 1500 input, 300 output, 1800 total\n * // TocExtractor total: 1500 input, 300 output, 1800 total\n * // PageRangeParser:\n * // - sampling (fallback: claude-opus-4-5): 2000 input, 100 output, 2100 total\n * // PageRangeParser total: 2000 input, 100 output, 2100 total\n * // Grand total: 3500 input, 400 output, 3900 total\n * ```\n */\nexport class LLMTokenUsageAggregator {\n private usage: Record<string, ComponentAggregate> = {};\n\n /**\n * Track token usage from an LLM call\n *\n * @param usage - Extended token usage with component/phase/model information\n */\n track(usage: ExtendedTokenUsage): void {\n // Initialize component if not seen before\n if (!this.usage[usage.component]) {\n this.usage[usage.component] = {\n component: usage.component,\n phases: {},\n total: {\n inputTokens: 0,\n outputTokens: 0,\n totalTokens: 0,\n },\n };\n }\n\n const component = this.usage[usage.component];\n\n // Initialize phase if not seen before\n if (!component.phases[usage.phase]) {\n component.phases[usage.phase] = {\n total: {\n inputTokens: 0,\n outputTokens: 0,\n totalTokens: 0,\n },\n };\n }\n\n const phase = component.phases[usage.phase];\n\n // Track by model type\n if (usage.model === 'primary') {\n if (!phase.primary) {\n phase.primary = {\n modelName: usage.modelName,\n inputTokens: 0,\n outputTokens: 0,\n totalTokens: 0,\n };\n }\n\n phase.primary.inputTokens += usage.inputTokens;\n phase.primary.outputTokens += usage.outputTokens;\n phase.primary.totalTokens += usage.totalTokens;\n } else if (usage.model === 'fallback') {\n if (!phase.fallback) {\n phase.fallback = {\n modelName: usage.modelName,\n inputTokens: 0,\n outputTokens: 0,\n totalTokens: 0,\n };\n }\n\n phase.fallback.inputTokens += usage.inputTokens;\n phase.fallback.outputTokens += usage.outputTokens;\n phase.fallback.totalTokens += usage.totalTokens;\n }\n\n // Update phase total\n phase.total.inputTokens += usage.inputTokens;\n phase.total.outputTokens += usage.outputTokens;\n phase.total.totalTokens += usage.totalTokens;\n\n // Update component total\n component.total.inputTokens += usage.inputTokens;\n component.total.outputTokens += usage.outputTokens;\n component.total.totalTokens += usage.totalTokens;\n }\n\n /**\n * Get aggregated usage grouped by component\n *\n * @returns Array of component aggregates with phase breakdown\n */\n getByComponent(): ComponentAggregate[] {\n return Object.values(this.usage);\n }\n\n /**\n * Get token usage report in structured JSON format\n *\n * Converts internal usage data to external TokenUsageReport format suitable\n * for serialization and reporting. 
The report includes component breakdown,\n * phase-level details, and both primary and fallback model usage.\n *\n * @returns Structured token usage report with components and total\n */\n getReport(): {\n components: Array<{\n component: string;\n phases: Array<{\n phase: string;\n primary?: {\n modelName: string;\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n fallback?: {\n modelName: string;\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n total: {\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n }>;\n total: {\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n }>;\n total: TokenUsage;\n } {\n const components: Array<{\n component: string;\n phases: Array<{\n phase: string;\n primary?: {\n modelName: string;\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n fallback?: {\n modelName: string;\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n total: {\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n }>;\n total: {\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n }> = [];\n\n for (const component of Object.values(this.usage)) {\n const phases: Array<{\n phase: string;\n primary?: {\n modelName: string;\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n fallback?: {\n modelName: string;\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n total: {\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n }> = [];\n\n for (const [phaseName, phaseData] of Object.entries(component.phases)) {\n const phaseReport: {\n phase: string;\n primary?: {\n modelName: string;\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n fallback?: {\n modelName: string;\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n total: {\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n } = {\n phase: phaseName,\n total: {\n inputTokens: phaseData.total.inputTokens,\n outputTokens: phaseData.total.outputTokens,\n totalTokens: phaseData.total.totalTokens,\n },\n };\n\n if (phaseData.primary) {\n phaseReport.primary = {\n modelName: phaseData.primary.modelName,\n inputTokens: phaseData.primary.inputTokens,\n outputTokens: phaseData.primary.outputTokens,\n totalTokens: phaseData.primary.totalTokens,\n };\n }\n\n if (phaseData.fallback) {\n phaseReport.fallback = {\n modelName: phaseData.fallback.modelName,\n inputTokens: phaseData.fallback.inputTokens,\n outputTokens: phaseData.fallback.outputTokens,\n totalTokens: phaseData.fallback.totalTokens,\n };\n }\n\n phases.push(phaseReport);\n }\n\n components.push({\n component: component.component,\n phases,\n total: {\n inputTokens: component.total.inputTokens,\n outputTokens: component.total.outputTokens,\n totalTokens: component.total.totalTokens,\n },\n });\n }\n\n const totalUsage = this.getTotalUsage();\n\n return {\n components,\n total: {\n inputTokens: totalUsage.inputTokens,\n outputTokens: totalUsage.outputTokens,\n totalTokens: totalUsage.totalTokens,\n },\n };\n }\n\n /**\n * Get total usage across all components and phases\n *\n * @returns Aggregated token usage totals\n */\n getTotalUsage(): TokenUsage {\n let totalInput = 0;\n let totalOutput = 0;\n let totalTokens = 0;\n\n for (const component of Object.values(this.usage)) {\n totalInput += component.total.inputTokens;\n totalOutput += 
component.total.outputTokens;\n totalTokens += component.total.totalTokens;\n }\n\n return {\n inputTokens: totalInput,\n outputTokens: totalOutput,\n totalTokens: totalTokens,\n };\n }\n\n /**\n * Log comprehensive token usage summary\n *\n * Outputs usage grouped by component, with phase and model breakdown.\n * Shows primary and fallback token usage separately for each phase.\n * Call this once at the end of document processing.\n *\n * @param logger - Logger instance for output\n */\n logSummary(logger: LoggerMethods): void {\n const components = this.getByComponent();\n\n if (components.length === 0) {\n logger.info('[DocumentProcessor] No token usage to report');\n return;\n }\n\n logger.info('[DocumentProcessor] Token usage summary:');\n logger.info('');\n\n let grandInputTokens = 0;\n let grandOutputTokens = 0;\n let grandTotalTokens = 0;\n let grandPrimaryInputTokens = 0;\n let grandPrimaryOutputTokens = 0;\n let grandPrimaryTotalTokens = 0;\n let grandFallbackInputTokens = 0;\n let grandFallbackOutputTokens = 0;\n let grandFallbackTotalTokens = 0;\n\n for (const component of components) {\n logger.info(`${component.component}:`);\n\n for (const [phase, phaseData] of Object.entries(component.phases)) {\n logger.info(` - ${phase}:`);\n\n // Show primary model usage\n if (phaseData.primary) {\n logger.info(\n ` primary (${phaseData.primary.modelName}): ${formatTokens(phaseData.primary)}`,\n );\n grandPrimaryInputTokens += phaseData.primary.inputTokens;\n grandPrimaryOutputTokens += phaseData.primary.outputTokens;\n grandPrimaryTotalTokens += phaseData.primary.totalTokens;\n }\n\n // Show fallback model usage\n if (phaseData.fallback) {\n logger.info(\n ` fallback (${phaseData.fallback.modelName}): ${formatTokens(phaseData.fallback)}`,\n );\n grandFallbackInputTokens += phaseData.fallback.inputTokens;\n grandFallbackOutputTokens += phaseData.fallback.outputTokens;\n grandFallbackTotalTokens += phaseData.fallback.totalTokens;\n }\n\n // Show phase subtotal\n logger.info(` subtotal: ${formatTokens(phaseData.total)}`);\n }\n\n logger.info(\n ` ${component.component} total: ${formatTokens(component.total)}`,\n );\n logger.info('');\n\n grandInputTokens += component.total.inputTokens;\n grandOutputTokens += component.total.outputTokens;\n grandTotalTokens += component.total.totalTokens;\n }\n\n // Show grand total with primary/fallback breakdown\n logger.info('--- Summary ---');\n if (grandPrimaryTotalTokens > 0) {\n logger.info(\n `Primary total: ${formatTokens({\n inputTokens: grandPrimaryInputTokens,\n outputTokens: grandPrimaryOutputTokens,\n totalTokens: grandPrimaryTotalTokens,\n })}`,\n );\n }\n if (grandFallbackTotalTokens > 0) {\n logger.info(\n `Fallback total: ${formatTokens({\n inputTokens: grandFallbackInputTokens,\n outputTokens: grandFallbackOutputTokens,\n totalTokens: grandFallbackTotalTokens,\n })}`,\n );\n }\n logger.info(\n `Grand total: ${formatTokens({\n inputTokens: grandInputTokens,\n outputTokens: grandOutputTokens,\n totalTokens: grandTotalTokens,\n })}`,\n );\n }\n\n /**\n * Reset all tracked usage\n *\n * Call this at the start of a new document processing run.\n */\n reset(): void {\n this.usage = {};\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type {\n DoclingDocument,\n DoclingGroupItem,\n DoclingPictureItem,\n DoclingTableItem,\n DoclingTextItem,\n} from '@heripo/model';\n\n/**\n * Resolves $ref references in DoclingDocument to actual objects.\n *\n * DoclingDocument uses JSON references (e.g., \"#/texts/0\") to link nodes.\n * 
This class builds an index for quick lookups of texts, pictures, tables, and groups.\n */\nexport class RefResolver {\n private readonly logger: LoggerMethods;\n private readonly textMap: Map<string, DoclingTextItem>;\n private readonly pictureMap: Map<string, DoclingPictureItem>;\n private readonly tableMap: Map<string, DoclingTableItem>;\n private readonly groupMap: Map<string, DoclingGroupItem>;\n\n constructor(logger: LoggerMethods, doc: DoclingDocument) {\n this.logger = logger;\n this.logger.info('[RefResolver] Initializing reference resolver...');\n\n this.textMap = this.buildIndex(doc.texts, 'texts');\n this.pictureMap = this.buildIndex(doc.pictures, 'pictures');\n this.tableMap = this.buildIndex(doc.tables, 'tables');\n this.groupMap = this.buildIndex(doc.groups, 'groups');\n\n this.logger.info(\n `[RefResolver] Indexed ${this.textMap.size} texts, ${this.pictureMap.size} pictures, ${this.tableMap.size} tables, ${this.groupMap.size} groups`,\n );\n }\n\n /**\n * Build an index mapping self_ref to the actual item\n */\n private buildIndex<T extends { self_ref: string }>(\n items: T[],\n _prefix: string,\n ): Map<string, T> {\n const map = new Map<string, T>();\n for (const item of items) {\n map.set(item.self_ref, item);\n }\n return map;\n }\n\n /**\n * Resolve a $ref string to the actual item\n * @param ref - Reference string (e.g., \"#/texts/0\")\n * @returns The resolved item, or null if not found\n */\n resolve(\n ref: string,\n ):\n | DoclingTextItem\n | DoclingPictureItem\n | DoclingTableItem\n | DoclingGroupItem\n | null {\n // Extract the collection type from the reference\n // Format: \"#/texts/0\" or \"#/pictures/5\" etc.\n const match = ref.match(/^#\\/(\\w+)\\//);\n if (!match) {\n this.logger.warn(`[RefResolver] Invalid reference format: ${ref}`);\n return null;\n }\n\n const collection = match[1];\n\n if (collection === 'texts') {\n const result = this.textMap.get(ref) ?? null;\n if (!result) {\n this.logger.warn(`[RefResolver] Text reference not found: ${ref}`);\n }\n return result;\n }\n if (collection === 'pictures') {\n const result = this.pictureMap.get(ref) ?? null;\n if (!result) {\n this.logger.warn(`[RefResolver] Picture reference not found: ${ref}`);\n }\n return result;\n }\n if (collection === 'tables') {\n const result = this.tableMap.get(ref) ?? null;\n if (!result) {\n this.logger.warn(`[RefResolver] Table reference not found: ${ref}`);\n }\n return result;\n }\n if (collection === 'groups') {\n const result = this.groupMap.get(ref) ?? null;\n if (!result) {\n this.logger.warn(`[RefResolver] Group reference not found: ${ref}`);\n }\n return result;\n }\n\n this.logger.warn(`[RefResolver] Unknown collection type: ${collection}`);\n return null;\n }\n\n /**\n * Resolve a text reference\n * @param ref - Reference string (e.g., \"#/texts/0\")\n * @returns The resolved text item, or null if not found\n */\n resolveText(ref: string): DoclingTextItem | null {\n return this.textMap.get(ref) ?? null;\n }\n\n /**\n * Resolve a picture reference\n * @param ref - Reference string (e.g., \"#/pictures/0\")\n * @returns The resolved picture item, or null if not found\n */\n resolvePicture(ref: string): DoclingPictureItem | null {\n return this.pictureMap.get(ref) ?? null;\n }\n\n /**\n * Resolve a table reference\n * @param ref - Reference string (e.g., \"#/tables/0\")\n * @returns The resolved table item, or null if not found\n */\n resolveTable(ref: string): DoclingTableItem | null {\n return this.tableMap.get(ref) ?? 
null;\n }\n\n /**\n * Resolve a group reference\n * @param ref - Reference string (e.g., \"#/groups/0\")\n * @returns The resolved group item, or null if not found\n */\n resolveGroup(ref: string): DoclingGroupItem | null {\n return this.groupMap.get(ref) ?? null;\n }\n\n /**\n * Resolve multiple references at once\n * @param refs - Array of reference objects with $ref property\n * @returns Array of resolved items (null for unresolved references)\n */\n resolveMany(\n refs: Array<{ $ref: string }>,\n ): Array<\n | DoclingTextItem\n | DoclingPictureItem\n | DoclingTableItem\n | DoclingGroupItem\n | null\n > {\n return refs.map((ref) => this.resolve(ref.$ref));\n }\n}\n","/**\n * Generates sequential IDs for different types of items.\n *\n * IDs are formatted as: `{prefix}-{number}` where number is zero-padded to 3 digits.\n * - Chapters: ch-001, ch-002, ...\n * - Images: img-001, img-002, ...\n * - Tables: tbl-001, tbl-002, ...\n *\n * Each type maintains its own independent counter.\n */\nexport class IdGenerator {\n private chapterCounter = 0;\n private imageCounter = 0;\n private tableCounter = 0;\n private footnoteCounter = 0;\n\n /**\n * Generate a chapter ID\n * @returns A chapter ID in the format \"ch-001\"\n */\n generateChapterId(): string {\n this.chapterCounter++;\n return `ch-${this.padNumber(this.chapterCounter)}`;\n }\n\n /**\n * Generate an image ID\n * @returns An image ID in the format \"img-001\"\n */\n generateImageId(): string {\n this.imageCounter++;\n return `img-${this.padNumber(this.imageCounter)}`;\n }\n\n /**\n * Generate a table ID\n * @returns A table ID in the format \"tbl-001\"\n */\n generateTableId(): string {\n this.tableCounter++;\n return `tbl-${this.padNumber(this.tableCounter)}`;\n }\n\n /**\n * Generate a footnote ID\n * @returns A footnote ID in the format \"ftn-001\"\n */\n generateFootnoteId(): string {\n this.footnoteCounter++;\n return `ftn-${this.padNumber(this.footnoteCounter)}`;\n }\n\n /**\n * Reset all counters to zero\n */\n reset(): void {\n this.chapterCounter = 0;\n this.imageCounter = 0;\n this.tableCounter = 0;\n this.footnoteCounter = 0;\n }\n\n /**\n * Get current counter values (for testing/debugging)\n */\n getCounters(): {\n chapter: number;\n image: number;\n table: number;\n footnote: number;\n } {\n return {\n chapter: this.chapterCounter,\n image: this.imageCounter,\n table: this.tableCounter,\n footnote: this.footnoteCounter,\n };\n }\n\n /**\n * Pad a number to 3 digits with leading zeros\n */\n private padNumber(num: number): string {\n return num.toString().padStart(3, '0');\n }\n}\n","import { BatchProcessor } from '@heripo/shared';\n\n/**\n * TextCleaner - Text normalization and cleaning\n *\n * Utility for normalizing text from DoclingDocument.\n * - Whitespace normalization (remove consecutive spaces, clean line breaks)\n * - Special character removal/normalization\n * - Unicode normalization\n * - Batch normalization + filtering\n */\nexport class TextCleaner {\n /**\n * Normalizes text\n * - Converts consecutive spaces/line breaks to single space\n * - Trims leading and trailing spaces\n * - Normalizes special whitespace characters (tabs, non-breaking spaces, etc.)\n */\n static normalize(text: string): string {\n if (!text) return '';\n\n // Unicode normalization (NFC)\n let normalized = text.normalize('NFC');\n\n // Convert special whitespace characters to regular space\n normalized = normalized.replace(/[\\t\\u00A0\\u2000-\\u200B]/g, ' ');\n\n // Convert line breaks to space\n normalized = 
normalized.replace(/[\\r\\n]+/g, ' ');\n\n // Convert consecutive spaces to single space\n normalized = normalized.replace(/\\s+/g, ' ');\n\n // Trim leading and trailing spaces\n normalized = normalized.trim();\n\n return normalized;\n }\n\n /**\n * Clean text starting/ending with punctuation marks\n * - Remove commas/periods at sentence start\n * - Clean spaces and punctuation at sentence end\n */\n static cleanPunctuation(text: string): string {\n if (!text) return '';\n\n // Remove commas/periods at start\n let cleaned = text.replace(/^[,.:;!?]+\\s*/, '');\n\n // Clean spaces at end\n cleaned = cleaned.replace(/\\s+[,.:;!?]*$/, '');\n\n return cleaned;\n }\n\n /**\n * Filter text consisting only of numbers and spaces\n */\n static isValidText(text: string): boolean {\n if (!text) return false;\n const cleaned = this.normalize(text);\n // Invalid if only numbers and spaces\n return !/^\\s*[\\d\\s]*$/.test(cleaned);\n }\n\n /**\n * Batch normalization (for bulk processing)\n */\n static normalizeBatch(texts: string[]): string[] {\n return texts.map((text) => this.normalize(text));\n }\n\n /**\n * Batch filtering (returns only valid text)\n */\n static filterValidTexts(texts: string[]): string[] {\n return texts.filter((text) => this.isValidText(text));\n }\n\n /**\n * Batch normalization + filtering (stage 1 + stage 2 combined)\n *\n * Performs TextCleaner's basic normalization and filtering in batch processing at once.\n * Splits large amounts of text into batches for efficient processing.\n *\n * If batchSize is 0, processes items sequentially without batch processing.\n *\n * @param texts - Original text array\n * @param batchSize - Batch size (default: 10). Set to 0 for sequential processing without batching.\n * @returns Normalized and filtered text array\n *\n * @example\n * ```typescript\n * const rawTexts = [' text 1 ', '123', 'text 2\\n'];\n * const cleaned = TextCleaner.normalizeAndFilterBatch(rawTexts, 10);\n * // ['text 1', 'text 2']\n *\n * // Sequential processing (no batching)\n * const cleanedSequential = TextCleaner.normalizeAndFilterBatch(rawTexts, 0);\n * // ['text 1', 'text 2']\n * ```\n */\n static normalizeAndFilterBatch(\n texts: string[],\n batchSize: number = 10,\n ): string[] {\n if (batchSize === 0) {\n // Sequential processing without BatchProcessor\n const results: string[] = [];\n for (const text of texts) {\n const normalized = this.normalize(text);\n if (this.isValidText(normalized)) {\n results.push(normalized);\n }\n }\n return results;\n }\n\n // Batch processing: normalize then filter for each batch\n return BatchProcessor.processBatchSync(texts, batchSize, (batch) => {\n // Stage 1: Normalize\n const normalized = this.normalizeBatch(batch);\n // Stage 2: Filter\n return this.filterValidTexts(normalized);\n });\n }\n}\n","import type {\n DoclingGroupItem,\n DoclingTableItem,\n DoclingTextItem,\n} from '@heripo/model';\n\nimport type { RefResolver } from './ref-resolver';\n\n/**\n * MarkdownConverter\n *\n * Converts TOC-related groups and tables to Markdown format for LLM processing.\n * Provides static utility methods for conversion.\n */\nexport class MarkdownConverter {\n /**\n * Convert TOC items (groups/tables) to Markdown string\n *\n * @param refs - Array of item references from TocAreaResult\n * @param refResolver - RefResolver for resolving references\n * @returns Markdown string representation of TOC\n */\n static convert(refs: string[], refResolver: RefResolver): string {\n if (refs.length === 0) {\n return '';\n }\n\n const lines: string[] 
= [];\n\n for (const ref of refs) {\n const item = refResolver.resolve(ref);\n if (!item) {\n continue;\n }\n\n // Check if it's a group item\n if ('name' in item && (item.name === 'list' || item.name === 'group')) {\n const groupMarkdown = MarkdownConverter.groupToMarkdown(\n item as DoclingGroupItem,\n refResolver,\n 0,\n );\n if (groupMarkdown) {\n lines.push(groupMarkdown);\n }\n }\n // Check if it's a table item\n else if ('data' in item && 'grid' in (item as DoclingTableItem).data) {\n const tableMarkdown = MarkdownConverter.tableToMarkdown(\n item as DoclingTableItem,\n );\n if (tableMarkdown) {\n lines.push(tableMarkdown);\n }\n }\n // Check if it's a text item\n else if ('text' in item && 'orig' in item) {\n const textMarkdown = MarkdownConverter.textToMarkdown(\n item as DoclingTextItem,\n 0,\n );\n if (textMarkdown) {\n lines.push(textMarkdown);\n }\n }\n }\n\n return lines.join('\\n\\n');\n }\n\n /**\n * Convert a group item to Markdown list format\n *\n * Handles nested lists and preserves hierarchy.\n *\n * @example\n * Output:\n * - Chapter 1 Introduction ..... 1\n * - 1.1 Background ..... 3\n * - 1.2 Objectives ..... 5\n * - Chapter 2 Methodology ..... 10\n */\n static groupToMarkdown(\n group: DoclingGroupItem,\n refResolver: RefResolver,\n indentLevel = 0,\n ): string {\n const lines: string[] = [];\n\n for (const childRef of group.children) {\n const child = refResolver.resolve(childRef.$ref);\n if (!child) {\n continue;\n }\n\n // Handle nested group\n if (\n 'name' in child &&\n (child.name === 'list' || child.name === 'group')\n ) {\n const nestedMarkdown = MarkdownConverter.groupToMarkdown(\n child as DoclingGroupItem,\n refResolver,\n indentLevel + 1,\n );\n if (nestedMarkdown) {\n lines.push(nestedMarkdown);\n }\n }\n // Handle text item\n else if ('text' in child && 'orig' in child) {\n const textMarkdown = MarkdownConverter.textToMarkdown(\n child as DoclingTextItem,\n indentLevel,\n );\n if (textMarkdown) {\n lines.push(textMarkdown);\n }\n }\n }\n\n return lines.join('\\n');\n }\n\n /**\n * Convert a table item to Markdown table format\n *\n * @example\n * Output:\n * | Chapter | Page |\n * |---------|------|\n * | Chapter 1 Introduction | 1 |\n * | Chapter 2 Methodology | 10 |\n */\n static tableToMarkdown(table: DoclingTableItem): string {\n const { grid } = table.data;\n if (!grid || grid.length === 0) {\n return '';\n }\n\n const lines: string[] = [];\n\n // Build rows from grid\n for (let rowIdx = 0; rowIdx < grid.length; rowIdx++) {\n const row = grid[rowIdx];\n if (!row || row.length === 0) {\n continue;\n }\n\n const cells = row.map((cell) =>\n MarkdownConverter.escapeTableCell(cell.text),\n );\n lines.push(`| ${cells.join(' | ')} |`);\n\n // Add separator after header row (first row)\n if (rowIdx === 0) {\n const separator = row.map(() => '---').join(' | ');\n lines.push(`| ${separator} |`);\n }\n }\n\n return lines.join('\\n');\n }\n\n /**\n * Convert a text item to Markdown line\n */\n static textToMarkdown(text: DoclingTextItem, indentLevel = 0): string {\n const content = text.text.trim();\n if (!content) {\n return '';\n }\n\n const indent = MarkdownConverter.getIndent(indentLevel);\n const marker = MarkdownConverter.getListMarker(\n text.enumerated,\n text.marker,\n );\n\n return `${indent}${marker}${content}`;\n }\n\n /**\n * Generate list marker based on enumeration and marker\n */\n private static getListMarker(enumerated?: boolean, marker?: string): string {\n if (marker) {\n return `${marker} `;\n }\n if (enumerated === true) {\n return '1. 
';\n }\n if (enumerated === false) {\n return '- ';\n }\n return '- ';\n }\n\n /**\n * Generate indent string (2 spaces per level)\n */\n private static getIndent(level: number): string {\n return ' '.repeat(level);\n }\n\n /**\n * Escape special characters in table cell content\n */\n private static escapeTableCell(text: string): string {\n return text.replace(/\\|/g, '\\\\|').replace(/\\n/g, ' ').trim();\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type {\n Chapter,\n DoclingTextItem,\n PageRange,\n ProcessedFootnote,\n ProcessedImage,\n ProcessedTable,\n TextBlock,\n} from '@heripo/model';\n\nimport type { TocEntry } from '../types';\nimport type { IdGenerator } from '../utils';\n\nimport { TextCleaner } from '../utils';\n\n/**\n * Flattened chapter with TOC page number for range calculation\n */\ninterface FlatChapter {\n chapter: Chapter;\n tocPageNo: number;\n}\n\n/**\n * Chapter page range for content assignment\n */\ninterface ChapterRange {\n startPage: number;\n endPage: number;\n}\n\n/**\n * ChapterConverter\n *\n * Converts TocEntry[] to Chapter[] with text blocks, images, and tables.\n *\n * ## Conversion Process\n *\n * 1. Create Front Matter chapter (ch-000) for pre-TOC content\n * 2. Build chapter tree from TocEntry[] (recursive)\n * 3. Calculate page ranges for each chapter\n * 4. Assign text blocks to chapters based on page ranges\n * 5. Link images/tables to chapters based on page ranges\n *\n * ## Page Assignment Strategy\n *\n * Uses \"start page first\" strategy: resources are assigned to the chapter\n * whose startPage is the largest value that is still <= the resource's page.\n *\n * ## Front Matter\n *\n * A special chapter (ch-000) is always created to hold content that appears\n * before the first TOC entry (e.g., cover, preface, table of contents itself).\n */\nexport class ChapterConverter {\n private static readonly FRONT_MATTER_ID = 'ch-000';\n private static readonly FRONT_MATTER_TITLE = 'Front Matter';\n\n private readonly logger: LoggerMethods;\n private readonly idGenerator: IdGenerator;\n\n constructor(logger: LoggerMethods, idGenerator: IdGenerator) {\n this.logger = logger;\n this.idGenerator = idGenerator;\n }\n\n /**\n * Convert TocEntry[] to Chapter[]\n *\n * @param tocEntries - Table of contents entries\n * @param textItems - DoclingDocument.texts (with prov for page numbers)\n * @param pageRangeMap - PDF page to actual page mapping\n * @param images - Converted images\n * @param tables - Converted tables\n * @param footnotes - Converted footnotes\n * @returns Converted chapters with text blocks and resource references\n */\n convert(\n tocEntries: TocEntry[],\n textItems: DoclingTextItem[],\n pageRangeMap: Record<number, PageRange>,\n images: ProcessedImage[],\n tables: ProcessedTable[],\n footnotes: ProcessedFootnote[],\n ): Chapter[] {\n this.logger.info('[ChapterConverter] Starting chapter conversion...');\n\n // Step 1: Create Front Matter chapter\n const frontMatter = this.createFrontMatterChapter();\n\n // Step 2: Build chapter tree from TOC\n const tocChapters = this.buildChapterTree(tocEntries);\n this.logger.info(\n `[ChapterConverter] Built ${tocChapters.length} TOC chapters + Front Matter`,\n );\n\n // Step 3: Combine all chapters (Front Matter first)\n const allChapters = [frontMatter, ...tocChapters];\n\n // Step 4: Calculate page ranges\n const flatChapters = this.flattenChapters(allChapters);\n const chapterRanges = this.calculatePageRanges(flatChapters, tocEntries);\n this.logger.info(\n 
`[ChapterConverter] Calculated ranges for ${chapterRanges.size} chapters`,\n );\n\n // Step 5: Convert and assign text blocks\n const textBlocks = this.convertTextBlocks(textItems, pageRangeMap);\n this.assignTextBlocks(allChapters, textBlocks, chapterRanges, pageRangeMap);\n this.logger.info(\n `[ChapterConverter] Assigned ${textBlocks.length} text blocks`,\n );\n\n // Step 6: Link resources\n this.linkResources(\n allChapters,\n images,\n tables,\n footnotes,\n chapterRanges,\n pageRangeMap,\n );\n this.logger.info(\n `[ChapterConverter] Linked ${images.length} images, ${tables.length} tables, and ${footnotes.length} footnotes`,\n );\n\n return allChapters;\n }\n\n /**\n * Create Front Matter chapter for pre-TOC content\n */\n private createFrontMatterChapter(): Chapter {\n return {\n id: ChapterConverter.FRONT_MATTER_ID,\n originTitle: ChapterConverter.FRONT_MATTER_TITLE,\n title: ChapterConverter.FRONT_MATTER_TITLE,\n pageNo: 1,\n level: 1,\n textBlocks: [],\n imageIds: [],\n tableIds: [],\n footnoteIds: [],\n };\n }\n\n /**\n * Build chapter tree from TocEntry[]\n * Recursively processes children\n */\n private buildChapterTree(entries: TocEntry[]): Chapter[] {\n return entries.map((entry) => {\n const chapterId = this.idGenerator.generateChapterId();\n\n const chapter: Chapter = {\n id: chapterId,\n originTitle: entry.title,\n title: TextCleaner.normalize(entry.title),\n pageNo: entry.pageNo,\n level: entry.level,\n textBlocks: [],\n imageIds: [],\n tableIds: [],\n footnoteIds: [],\n };\n\n if (entry.children && entry.children.length > 0) {\n chapter.children = this.buildChapterTree(entry.children);\n }\n\n return chapter;\n });\n }\n\n /**\n * Flatten chapter tree for page range calculation\n * Preserves original TOC page numbers\n */\n private flattenChapters(chapters: Chapter[]): FlatChapter[] {\n const result: FlatChapter[] = [];\n\n const flatten = (chapterList: Chapter[]): void => {\n for (const chapter of chapterList) {\n result.push({\n chapter,\n tocPageNo: chapter.pageNo,\n });\n\n if (chapter.children && chapter.children.length > 0) {\n flatten(chapter.children);\n }\n }\n };\n\n flatten(chapters);\n return result;\n }\n\n /**\n * Calculate page range for each chapter\n * Uses next chapter's start page as end boundary\n *\n * Front Matter (ch-000) gets special handling:\n * - startPage: 1\n * - endPage: first TOC entry's page - 1 (or 0 if TOC starts at page 1)\n */\n private calculatePageRanges(\n flatChapters: FlatChapter[],\n tocEntries: TocEntry[],\n ): Map<string, ChapterRange> {\n const ranges = new Map<string, ChapterRange>();\n\n if (flatChapters.length === 0) {\n return ranges;\n }\n\n // Find first TOC page (minimum page number from TOC entries)\n const firstTocPage =\n tocEntries.length > 0\n ? Math.min(...tocEntries.map((e) => e.pageNo))\n : Number.MAX_SAFE_INTEGER;\n\n // Filter out Front Matter for sorting (it's handled separately)\n const tocChapters = flatChapters.filter(\n (fc) => fc.chapter.id !== ChapterConverter.FRONT_MATTER_ID,\n );\n\n // Sort by TOC page number\n const sorted = [...tocChapters].sort((a, b) => a.tocPageNo - b.tocPageNo);\n\n // Set Front Matter range (always page 1 to firstTocPage - 1)\n ranges.set(ChapterConverter.FRONT_MATTER_ID, {\n startPage: 1,\n endPage: firstTocPage - 1,\n });\n\n // Set ranges for TOC chapters\n for (let i = 0; i < sorted.length; i++) {\n const current = sorted[i];\n const next = sorted[i + 1];\n\n ranges.set(current.chapter.id, {\n startPage: current.tocPageNo,\n endPage: next ? 
next.tocPageNo - 1 : Number.MAX_SAFE_INTEGER,\n });\n }\n\n return ranges;\n }\n\n /**\n * Valid labels for text blocks\n * Only these labels are included in chapter text blocks\n */\n private static readonly VALID_TEXT_LABELS = new Set([\n 'text',\n 'section_header',\n 'list_item',\n ]);\n\n /**\n * Check if text item has a picture parent\n * Items with parent.$ref starting with \"#/pictures/\" are excluded\n */\n private static hasPictureParent(item: DoclingTextItem): boolean {\n const parentRef = item.parent?.$ref;\n return typeof parentRef === 'string' && parentRef.startsWith('#/pictures/');\n }\n\n /**\n * Convert text items to text blocks\n * Filters by label (text, section_header, list_item), excludes picture children,\n * and extracts page numbers from prov\n */\n private convertTextBlocks(\n textItems: DoclingTextItem[],\n _pageRangeMap: Record<number, PageRange>,\n ): TextBlock[] {\n return textItems\n .filter(\n (item) =>\n ChapterConverter.VALID_TEXT_LABELS.has(item.label) &&\n !ChapterConverter.hasPictureParent(item) &&\n TextCleaner.isValidText(item.text),\n )\n .map((item) => {\n const pdfPageNo = item.prov?.[0]?.page_no ?? 1;\n return {\n text: TextCleaner.normalize(item.text),\n pdfPageNo,\n };\n });\n }\n\n /**\n * Convert PDF page number to actual document page number\n * Falls back to pdfPageNo if mapping is missing\n */\n private pdfPageToActualPage(\n pdfPageNo: number,\n pageRangeMap: Record<number, PageRange>,\n ): number {\n const range = pageRangeMap[pdfPageNo];\n if (!range) {\n // Fallback: assume 1:1 mapping\n return pdfPageNo;\n }\n // Return start page for the actual document page\n return range.startPageNo;\n }\n\n /**\n * Find chapter ID for a given actual page number\n * Uses \"start page first\" strategy\n */\n private findChapterForPage(\n actualPageNo: number,\n chapterRanges: Map<string, ChapterRange>,\n ): string | null {\n let bestMatch: string | null = null;\n let bestStartPage = -1;\n\n for (const [chapterId, range] of chapterRanges) {\n // Check if page is within range\n if (actualPageNo >= range.startPage && actualPageNo <= range.endPage) {\n // Use \"start page first\" strategy: prefer chapter with largest startPage <= actualPageNo\n if (range.startPage > bestStartPage) {\n bestStartPage = range.startPage;\n bestMatch = chapterId;\n }\n }\n }\n\n return bestMatch;\n }\n\n /**\n * Assign text blocks to chapters based on page ranges\n */\n private assignTextBlocks(\n chapters: Chapter[],\n textBlocks: TextBlock[],\n chapterRanges: Map<string, ChapterRange>,\n pageRangeMap: Record<number, PageRange>,\n ): void {\n // Build chapter map for O(1) lookup\n const chapterMap = this.buildChapterMap(chapters);\n\n for (const textBlock of textBlocks) {\n const actualPageNo = this.pdfPageToActualPage(\n textBlock.pdfPageNo,\n pageRangeMap,\n );\n const chapterId = this.findChapterForPage(actualPageNo, chapterRanges);\n\n if (chapterId && chapterMap.has(chapterId)) {\n chapterMap.get(chapterId)!.textBlocks.push(textBlock);\n }\n }\n }\n\n /**\n * Link images, tables, and footnotes to chapters based on page ranges\n */\n private linkResources(\n chapters: Chapter[],\n images: ProcessedImage[],\n tables: ProcessedTable[],\n footnotes: ProcessedFootnote[],\n chapterRanges: Map<string, ChapterRange>,\n pageRangeMap: Record<number, PageRange>,\n ): void {\n // Build chapter map for O(1) lookup\n const chapterMap = this.buildChapterMap(chapters);\n\n // Link images\n for (const image of images) {\n const actualPageNo = this.pdfPageToActualPage(\n image.pdfPageNo,\n 
pageRangeMap,\n );\n const chapterId = this.findChapterForPage(actualPageNo, chapterRanges);\n\n if (chapterId && chapterMap.has(chapterId)) {\n chapterMap.get(chapterId)!.imageIds.push(image.id);\n }\n }\n\n // Link tables\n for (const table of tables) {\n const actualPageNo = this.pdfPageToActualPage(\n table.pdfPageNo,\n pageRangeMap,\n );\n const chapterId = this.findChapterForPage(actualPageNo, chapterRanges);\n\n if (chapterId && chapterMap.has(chapterId)) {\n chapterMap.get(chapterId)!.tableIds.push(table.id);\n }\n }\n\n // Link footnotes\n for (const footnote of footnotes) {\n const actualPageNo = this.pdfPageToActualPage(\n footnote.pdfPageNo,\n pageRangeMap,\n );\n const chapterId = this.findChapterForPage(actualPageNo, chapterRanges);\n\n if (chapterId && chapterMap.has(chapterId)) {\n chapterMap.get(chapterId)!.footnoteIds.push(footnote.id);\n }\n }\n }\n\n /**\n * Build flat chapter map for O(1) lookup\n */\n private buildChapterMap(chapters: Chapter[]): Map<string, Chapter> {\n const map = new Map<string, Chapter>();\n\n const addToMap = (chapterList: Chapter[]): void => {\n for (const chapter of chapterList) {\n map.set(chapter.id, chapter);\n\n if (chapter.children && chapter.children.length > 0) {\n addToMap(chapter.children);\n }\n }\n };\n\n addToMap(chapters);\n return map;\n }\n}\n","import type { TocEntry } from '../types';\n\n/**\n * Single validation issue detected during TOC validation\n */\nexport interface TocValidationIssue {\n /**\n * Issue code (V001, V002, etc.)\n */\n code: string;\n\n /**\n * Human-readable error message\n */\n message: string;\n\n /**\n * Path to the problematic entry (e.g., \"[0].children[2]\")\n */\n path: string;\n\n /**\n * The problematic entry\n */\n entry: TocEntry;\n}\n\n/**\n * Result of TOC validation\n */\nexport interface TocValidationResult {\n /**\n * Whether validation passed (no errors)\n */\n valid: boolean;\n\n /**\n * List of validation issues\n */\n issues: TocValidationIssue[];\n\n /**\n * Error count\n */\n errorCount: number;\n}\n\n/**\n * TocExtractError\n *\n * Base error class for TOC extraction failures.\n */\nexport class TocExtractError extends Error {\n constructor(message: string, options?: ErrorOptions) {\n super(message, options);\n this.name = 'TocExtractError';\n }\n\n /**\n * Extract error message from unknown error type\n */\n static getErrorMessage(error: unknown): string {\n return error instanceof Error ? 
error.message : String(error);\n }\n\n /**\n * Create TocExtractError from unknown error with context\n */\n static fromError(context: string, error: unknown): TocExtractError {\n return new TocExtractError(\n `${context}: ${TocExtractError.getErrorMessage(error)}`,\n { cause: error },\n );\n }\n}\n\n/**\n * TocNotFoundError\n *\n * Error thrown when TOC area cannot be found in the document.\n */\nexport class TocNotFoundError extends TocExtractError {\n constructor(message = 'Table of contents not found in the document') {\n super(message);\n this.name = 'TocNotFoundError';\n }\n}\n\n/**\n * TocParseError\n *\n * Error thrown when LLM fails to parse TOC structure.\n */\nexport class TocParseError extends TocExtractError {\n constructor(message: string, options?: ErrorOptions) {\n super(message, options);\n this.name = 'TocParseError';\n }\n}\n\n/**\n * TocValidationError\n *\n * Error thrown when TOC validation fails.\n * Contains detailed information about validation issues.\n */\nexport class TocValidationError extends TocExtractError {\n /**\n * Validation result with detailed issues\n */\n readonly validationResult: TocValidationResult;\n\n constructor(message: string, validationResult: TocValidationResult) {\n super(message);\n this.name = 'TocValidationError';\n this.validationResult = validationResult;\n }\n\n /**\n * Get formatted error summary\n */\n getSummary(): string {\n const { errorCount, issues } = this.validationResult;\n const lines = [\n `TOC validation failed: ${errorCount} error(s)`,\n '',\n 'Issues:',\n ];\n\n for (const issue of issues) {\n lines.push(` [${issue.code}] ${issue.message}`);\n lines.push(` Path: ${issue.path}`);\n lines.push(\n ` Entry: \"${issue.entry.title}\" (page ${issue.entry.pageNo})`,\n );\n }\n\n return lines.join('\\n');\n }\n}\n","import type { TocEntry } from '../types';\nimport type {\n TocValidationIssue,\n TocValidationResult,\n} from './toc-extract-error';\n\nimport { TocValidationError } from './toc-extract-error';\n\n/**\n * Validation options for TocValidator\n */\nexport interface TocValidationOptions {\n /**\n * Total page count of the document (for range validation)\n * If not provided, page range upper bound validation is skipped\n */\n totalPages?: number;\n\n /**\n * Maximum allowed title length (default: 200)\n */\n maxTitleLength?: number;\n}\n\n/**\n * Default validation options\n */\nconst DEFAULT_OPTIONS: Required<TocValidationOptions> = {\n totalPages: Infinity,\n maxTitleLength: 200,\n};\n\n/**\n * TocValidator\n *\n * Validates TocEntry[] structure for consistency and correctness.\n * Performs hierarchical validation including parent-child relationships.\n */\nexport class TocValidator {\n private readonly options: Required<TocValidationOptions>;\n private issues: TocValidationIssue[];\n\n constructor(options?: TocValidationOptions) {\n this.options = {\n ...DEFAULT_OPTIONS,\n ...options,\n };\n this.issues = [];\n }\n\n /**\n * Validate TocEntry array\n *\n * @param entries - TOC entries to validate\n * @returns Validation result\n */\n validate(entries: TocEntry[]): TocValidationResult {\n this.issues = [];\n\n // Validate all entries recursively\n this.validateEntries(entries, '', null, new Set<string>());\n\n const errorCount = this.issues.length;\n\n return {\n valid: errorCount === 0,\n issues: [...this.issues],\n errorCount,\n };\n }\n\n /**\n * Validate and throw if invalid\n *\n * @param entries - TOC entries to validate\n * @throws {TocValidationError} When validation fails\n */\n validateOrThrow(entries: 
TocEntry[]): void {\n const result = this.validate(entries);\n\n if (!result.valid) {\n throw new TocValidationError(\n `TOC validation failed with ${result.errorCount} error(s)`,\n result,\n );\n }\n }\n\n /**\n * Recursively validate entries\n */\n private validateEntries(\n entries: TocEntry[],\n parentPath: string,\n parentEntry: TocEntry | null,\n seenKeys: Set<string>,\n ): void {\n let prevPageNo = parentEntry?.pageNo ?? 0;\n\n for (let i = 0; i < entries.length; i++) {\n const entry = entries[i];\n const path = parentPath ? `${parentPath}.children[${i}]` : `[${i}]`;\n\n // V003: Empty title\n this.validateTitle(entry, path);\n\n // V004: Title length\n this.validateTitleLength(entry, path);\n\n // V002: Page range\n this.validatePageRange(entry, path);\n\n // V001: Page order (within same level)\n this.validatePageOrder(entry, path, prevPageNo);\n prevPageNo = entry.pageNo;\n\n // V005: Parent-child page relationship\n if (parentEntry) {\n this.validateParentChildPage(entry, path, parentEntry);\n }\n\n // V006: Duplicate detection\n const key = `${entry.title}:${entry.pageNo}`;\n this.validateDuplicate(entry, path, key, seenKeys);\n seenKeys.add(key);\n\n // Recursive validation for children\n if (entry.children && entry.children.length > 0) {\n this.validateEntries(entry.children, path, entry, seenKeys);\n }\n }\n }\n\n /**\n * V003: Validate title is not empty\n */\n private validateTitle(entry: TocEntry, path: string): void {\n if (!entry.title || entry.title.trim() === '') {\n this.addIssue({\n code: 'V003',\n message: 'Title is empty or contains only whitespace',\n path,\n entry,\n });\n }\n }\n\n /**\n * V004: Validate title length\n */\n private validateTitleLength(entry: TocEntry, path: string): void {\n if (entry.title.length > this.options.maxTitleLength) {\n this.addIssue({\n code: 'V004',\n message: `Title exceeds ${this.options.maxTitleLength} characters (${entry.title.length})`,\n path,\n entry,\n });\n }\n }\n\n /**\n * V002: Validate page number range\n */\n private validatePageRange(entry: TocEntry, path: string): void {\n if (entry.pageNo < 1) {\n this.addIssue({\n code: 'V002',\n message: `Page number must be >= 1, got ${entry.pageNo}`,\n path,\n entry,\n });\n }\n\n if (entry.pageNo > this.options.totalPages) {\n this.addIssue({\n code: 'V002',\n message: `Page number ${entry.pageNo} exceeds document total pages (${this.options.totalPages})`,\n path,\n entry,\n });\n }\n }\n\n /**\n * V001: Validate page order within same level\n */\n private validatePageOrder(\n entry: TocEntry,\n path: string,\n prevPageNo: number,\n ): void {\n if (entry.pageNo < prevPageNo) {\n this.addIssue({\n code: 'V001',\n message: `Page number decreased from ${prevPageNo} to ${entry.pageNo}`,\n path,\n entry,\n });\n }\n }\n\n /**\n * V005: Validate parent-child page relationship\n */\n private validateParentChildPage(\n entry: TocEntry,\n path: string,\n parent: TocEntry,\n ): void {\n if (entry.pageNo < parent.pageNo) {\n this.addIssue({\n code: 'V005',\n message: `Child page (${entry.pageNo}) is before parent page (${parent.pageNo})`,\n path,\n entry,\n });\n }\n }\n\n /**\n * V006: Validate no duplicates\n */\n private validateDuplicate(\n entry: TocEntry,\n path: string,\n key: string,\n seenKeys: Set<string>,\n ): void {\n if (seenKeys.has(key)) {\n this.addIssue({\n code: 'V006',\n message: `Duplicate entry: \"${entry.title}\" at page ${entry.pageNo}`,\n path,\n entry,\n });\n }\n }\n\n /**\n * Add issue to the list\n */\n private addIssue(issue: TocValidationIssue): void {\n 
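`validateOrThrow` is the throwing twin of `validate`; callers catch `TocValidationError` and read the structured issues (codes V001 through V006) instead of parsing a message string. A usage sketch; note that `TocValidator` is not on the package's public export list above, so the relative import assumes code living inside the source tree:

```typescript
import { TocValidator } from './toc-validator';
import { TocValidationError } from './toc-extract-error';

const entries = [
  { title: 'Chapter 1', level: 1, pageNo: 1 },
  // Triggers V003 (empty title) and V002 (page beyond totalPages).
  { title: '', level: 1, pageNo: 999 },
];

const validator = new TocValidator({ totalPages: 120 });

try {
  validator.validateOrThrow(entries);
} catch (error) {
  if (error instanceof TocValidationError) {
    // getSummary() lists each issue with its code, path, and entry.
    console.error(error.getSummary());
    for (const issue of error.validationResult.issues) {
      console.error(issue.code, issue.path); // e.g. "V003 [1]", "V002 [1]"
    }
  }
}
```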
this.issues.push(issue);\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type {\n DoclingDocument,\n DoclingGroupItem,\n DoclingTableItem,\n DoclingTextItem,\n} from '@heripo/model';\n\nimport type { TocAreaResult } from '../types';\nimport type { RefResolver } from '../utils';\n\nimport { TocNotFoundError } from './toc-extract-error';\n\n/**\n * TOC keyword patterns for different languages\n * Korean: 목차, 차례, 목 차\n * Chinese: 目录, 目 录, 内容, 內容\n * Japanese: 目次, 目 次\n * English: Contents, Table of Contents, etc.\n */\nexport const TOC_KEYWORDS = [\n '목차',\n '차례',\n '목 차',\n '目录',\n '目 录',\n '内容',\n '內容',\n '目次',\n '目 次',\n 'Contents',\n 'Table of Contents',\n 'TABLE OF CONTENTS',\n 'CONTENTS',\n] as const;\n\n/**\n * Continuation marker patterns for multi-page TOC\n * Korean: 목차(계속), 목차 (계속), (계속)\n * Chinese: 目录(续), 目录 (续), (续), 续表\n * Japanese: 目次(続), 目次 (続), (続)\n * English: (continued), (Continued), etc.\n */\nexport const CONTINUATION_MARKERS = [\n '목차(계속)',\n '목차 (계속)',\n '(계속)',\n '目录(续)',\n '目录 (续)',\n '(续)',\n '续表',\n '目次(続)',\n '目次 (続)',\n '(続)',\n '(continued)',\n '(Continued)',\n '(CONTINUED)',\n 'continued',\n] as const;\n\n/**\n * Page number pattern regex for detecting TOC-like structures\n * Matches patterns like \"... 123\", \".... 45\", ending with numbers\n */\nexport const PAGE_NUMBER_PATTERN = /\\.{2,}\\s*\\d+\\s*$|…+\\s*\\d+\\s*$|\\s+\\d+\\s*$/;\n\n/**\n * TocFinder options\n */\nexport interface TocFinderOptions {\n /**\n * Maximum pages to search for TOC (default: 10)\n */\n maxSearchPages?: number;\n\n /**\n * Custom TOC keywords to add (optional)\n */\n additionalKeywords?: string[];\n}\n\n/**\n * TocFinder\n *\n * Finds TOC area in DoclingDocument using multi-stage search strategy:\n * 1. Keyword search in texts (section_header, list_item labels)\n * 2. Structure analysis for lists/tables with page number patterns\n * 3. Position heuristic (prioritize early pages)\n */\nexport class TocFinder {\n private readonly maxSearchPages: number;\n private readonly keywords: string[];\n\n constructor(\n private readonly logger: LoggerMethods,\n private readonly refResolver: RefResolver,\n options?: TocFinderOptions,\n ) {\n this.maxSearchPages = options?.maxSearchPages ?? 10;\n this.keywords = [...TOC_KEYWORDS, ...(options?.additionalKeywords ?? 
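`PAGE_NUMBER_PATTERN` is exported, so downstream code can reuse the same TOC-line heuristic. A quick demonstration of what its three alternatives (dot leaders, ellipses, bare trailing numbers) accept:

```typescript
import { PAGE_NUMBER_PATTERN } from '@heripo/document-processor';

// Lines ending in dot leaders, ellipses, or a trailing number look TOC-like.
console.log(PAGE_NUMBER_PATTERN.test('제1장 서론 ..... 1')); // true (dot leader)
console.log(PAGE_NUMBER_PATTERN.test('Chapter 2 … 15'));     // true (ellipsis)
console.log(PAGE_NUMBER_PATTERN.test('1.1 Background 23'));  // true (trailing number)
console.log(PAGE_NUMBER_PATTERN.test('Plain body text'));    // false
```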
[])];\n }\n\n /**\n * Find TOC area in the document\n *\n * @throws {TocNotFoundError} When no TOC area is found\n */\n find(doc: DoclingDocument): TocAreaResult {\n this.logger.info('[TocFinder] Starting TOC search...');\n\n // Stage 1: Search by keywords\n const keywordResult = this.findByKeywords(doc);\n if (keywordResult) {\n this.logger.info(\n `[TocFinder] Found TOC by keyword search: pages ${keywordResult.startPage}-${keywordResult.endPage}`,\n );\n return keywordResult;\n }\n\n // Stage 2: Search by structure\n const structureResult = this.findByStructure(doc);\n if (structureResult) {\n this.logger.info(\n `[TocFinder] Found TOC by structure analysis: pages ${structureResult.startPage}-${structureResult.endPage}`,\n );\n return structureResult;\n }\n\n this.logger.warn('[TocFinder] No TOC found in document');\n throw new TocNotFoundError();\n }\n\n /**\n * Stage 1: Search by keywords in text items\n */\n private findByKeywords(doc: DoclingDocument): TocAreaResult | null {\n // Find text items containing TOC keywords\n for (const text of doc.texts) {\n if (!this.containsTocKeyword(text.text)) {\n continue;\n }\n\n const pageNo = text.prov[0]?.page_no;\n if (pageNo === undefined || pageNo > this.maxSearchPages) {\n continue;\n }\n\n this.logger.info(\n `[TocFinder] Found TOC keyword \"${text.text}\" on page ${pageNo}`,\n );\n\n // Find the parent group or table containing this text\n const parentRef = text.parent?.$ref;\n if (!parentRef) {\n // Single text item, return it directly\n return {\n itemRefs: [text.self_ref],\n startPage: pageNo,\n endPage: pageNo,\n };\n }\n\n // Try to find group containing TOC structure\n const result = this.findTocContainer(doc, parentRef, pageNo);\n if (result) {\n return this.expandToConsecutivePages(result, doc);\n }\n }\n\n return null;\n }\n\n /**\n * Stage 2: Search by structure (lists/tables with page numbers)\n */\n private findByStructure(doc: DoclingDocument): TocAreaResult | null {\n const candidates: Array<{\n result: TocAreaResult;\n score: number;\n }> = [];\n\n // Check groups for TOC-like structure\n for (const group of doc.groups) {\n const pageNo = this.getGroupFirstPage(group);\n if (pageNo === undefined || pageNo > this.maxSearchPages) {\n continue;\n }\n\n if (this.isGroupTocLike(group, doc)) {\n const score = this.calculateScore(group, pageNo);\n candidates.push({\n result: {\n itemRefs: [group.self_ref],\n startPage: pageNo,\n endPage: pageNo,\n },\n score,\n });\n }\n }\n\n // Check tables for TOC-like structure\n for (const table of doc.tables) {\n const pageNo = table.prov[0]?.page_no;\n if (pageNo === undefined || pageNo > this.maxSearchPages) {\n continue;\n }\n\n if (this.isTableTocLike(table)) {\n const score = this.calculateTableScore(table, pageNo);\n candidates.push({\n result: {\n itemRefs: [table.self_ref],\n startPage: pageNo,\n endPage: pageNo,\n },\n score,\n });\n }\n }\n\n if (candidates.length === 0) {\n return null;\n }\n\n // Sort by score (higher is better) and return best match\n candidates.sort((a, b) => b.score - a.score);\n const best = candidates[0];\n\n return this.expandToConsecutivePages(best.result, doc);\n }\n\n /**\n * Find the TOC container (group or table) from a parent reference\n */\n private findTocContainer(\n doc: DoclingDocument,\n parentRef: string,\n pageNo: number,\n ): TocAreaResult | null {\n // Check if parent is a group\n const group = this.refResolver.resolveGroup(parentRef);\n if (group) {\n return {\n itemRefs: [group.self_ref],\n startPage: pageNo,\n endPage: pageNo,\n };\n 
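A usage sketch for the three-stage search, assuming `refResolver` and `doclingDoc` are supplied by the surrounding pipeline (`RefResolver` is internal to the package, so it is typed loosely here), with a console-backed stand-in for the `@heripo/logger` instance:

```typescript
import { TocFinder, TocNotFoundError } from '@heripo/document-processor';

declare const refResolver: any; // RefResolver is not publicly exported
declare const doclingDoc: import('@heripo/model').DoclingDocument;
const logger = {
  info: console.log,
  warn: console.warn,
  error: console.error,
} as unknown as import('@heripo/logger').LoggerMethods;

const finder = new TocFinder(logger, refResolver, {
  maxSearchPages: 15,           // widen the default 10-page search window
  additionalKeywords: ['總目次'], // example extras, merged with TOC_KEYWORDS
});

try {
  const { itemRefs, startPage, endPage } = finder.find(doclingDoc);
  console.log(`TOC spans pages ${startPage}-${endPage} (${itemRefs.length} refs)`);
} catch (error) {
  if (error instanceof TocNotFoundError) {
    // Neither keyword search nor structure analysis found a TOC;
    // fall back to the vision-based extractor.
  }
}
```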
}\n\n // Check if parent is a table\n const table = this.refResolver.resolveTable(parentRef);\n if (table) {\n return {\n itemRefs: [table.self_ref],\n startPage: pageNo,\n endPage: pageNo,\n };\n }\n\n // Try parent's parent (navigate up hierarchy)\n const item = this.refResolver.resolve(parentRef);\n if (item && item.parent?.$ref) {\n return this.findTocContainer(doc, item.parent.$ref, pageNo);\n }\n\n return null;\n }\n\n /**\n * Check if a group contains TOC-like structure\n */\n private isGroupTocLike(\n group: DoclingGroupItem,\n _doc: DoclingDocument,\n ): boolean {\n if (group.name !== 'list' && group.name !== 'group') {\n return false;\n }\n\n // Count children with page number patterns\n let pageNumberCount = 0;\n const children = this.refResolver.resolveMany(group.children);\n\n for (const child of children) {\n if (!child) continue;\n\n // Check if it's a text item with page number pattern\n if ('text' in child && 'orig' in child) {\n const textItem = child as DoclingTextItem;\n if (PAGE_NUMBER_PATTERN.test(textItem.text)) {\n pageNumberCount++;\n }\n }\n }\n\n // Consider TOC-like if at least 3 items have page numbers\n // or if more than 50% of items have page numbers\n const total = children.filter((c) => c !== null).length;\n return pageNumberCount >= 3 || (total > 0 && pageNumberCount / total > 0.5);\n }\n\n /**\n * Check if a table contains TOC-like structure\n */\n private isTableTocLike(table: DoclingTableItem): boolean {\n // Check for document_index label (Docling specific)\n if (table.label === 'document_index') {\n return true;\n }\n\n const { grid, num_rows, num_cols } = table.data;\n\n // Need at least 3 rows and 2 columns typically\n if (num_rows < 3 || num_cols < 2) {\n return false;\n }\n\n // Check if last column contains mostly numbers (page numbers)\n let numberCount = 0;\n for (let row = 1; row < grid.length; row++) {\n const lastCell = grid[row]?.[num_cols - 1];\n if (lastCell && /^\\d+$/.test(lastCell.text.trim())) {\n numberCount++;\n }\n }\n\n // More than 50% of data rows have numeric last column\n return numberCount > 0 && numberCount / (num_rows - 1) > 0.5;\n }\n\n /**\n * Expand TOC area to consecutive pages\n */\n private expandToConsecutivePages(\n initial: TocAreaResult,\n doc: DoclingDocument,\n ): TocAreaResult {\n const itemRefs = [...initial.itemRefs];\n let endPage = initial.endPage;\n\n // Look for continuation on subsequent pages\n for (\n let pageNo = initial.endPage + 1;\n pageNo <= this.maxSearchPages;\n pageNo++\n ) {\n const continuationItems = this.findContinuationOnPage(doc, pageNo);\n if (continuationItems.length === 0) {\n break;\n }\n\n itemRefs.push(...continuationItems);\n endPage = pageNo;\n }\n\n return {\n itemRefs,\n startPage: initial.startPage,\n endPage,\n };\n }\n\n /**\n * Find TOC continuation items on a specific page\n */\n private findContinuationOnPage(\n doc: DoclingDocument,\n pageNo: number,\n ): string[] {\n const refs: string[] = [];\n\n // Check for continuation markers in texts\n for (const text of doc.texts) {\n if (text.prov[0]?.page_no !== pageNo) {\n continue;\n }\n\n if (this.hasContinuationMarker(text.text)) {\n const parentRef = text.parent?.$ref;\n if (parentRef) {\n const group = this.refResolver.resolveGroup(parentRef);\n if (group) {\n refs.push(group.self_ref);\n }\n }\n }\n }\n\n // Check for TOC-like groups on this page\n for (const group of doc.groups) {\n const groupPage = this.getGroupFirstPage(group);\n if (groupPage !== pageNo) {\n continue;\n }\n\n if (this.isGroupTocLike(group, doc) && 
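The group heuristic boils down to two thresholds: an absolute floor of 3 page-numbered children, or a relative majority above 50%. A standalone restatement over plain strings makes the cutoff visible:

```typescript
import { PAGE_NUMBER_PATTERN } from '@heripo/document-processor';

// Standalone restatement of the isGroupTocLike threshold: a group is
// "TOC-like" when at least 3 lines carry page numbers, or more than half do.
function looksLikeToc(lines: string[]): boolean {
  const withPageNo = lines.filter((l) => PAGE_NUMBER_PATTERN.test(l)).length;
  return withPageNo >= 3 || (lines.length > 0 && withPageNo / lines.length > 0.5);
}

console.log(looksLikeToc(['서론 ..... 1', '방법 ..... 5', '결과 ..... 9'])); // true (3 hits)
console.log(looksLikeToc(['서론 ..... 1', '결론 ..... 9', '부록']));        // true (2/3 > 0.5)
console.log(looksLikeToc(['머리말', '일러두기']));                          // false (0 hits)
```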
!refs.includes(group.self_ref)) {\n refs.push(group.self_ref);\n }\n }\n\n // Check for TOC-like tables on this page\n for (const table of doc.tables) {\n if (table.prov[0]?.page_no !== pageNo) {\n continue;\n }\n\n if (this.isTableTocLike(table) && !refs.includes(table.self_ref)) {\n refs.push(table.self_ref);\n }\n }\n\n return refs;\n }\n\n /**\n * Check if text contains TOC keyword\n */\n private containsTocKeyword(text: string): boolean {\n const normalizedText = text.trim().toLowerCase();\n return this.keywords.some((keyword) =>\n normalizedText.includes(keyword.toLowerCase()),\n );\n }\n\n /**\n * Check for continuation markers\n */\n private hasContinuationMarker(text: string): boolean {\n const normalizedText = text.trim().toLowerCase();\n return CONTINUATION_MARKERS.some((marker) =>\n normalizedText.includes(marker.toLowerCase()),\n );\n }\n\n /**\n * Get first page number of a group by checking its children\n */\n private getGroupFirstPage(group: DoclingGroupItem): number | undefined {\n for (const childRef of group.children) {\n const child = this.refResolver.resolve(childRef.$ref);\n if (child && 'prov' in child) {\n const prov = (child as DoclingTextItem).prov;\n if (prov && prov[0]?.page_no !== undefined) {\n return prov[0].page_no;\n }\n }\n }\n return undefined;\n }\n\n /**\n * Calculate score for a group candidate\n * Higher score = better match\n */\n private calculateScore(group: DoclingGroupItem, pageNo: number): number {\n let score = 0;\n\n // Earlier pages get higher score\n score += (this.maxSearchPages - pageNo + 1) * 10;\n\n // More children (TOC entries) = higher score\n score += group.children.length * 2;\n\n // Count items with page numbers\n const children = this.refResolver.resolveMany(group.children);\n for (const child of children) {\n if (child && 'text' in child) {\n const textItem = child as DoclingTextItem;\n if (PAGE_NUMBER_PATTERN.test(textItem.text)) {\n score += 5;\n }\n }\n }\n\n return score;\n }\n\n /**\n * Calculate score for a table candidate\n */\n private calculateTableScore(table: DoclingTableItem, pageNo: number): number {\n let score = 0;\n\n // Earlier pages get higher score\n score += (this.maxSearchPages - pageNo + 1) * 10;\n\n // More rows = higher score\n score += table.data.num_rows * 2;\n\n // document_index label is a strong indicator\n if (table.label === 'document_index') {\n score += 50;\n }\n\n return score;\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type { ExtendedTokenUsage } from '@heripo/shared';\nimport type { LanguageModel } from 'ai';\n\nimport type { TocEntry } from '../types';\nimport type { TocValidationOptions } from './toc-validator';\n\nimport { z } from 'zod';\n\nimport {\n type BaseLLMComponentOptions,\n TextLLMComponent,\n} from '../core/text-llm-component';\nimport { TocParseError, TocValidationError } from './toc-extract-error';\nimport { TocValidator } from './toc-validator';\n\n/**\n * Zod schema for recursive TocEntry structure\n */\nexport const TocEntrySchema: z.ZodType<TocEntry> = z.lazy(() =>\n z.object({\n title: z.string().describe('Chapter or section title'),\n level: z.number().int().min(1).describe('Hierarchy depth (1 = top level)'),\n pageNo: z.number().int().min(1).describe('Starting page number'),\n children: z.array(TocEntrySchema).optional().describe('Child sections'),\n }),\n);\n\n/**\n * Schema for LLM response\n */\nexport const TocResponseSchema = z.object({\n entries: z.array(TocEntrySchema).describe('Extracted TOC entries'),\n});\n\nexport type TocResponse = 
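The candidate scores are plain weighted sums, so trade-offs between candidates are easy to reason about on paper. A standalone restatement of `calculateScore` and `calculateTableScore` under the default `maxSearchPages` of 10:

```typescript
// Standalone restatement of the scoring heuristics: earlier pages and more
// entries score higher, and a Docling `document_index` label adds +50.
const MAX_SEARCH_PAGES = 10;

function groupScore(pageNo: number, childCount: number, pageNoHits: number): number {
  return (MAX_SEARCH_PAGES - pageNo + 1) * 10 + childCount * 2 + pageNoHits * 5;
}

function tableScore(pageNo: number, numRows: number, isDocumentIndex: boolean): number {
  return (
    (MAX_SEARCH_PAGES - pageNo + 1) * 10 +
    numRows * 2 +
    (isDocumentIndex ? 50 : 0)
  );
}

// A 20-entry list on page 2 where every entry has a page number...
console.log(groupScore(2, 20, 20)); // (10-2+1)*10 + 40 + 100 = 230
// ...beats a document_index table on page 3 with 40 rows.
console.log(tableScore(3, 40, true)); // (10-3+1)*10 + 80 + 50 = 210
```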
z.infer<typeof TocResponseSchema>;\n\n/**\n * TocExtractor options\n */\nexport interface TocExtractorOptions extends BaseLLMComponentOptions {\n /**\n * Validation options (optional)\n * If not provided, validation is performed with default settings\n */\n validation?: TocValidationOptions;\n\n /**\n * Whether to skip validation entirely (default: false)\n */\n skipValidation?: boolean;\n}\n\n/**\n * TocExtractor\n *\n * Uses high-performance LLM to extract structured TOC from Markdown representation.\n * Extends TextLLMComponent for standardized LLM call handling.\n */\nexport class TocExtractor extends TextLLMComponent {\n private readonly validationOptions?: TocValidationOptions;\n private readonly skipValidation: boolean;\n\n constructor(\n logger: LoggerMethods,\n model: LanguageModel,\n options?: TocExtractorOptions,\n fallbackModel?: LanguageModel,\n abortSignal?: AbortSignal,\n ) {\n super(\n logger,\n model,\n 'TocExtractor',\n { ...options, abortSignal },\n fallbackModel,\n );\n this.validationOptions = options?.validation;\n this.skipValidation = options?.skipValidation ?? false;\n }\n\n /**\n * Extract TOC structure from Markdown\n *\n * @param markdown - Markdown representation of TOC area\n * @returns Object with entries array and token usage information\n * @throws {TocParseError} When LLM fails to parse structure\n * @throws {TocValidationError} When validation fails\n */\n async extract(\n markdown: string,\n ): Promise<{ entries: TocEntry[]; usage: ExtendedTokenUsage }> {\n this.log('info', `Starting TOC extraction (${markdown.length} chars)`);\n\n if (!markdown.trim()) {\n this.log('error', 'Cannot extract TOC from empty markdown content');\n throw new TocParseError(\n 'TOC extraction failed: provided markdown content is empty',\n );\n }\n\n try {\n const result = await this.callTextLLM(\n TocResponseSchema,\n this.buildSystemPrompt(),\n this.buildUserPrompt(markdown),\n 'extraction',\n );\n\n const entries = this.normalizeEntries(result.output.entries);\n\n // Validate entries\n if (!this.skipValidation) {\n this.validateEntries(entries);\n }\n\n this.log(\n 'info',\n `Extraction completed: ${entries.length} top-level entries`,\n );\n\n return { entries, usage: result.usage };\n } catch (error) {\n // Re-throw TocValidationError as-is\n if (error instanceof TocValidationError) {\n this.log('error', `Validation failed: ${error.message}`);\n throw error;\n }\n\n const message = error instanceof Error ? error.message : String(error);\n this.log('error', `Extraction failed: ${message}`);\n throw new TocParseError(`Failed to extract TOC structure: ${message}`, {\n cause: error,\n });\n }\n }\n\n /**\n * Validate extracted entries\n *\n * @throws {TocValidationError} When validation fails\n */\n private validateEntries(entries: TocEntry[]): void {\n if (entries.length === 0) {\n return;\n }\n\n const validator = new TocValidator(this.validationOptions);\n validator.validateOrThrow(entries);\n }\n\n /**\n * Build system prompt for TOC extraction\n */\n protected buildSystemPrompt(): string {\n return `You are a document structure extraction assistant. Your task is to parse a table of contents (TOC) from markdown format and extract structured entries.\n\n## Instructions\n\n1. **Title**: Extract the exact chapter/section title from each line. Remove page number indicators like \"..... 10\" or \"... 5\" at the end.\n\n2. 
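`TocEntrySchema` is self-referential through `z.lazy`, which is what lets one definition validate arbitrarily deep TOC trees. Both schemas are public exports, so this demonstration runs against the package as published:

```typescript
import { TocEntrySchema, TocResponseSchema } from '@heripo/document-processor';

// z.lazy lets the schema reference itself, so nested children validate
// with the same definition at every depth.
const nested = {
  title: '제1장 서론',
  level: 1,
  pageNo: 1,
  children: [{ title: '1. 연구 배경', level: 2, pageNo: 3 }],
};
console.log(TocEntrySchema.parse(nested).title); // "제1장 서론"

// The LLM response wrapper is just { entries: TocEntry[] }.
console.log(TocResponseSchema.safeParse({ entries: [nested] }).success); // true

// Violations of the int/min constraints are rejected.
console.log(
  TocEntrySchema.safeParse({ title: 'X', level: 0, pageNo: 1 }).success,
); // false (level < 1)
```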
**Level**: Determine the hierarchy depth:\n - Level 1: Top-level chapters (e.g., \"제1장\", \"Chapter 1\", \"I.\", \"Part 1\")\n - Level 2: Main sections within chapters (e.g., \"1.\", \"1.1\", \"A.\")\n - Level 3: Subsections (e.g., \"1.1.1\", \"a.\", \"(1)\")\n - Use indentation and numbering patterns to infer level\n\n3. **Page Number**: Extract the page number from each entry. Convert Roman numerals to Arabic numerals if present (e.g., \"iv\" → 4).\n\n4. **Children**: Nest child entries under parent entries based on their hierarchy level.\n\n5. **IMPORTANT - Extract Main TOC Only**: Only extract the main document table of contents. EXCLUDE the following supplementary indices:\n - Photo/image indices (사진 목차, 사진목차, 화보 목차, Photo Index, List of Photos, List of Figures)\n - Drawing/diagram indices (도면 목차, 도면목차, 삽도 목차, Drawing Index, List of Drawings)\n - Table indices (표 목차, 표목차, Table Index, List of Tables)\n - Appendix indices (부록 목차, Appendix Index)\n - Any other supplementary material indices\n\n## Output Format\n\nReturn a flat array of top-level entries. Each entry at level 1 should contain its children (level 2+) nested properly.\n\n## Example\n\nInput:\n- 제1장 서론 ..... 1\n - 1. 연구 배경 ..... 3\n - 2. 연구 목적 ..... 5\n- 제2장 방법론 ..... 10\n\nOutput:\n{\n \"entries\": [\n {\n \"title\": \"제1장 서론\",\n \"level\": 1,\n \"pageNo\": 1,\n \"children\": [\n { \"title\": \"1. 연구 배경\", \"level\": 2, \"pageNo\": 3 },\n { \"title\": \"2. 연구 목적\", \"level\": 2, \"pageNo\": 5 }\n ]\n },\n { \"title\": \"제2장 방법론\", \"level\": 1, \"pageNo\": 10 }\n ]\n}`;\n }\n\n /**\n * Build user prompt with Markdown content\n */\n protected buildUserPrompt(markdown: string): string {\n return `Extract the table of contents structure from the following markdown:\n\n${markdown}`;\n }\n\n /**\n * Normalize and validate extracted entries\n */\n private normalizeEntries(entries: TocEntry[]): TocEntry[] {\n if (entries.length === 0) {\n return [];\n }\n\n // Normalize level consistency starting from level 1\n return this.normalizeLevel(entries, 1);\n }\n\n /**\n * Recursively ensure level consistency\n *\n * Children must have level = parent.level + 1\n */\n private normalizeLevel(\n entries: TocEntry[],\n expectedLevel: number,\n ): TocEntry[] {\n return entries.map((entry) => {\n const normalizedEntry: TocEntry = {\n title: entry.title.trim(),\n level: expectedLevel,\n pageNo: entry.pageNo,\n };\n\n if (entry.children && entry.children.length > 0) {\n normalizedEntry.children = this.normalizeLevel(\n entry.children,\n expectedLevel + 1,\n );\n }\n\n return normalizedEntry;\n });\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type {\n ExtendedTokenUsage,\n LLMTokenUsageAggregator,\n} from '@heripo/shared';\nimport type { LanguageModel } from 'ai';\n\n/**\n * Base options for all LLM-based components\n */\nexport interface BaseLLMComponentOptions {\n /**\n * Maximum retry count for LLM API (default: 3)\n */\n maxRetries?: number;\n\n /**\n * Temperature for LLM generation (default: 0)\n */\n temperature?: number;\n\n /**\n * Abort signal for cancellation support\n */\n abortSignal?: AbortSignal;\n}\n\n/**\n * Abstract base class for all LLM-based components\n *\n * Provides common functionality:\n * - Consistent logging with component name prefix\n * - Token usage tracking via optional aggregator\n * - Standard configuration (model, fallback, retries, temperature)\n *\n * Subclasses must implement buildSystemPrompt() and buildUserPrompt().\n */\nexport abstract class BaseLLMComponent {\n protected 
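Level normalization below discards whatever `level` values the LLM reported and recomputes depth from nesting alone, so a mislabeled child cannot corrupt the hierarchy. A standalone restatement with a local `TocEntry` shape:

```typescript
interface TocEntry {
  title: string;
  level: number;
  pageNo: number;
  children?: TocEntry[];
}

// Standalone restatement of normalizeLevel: children always get
// parent.level + 1, regardless of what the LLM claimed.
function normalizeLevel(entries: TocEntry[], expectedLevel: number): TocEntry[] {
  return entries.map((entry) => ({
    title: entry.title.trim(),
    level: expectedLevel,
    pageNo: entry.pageNo,
    ...(entry.children?.length
      ? { children: normalizeLevel(entry.children, expectedLevel + 1) }
      : {}),
  }));
}

// An LLM sometimes labels a nested child "level 3" under a level-1 parent;
// normalization forces it back to 2 (and trims the title).
const raw: TocEntry[] = [
  {
    title: ' 제1장 서론 ',
    level: 1,
    pageNo: 1,
    children: [{ title: '1. 연구 배경', level: 3, pageNo: 3 }],
  },
];
console.log(normalizeLevel(raw, 1)[0].children?.[0].level); // 2
```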
readonly logger: LoggerMethods;\n protected readonly model: LanguageModel;\n protected readonly fallbackModel?: LanguageModel;\n protected readonly maxRetries: number;\n protected readonly temperature: number;\n protected readonly componentName: string;\n protected readonly aggregator?: LLMTokenUsageAggregator;\n protected readonly abortSignal?: AbortSignal;\n\n /**\n * Constructor for BaseLLMComponent\n *\n * @param logger - Logger instance for logging\n * @param model - Primary language model for LLM calls\n * @param componentName - Name of the component for logging (e.g., \"TocExtractor\")\n * @param options - Optional configuration (maxRetries, temperature)\n * @param fallbackModel - Optional fallback model for retry on failure\n * @param aggregator - Optional token usage aggregator for tracking LLM calls\n */\n constructor(\n logger: LoggerMethods,\n model: LanguageModel,\n componentName: string,\n options?: BaseLLMComponentOptions,\n fallbackModel?: LanguageModel,\n aggregator?: LLMTokenUsageAggregator,\n ) {\n this.logger = logger;\n this.model = model;\n this.componentName = componentName;\n this.maxRetries = options?.maxRetries ?? 3;\n this.temperature = options?.temperature ?? 0;\n this.fallbackModel = fallbackModel;\n this.aggregator = aggregator;\n this.abortSignal = options?.abortSignal;\n }\n\n /**\n * Log a message with consistent component name prefix\n *\n * @param level - Log level ('info', 'warn', 'error')\n * @param message - Message to log (without prefix)\n * @param args - Additional arguments to pass to logger\n */\n protected log(\n level: 'info' | 'warn' | 'error',\n message: string,\n ...args: unknown[]\n ): void {\n const formattedMessage = `[${this.componentName}] ${message}`;\n this.logger[level](formattedMessage, ...args);\n }\n\n /**\n * Track token usage to aggregator if available\n *\n * @param usage - Token usage information to track\n */\n protected trackUsage(usage: ExtendedTokenUsage): void {\n if (this.aggregator) {\n this.aggregator.track(usage);\n }\n }\n\n /**\n * Create an empty usage record for edge cases (e.g., empty input)\n *\n * @param phase - Phase name for the usage record\n * @returns Empty ExtendedTokenUsage object\n */\n protected createEmptyUsage(phase: string): ExtendedTokenUsage {\n return {\n component: this.componentName,\n phase,\n model: 'primary',\n modelName: 'none',\n inputTokens: 0,\n outputTokens: 0,\n totalTokens: 0,\n };\n }\n\n /**\n * Build system prompt for LLM call\n *\n * Subclasses must implement this to provide component-specific system prompts.\n */\n protected abstract buildSystemPrompt(...args: unknown[]): string;\n\n /**\n * Build user prompt for LLM call\n *\n * Subclasses must implement this to construct prompts from input data.\n */\n protected abstract buildUserPrompt(...args: unknown[]): string;\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type {\n ExtendedTokenUsage,\n LLMTokenUsageAggregator,\n} from '@heripo/shared';\nimport type { LanguageModel } from 'ai';\nimport type { z } from 'zod';\n\nimport { LLMCaller } from '@heripo/shared';\n\nimport {\n BaseLLMComponent,\n type BaseLLMComponentOptions,\n} from './base-llm-component';\n\nexport type { BaseLLMComponentOptions } from './base-llm-component';\n\n/**\n * Abstract base class for text-based LLM components\n *\n * Extends BaseLLMComponent with helper method for text-based LLM calls\n * using LLMCaller.call() (non-vision).\n *\n * Subclasses: TocExtractor, CaptionParser, BaseValidator\n */\nexport abstract class TextLLMComponent 
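A hypothetical minimal subclass shows what the base class actually buys: prefixed logging, optional usage aggregation, and a zero-token record for early exits. `EchoComponent` exists only for this sketch; `BaseLLMComponent` is a public export:

```typescript
import { BaseLLMComponent } from '@heripo/document-processor';

// Minimal concrete subclass: the abstract base only requires the two
// prompt builders; logging and usage tracking come for free.
class EchoComponent extends BaseLLMComponent {
  protected buildSystemPrompt(): string {
    return 'You echo the user input.';
  }

  protected buildUserPrompt(input: string): string {
    return input;
  }

  demo(): void {
    // Emits "[EchoComponent] hello" through the injected logger.
    this.log('info', 'hello');
    // Zero-token record for early-exit paths (e.g., empty input).
    const usage = this.createEmptyUsage('demo');
    this.trackUsage(usage); // no-op when no aggregator was provided
  }
}

// Construction mirrors the base signature:
// new EchoComponent(logger, model, 'EchoComponent');
```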
extends BaseLLMComponent {\n constructor(\n logger: LoggerMethods,\n model: LanguageModel,\n componentName: string,\n options?: BaseLLMComponentOptions,\n fallbackModel?: LanguageModel,\n aggregator?: LLMTokenUsageAggregator,\n ) {\n super(logger, model, componentName, options, fallbackModel, aggregator);\n }\n\n /**\n * Call LLM with text-based prompts using LLMCaller.call()\n *\n * @template TSchema - Zod schema type for response validation\n * @param schema - Zod schema for response validation\n * @param systemPrompt - System prompt for LLM\n * @param userPrompt - User prompt for LLM\n * @param phase - Phase name for tracking (e.g., 'extraction', 'validation')\n * @returns Promise with parsed object and usage information\n */\n protected async callTextLLM<TSchema extends z.ZodType>(\n schema: TSchema,\n systemPrompt: string,\n userPrompt: string,\n phase: string,\n ): Promise<{ output: z.infer<TSchema>; usage: ExtendedTokenUsage }> {\n const result = await LLMCaller.call({\n schema,\n systemPrompt,\n userPrompt,\n primaryModel: this.model,\n fallbackModel: this.fallbackModel,\n maxRetries: this.maxRetries,\n temperature: this.temperature,\n abortSignal: this.abortSignal,\n component: this.componentName,\n phase,\n });\n\n this.trackUsage(result.usage);\n\n return {\n output: result.output as z.infer<TSchema>,\n usage: result.usage,\n };\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type { LLMTokenUsageAggregator } from '@heripo/shared';\nimport type { LanguageModel } from 'ai';\n\nimport {\n LLMCaller,\n LLMTokenUsageAggregator as LLMTokenUsageAggregatorClass,\n} from '@heripo/shared';\nimport * as fs from 'node:fs';\nimport * as path from 'node:path';\nimport { z } from 'zod';\n\nimport {\n VisionLLMComponent,\n type VisionLLMComponentOptions,\n} from '../core/vision-llm-component';\n\n/**\n * Schema for vision-based TOC extraction response\n */\nexport const VisionTocExtractionSchema = z.object({\n hasToc: z.boolean().describe('Whether a TOC is visible on these pages'),\n tocMarkdown: z\n .string()\n .nullable()\n .describe('Extracted TOC in markdown format, null if not found'),\n continuesOnNextPage: z\n .boolean()\n .describe('Whether TOC continues beyond these pages'),\n});\n\nexport type VisionTocExtractionResult = z.infer<\n typeof VisionTocExtractionSchema\n>;\n\n/**\n * Options for VisionTocExtractor\n */\nexport interface VisionTocExtractorOptions extends VisionLLMComponentOptions {\n /**\n * Number of pages for first batch (default: 10)\n */\n firstBatchSize?: number;\n\n /**\n * Number of pages for second batch (default: 10)\n */\n secondBatchSize?: number;\n}\n\n/**\n * VisionTocExtractor\n *\n * Uses vision LLM to find and extract TOC directly from page images.\n * Fallback strategy when rule-based extraction fails or produces invalid content.\n * Extends VisionLLMComponent for standardized vision LLM call handling.\n *\n * Output format matches MarkdownConverter.convert() for consistency.\n */\nexport class VisionTocExtractor extends VisionLLMComponent {\n private readonly firstBatchSize: number;\n private readonly secondBatchSize: number;\n\n constructor(\n logger: LoggerMethods,\n model: LanguageModel,\n outputPath: string,\n options?: VisionTocExtractorOptions,\n fallbackModel?: LanguageModel,\n aggregator?: LLMTokenUsageAggregator,\n ) {\n super(\n logger,\n model,\n 'VisionTocExtractor',\n outputPath,\n options,\n fallbackModel,\n aggregator ?? new LLMTokenUsageAggregatorClass(),\n );\n this.firstBatchSize = options?.firstBatchSize ?? 
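A hypothetical subclass sketch of the intended call pattern: the component supplies prompts and a zod schema, `callTextLLM` supplies the plumbing (primary/fallback models, retries, temperature, abort signal, usage tracking), and `output` comes back typed. `SentimentClassifier` and its schema are illustrations, not part of the package:

```typescript
import { z } from 'zod';
import { TextLLMComponent } from '@heripo/document-processor';

const SentimentSchema = z.object({
  sentiment: z.enum(['positive', 'negative', 'neutral']),
});

class SentimentClassifier extends TextLLMComponent {
  protected buildSystemPrompt(): string {
    return 'Classify the sentiment of the given text.';
  }

  protected buildUserPrompt(text: string): string {
    return `Text: ${text}`;
  }

  async classify(text: string) {
    // callTextLLM routes through LLMCaller.call() and tracks usage;
    // `output` is typed as z.infer<typeof SentimentSchema>.
    const { output, usage } = await this.callTextLLM(
      SentimentSchema,
      this.buildSystemPrompt(),
      this.buildUserPrompt(text),
      'classification',
    );
    return { sentiment: output.sentiment, tokens: usage.totalTokens };
  }
}
```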
10;\n this.secondBatchSize = options?.secondBatchSize ?? 10;\n }\n\n /**\n * Extract TOC from page images\n *\n * Searches pages 1-10 first, then 11-20 if not found.\n *\n * @param totalPages - Total number of pages in the document\n * @returns Extracted TOC markdown or null if not found\n */\n async extract(totalPages: number): Promise<string | null> {\n this.log('info', `Starting TOC extraction from ${totalPages} pages`);\n\n if (totalPages === 0) {\n this.log('info', 'No pages to search');\n return null;\n }\n\n // First batch: pages 1-10 (or fewer if document is smaller)\n const firstBatchEnd = Math.min(this.firstBatchSize, totalPages);\n this.log('info', `Searching first batch: pages 1-${firstBatchEnd}`);\n\n const firstResult = await this.extractFromBatch(1, firstBatchEnd);\n\n if (firstResult.hasToc && firstResult.tocMarkdown) {\n // Check if TOC continues\n if (firstResult.continuesOnNextPage && firstBatchEnd < totalPages) {\n this.log('info', 'TOC continues on next pages, extracting more');\n const continuationEnd = Math.min(\n firstBatchEnd + this.secondBatchSize,\n totalPages,\n );\n const continuationResult = await this.extractFromBatch(\n firstBatchEnd + 1,\n continuationEnd,\n );\n\n if (continuationResult.hasToc && continuationResult.tocMarkdown) {\n const merged = this.mergeMarkdown(\n firstResult.tocMarkdown,\n continuationResult.tocMarkdown,\n );\n this.aggregator!.logSummary(this.logger);\n this.log(\n 'info',\n `TOC extracted with continuation (${merged.length} chars)`,\n );\n return merged;\n }\n }\n\n this.aggregator!.logSummary(this.logger);\n this.log(\n 'info',\n `TOC found in first batch (${firstResult.tocMarkdown.length} chars)`,\n );\n return firstResult.tocMarkdown;\n }\n\n // Second batch: pages 11-20 (only if first batch didn't find TOC)\n if (firstBatchEnd < totalPages) {\n const secondBatchStart = firstBatchEnd + 1;\n const secondBatchEnd = Math.min(\n firstBatchEnd + this.secondBatchSize,\n totalPages,\n );\n\n this.log(\n 'info',\n `Searching second batch: pages ${secondBatchStart}-${secondBatchEnd}`,\n );\n\n const secondResult = await this.extractFromBatch(\n secondBatchStart,\n secondBatchEnd,\n );\n\n if (secondResult.hasToc && secondResult.tocMarkdown) {\n this.aggregator!.logSummary(this.logger);\n this.log(\n 'info',\n `TOC found in second batch (${secondResult.tocMarkdown.length} chars)`,\n );\n return secondResult.tocMarkdown;\n }\n }\n\n this.aggregator!.logSummary(this.logger);\n this.log('info', 'TOC not found in any batch');\n return null;\n }\n\n /**\n * Extract TOC from a specific batch of pages\n */\n private async extractFromBatch(\n startPage: number,\n endPage: number,\n ): Promise<VisionTocExtractionResult> {\n this.log('info', `Extracting from pages ${startPage}-${endPage}`);\n\n this.log(\n 'info',\n `Preparing images for vision analysis. 
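A usage sketch for the batched search, assuming `logger` and `model` are provided by the host application and `/tmp/doc-output/pages/` holds the rendered page images (the path is an example):

```typescript
import { VisionTocExtractor } from '@heripo/document-processor';

declare const logger: import('@heripo/logger').LoggerMethods;
declare const model: import('ai').LanguageModel;

const extractor = new VisionTocExtractor(logger, model, '/tmp/doc-output', {
  firstBatchSize: 10,  // pages 1-10
  secondBatchSize: 10, // pages 11-20, searched only if the first batch misses
});

// Returns the TOC as markdown (same shape MarkdownConverter.convert emits),
// or null when neither batch contains a TOC.
const tocMarkdown = await extractor.extract(180);
if (tocMarkdown) {
  console.log(tocMarkdown.split('\n').length, 'TOC lines');
}
```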
This can be very slow (10+ minutes, sometimes 30+ minutes) depending on batch size and image resolution.`,\n );\n const imageContents = this.loadPageImages(startPage, endPage);\n\n this.log(\n 'info',\n `Calling vision LLM for TOC extraction (pages ${startPage}-${endPage})`,\n );\n const result = await LLMCaller.callVision({\n schema: VisionTocExtractionSchema,\n messages: [\n {\n role: 'user',\n content: [\n {\n type: 'text',\n text: this.buildUserPrompt(startPage, endPage),\n },\n ...imageContents,\n ],\n },\n ],\n primaryModel: this.model,\n fallbackModel: this.fallbackModel,\n maxRetries: this.maxRetries,\n temperature: this.temperature,\n abortSignal: this.abortSignal,\n component: 'VisionTocExtractor',\n phase: 'extraction',\n });\n this.log(\n 'info',\n `Vision LLM call completed (pages ${startPage}-${endPage})`,\n );\n\n this.trackUsage(result.usage);\n\n return result.output;\n }\n\n /**\n * Load page images and build message content\n */\n private loadPageImages(\n startPage: number,\n endPage: number,\n ): Array<{ type: 'image'; image: string }> {\n const imageContents: Array<{ type: 'image'; image: string }> = [];\n\n for (let pageNo = startPage; pageNo <= endPage; pageNo++) {\n // Page files are 0-indexed: page_0.png, page_1.png, etc.\n const imagePath = path.resolve(\n this.outputPath,\n `pages/page_${pageNo - 1}.png`,\n );\n const imageBuffer = fs.readFileSync(imagePath);\n const base64Image = imageBuffer.toString('base64');\n\n imageContents.push({\n type: 'image',\n image: `data:image/png;base64,${base64Image}`,\n });\n }\n\n return imageContents;\n }\n\n /**\n * Merge markdown from multiple batches\n */\n private mergeMarkdown(first: string, continuation: string): string {\n return `${first.trim()}\\n${continuation.trim()}`;\n }\n\n /**\n * Build system prompt for vision LLM (not used, but required by abstract class)\n */\n protected buildSystemPrompt(): string {\n return '';\n }\n\n /**\n * Build user prompt with page range information\n */\n protected buildUserPrompt(startPage: number, endPage: number): string {\n const pageCount = endPage - startPage + 1;\n return `You are a document analysis specialist. Your task is to find and extract the Table of Contents (TOC) from document page images.\n\nI am providing ${pageCount} document page images (pages ${startPage}-${endPage}).\n\n## Where to Look for TOC:\n- TOC typically appears in the first 10-20 pages of a document\n- Look for pages with headings like \"목차\", \"차례\", \"Contents\", \"Table of Contents\"\n- Look for structured lists with chapter titles and page numbers\n\n## What to Extract:\nExtract the TOC content as markdown format that matches this exact structure:\n- Use \"- \" prefix for each list item\n- Use 2-space indentation for hierarchy levels\n- Include \"..... \" followed by page number at the end of each entry\n- Preserve original chapter/section numbering from the document\n\n## Output Format Example:\n\\`\\`\\`\n- 제1장 서론 ..... 1\n - 1. 연구 배경 ..... 3\n - 2. 연구 목적 ..... 5\n- 제2장 연구 방법 ..... 10\n - 1. 조사 지역 ..... 10\n - 2. 조사 방법 ..... 15\n- 제3장 연구 결과 ..... 25\n\\`\\`\\`\n\n## Important Rules:\n1. Extract ONLY the main document TOC\n2. DO NOT include supplementary indices:\n - Photo indices (사진 목차, 사진목차)\n - Table indices (표 목차, 표목차)\n - Figure indices (도면 목차, 도면목차)\n3. If no TOC is found, set hasToc to false and tocMarkdown to null\n4. Set continuesOnNextPage to true if the TOC appears to continue beyond the visible pages\n\nPlease examine these pages and:\n1. 
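The page-image loader depends on one naming convention: 1-based page numbers map to 0-indexed files, so page 1 reads `pages/page_0.png`. A standalone restatement of that translation:

```typescript
import * as fs from 'node:fs';
import * as path from 'node:path';

// Standalone restatement of loadPageImages: 1-based page numbers map to
// 0-indexed files, each encoded as a PNG data URL for the vision LLM.
function loadPageImages(
  outputPath: string,
  startPage: number,
  endPage: number,
): Array<{ type: 'image'; image: string }> {
  const contents: Array<{ type: 'image'; image: string }> = [];
  for (let pageNo = startPage; pageNo <= endPage; pageNo++) {
    const imagePath = path.resolve(outputPath, `pages/page_${pageNo - 1}.png`);
    const base64 = fs.readFileSync(imagePath).toString('base64');
    contents.push({ type: 'image', image: `data:image/png;base64,${base64}` });
  }
  return contents;
}

// loadPageImages('/tmp/doc-output', 1, 10) reads page_0.png .. page_9.png.
```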
Determine if any page contains a Table of Contents (TOC)\n2. If found, extract the complete TOC in markdown format\n3. Indicate if the TOC continues beyond these pages\n\nRemember: Extract the main document TOC only. Ignore photo/table/figure indices.`;\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type {\n ExtendedTokenUsage,\n LLMTokenUsageAggregator,\n} from '@heripo/shared';\nimport type { LanguageModel } from 'ai';\nimport type { z } from 'zod';\n\nimport { LLMCaller } from '@heripo/shared';\nimport * as fs from 'node:fs';\nimport * as path from 'node:path';\n\nimport {\n BaseLLMComponent,\n type BaseLLMComponentOptions,\n} from './base-llm-component';\n\n/**\n * Options for VisionLLMComponent\n */\nexport interface VisionLLMComponentOptions extends BaseLLMComponentOptions {\n // Vision components may have additional options in future\n}\n\n/**\n * Image content structure for vision LLM messages\n */\nexport interface ImageContent {\n type: 'image';\n image: string;\n}\n\n/**\n * Abstract base class for vision-based LLM components\n *\n * Extends BaseLLMComponent with helper methods for vision-based LLM calls\n * using LLMCaller.callVision().\n *\n * Subclasses: PageRangeParser, VisionTocExtractor\n */\nexport abstract class VisionLLMComponent extends BaseLLMComponent {\n protected readonly outputPath: string;\n\n constructor(\n logger: LoggerMethods,\n model: LanguageModel,\n componentName: string,\n outputPath: string,\n options?: VisionLLMComponentOptions,\n fallbackModel?: LanguageModel,\n aggregator?: LLMTokenUsageAggregator,\n ) {\n super(logger, model, componentName, options, fallbackModel, aggregator);\n this.outputPath = outputPath;\n }\n\n /**\n * Call LLM with vision capabilities using LLMCaller.callVision()\n *\n * @template TSchema - Zod schema type for response validation\n * @param schema - Zod schema for response validation\n * @param messages - Messages array including image content\n * @param phase - Phase name for tracking (e.g., 'extraction', 'sampling')\n * @returns Promise with parsed object and usage information\n */\n protected async callVisionLLM<TSchema extends z.ZodType>(\n schema: TSchema,\n messages: Array<{\n role: 'user' | 'assistant';\n content: unknown[] | string;\n }>,\n phase: string,\n ): Promise<{ output: z.infer<TSchema>; usage: ExtendedTokenUsage }> {\n const result = await LLMCaller.callVision({\n schema,\n messages,\n primaryModel: this.model,\n fallbackModel: this.fallbackModel,\n maxRetries: this.maxRetries,\n temperature: this.temperature,\n abortSignal: this.abortSignal,\n component: this.componentName,\n phase,\n });\n\n this.trackUsage(result.usage);\n\n return {\n output: result.output as z.infer<TSchema>,\n usage: result.usage,\n };\n }\n\n /**\n * Load an image file and encode it as base64\n *\n * @param imagePath - Absolute path to the image file\n * @returns Base64 encoded image string\n */\n protected loadImageAsBase64(imagePath: string): string {\n const imageBuffer = fs.readFileSync(imagePath);\n return imageBuffer.toString('base64');\n }\n\n /**\n * Build image content object for vision LLM messages\n *\n * @param imagePath - Path to the image file (relative to outputPath or absolute)\n * @param mimeType - MIME type of the image (default: 'image/png')\n * @returns ImageContent object for LLM message\n */\n protected buildImageContent(\n imagePath: string,\n mimeType: string = 'image/png',\n ): ImageContent {\n const absolutePath = path.isAbsolute(imagePath)\n ? 
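A hypothetical `VisionLLMComponent` subclass showing the message shape `callVisionLLM` expects: a single user turn whose content interleaves text parts with image parts built by `buildImageContent`. `BlankPageDetector` and its schema are illustrations only:

```typescript
import { z } from 'zod';
import { VisionLLMComponent } from '@heripo/document-processor';

const PageCheckSchema = z.object({
  isBlank: z.boolean(),
});

class BlankPageDetector extends VisionLLMComponent {
  protected buildSystemPrompt(): string {
    return '';
  }

  protected buildUserPrompt(): string {
    return 'Is this page blank? Answer with the JSON schema provided.';
  }

  async check(relativeImagePath: string): Promise<boolean> {
    const { output } = await this.callVisionLLM(
      PageCheckSchema,
      [
        {
          role: 'user',
          content: [
            { type: 'text', text: this.buildUserPrompt() },
            // Resolves against outputPath and inlines the image as a data URL.
            this.buildImageContent(relativeImagePath),
          ],
        },
      ],
      'blank-check',
    );
    return output.isBlank;
  }
}
```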
imagePath\n : path.resolve(this.outputPath, imagePath);\n const base64Image = this.loadImageAsBase64(absolutePath);\n return {\n type: 'image',\n image: `data:${mimeType};base64,${base64Image}`,\n };\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type { Caption } from '@heripo/model';\nimport type { LLMTokenUsageAggregator } from '@heripo/shared';\nimport type { LanguageModel } from 'ai';\n\nimport {\n BatchProcessor,\n LLMCaller,\n LLMTokenUsageAggregator as LLMTokenUsageAggregatorClass,\n} from '@heripo/shared';\nimport { z } from 'zod';\n\nimport {\n type BaseLLMComponentOptions,\n TextLLMComponent,\n} from '../core/text-llm-component';\n\n/**\n * CaptionParser options\n */\nexport interface CaptionParserOptions extends BaseLLMComponentOptions {\n /**\n * Custom component name for token usage tracking.\n * Defaults to 'CaptionParser'.\n */\n componentName?: string;\n}\n\n/**\n * Schema for a single caption extraction result (used for sequential processing)\n */\nconst CaptionSingleSchema = z.object({\n num: z\n .string()\n .nullable()\n .describe('Extracted caption prefix + number (e.g., \"도판 1\", \"Figure 2\")'),\n});\n\n/**\n * Schema for a single caption extraction result with index (used for batch processing)\n */\nconst CaptionExtractionSchema = z.object({\n index: z.number().int().describe('Index of the caption in the input array'),\n num: z\n .string()\n .nullable()\n .describe('Extracted caption prefix + number (e.g., \"도판 1\", \"Figure 2\")'),\n});\n\n/**\n * Schema for batch caption response\n */\nconst CaptionBatchSchema = z.object({\n results: z.array(CaptionExtractionSchema),\n});\n\n/**\n * CaptionParser\n *\n * Extracts caption prefix and number from image/table captions using LLM.\n * Preserves original spacing from input text.\n * Extends TextLLMComponent for standardized LLM call handling.\n *\n * ## Algorithm\n *\n * 1. Collect caption texts\n * 2. Split into batches based on batchSize\n * 3. For each batch: call LLM to extract caption prefix + number\n * 4. Flatten results and return\n */\nexport class CaptionParser extends TextLLMComponent {\n constructor(\n logger: LoggerMethods,\n model: LanguageModel,\n options?: CaptionParserOptions,\n fallbackModel?: LanguageModel,\n aggregator?: LLMTokenUsageAggregator,\n ) {\n super(\n logger,\n model,\n options?.componentName ?? 'CaptionParser',\n options,\n fallbackModel,\n aggregator ?? new LLMTokenUsageAggregatorClass(),\n );\n }\n\n /**\n * Parse batch of captions\n *\n * @param captions - Array of caption full texts\n * @param batchSize - Batch size for processing. Set to 0 for sequential processing without batching.\n * @param overrideModel - Optional model to use instead of the default model\n * @returns Array of Caption objects with num extracted (maintains original order)\n */\n async parseBatch(\n captions: string[],\n batchSize: number,\n overrideModel?: LanguageModel,\n ): Promise<Caption[]> {\n const effectiveModel = overrideModel ?? this.model;\n const isOverride = overrideModel !== undefined;\n const modelName =\n (effectiveModel as { modelId?: string }).modelId ??\n (effectiveModel as { id?: string }).id ??\n 'unknown';\n this.log(\n 'info',\n `Starting caption parsing for ${captions.length} captions with ${isOverride ? 
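The caption schemas deliberately use `.nullable()` rather than `.optional()`: `null` is a meaningful answer ("this caption has no number"), while a missing key is a malformed response. The schemas are module-private, so this sketch re-declares `CaptionSingleSchema` for illustration:

```typescript
import { z } from 'zod';

// Re-declared for illustration; the real schema is private to caption-parser.ts.
const CaptionSingleSchema = z.object({
  num: z.string().nullable(),
});

console.log(CaptionSingleSchema.parse({ num: '도판 1' })); // { num: '도판 1' }
console.log(CaptionSingleSchema.parse({ num: null }));     // { num: null }
console.log(CaptionSingleSchema.safeParse({}).success);    // false: key must be present
```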
'override ' : ''}model: ${modelName}`,\n );\n\n if (captions.length === 0) {\n this.log('info', 'No captions to parse');\n return [];\n }\n\n try {\n if (batchSize === 0) {\n // Sequential processing (one-by-one) without batch processing\n this.log('info', 'Using sequential processing (batchSize=0)');\n const results: Caption[] = [];\n\n for (let i = 0; i < captions.length; i++) {\n const fullText = captions[i];\n\n // Log progress\n this.log('info', `Processing ${i + 1} / ${captions.length}...`);\n\n const result = await LLMCaller.call({\n schema: CaptionSingleSchema,\n systemPrompt: this.buildSystemPrompt('single'),\n userPrompt: this.buildUserPromptSingle(fullText),\n primaryModel: effectiveModel,\n fallbackModel: this.fallbackModel,\n maxRetries: this.maxRetries,\n temperature: this.temperature,\n abortSignal: this.abortSignal,\n component: this.componentName,\n phase: 'caption-extraction',\n });\n\n this.trackUsage(result.usage);\n\n const finalNum = this.extractNumFromFullText(\n fullText,\n result.output.num,\n );\n results.push({ fullText, num: finalNum });\n }\n\n // Log token usage summary\n this.aggregator!.logSummary(this.logger);\n\n this.log(\n 'info',\n `Completed: ${results.length} captions parsed, ${results.filter((r) => r.num).length} with extracted numbers`,\n );\n\n return results;\n }\n\n // Batch processing: Convert to indexed format for batch processing\n const indexedCaptions = captions.map((text, index) => ({ index, text }));\n\n // Use BatchProcessor to process captions in parallel batches\n const batchResults = await BatchProcessor.processBatch(\n indexedCaptions,\n batchSize,\n async (batch) => this.parseBatchInternal(batch, effectiveModel),\n );\n\n // Sort results by original index to maintain order\n batchResults.sort((a, b) => a.index - b.index);\n const results = batchResults.map((r) => r.caption);\n\n // Log token usage summary\n this.aggregator!.logSummary(this.logger);\n\n this.log(\n 'info',\n `Completed: ${results.length} captions parsed, ${results.filter((r) => r.num).length} with extracted numbers`,\n );\n\n return results;\n } catch (error) {\n const message = error instanceof Error ? error.message : String(error);\n this.log('error', `Parsing failed: ${message}`);\n throw new CaptionParseError(`Failed to parse captions: ${message}`, {\n cause: error,\n });\n }\n }\n\n /**\n * Internal: Parse batch of captions using LLM\n *\n * @param captions - Batch of caption texts with original indices\n * @param model - Effective model to use\n * @returns Array of Caption objects indexed correctly\n */\n private async parseBatchInternal(\n captions: Array<{ index: number; text: string }>,\n model: LanguageModel,\n ): Promise<Array<{ index: number; caption: Caption }>> {\n const result = await LLMCaller.call({\n schema: CaptionBatchSchema,\n systemPrompt: this.buildSystemPrompt(),\n userPrompt: this.buildUserPrompt(captions),\n primaryModel: model,\n fallbackModel: this.fallbackModel,\n maxRetries: this.maxRetries,\n temperature: this.temperature,\n abortSignal: this.abortSignal,\n component: this.componentName,\n phase: 'caption-extraction',\n });\n\n // Track token usage\n this.trackUsage(result.usage);\n\n // Warn if LLM returned incomplete results (fewer results than inputs)\n if (result.output.results.length !== captions.length) {\n this.log(\n 'warn',\n `LLM returned ${result.output.results.length} results for ${captions.length} captions. 
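A usage sketch of the two processing modes, assuming `logger` and `model` come from the host application. `batchSize > 0` groups captions into parallel batches; `batchSize === 0` sends one caption per call with the stricter single-object prompt:

```typescript
import { CaptionParser } from '@heripo/document-processor';

declare const logger: import('@heripo/logger').LoggerMethods;
declare const model: import('ai').LanguageModel;

const parser = new CaptionParser(logger, model);

const captions = [
  '도판 1 유적 전경',
  'Figure 3: Site plan',
  '설명 없는 이미지',
];

// Batch mode: captions are grouped and processed in parallel batches.
const parsed = await parser.parseBatch(captions, 10);
// e.g. [{ fullText: '도판 1 유적 전경', num: '도판 1' }, ...,
//       { fullText: '설명 없는 이미지', num: undefined }]

// Sequential mode: one call per caption with the strict single-JSON prompt.
const sequential = await parser.parseBatch(captions, 0);
console.log(sequential.length === captions.length); // order is preserved
```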
` +\n `This may cause index mismatch.`,\n );\n }\n\n // Map LLM results back to original indices\n const captionMap = new Map(captions.map((c) => [c.index, c.text]));\n\n return result.output.results.map((resultItem) => {\n // resultItem.index is the position within this batch (0, 1, 2...)\n // We need to use the original caption at that position to get the actual index\n const originalCaption = captions[resultItem.index];\n const originalIndex = originalCaption?.index ?? resultItem.index;\n const fullText = captionMap.get(originalIndex) || '';\n const finalNum = this.extractNumFromFullText(fullText, resultItem.num);\n\n return {\n index: originalIndex,\n caption: {\n fullText,\n num: finalNum,\n },\n };\n });\n }\n\n /**\n * Extract and normalize caption number from full text\n *\n * Finds the extracted num pattern in the full text and extracts it\n * with original casing. Handles case-insensitive matching.\n *\n * @param fullText - The full caption text\n * @param extractedNum - The num extracted by LLM (may have different casing)\n * @returns Normalized num or undefined if no match\n */\n private extractNumFromFullText(\n fullText: string,\n extractedNum: string | null,\n ): string | undefined {\n if (!extractedNum) return undefined;\n\n let matchIndex = fullText.indexOf(extractedNum);\n\n if (matchIndex === -1) {\n // Pattern not found directly - try case-insensitive search\n const lowerFullText = fullText.toLowerCase();\n const lowerNum = extractedNum.toLowerCase();\n matchIndex = lowerFullText.indexOf(lowerNum);\n\n if (matchIndex !== -1) {\n // Found case-insensitive match - extract from match position using original casing\n return fullText.substring(matchIndex, matchIndex + extractedNum.length);\n }\n // If still not found, keep the original extracted num\n return extractedNum;\n }\n\n // Found the pattern - extract from match position to end of the matched pattern\n return fullText.substring(matchIndex, matchIndex + extractedNum.length);\n }\n\n /**\n * Build system prompt for caption parsing\n *\n * @param mode - 'batch' for multiple captions, 'single' for single caption\n */\n protected buildSystemPrompt(mode: 'batch' | 'single' = 'batch'): string {\n const intro =\n mode === 'batch'\n ? 'Extract the caption prefix and number (e.g., \"도판 1\", \"Figure 2\") from image/table captions.\\nReturn the prefix + number part as a string, or null if no number exists.'\n : 'Extract the caption prefix and number (e.g., \"도판 1\", \"Figure 2\") from an image/table caption.\\nReturn the prefix + number part as a string, or null if no number exists.';\n\n return `You are a caption prefix extractor for archaeological excavation reports.\n\n${intro}\n\nRules:\n1. Extract if the text follows a caption pattern: <prefix word(s)> <number>\n - The prefix can be ANY Korean/English word(s) that label images/tables/figures\n - Common examples: 도판, 사진, 그림, 도면, 표, 원색사진, 흑백사진, Figure, Photo, Plate, etc.\n - The key is the PATTERN (text followed by number), not a specific word list\n - \"원색사진 1. 조사지역\" → \"원색사진 1\" (valid: prefix + number pattern)\n - \"흑백사진 2 출토유물\" → \"흑백사진 2\" (valid: prefix + number pattern)\n2. IGNORE leading punctuation/brackets when extracting:\n - \"(사진 16> 느티나무\" → \"사진 16\" (ignore leading '(' and extract the pattern inside)\n - \"<도판 1> 유적\" → \"도판 1\" (ignore angle brackets)\n - \"[그림 2] 전경\" → \"그림 2\" (ignore square brackets)\n3. Do NOT extract (return null) if:\n - It's a numbered list item starting with just a number: \"1. 
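`extractNumFromFullText` prefers the exact substring of the original caption over the LLM's echo, so casing and spacing always come from the source text. A standalone restatement with the case-insensitive fallback:

```typescript
// Standalone restatement of extractNumFromFullText: locate the LLM's answer
// inside the original caption and re-read it from there.
function extractNum(fullText: string, extractedNum: string | null): string | undefined {
  if (!extractedNum) return undefined;

  let matchIndex = fullText.indexOf(extractedNum);
  if (matchIndex === -1) {
    // Case-insensitive fallback: find the span, then extract from the
    // original text so its casing is preserved.
    matchIndex = fullText.toLowerCase().indexOf(extractedNum.toLowerCase());
    if (matchIndex === -1) return extractedNum; // trust the LLM as a last resort
  }
  return fullText.substring(matchIndex, matchIndex + extractedNum.length);
}

// LLM returned lowercase "figure 3"; the original casing is recovered.
console.log(extractNum('Figure 3: Site plan', 'figure 3')); // "Figure 3"
console.log(extractNum('설명 없는 이미지', null));           // undefined
```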
유적 전경\" → null\n - It's a date/time reference: \"39 3월 28일...\" → null\n - It's a year reference: \"2024년 조사 현황\" → null\n - It starts with a number without a prefix: \"123 설명\" → null\n4. PRESERVE original spacing from the input text exactly (after ignoring leading punctuation)\n5. Include the full number (e.g., \"1-2\", \"3a\") not just the first digit\n6. Include period/dot after number if it directly follows (e.g., \"3.6\" → \"도판 3.6\")\n - \"그림 3.6. 한반도 중부\" → \"그림 3.6\" (period after decimal number included)\n - \"도판 2. 유적\" → \"도판 2\" (period after space NOT included)\n7. Stop at the first punctuation (except decimal point), whitespace, or underscore after the number\n - \"사진 1_ㅇㅇㅇ\" → \"사진 1\" (stop at underscore)\n - \"사진 1 ㅇㅇㅇ\" → \"사진 1\" (stop at space)\n - \"그림 3.6. 한반도\" → \"그림 3.6\" (period included as decimal separator)\n\nExamples:\n- \"도판 1 유적 전경\" → \"도판 1\"\n- \"원색사진 1. 조사지역 원경\" → \"원색사진 1\"\n- \"흑백사진 2 출토유물\" → \"흑백사진 2\"\n- \"(사진 16> 느티나무의 접선단면\" → \"사진 16\" (ignore leading punctuation)\n- \"<도판 3> 유물 사진\" → \"도판 3\" (ignore angle brackets)\n- \"도판1 어쩌구\" → \"도판1\" (no space preserved)\n- \"사진 2. 출토 유물\" → \"사진 2\" (period after space, not included)\n- \"그림 3.6. 한반도 중부 및 남부의 ㅇㅇㅇ\" → \"그림 3.6\" (period as decimal included)\n- \"Figure 3: Site plan\" → \"Figure 3\"\n- \"Table 4a. Artifact list\" → \"Table 4a\"\n- \"도판 5-2 층위 단면\" → \"도판 5-2\"\n- \"설명 없는 이미지\" → null\n- \"39 3월 28일(백제 도로유구 내부 조사)\" → null (starts with number, no prefix)\n- \"1. 유구 현황\" → null (numbered list, not caption)\n- \"2024-05-01 촬영\" → null (date, not caption)`;\n }\n\n /**\n * Build user prompt for caption parsing\n */\n protected buildUserPrompt(\n captions: Array<{ index: number; text: string }>,\n ): string {\n const captionList = captions\n .map((c) => `[${c.index}] ${c.text}`)\n .join('\\n');\n\n return `Extract caption prefix and number from the following captions:\n\n${captionList}\n\nReturn the results as JSON array with \"index\" (original position) and \"num\" (extracted prefix + number or null).\n\nExample format:\n[\n { \"index\": 0, \"num\": \"도판 1\" },\n { \"index\": 1, \"num\": \"Figure 2\" },\n { \"index\": 2, \"num\": null }\n]`;\n }\n\n /**\n * Build user prompt for single caption parsing\n */\n private buildUserPromptSingle(caption: string): string {\n return `Extract caption prefix and number from the following caption:\n\n\"${caption}\"\n\nCRITICAL: Return ONLY the JSON object directly with a \"num\" field.\n- DO NOT wrap the JSON in quotes or additional formatting\n- DO NOT output \"final:\", \"result:\", or any prefix labels\n- DO NOT wrap in backticks or code blocks\n- Return ONLY valid JSON: { \"num\": value }\n\nThe value must be:\n- A string with the extracted caption prefix + number (e.g., \"도판 1\", \"Figure 2\")\n- null if no number exists\n\nValid outputs:\n{ \"num\": \"도판 1\" }\n{ \"num\": null }\n\nInvalid outputs (NEVER do these):\n- { \"final\": \"...\" } ❌\n- \\`\\`\\`json { \"num\": \"...\" } \\`\\`\\` ❌\n- \"{ \"num\": \"...\" }\" ❌\n- { \"num\": { \"value\": \"...\" } } ❌`;\n }\n}\n\n/**\n * Error thrown when caption parsing fails\n */\nexport class CaptionParseError extends Error {\n constructor(message: string, options?: ErrorOptions) {\n super(message, options);\n this.name = 'CaptionParseError';\n }\n}\n","/**\n * PageRangeParseError\n *\n * Custom error thrown when page range parsing fails.\n */\nexport class PageRangeParseError extends Error {\n constructor(message: string, options?: ErrorOptions) {\n super(message, options);\n this.name = 
'PageRangeParseError';\n }\n\n /**\n * Extract error message from unknown error type\n */\n static getErrorMessage(error: unknown): string {\n return error instanceof Error ? error.message : String(error);\n }\n\n /**\n * Create PageRangeParseError from unknown error with context\n */\n static fromError(context: string, error: unknown): PageRangeParseError {\n return new PageRangeParseError(\n `${context}: ${PageRangeParseError.getErrorMessage(error)}`,\n { cause: error },\n );\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type { DoclingDocument, DoclingPage, PageRange } from '@heripo/model';\nimport type {\n ExtendedTokenUsage,\n LLMTokenUsageAggregator,\n} from '@heripo/shared';\nimport type { LanguageModel } from 'ai';\n\nimport type { PageSizeGroup } from '../types';\n\nimport {\n LLMCaller,\n LLMTokenUsageAggregator as LLMTokenUsageAggregatorClass,\n} from '@heripo/shared';\nimport * as fs from 'node:fs';\nimport * as path from 'node:path';\nimport { z } from 'zod';\n\nimport { VisionLLMComponent } from '../core/vision-llm-component';\nimport { PageRangeParseError } from './page-range-parse-error';\n\n/**\n * Pattern types for page number sequences\n */\nexport enum PagePattern {\n /** Simple increment: [1, 2, 3, 4, ...] */\n SIMPLE_INCREMENT = 'simple_increment',\n /** Double-sided scan: [1-2, 3-4, 5-6, ...] */\n DOUBLE_SIDED = 'double_sided',\n /** Offset pattern: PDF page != actual page (consistent offset) */\n OFFSET = 'offset',\n /** No clear pattern detected */\n UNKNOWN = 'unknown',\n}\n\n/**\n * Pattern analysis result\n */\ninterface PatternAnalysis {\n pattern: PagePattern;\n offset: number;\n increment: number;\n}\n\n/**\n * Sample extraction result from Vision LLM\n */\ninterface SampleResult {\n pdfPageNo: number;\n startPageNo: number | null;\n endPageNo: number | null;\n}\n\n/**\n * PageRangeParser\n *\n * Extracts actual document page numbers from PDF page images using Vision LLM.\n * Uses random sampling + pattern detection to minimize LLM calls.\n * Extends VisionLLMComponent for standardized vision LLM call handling.\n *\n * ## Algorithm\n *\n * 1. Group pages by size (consecutive pages with same dimensions)\n * 2. For each group:\n * - If ≤3 pages: send all to LLM at once\n * - If >3 pages: random sample 3 pages, detect pattern, apply to all\n * 3. Post-process: handle drops, normalize negatives, backfill failed pages\n */\nexport class PageRangeParser extends VisionLLMComponent {\n // Configuration constants\n private readonly SAMPLE_SIZE = 3;\n private readonly MAX_PATTERN_RETRIES = 6;\n private readonly SIZE_TOLERANCE = 5.0;\n\n constructor(\n logger: LoggerMethods,\n model: LanguageModel,\n outputPath: string,\n maxRetries: number = 3,\n fallbackModel?: LanguageModel,\n aggregator?: LLMTokenUsageAggregator,\n abortSignal?: AbortSignal,\n ) {\n super(\n logger,\n model,\n 'PageRangeParser',\n outputPath,\n { maxRetries, abortSignal },\n fallbackModel,\n aggregator ?? 
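Size grouping hinges on `createSizeKey`, which buckets dimensions by the 5.0pt `SIZE_TOLERANCE` so floating-point jitter between pages of the same physical size cannot split a group. A standalone restatement with example A4 dimensions:

```typescript
// Standalone restatement of createSizeKey: round each dimension into a
// 5.0pt bucket so near-identical page sizes share one key.
const SIZE_TOLERANCE = 5.0;

function createSizeKey(width: number, height: number): string {
  return `${Math.round(width / SIZE_TOLERANCE)}x${Math.round(height / SIZE_TOLERANCE)}`;
}

console.log(createSizeKey(595.2, 841.8)); // "119x168" (A4 portrait)
console.log(createSizeKey(595.0, 842.0)); // "119x168" — same bucket despite jitter
console.log(createSizeKey(841.8, 595.2)); // "168x119" — landscape pages form their own group
```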
new LLMTokenUsageAggregatorClass(),\n );\n }\n\n /**\n * Main parse method\n *\n * Extracts page range mapping from DoclingDocument using Vision LLM.\n * Automatically tracks token usage in the aggregator if one was provided.\n *\n * @param doclingDoc - DoclingDocument to extract page ranges from\n * @returns Object with page range mapping and token usage information\n */\n async parse(doclingDoc: DoclingDocument): Promise<{\n pageRangeMap: Record<number, PageRange>;\n usage: ExtendedTokenUsage[];\n }> {\n this.log('info', 'Starting page range parsing...');\n\n // Step 1: Extract and group pages by size\n const pages = this.extractPages(doclingDoc);\n if (pages.length === 0) {\n this.log('warn', 'No pages found');\n const emptyUsage = this.createEmptyUsage('sampling');\n this.trackUsage(emptyUsage);\n return {\n pageRangeMap: {},\n usage: [emptyUsage],\n };\n }\n\n const sizeGroups = this.analyzeSizes(pages);\n this.log(\n 'info',\n `Found ${sizeGroups.length} size group(s), total ${pages.length} pages`,\n );\n\n // Step 2: Process each size group\n const pageRangeMap: Record<number, PageRange> = {};\n const usageList: ExtendedTokenUsage[] = [];\n\n for (let i = 0; i < sizeGroups.length; i++) {\n const group = sizeGroups[i];\n this.log(\n 'info',\n `Processing group ${i + 1}/${sizeGroups.length}: ${group.pageNos.length} pages`,\n );\n\n const groupResult = await this.processGroup(pages, group, this.model);\n Object.assign(pageRangeMap, groupResult.pageRangeMap);\n usageList.push(...groupResult.usage);\n }\n\n // Step 3: Track all usage in aggregator\n for (const usage of usageList) {\n this.trackUsage(usage);\n }\n\n // Step 4: Post-processing\n this.postProcess(pageRangeMap);\n\n this.log(\n 'info',\n `Completed: ${Object.keys(pageRangeMap).length} pages mapped`,\n );\n\n return { pageRangeMap, usage: usageList };\n }\n\n /**\n * Extract pages array from DoclingDocument\n */\n private extractPages(doclingDoc: DoclingDocument): DoclingPage[] {\n const pageKeys = Object.keys(doclingDoc.pages)\n .map(Number)\n .filter((n) => !Number.isNaN(n))\n .sort((a, b) => a - b);\n\n return pageKeys.map((key) => doclingDoc.pages[String(key)]);\n }\n\n /**\n * Analyze page sizes and group consecutive pages with same dimensions\n */\n private analyzeSizes(pages: DoclingPage[]): PageSizeGroup[] {\n const groups: PageSizeGroup[] = [];\n let currentGroup: PageSizeGroup | null = null;\n\n for (const page of pages) {\n const sizeKey = this.createSizeKey(page.size.width, page.size.height);\n\n if (!currentGroup || currentGroup.sizeKey !== sizeKey) {\n // Start new group\n currentGroup = { sizeKey, pageNos: [page.page_no] };\n groups.push(currentGroup);\n } else {\n // Add to current group\n currentGroup.pageNos.push(page.page_no);\n }\n }\n\n return groups;\n }\n\n /**\n * Create size key with tolerance for floating point comparison\n */\n private createSizeKey(width: number, height: number): string {\n const roundedWidth = Math.round(width / this.SIZE_TOLERANCE);\n const roundedHeight = Math.round(height / this.SIZE_TOLERANCE);\n return `${roundedWidth}x${roundedHeight}`;\n }\n\n /**\n * Process a single size group\n */\n private async processGroup(\n pages: DoclingPage[],\n group: PageSizeGroup,\n model: LanguageModel,\n ): Promise<{\n pageRangeMap: Record<number, PageRange>;\n usage: ExtendedTokenUsage[];\n }> {\n const { pageNos } = group;\n const usageList: ExtendedTokenUsage[] = [];\n\n // Special case: 3 or fewer pages - send all at once\n if (pageNos.length <= this.SAMPLE_SIZE) {\n this.log(\n 'info',\n 
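// Illustration of the size-key bucketing in createSizeKey (page sizes are
// assumed): with SIZE_TOLERANCE = 5.0, two near-identical A4 pages share
// one bucket.
const keyA = `${Math.round(595.2 / 5)}x${Math.round(841.8 / 5)}`; // '119x168'
const keyB = `${Math.round(596.8 / 5)}x${Math.round(840.4 / 5)}`; // '119x168'
// Rounding still has bucket edges: a width of 597.6 rounds to 120 and would
// start a new group even though it differs by only ~2pt.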
`Small group (${pageNos.length} pages), extracting all at once`,\n );\n const result = await this.extractMultiplePages(pages, pageNos, model);\n usageList.push(result.usage);\n return {\n pageRangeMap: this.samplesToMap(result.samples),\n usage: usageList,\n };\n }\n\n // Larger groups: random sampling + pattern detection\n const sampledPages = new Set<number>();\n\n for (let attempt = 0; attempt <= this.MAX_PATTERN_RETRIES; attempt++) {\n // Select 3 random pages (excluding previously sampled if possible)\n const samplePageNos = this.selectRandomSamples(\n pageNos,\n this.SAMPLE_SIZE,\n sampledPages,\n );\n\n // Track which pages we've sampled\n for (const p of samplePageNos) {\n sampledPages.add(p);\n }\n\n this.log(\n 'info',\n `Attempt ${attempt + 1}/${this.MAX_PATTERN_RETRIES + 1}: sampling pages ${samplePageNos.join(', ')}`,\n );\n\n // Send all 3 images at once to Vision LLM\n const result = await this.extractMultiplePages(\n pages,\n samplePageNos,\n model,\n );\n usageList.push(result.usage);\n const samples = result.samples;\n\n // Try to detect pattern\n const pattern = this.detectPattern(samples);\n\n if (pattern.pattern !== PagePattern.UNKNOWN) {\n // Pattern found! Apply to all pages\n this.log(\n 'info',\n `Pattern detected: ${pattern.pattern} (offset=${pattern.offset}, increment=${pattern.increment})`,\n );\n return {\n pageRangeMap: this.applyPattern(pageNos, pattern),\n usage: usageList,\n };\n }\n\n // Pattern not found - log and retry\n this.log(\n 'warn',\n `Pattern detection failed, attempt ${attempt + 1}/${this.MAX_PATTERN_RETRIES + 1}`,\n );\n }\n\n // All retries exhausted - throw error\n throw new PageRangeParseError(\n `Failed to detect page pattern after ${this.MAX_PATTERN_RETRIES + 1} attempts for size group with ${pageNos.length} pages`,\n );\n }\n\n /**\n * Select random samples from page numbers\n */\n private selectRandomSamples(\n pageNos: number[],\n count: number,\n exclude: Set<number> = new Set(),\n ): number[] {\n // Get available pages (not previously sampled)\n const available = pageNos.filter((p) => !exclude.has(p));\n\n // If not enough unsampled pages, allow reuse\n const pool = available.length >= count ? 
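// Retry budget (arithmetic from the constants above): 1 + MAX_PATTERN_RETRIES
// attempts of SAMPLE_SIZE pages each, i.e. at most (1 + 6) * 3 = 21 sampled
// pages per size group before PageRangeParseError is thrown.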
available : pageNos;\n\n // Fisher-Yates shuffle for random selection\n const shuffled = [...pool];\n for (let i = shuffled.length - 1; i > 0; i--) {\n const j = Math.floor(Math.random() * (i + 1));\n [shuffled[i], shuffled[j]] = [shuffled[j], shuffled[i]];\n }\n\n // Return first 'count' elements, sorted by page number for consistency\n return shuffled.slice(0, count).sort((a, b) => a - b);\n }\n\n /**\n * Extract page numbers from multiple pages in a single LLM call\n */\n private async extractMultiplePages(\n pages: DoclingPage[],\n pageNos: number[],\n model: LanguageModel,\n ): Promise<{ samples: SampleResult[]; usage: ExtendedTokenUsage }> {\n this.log('info', `Extracting ${pageNos.length} pages in single LLM call`);\n\n // Build image content array\n const imageContents: Array<{ type: 'image'; image: string }> = [];\n\n for (const pageNo of pageNos) {\n const page = pages[pageNo - 1];\n const imagePath = path.resolve(this.outputPath, page.image.uri);\n const imageBuffer = fs.readFileSync(imagePath);\n const base64Image = imageBuffer.toString('base64');\n const mimeType = page.image.mimetype || 'image/png';\n\n imageContents.push({\n type: 'image',\n image: `data:${mimeType};base64,${base64Image}`,\n });\n }\n\n // Build schema for multi-page response\n const schema = z.object({\n pages: z\n .array(\n z.object({\n imageIndex: z\n .number()\n .describe('0-based index of the image in the request'),\n startPageNo: z\n .number()\n .nullable()\n .describe('Start page number (null if not found)'),\n endPageNo: z\n .number()\n .nullable()\n .describe(\n 'End page number for double-sided scans (null for single page)',\n ),\n }),\n )\n .describe('Extracted page numbers for each image'),\n });\n\n try {\n const result = await LLMCaller.callVision({\n schema,\n messages: [\n {\n role: 'user',\n content: [\n { type: 'text', text: this.buildUserPrompt(pageNos) },\n ...imageContents,\n ],\n },\n ],\n primaryModel: model,\n fallbackModel: this.fallbackModel,\n maxRetries: this.maxRetries,\n temperature: 0,\n abortSignal: this.abortSignal,\n component: 'PageRangeParser',\n phase: 'sampling',\n });\n\n // Convert response to SampleResult array\n const samples = result.output.pages.map((p) => ({\n pdfPageNo: pageNos[p.imageIndex],\n startPageNo: p.startPageNo,\n endPageNo: p.endPageNo,\n }));\n\n return { samples, usage: result.usage };\n } catch (error) {\n this.log('error', 'Multi-image extraction failed:', error);\n throw PageRangeParseError.fromError(\n 'Multi-image extraction failed',\n error,\n );\n }\n }\n\n /**\n * Detect pattern from sample results\n */\n private detectPattern(samples: SampleResult[]): PatternAnalysis {\n // Filter out null results\n const validSamples = samples.filter((s) => s.startPageNo !== null);\n\n if (validSamples.length < 2) {\n return { pattern: PagePattern.UNKNOWN, offset: 0, increment: 1 };\n }\n\n // Sort by PDF page number\n validSamples.sort((a, b) => a.pdfPageNo - b.pdfPageNo);\n\n // Check for SIMPLE_INCREMENT pattern\n const isSimple = validSamples.every((s, i) => {\n // startPageNo should equal endPageNo (or endPageNo is null)\n if (s.endPageNo !== null && s.startPageNo !== s.endPageNo) return false;\n if (i === 0) return true;\n const prev = validSamples[i - 1];\n const expectedIncrease = s.pdfPageNo - prev.pdfPageNo;\n return s.startPageNo === prev.startPageNo! + expectedIncrease;\n });\n\n if (isSimple) {\n const firstSample = validSamples[0];\n const offset = firstSample.startPageNo! 
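// SIMPLE_INCREMENT example (illustrative samples): PDF 4 -> page 9,
// PDF 7 -> page 12, PDF 15 -> page 20. Each gap matches (12 = 9 + 3,
// 20 = 12 + 8), so the pattern is simple increment with offset 9 - 4 = 5,
// and every PDF page n maps to page n + 5.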
- firstSample.pdfPageNo;\n return { pattern: PagePattern.SIMPLE_INCREMENT, offset, increment: 1 };\n }\n\n // Check for DOUBLE_SIDED pattern\n // Each PDF page contains 2 actual pages: [startPageNo, startPageNo+1]\n // Formula: startPageNo = pdfPageNo * 2 + offset (where offset is usually -1 for 1-based)\n const isDoubleSided = validSamples.every((s, i) => {\n // Each page must have endPageNo = startPageNo + 1\n if (s.endPageNo === null) return false;\n if (s.endPageNo !== s.startPageNo! + 1) return false;\n if (i === 0) return true;\n\n // For non-consecutive samples, check the formula consistency\n // startPageNo should follow: pdfPageNo * 2 + offset\n const prev = validSamples[i - 1];\n const pdfDiff = s.pdfPageNo - prev.pdfPageNo;\n const expectedStartDiff = pdfDiff * 2; // Each PDF page = 2 actual pages\n const actualStartDiff = s.startPageNo! - prev.startPageNo!;\n return actualStartDiff === expectedStartDiff;\n });\n\n if (isDoubleSided) {\n const firstSample = validSamples[0];\n const offset = firstSample.startPageNo! - firstSample.pdfPageNo * 2;\n return { pattern: PagePattern.DOUBLE_SIDED, offset, increment: 2 };\n }\n\n // Check for OFFSET pattern (consistent offset with ±1 tolerance)\n const offsets = validSamples.map((s) => s.startPageNo! - s.pdfPageNo);\n const avgOffset = Math.round(\n offsets.reduce((a, b) => a + b, 0) / offsets.length,\n );\n const isConsistentOffset = offsets.every(\n (o) => Math.abs(o - avgOffset) <= 1,\n );\n\n if (isConsistentOffset) {\n return { pattern: PagePattern.OFFSET, offset: avgOffset, increment: 1 };\n }\n\n return { pattern: PagePattern.UNKNOWN, offset: 0, increment: 1 };\n }\n\n /**\n * Apply detected pattern to generate page range map\n */\n private applyPattern(\n pageNos: number[],\n pattern: PatternAnalysis,\n ): Record<number, PageRange> {\n const result: Record<number, PageRange> = {};\n\n for (const pdfPageNo of pageNos) {\n switch (pattern.pattern) {\n case PagePattern.SIMPLE_INCREMENT:\n case PagePattern.OFFSET: {\n const pageNo = pdfPageNo + pattern.offset;\n result[pdfPageNo] = {\n startPageNo: pageNo,\n endPageNo: pageNo,\n };\n break;\n }\n\n case PagePattern.DOUBLE_SIDED: {\n const start = pdfPageNo * 2 + pattern.offset;\n result[pdfPageNo] = {\n startPageNo: start,\n endPageNo: start + 1,\n };\n break;\n }\n\n default:\n result[pdfPageNo] = { startPageNo: 0, endPageNo: 0 };\n }\n }\n\n return result;\n }\n\n /**\n * Convert sample results to page range map (for small groups)\n */\n private samplesToMap(samples: SampleResult[]): Record<number, PageRange> {\n const result: Record<number, PageRange> = {};\n\n for (const sample of samples) {\n if (sample.startPageNo !== null) {\n result[sample.pdfPageNo] = {\n startPageNo: sample.startPageNo,\n endPageNo: sample.endPageNo ?? sample.startPageNo,\n };\n } else {\n result[sample.pdfPageNo] = { startPageNo: 0, endPageNo: 0 };\n }\n }\n\n return result;\n }\n\n /**\n * Post-process the page range map\n */\n private postProcess(pageRangeMap: Record<number, PageRange>): void {\n // Order matters:\n // 1. Detect outliers (abnormally high values at beginning)\n // 2. Handle drops\n // 3. Normalize negatives\n // 4. 
Backfill failed pages\n this.detectAndHandleOutliers(pageRangeMap);\n this.detectAndHandleDrops(pageRangeMap);\n this.normalizeNegatives(pageRangeMap);\n this.backfillFailedPages(pageRangeMap);\n }\n\n /**\n * Detect and handle outlier page numbers at the beginning of document\n *\n * When early PDF pages have abnormally high page numbers compared to\n * subsequent pages (e.g., PDF 1-9 = 75-83, but PDF 10+ = 2,3,4...),\n * the LLM likely misread figure/photo numbers as page numbers.\n *\n * Detection: If page numbers at the beginning are significantly higher\n * than subsequent pages (which follow a normal pattern), mark them as failed.\n */\n private detectAndHandleOutliers(\n pageRangeMap: Record<number, PageRange>,\n ): void {\n const pdfPages = Object.keys(pageRangeMap)\n .map(Number)\n .sort((a, b) => a - b);\n\n if (pdfPages.length < 3) return;\n\n // Find the first \"normal\" sequence (at least 3 consecutive pages following a pattern)\n const normalSequenceStart = this.findNormalSequenceStart(\n pageRangeMap,\n pdfPages,\n );\n\n if (normalSequenceStart === null || normalSequenceStart <= 0) return;\n\n const normalStartPdfPage = pdfPages[normalSequenceStart];\n const normalStartPageNo = pageRangeMap[normalStartPdfPage].startPageNo;\n\n // Check if pages before the normal sequence are outliers\n // (their page numbers are much higher than what they should be)\n let hasOutliers = false;\n for (let i = 0; i < normalSequenceStart; i++) {\n const pdfPage = pdfPages[i];\n const pageNo = pageRangeMap[pdfPage].startPageNo;\n\n if (pageNo === 0) continue;\n\n // Calculate expected page number based on the normal sequence\n const pdfDiff = normalStartPdfPage - pdfPage;\n\n // For double-sided: each PDF page = 2 actual pages\n const isDoubleSided = this.isDoubleSidedRange(\n pageRangeMap[normalStartPdfPage],\n );\n const expectedPageNo = isDoubleSided\n ? 
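// Outlier example (illustrative values): if the normal sequence starts at
// PDF 10 = page 2, PDF 3 is expected near 2 - (10 - 3) = -5. An extracted
// value of 77 exceeds expected + 10 by a wide margin, so PDF 3 is reset to
// 0 and later backfilled.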
normalStartPageNo - pdfDiff * 2\n : normalStartPageNo - pdfDiff;\n\n // If actual page number is significantly higher than expected, it's an outlier\n // Use threshold: actual > expected + 10 (to avoid false positives)\n if (pageNo > expectedPageNo + 10) {\n this.log(\n 'info',\n `Outlier detected: PDF ${pdfPage}=${pageNo} (expected ~${expectedPageNo})`,\n );\n pageRangeMap[pdfPage] = { startPageNo: 0, endPageNo: 0 };\n hasOutliers = true;\n }\n }\n\n if (hasOutliers) {\n this.log('info', `Outliers marked as failed, will be backfilled later`);\n }\n }\n\n /**\n * Find the start index of a \"normal\" sequence in the page range map\n *\n * A normal sequence is defined as at least 3 consecutive PDF pages where:\n * - Page numbers are increasing (for single-page) or increasing by 2 (for double-sided)\n * - The pattern is consistent\n *\n * Returns the index in pdfPages array, or null if not found.\n */\n private findNormalSequenceStart(\n pageRangeMap: Record<number, PageRange>,\n pdfPages: number[],\n ): number | null {\n const MIN_SEQUENCE_LENGTH = 3;\n\n for (\n let startIdx = 0;\n startIdx <= pdfPages.length - MIN_SEQUENCE_LENGTH;\n startIdx++\n ) {\n let isValidSequence = true;\n let expectedIncrement: number | null = null;\n\n for (let i = 0; i < MIN_SEQUENCE_LENGTH - 1; i++) {\n const currPdfPage = pdfPages[startIdx + i];\n const nextPdfPage = pdfPages[startIdx + i + 1];\n const currRange = pageRangeMap[currPdfPage];\n const nextRange = pageRangeMap[nextPdfPage];\n\n // Skip if either has failed extraction\n if (currRange.startPageNo === 0 || nextRange.startPageNo === 0) {\n isValidSequence = false;\n break;\n }\n\n // Calculate increment\n const pageIncrement = nextRange.startPageNo - currRange.startPageNo;\n const pdfIncrement = nextPdfPage - currPdfPage;\n\n // Determine expected increment (1 for single-page, 2 for double-sided per PDF page)\n const isDoubleSided = this.isDoubleSidedRange(currRange);\n const expectedIncrementPerPdf = isDoubleSided ? 
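// Example (illustrative): startPageNo values [75, 2, 3, 4] on PDF pages 1-4.
// The window at index 0 fails (75 -> 2 is not a +1 step), while the window
// starting at index 1 (2 -> 3 -> 4) is consistent, so index 1 is returned.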
2 : 1;\n const expected = pdfIncrement * expectedIncrementPerPdf;\n\n if (expectedIncrement === null) {\n expectedIncrement = pageIncrement;\n }\n\n // Check if increment is reasonable (should match expected pattern)\n if (pageIncrement !== expected) {\n isValidSequence = false;\n break;\n }\n }\n\n if (isValidSequence) {\n return startIdx;\n }\n }\n\n return null;\n }\n\n /**\n * Check if a page range represents a double-sided scan\n */\n private isDoubleSidedRange(range: PageRange): boolean {\n return (\n range.endPageNo !== null &&\n range.endPageNo !== range.startPageNo &&\n range.endPageNo === range.startPageNo + 1\n );\n }\n\n /**\n * Detect and handle page number drops\n *\n * When page numbers suddenly decrease (e.g., 8,9 -> 3,4),\n * recalculate previous pages based on the drop point.\n */\n private detectAndHandleDrops(pageRangeMap: Record<number, PageRange>): void {\n const pdfPages = Object.keys(pageRangeMap)\n .map(Number)\n .sort((a, b) => a - b);\n\n if (pdfPages.length < 2) return;\n\n for (let i = 1; i < pdfPages.length; i++) {\n const prevPdfPage = pdfPages[i - 1];\n const currPdfPage = pdfPages[i];\n const prevPageNo = pageRangeMap[prevPdfPage].startPageNo;\n const currPageNo = pageRangeMap[currPdfPage].startPageNo;\n\n // Skip if either is 0 (extraction failed)\n if (prevPageNo === 0 || currPageNo === 0) continue;\n\n // Detect significant drop (more than 1)\n if (\n currPageNo > 0 &&\n prevPageNo > currPageNo &&\n prevPageNo - currPageNo > 1\n ) {\n this.log(\n 'info',\n `Page drop detected: PDF ${prevPdfPage}=${prevPageNo} -> PDF ${currPdfPage}=${currPageNo}`,\n );\n\n // Determine if the reference page is double-sided\n const isDoubleSided = this.isDoubleSidedRange(\n pageRangeMap[currPdfPage],\n );\n\n // Recalculate all previous pages based on drop point\n for (let j = i - 1; j >= 0; j--) {\n const pdfPage = pdfPages[j];\n const distance = currPdfPage - pdfPage;\n\n if (isDoubleSided) {\n // Double-sided: each PDF page = 2 actual pages\n const expectedStartPageNo = currPageNo - distance * 2;\n\n if (expectedStartPageNo < 1) {\n pageRangeMap[pdfPage] = { startPageNo: 0, endPageNo: 0 };\n } else {\n pageRangeMap[pdfPage] = {\n startPageNo: expectedStartPageNo,\n endPageNo: expectedStartPageNo + 1,\n };\n }\n } else {\n // Single-page pattern\n const expectedPageNo = currPageNo - distance;\n\n if (expectedPageNo < 1) {\n pageRangeMap[pdfPage] = { startPageNo: 0, endPageNo: 0 };\n } else {\n pageRangeMap[pdfPage] = {\n startPageNo: expectedPageNo,\n endPageNo: expectedPageNo,\n };\n }\n }\n this.log(\n 'info',\n `Recalculated PDF ${pdfPage} -> ${pageRangeMap[pdfPage].startPageNo}`,\n );\n }\n }\n }\n }\n\n /**\n * Normalize negative page numbers to 0\n */\n private normalizeNegatives(pageRangeMap: Record<number, PageRange>): void {\n for (const [pdfPageStr, range] of Object.entries(pageRangeMap)) {\n if (range.startPageNo < 0 || range.endPageNo < 0) {\n this.log('info', `Normalizing negative: PDF ${pdfPageStr} -> 0`);\n pageRangeMap[Number(pdfPageStr)] = { startPageNo: 0, endPageNo: 0 };\n }\n }\n }\n\n /**\n * Backfill pages marked with 0 using detected pattern\n */\n private backfillFailedPages(pageRangeMap: Record<number, PageRange>): void {\n const pdfPages = Object.keys(pageRangeMap)\n .map(Number)\n .sort((a, b) => a - b);\n\n // Find pages with startPageNo === 0 (extraction failed)\n const failedPages = pdfPages.filter(\n (p) => pageRangeMap[p].startPageNo === 0,\n );\n if (failedPages.length === 0) return;\n\n // Find successful pages to detect pattern\n const 
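// Drop example (illustrative): PDF 5 = page 8 but PDF 6 = page 3. Anchoring
// on PDF 6, earlier pages are recomputed as 3 - distance: PDF 5 -> 2,
// PDF 4 -> 1, and PDF 3 would be 0, so it is marked failed and left for
// backfill.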
successfulPages = pdfPages\n .filter((p) => pageRangeMap[p].startPageNo > 0)\n .map((p) => ({\n pdfPage: p,\n pageNo: pageRangeMap[p].startPageNo,\n isDoubleSided: this.isDoubleSidedRange(pageRangeMap[p]),\n }));\n\n if (successfulPages.length < 2) {\n this.log('warn', 'Not enough successful pages for backfill');\n return;\n }\n\n // Detect if this is a double-sided pattern\n const doubleSidedCount = successfulPages.filter(\n (s) => s.isDoubleSided,\n ).length;\n const isDoubleSided = doubleSidedCount > successfulPages.length / 2;\n\n if (isDoubleSided) {\n // For double-sided: calculate offset using formula startPageNo = pdfPage * 2 + offset\n const offsets = successfulPages.map((s) => s.pageNo - s.pdfPage * 2);\n const avgOffset = Math.round(\n offsets.reduce((a, b) => a + b, 0) / offsets.length,\n );\n\n this.log(\n 'info',\n `Backfilling ${failedPages.length} pages with double-sided pattern (offset=${avgOffset})`,\n );\n\n for (const pdfPage of failedPages) {\n const expectedStartPageNo = pdfPage * 2 + avgOffset;\n\n if (expectedStartPageNo < 1) {\n this.log(\n 'info',\n `Backfill skipped for PDF ${pdfPage} (would be ${expectedStartPageNo})`,\n );\n // Mark as cover/intro page with 0\n continue;\n }\n\n this.log(\n 'info',\n `Backfill PDF ${pdfPage}: 0 -> ${expectedStartPageNo}-${expectedStartPageNo + 1}`,\n );\n pageRangeMap[pdfPage] = {\n startPageNo: expectedStartPageNo,\n endPageNo: expectedStartPageNo + 1,\n };\n }\n } else {\n // For single-page: calculate simple offset\n const offsets = successfulPages.map((s) => s.pageNo - s.pdfPage);\n const avgOffset = Math.round(\n offsets.reduce((a, b) => a + b, 0) / offsets.length,\n );\n\n this.log(\n 'info',\n `Backfilling ${failedPages.length} pages with offset ${avgOffset}`,\n );\n\n for (const pdfPage of failedPages) {\n const expectedPageNo = pdfPage + avgOffset;\n\n if (expectedPageNo < 1) {\n this.log(\n 'info',\n `Backfill skipped for PDF ${pdfPage} (would be ${expectedPageNo})`,\n );\n continue;\n }\n\n this.log('info', `Backfill PDF ${pdfPage}: 0 -> ${expectedPageNo}`);\n pageRangeMap[pdfPage] = {\n startPageNo: expectedPageNo,\n endPageNo: expectedPageNo,\n };\n }\n }\n }\n\n /**\n * Build system prompt for Vision LLM\n */\n protected buildSystemPrompt(): string {\n return `You are a page number extraction specialist for document images.\nYou will receive multiple document page images. For EACH image, extract the visible page number(s).\n\n**SCAN TYPES:**\n1. SINGLE PAGE: One document page per image. Return startPageNo only, endPageNo should be null.\n2. DOUBLE-SIDED: Two document pages per image (spread). Return startPageNo (left) and endPageNo (right).\n\n**WHERE TO LOOK:**\n- Bottom center, bottom corners (most common)\n- Top corners (less common)\n- Page numbers are SMALL numbers in MARGINS, NOT in content area\n\n**WHAT TO IGNORE - These are NOT page numbers:**\n- Roman numerals (i, ii, iii, iv, v...) - return null\n- Figure numbers: \"Figure 5\", \"Fig. 
5\", \"도 5\", \"그림 5\"\n- Table numbers: \"Table 3\", \"표 3\"\n- Photo numbers: \"Photo 8\", \"사진 8\", \"Plate 4\", \"도판 4\"\n- Years in content: \"2015\", \"(1998)\"\n- Any numbers with text prefix or inside content area\n\n**RESPONSE FORMAT:**\nFor each image (in order), provide:\n- imageIndex: 0-based index of the image\n- startPageNo: The page number found (null if not visible/readable)\n- endPageNo: Right page number for double-sided scans (null for single pages)`;\n }\n\n /**\n * Build user prompt for Vision LLM\n */\n protected buildUserPrompt(pageNos: number[]): string {\n return `I am providing ${pageNos.length} document page images.\nThese are PDF pages: ${pageNos.join(', ')}.\n\nFor each image (in order), extract the visible page number(s).\nReturn null for pages where no page number is visible or readable.\n\nRemember: Look for SMALL numbers in MARGINS only. Ignore figure/table/photo numbers.`;\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type {\n ExtendedTokenUsage,\n LLMTokenUsageAggregator,\n} from '@heripo/shared';\nimport type { LanguageModel } from 'ai';\nimport type { z } from 'zod';\n\nimport { LLMCaller } from '@heripo/shared';\n\nimport {\n type BaseLLMComponentOptions,\n TextLLMComponent,\n} from '../core/text-llm-component';\n\n/**\n * Base options for all validators\n *\n * Re-exported from BaseLLMComponentOptions for backwards compatibility.\n */\nexport type BaseValidatorOptions = BaseLLMComponentOptions;\n\n/**\n * Abstract base class for LLM-based validators\n *\n * Extends TextLLMComponent to provide common functionality for validators\n * that use LLM to validate/analyze content:\n * - LLM API call wrapper with LLMCaller (via callLLM method)\n * - Standard logging patterns (via log method from base class)\n * - Retry and fallback configuration\n *\n * Token usage is tracked by LLMCaller and should be aggregated by DocumentProcessor.\n *\n * @template TSchema - Zod schema type for validation\n * @template TResult - Result type after parsing with schema\n */\nexport abstract class BaseValidator<\n TSchema extends z.ZodType,\n TResult = z.infer<TSchema>,\n> extends TextLLMComponent {\n /**\n * Validator name for logging (kept for backwards compatibility)\n */\n protected readonly validatorName: string;\n\n /**\n * Constructor for BaseValidator\n *\n * @param logger - Logger instance\n * @param model - Language model to use for validation\n * @param validatorName - Name of the validator for logging (e.g., \"TocContentValidator\")\n * @param options - Optional configuration (maxRetries, temperature)\n * @param fallbackModel - Optional fallback model for retry on failure\n * @param aggregator - Optional token usage aggregator for tracking LLM calls\n */\n constructor(\n logger: LoggerMethods,\n model: LanguageModel,\n validatorName: string,\n options?: BaseValidatorOptions,\n fallbackModel?: LanguageModel,\n aggregator?: LLMTokenUsageAggregator,\n ) {\n super(logger, model, validatorName, options, fallbackModel, aggregator);\n this.validatorName = validatorName;\n }\n\n /**\n * Call LLM with LLMCaller\n *\n * This method provides backwards compatibility with existing validators.\n * It wraps the parent callTextLLM method but allows passing a custom aggregator.\n *\n * @param schema - Zod schema for response validation\n * @param systemPrompt - System prompt\n * @param userPrompt - User prompt\n * @param phase - Phase name for tracking (e.g., 'validation', 'batch-validation')\n * @param aggregator - Optional token usage aggregator for 
tracking this call\n * @returns Parsed and validated LLM response with usage information\n */\n protected async callLLM(\n schema: TSchema,\n systemPrompt: string,\n userPrompt: string,\n phase: string,\n aggregator?: LLMTokenUsageAggregator,\n ): Promise<{ output: TResult; usage: ExtendedTokenUsage }> {\n const result = await LLMCaller.call({\n schema,\n systemPrompt,\n userPrompt,\n primaryModel: this.model,\n fallbackModel: this.fallbackModel,\n maxRetries: this.maxRetries,\n temperature: this.temperature,\n abortSignal: this.abortSignal,\n component: this.validatorName,\n phase,\n });\n\n // Track to custom aggregator if provided, otherwise use base class aggregator\n if (aggregator) {\n aggregator.track(result.usage);\n } else {\n this.trackUsage(result.usage);\n }\n\n return {\n output: result.output as TResult,\n usage: result.usage,\n };\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type { LLMTokenUsageAggregator } from '@heripo/shared';\nimport type { LanguageModel } from 'ai';\n\nimport type { BaseValidatorOptions } from './base-validator';\n\nimport { z } from 'zod';\n\nimport { BaseValidator } from './base-validator';\n\n/**\n * Content type for TOC validation\n */\nexport type TocContentType = 'pure_toc' | 'mixed' | 'resource_only' | 'invalid';\n\n/**\n * Schema for TOC content validation response\n */\nexport const TocContentValidationSchema = z.object({\n isValid: z.boolean().describe('Whether valid main document TOC was found'),\n confidence: z\n .number()\n .min(0)\n .max(1)\n .describe('Confidence score between 0 and 1'),\n contentType: z\n .enum(['pure_toc', 'mixed', 'resource_only', 'invalid'])\n .describe('Type of content detected'),\n extractedTocMarkdown: z\n .string()\n .nullable()\n .describe('Extracted main TOC markdown when mixed; null otherwise'),\n reason: z.string().describe('Brief explanation in English'),\n});\n\nexport type TocContentValidationResult = z.infer<\n typeof TocContentValidationSchema\n>;\n\n/**\n * Output type for TOC validation with resolved markdown\n */\nexport interface TocValidationOutput {\n isValid: boolean;\n confidence: number;\n contentType: TocContentType;\n validTocMarkdown: string | null;\n reason: string;\n}\n\n/**\n * Options for TocContentValidator\n */\nexport interface TocContentValidatorOptions extends BaseValidatorOptions {\n /**\n * Minimum confidence to consider valid (default: 0.7)\n */\n confidenceThreshold?: number;\n}\n\n/**\n * TocContentValidator\n *\n * Uses LLM to validate whether extracted markdown content is actually a TOC.\n * This is a semantic validation, not structural validation.\n * Supports mixed content extraction where main TOC is combined with resource indices.\n */\nexport class TocContentValidator extends BaseValidator<\n typeof TocContentValidationSchema,\n TocContentValidationResult\n> {\n private readonly confidenceThreshold: number;\n\n constructor(\n logger: LoggerMethods,\n model: LanguageModel,\n options?: TocContentValidatorOptions,\n fallbackModel?: LanguageModel,\n aggregator?: LLMTokenUsageAggregator,\n ) {\n super(\n logger,\n model,\n 'TocContentValidator',\n options,\n fallbackModel,\n aggregator,\n );\n this.confidenceThreshold = options?.confidenceThreshold ?? 
0.7;\n }\n\n /**\n * Validate if the markdown content is a table of contents\n *\n * @param markdown - Markdown content to validate\n * @returns Validation output with resolved markdown for valid TOC\n */\n async validate(markdown: string): Promise<TocValidationOutput> {\n this.logger.info(\n `[TocContentValidator] Validating content (${markdown.length} chars)`,\n );\n\n if (!markdown.trim()) {\n this.logger.info(\n '[TocContentValidator] Empty markdown, returning invalid',\n );\n return {\n isValid: false,\n confidence: 1.0,\n contentType: 'invalid',\n validTocMarkdown: null,\n reason: 'Empty content',\n };\n }\n\n const { output: result } = await this.callLLM(\n TocContentValidationSchema,\n this.buildSystemPrompt(),\n this.buildUserPrompt(markdown),\n 'validation',\n this.aggregator,\n );\n\n this.logger.info(\n `[TocContentValidator] Result: isValid=${result.isValid}, contentType=${result.contentType}, confidence=${result.confidence}`,\n );\n\n // Resolve valid markdown based on content type\n let validTocMarkdown: string | null = null;\n if (result.isValid && result.confidence >= this.confidenceThreshold) {\n if (result.contentType === 'pure_toc') {\n validTocMarkdown = markdown;\n } else if (\n result.contentType === 'mixed' &&\n result.extractedTocMarkdown\n ) {\n validTocMarkdown = result.extractedTocMarkdown;\n }\n }\n\n return {\n isValid: result.isValid,\n confidence: result.confidence,\n contentType: result.contentType,\n validTocMarkdown,\n reason: result.reason,\n };\n }\n\n /**\n * Check if validation result passes threshold\n *\n * @param result - Validation output from validate()\n * @returns true if content is valid TOC with sufficient confidence\n */\n isValid(result: TocValidationOutput): boolean {\n return result.isValid && result.confidence >= this.confidenceThreshold;\n }\n\n /**\n * Get the valid TOC markdown from validation result\n *\n * @param result - Validation output from validate()\n * @returns Valid TOC markdown or null if invalid\n */\n getValidMarkdown(result: TocValidationOutput): string | null {\n return result.validTocMarkdown;\n }\n\n /**\n * Build system prompt for TOC content validation\n */\n protected buildSystemPrompt(): string {\n return `You are a document structure analyst. Your task is to analyze the provided content and classify it into one of four categories.\n\n## Content Type Classification:\n\n### 1. pure_toc\nThe content is ONLY a main document Table of Contents with:\n- Structured list of chapters/sections with page numbers\n- Hierarchical section titles (e.g., \"Chapter 1\", \"제1장\", \"1.1 Introduction\")\n- Multiple entries (3 or more) organized by document structure\n- NO resource indices mixed in\n\n### 2. mixed\nThe content contains BOTH:\n- A valid main document TOC (chapters/sections with page numbers)\n- AND resource indices (photo/table/drawing indices)\n\nWhen classifying as \"mixed\", you MUST extract ONLY the main TOC portion and return it in extractedTocMarkdown.\n\n### 3. resource_only\nThe content contains ONLY resource indices such as:\n- Photo/image indices (사진 목차, 사진목차, Photo Index, List of Figures, List of Photos)\n- Table indices (표 목차, 표목차, Table Index, List of Tables)\n- Drawing/diagram indices (도면 목차, 도면목차, Drawing Index, List of Drawings)\n- Appendix indices (부록 목차, Appendix Index)\n\n### 4. 
invalid\nThe content is none of the above:\n- Random body text\n- Single entries or incomplete lists (fewer than 3 items)\n- Reference lists or bibliographies\n- Index pages (alphabetical keyword lists)\n- Unstructured content\n\n## Response Guidelines:\n- Set isValid to true for \"pure_toc\" and \"mixed\" types\n- Set isValid to false for \"resource_only\" and \"invalid\" types\n- Set confidence between 0.0 and 1.0 based on your certainty\n- For \"mixed\" type: extractedTocMarkdown MUST contain only the main TOC entries (preserve original formatting)\n- For other types: extractedTocMarkdown should be null\n- IMPORTANT: reason MUST be written in English\n\n## Example Scenarios:\n\n### Scenario 1: pure_toc\nInput: \"제1장 서론 ..... 1\\\\n제2장 조사개요 ..... 5\\\\n제3장 조사결과 ..... 15\"\nOutput: { isValid: true, contentType: \"pure_toc\", extractedTocMarkdown: null }\n\n### Scenario 2: mixed\nInput: \"제1장 서론 ..... 1\\\\n제2장 조사개요 ..... 5\\\\n\\\\n사진목차\\\\n사진 1 전경 ..... 50\\\\n사진 2 유물 ..... 51\"\nOutput: { isValid: true, contentType: \"mixed\", extractedTocMarkdown: \"제1장 서론 ..... 1\\\\n제2장 조사개요 ..... 5\" }\n\n### Scenario 3: resource_only\nInput: \"사진목차\\\\n사진 1 전경 ..... 50\\\\n사진 2 유물 ..... 51\"\nOutput: { isValid: false, contentType: \"resource_only\", extractedTocMarkdown: null }`;\n }\n\n /**\n * Build user prompt with markdown content\n */\n protected buildUserPrompt(markdown: string): string {\n return `Analyze the following content and classify it:\n\n${markdown}`;\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type { Caption } from '@heripo/model';\nimport type { LLMTokenUsageAggregator } from '@heripo/shared';\nimport type { LanguageModel } from 'ai';\n\nimport { BatchProcessor, LLMCaller } from '@heripo/shared';\nimport { z } from 'zod';\n\nimport { BaseValidator, type BaseValidatorOptions } from './base-validator';\n\n/**\n * Schema for a single caption validation result\n */\nconst CaptionValidationItemSchema = z.object({\n index: z.number().int().describe('Index of the caption in the input array'),\n isValid: z.boolean().describe('Whether the parsed caption is correct'),\n reason: z\n .string()\n .nullable()\n .describe('Brief explanation if invalid, null if valid'),\n});\n\n/**\n * Schema for batch caption validation response\n */\nconst CaptionValidationBatchSchema = z.object({\n results: z.array(CaptionValidationItemSchema),\n});\n\ntype CaptionValidationBatch = z.infer<typeof CaptionValidationBatchSchema>;\n\n/**\n * Options for CaptionValidator\n */\nexport interface CaptionValidatorOptions extends BaseValidatorOptions {\n // No additional options for now\n}\n\n/**\n * CaptionValidator\n *\n * Validates parsed captions against original text using LLM.\n * Processes captions in batches to optimize LLM API calls.\n *\n * ## Validation Rules\n *\n * Checks if the parsed \"num\" field correctly extracts the prefix + number from original text:\n * 1. **Correctness**: The \"num\" must contain the actual prefix+number from the original text\n * - Example: \"도판 1 유적 전경\" → num=\"도판 1\" ✓\n * - Example: \"도판 1 유적 전경\" → num=\"도판\" ✗ (incomplete)\n *\n * 2. **Spacing**: The spacing in \"num\" must match the original text exactly\n * - Example: \"도판 1\" → num=\"도판 1\" ✓\n * - Example: \"도판1\" → num=\"도판1\" ✓\n * - Example: \"도판 1\" → num=\"도판1\" ✗ (spacing mismatch)\n *\n * 3. **Completeness**: The number part must be fully extracted\n * - Example: \"Figure 2-3\" → num=\"Figure 2-3\" ✓\n * - Example: \"Figure 2-3\" → num=\"Figure 2\" ✗ (incomplete number)\n *\n * 4. 
**Null handling**: If \"num\" is null, verify that the original text has no number prefix\n * - Example: \"유적 전경 사진\" → num=null ✓\n * - Example: \"도판 1 전경\" → num=null ✗ (should extract \"도판 1\")\n */\nexport class CaptionValidator extends BaseValidator<\n typeof CaptionValidationBatchSchema,\n CaptionValidationBatch\n> {\n constructor(\n logger: LoggerMethods,\n model: LanguageModel,\n options?: CaptionValidatorOptions,\n fallbackModel?: LanguageModel,\n aggregator?: LLMTokenUsageAggregator,\n ) {\n super(\n logger,\n model,\n 'CaptionValidator',\n options,\n fallbackModel,\n aggregator,\n );\n }\n\n /**\n * Validate batch of parsed captions against original texts\n *\n * @param captions - Array of parsed Caption objects\n * @param originalTexts - Array of original caption texts (same order as captions)\n * @param batchSize - Batch size for processing. Set to 0 to skip validation (assume all valid).\n * @returns Array of validation results (boolean) maintaining original order\n */\n async validateBatch(\n captions: Caption[],\n originalTexts: string[],\n batchSize: number,\n ): Promise<boolean[]> {\n this.logger.info(\n `[CaptionValidator] Validating ${captions.length} captions with batch size ${batchSize}...`,\n );\n\n if (captions.length !== originalTexts.length) {\n throw new Error(\n `[CaptionValidator] Captions and originalTexts length mismatch: ${captions.length} vs ${originalTexts.length}`,\n );\n }\n\n if (captions.length === 0) {\n this.logger.info('[CaptionValidator] No captions to validate');\n return [];\n }\n\n if (batchSize === 0) {\n // Skip validation, assume all captions are valid\n this.logger.info(\n '[CaptionValidator] Skipping validation (batchSize=0), assuming all captions are valid',\n );\n return new Array(captions.length).fill(true);\n }\n\n try {\n // Convert to indexed format for batch processing\n const indexedItems = captions.map((caption, index) => ({\n index,\n caption,\n originalText: originalTexts[index],\n }));\n\n // Use BatchProcessor to process in parallel batches\n const batchResults = await BatchProcessor.processBatch(\n indexedItems,\n batchSize,\n async (batch) => this.validateBatchInternal(batch, this.model),\n );\n\n // Sort results by original index to maintain order\n batchResults.sort((a, b) => a.index - b.index);\n const results = batchResults.map((r) => r.isValid);\n\n const validCount = results.filter((r) => r).length;\n this.logger.info(\n `[CaptionValidator] Completed: ${validCount}/${results.length} captions validated as correct`,\n );\n\n // Log token usage summary if aggregator is available\n if (this.aggregator) {\n this.aggregator.logSummary(this.logger);\n }\n\n return results;\n } catch (error) {\n const message = error instanceof Error ? 
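// Behavior notes for validateBatch (derived from the code above):
// batchSize 0 short-circuits to all-true without any LLM call; with
// batchSize 10 and 25 captions, BatchProcessor issues 3 parallel calls
// (10 + 10 + 5) and the results are re-sorted so results[i] always answers
// captions[i].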
error.message : String(error);\n this.logger.error(`[CaptionValidator] Validation failed: ${message}`);\n throw new CaptionValidationError(\n `Failed to validate captions: ${message}`,\n { cause: error },\n );\n }\n }\n\n /**\n * Internal: Validate batch of captions using LLM\n *\n * @param items - Batch of caption items with original indices\n * @param model - Effective model to use\n * @returns Array of validation results indexed correctly\n */\n private async validateBatchInternal(\n items: Array<{ index: number; caption: Caption; originalText: string }>,\n model: LanguageModel,\n ): Promise<Array<{ index: number; isValid: boolean }>> {\n const result = await LLMCaller.call({\n schema: CaptionValidationBatchSchema,\n systemPrompt: this.buildSystemPrompt(),\n userPrompt: this.buildUserPrompt(items),\n primaryModel: model,\n fallbackModel: this.fallbackModel,\n maxRetries: this.maxRetries,\n temperature: this.temperature,\n abortSignal: this.abortSignal,\n component: 'CaptionValidator',\n phase: 'validation',\n });\n\n // Track token usage if aggregator is available\n if (this.aggregator) {\n this.aggregator.track(result.usage);\n }\n\n // Map LLM results back to original indices\n return result.output.results.map((item) => ({\n index: item.index,\n isValid: item.isValid,\n }));\n }\n\n protected buildSystemPrompt(): string {\n return `You are a caption validation expert for archaeological excavation reports.\n\nYour task is to validate whether parsed caption prefixes (num field) are correctly extracted from original caption texts.\n\n## Caption Pattern Recognition\n\nA valid caption follows the pattern: <prefix word(s)> <number>\n- The prefix can be ANY Korean/English word(s) that label images/tables/figures\n- Common examples: 도판, 사진, 그림, 원색사진, 흑백사진, Figure, Photo, Plate, etc.\n- The key is the PATTERN (text followed by number), not a specific word list\n- Leading punctuation/brackets should be IGNORED when extracting\n\nValid caption patterns:\n- \"원색사진 1. 조사지역\" → num=\"원색사진 1\" ✓\n- \"흑백사진 2 출토유물\" → num=\"흑백사진 2\" ✓\n- \"도판 1 유적 전경\" → num=\"도판 1\" ✓\n- \"(사진 16> 느티나무\" → num=\"사진 16\" ✓ (ignore leading punctuation)\n- \"<도판 3> 유물 사진\" → num=\"도판 3\" ✓ (ignore angle brackets)\n\nInvalid patterns (num MUST be null):\n- \"39 3월 28일(백제 도로유구)\" → null ✓ (starts with number, no prefix)\n- \"1. 유적 전경\" → null ✓ (numbered list item, not a caption)\n- \"2024년 조사 현황\" → null ✓ (year reference, not a caption)\n\n## Extraction Algorithm:\n\n1. Extract prefix + number from the caption\n - The prefix is the text portion before the number\n - Full extraction: \"원색사진 1\", \"도판 2-3\", \"그림 3.6\", \"Figure 4a\"\n\n2. **Decimal point handling**: Include period/dot after number if directly following\n - \"그림 3.6. 한반도\" → \"그림 3.6\" (period as decimal separator included)\n - \"도판 2. 유적\" → \"도판 2\" (period after space, NOT included)\n\n3. **Stop rules** (extraction must stop at first occurrence of):\n - Punctuation (except decimal point): , : ; ! ? ~ ( ) [ ] { }\n - Whitespace: space, tab, newline\n - Underscore: _\n - Exception: Periods directly after digits are included as decimal separators\n - Exception: Hyphens within numbers are included (e.g., \"2-3\")\n\n## Validation Rules:\n\n1. **Pattern requirement**: The original text MUST follow <prefix> <number> pattern\n - \"원색사진 1. 조사지역\" → num=\"원색사진 1\" ✓ (valid pattern)\n - \"39 3월 28일(백제)\" → num=\"39\" ✗ (starts with number, should be null)\n - \"1. 조사 개요\" → num=\"1\" ✗ (numbered list, should be null)\n\n2. 
**Correctness**: The parsed \"num\" must contain the actual prefix+number\n - \"도판 1 유적 전경\" → num=\"도판 1\" ✓\n - \"도판 1 유적 전경\" → num=\"도판\" ✗ (incomplete)\n\n3. **Spacing**: The spacing in \"num\" must match the original text exactly\n - \"도판 1\" → num=\"도판 1\" ✓\n - \"도판1\" → num=\"도판1\" ✓\n - \"도판 1\" → num=\"도판1\" ✗ (spacing mismatch)\n\n4. **Completeness**: The number part must be fully extracted\n - \"Figure 2-3\" → num=\"Figure 2-3\" ✓\n - \"Figure 2-3\" → num=\"Figure 2\" ✗ (incomplete number)\n\n5. **Null handling**: If \"num\" is null, verify:\n - Either the original text has no number\n - OR the text starts with a number (no prefix)\n - \"유적 전경 사진\" → num=null ✓ (no number in caption position)\n - \"원색사진 1 조사\" → num=null ✗ (should extract \"원색사진 1\")\n\n## Response:\nFor each caption, return:\n- index: original position\n- isValid: true if parsing is correct, false otherwise\n- reason: null if valid, brief explanation if invalid`;\n }\n\n protected buildUserPrompt(\n items: Array<{ index: number; caption: Caption; originalText: string }>,\n ): string {\n const captionList = items\n .map(\n (item) =>\n `[${item.index}] Original: \"${item.originalText}\" | Parsed num: ${item.caption.num !== undefined ? `\"${item.caption.num}\"` : 'null'}`,\n )\n .join('\\n');\n\n return `Validate the following caption parsing results:\n\n${captionList}\n\nReturn the results as JSON array with \"index\", \"isValid\", and \"reason\" (null if valid, explanation if invalid).\n\nExample format:\n{\n \"results\": [\n { \"index\": 0, \"isValid\": true, \"reason\": null },\n { \"index\": 1, \"isValid\": false, \"reason\": \"Number incomplete: expected '1-2' but got '1'\" },\n { \"index\": 2, \"isValid\": true, \"reason\": null }\n ]\n}`;\n }\n}\n\n/**\n * Error thrown when caption validation fails\n */\nexport class CaptionValidationError extends Error {\n constructor(message: string, options?: ErrorOptions) {\n super(message, options);\n this.name = 'CaptionValidationError';\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type {\n Caption,\n Chapter,\n DoclingDocument,\n DocumentProcessResult,\n PageRange,\n ProcessedDocument,\n ProcessedFootnote,\n ProcessedImage,\n ProcessedTable,\n ProcessedTableCell,\n} from '@heripo/model';\nimport type { LanguageModel } from 'ai';\n\nimport type { TocEntry } from './types';\n\nimport { LLMTokenUsageAggregator } from '@heripo/shared';\n\nimport { ChapterConverter } from './converters';\nimport {\n TocExtractor,\n TocFinder,\n TocNotFoundError,\n VisionTocExtractor,\n} from './extractors';\nimport { CaptionParser, PageRangeParser } from './parsers';\nimport {\n IdGenerator,\n MarkdownConverter,\n RefResolver,\n TextCleaner,\n} from './utils';\nimport { CaptionValidator, TocContentValidator } from './validators';\n\n/**\n * DocumentProcessor Options\n */\nexport interface DocumentProcessorOptions {\n /**\n * Logger instance\n */\n logger: LoggerMethods;\n\n /**\n * Fallback model - used as fallback when component-specific models are not provided or fail.\n * This is the only required model. Should be set to a frontier model (e.g., Claude Opus 4.5, GPT-5.2)\n * to ensure reliable fallback performance across all components.\n */\n fallbackModel: LanguageModel;\n\n /**\n * Model for PageRangeParser - extracts page numbers from page images.\n * Requires vision capabilities. 
Falls back to 'fallbackModel' if not provided.\n */\n pageRangeParserModel?: LanguageModel;\n\n /**\n * Model for TocExtractor - extracts structured TOC from Markdown representation.\n * Falls back to 'fallbackModel' if not provided.\n */\n tocExtractorModel?: LanguageModel;\n\n /**\n * Model for validators (TOC content validation, caption validation).\n * Falls back to 'fallbackModel' if not provided.\n */\n validatorModel?: LanguageModel;\n\n /**\n * Model for VisionTocExtractor - extracts TOC directly from page images.\n * Requires vision capabilities. Falls back to 'fallbackModel' if not provided.\n */\n visionTocExtractorModel?: LanguageModel;\n\n /**\n * Model for CaptionParser - extracts caption prefix and number from image/table captions.\n * Falls back to 'fallbackModel' if not provided.\n */\n captionParserModel?: LanguageModel;\n\n /**\n * Batch size for TextCleaner text normalization (synchronous processing)\n */\n textCleanerBatchSize: number;\n\n /**\n * Batch size for CaptionParser LLM parsing (async parallel processing)\n */\n captionParserBatchSize: number;\n\n /**\n * Batch size for CaptionValidator LLM validation (async parallel processing)\n */\n captionValidatorBatchSize: number;\n\n /**\n * Maximum retry count (default: 3)\n */\n maxRetries?: number;\n\n /**\n * Enable fallback retry mechanism - automatically retries with fallback model on failure (default: true)\n * Set to false to disable automatic fallback retry and fail immediately on component-specific model errors\n */\n enableFallbackRetry?: boolean;\n\n /**\n * Abort signal for cancellation support.\n * When aborted, processing stops at the next checkpoint between stages.\n */\n abortSignal?: AbortSignal;\n}\n\n/**\n * DocumentProcessor\n *\n * Main class that converts DoclingDocument to ProcessedDocument.\n *\n * ## Conversion Process\n *\n * 1. Initialize RefResolver - indexing for $ref resolution\n * 2. Initialize IdGenerator - unique ID generator\n * 3. Text filtering and PageRangeMap generation (visionModel)\n * 4. TOC extraction (model) - core step\n * 5. Parallel processing block:\n * - Images conversion (caption extraction)\n * - Tables conversion (excluding TOC tables)\n * 6. Chapters conversion (based on TOC)\n * 7. 
Assemble ProcessedDocument\n *\n * @example\n * ```typescript\n * import { openai } from '@ai-sdk/openai';\n * import { anthropic } from '@ai-sdk/anthropic';\n * import { DocumentProcessor } from '@heripo/document-processor';\n * import { getLogger } from '@heripo/logger';\n *\n * const logger = getLogger();\n *\n * // Basic usage - all components use the fallback model\n * const processor = new DocumentProcessor({\n * logger,\n * fallbackModel: anthropic('claude-opus-4-5-20251101'), // Frontier model for reliable fallback\n * });\n *\n * // Advanced usage - component-specific models with frontier fallback\n * const advancedProcessor = new DocumentProcessor({\n * logger,\n * fallbackModel: anthropic('claude-opus-4-5-20251101'), // Frontier model for fallback\n * pageRangeParserModel: openai('gpt-5.2'), // Vision-capable\n * tocExtractorModel: openai('gpt-5-mini'), // Structured output\n * validatorModel: openai('gpt-5.2'), // Validation (TOC + caption)\n * visionTocExtractorModel: openai('gpt-5.1'), // Vision-capable\n * captionParserModel: openai('gpt-5-mini'),\n * textCleanerBatchSize: 20, // Sync text processing\n * captionParserBatchSize: 10, // LLM caption parsing\n * captionValidatorBatchSize: 10, // LLM caption validation\n * maxRetries: 3,\n * });\n *\n * const result = await processor.process(\n * doclingDoc,\n * 'report-001',\n * outputPath\n * );\n * ```\n */\nexport class DocumentProcessor {\n private readonly logger: LoggerMethods;\n private readonly fallbackModel: LanguageModel;\n private readonly pageRangeParserModel: LanguageModel;\n private readonly tocExtractorModel: LanguageModel;\n private readonly validatorModel: LanguageModel;\n private readonly visionTocExtractorModel: LanguageModel;\n private readonly captionParserModel: LanguageModel;\n private readonly textCleanerBatchSize: number;\n private readonly captionParserBatchSize: number;\n private readonly captionValidatorBatchSize: number;\n private readonly maxRetries: number;\n private readonly enableFallbackRetry: boolean;\n private readonly abortSignal?: AbortSignal;\n private idGenerator = new IdGenerator();\n private refResolver?: RefResolver;\n private pageRangeParser?: PageRangeParser;\n private tocFinder?: TocFinder;\n private tocExtractor?: TocExtractor;\n private tocContentValidator?: TocContentValidator;\n private captionValidator?: CaptionValidator;\n private visionTocExtractor?: VisionTocExtractor;\n private captionParser?: CaptionParser;\n private chapterConverter?: ChapterConverter;\n private textCleaner = TextCleaner;\n private readonly usageAggregator = new LLMTokenUsageAggregator();\n\n constructor(options: DocumentProcessorOptions) {\n this.logger = options.logger;\n this.fallbackModel = options.fallbackModel;\n this.pageRangeParserModel =\n options.pageRangeParserModel ?? options.fallbackModel;\n this.tocExtractorModel = options.tocExtractorModel ?? options.fallbackModel;\n this.validatorModel = options.validatorModel ?? options.fallbackModel;\n this.visionTocExtractorModel =\n options.visionTocExtractorModel ?? options.fallbackModel;\n this.captionParserModel =\n options.captionParserModel ?? options.fallbackModel;\n this.textCleanerBatchSize = options.textCleanerBatchSize;\n this.captionParserBatchSize = options.captionParserBatchSize;\n this.captionValidatorBatchSize = options.captionValidatorBatchSize;\n this.maxRetries = options.maxRetries ?? 3;\n this.enableFallbackRetry = options.enableFallbackRetry ?? 
true;\n    this.abortSignal = options.abortSignal;\n  }\n\n  /**\n   * Check if abort has been requested and throw error if so\n   *\n   * @throws {Error} with name 'AbortError' if aborted\n   */\n  private checkAborted(): void {\n    if (this.abortSignal?.aborted) {\n      const error = new Error('Document processing was aborted');\n      error.name = 'AbortError';\n      throw error;\n    }\n  }\n\n  /**\n   * Converts DoclingDocument to ProcessedDocument with token usage tracking.\n   *\n   * Conversion process:\n   * 1. Initialize processors and resolvers\n   * 2. Normalize and filter texts\n   * 3. Clean texts and parse page ranges (parallel)\n   * 4. Extract table of contents\n   * 5. Convert images and tables (parallel)\n   * 6. Convert chapters and link resources\n   * 7. Assemble final ProcessedDocument\n   * 8. Collect and report token usage\n   *\n   * @param doclingDoc - Original document extracted from Docling SDK\n   * @param reportId - Report unique identifier\n   * @param outputPath - Path containing images and pages subdirectories (images/image_0.png, pages/page_0.png, etc.)\n   * @returns Document processing result with ProcessedDocument and token usage report\n   *\n   * @throws {TocExtractError} When TOC extraction fails\n   * @throws {PageRangeParseError} When page range parsing fails\n   * @throws {ConversionError} When error occurs during conversion\n   */\n  async process(\n    doclingDoc: DoclingDocument,\n    reportId: string,\n    outputPath: string,\n  ): Promise<DocumentProcessResult> {\n    this.logger.info('[DocumentProcessor] Starting document processing...');\n    this.logger.info('[DocumentProcessor] Report ID:', reportId);\n\n    // Reset token usage aggregator for new processing run\n    this.usageAggregator.reset();\n\n    // Check abort before starting\n    this.checkAborted();\n\n    this.initializeProcessors(doclingDoc, outputPath);\n\n    const startTimeFilter = Date.now();\n    const filtered = this.normalizeAndFilterTexts(doclingDoc);\n    const filteringTime = Date.now() - startTimeFilter;\n    this.logger.info(\n      `[DocumentProcessor] Text filtering took ${filteringTime}ms`,\n    );\n\n    // Check abort after text filtering\n    this.checkAborted();\n\n    const startTimePageRange = Date.now();\n    const pageRangeMap = await this.parsePageRanges(doclingDoc);\n    const pageRangeTime = Date.now() - startTimePageRange;\n    this.logger.info(\n      `[DocumentProcessor] Page range parsing took ${pageRangeTime}ms`,\n    );\n\n    // Check abort after page range parsing\n    this.checkAborted();\n\n    const startTimeToc = Date.now();\n    const tocEntries = await this.extractTableOfContents(doclingDoc, filtered);\n    const tocTime = Date.now() - startTimeToc;\n    this.logger.info(`[DocumentProcessor] TOC extraction took ${tocTime}ms`);\n\n    // Check abort after TOC extraction\n    this.checkAborted();\n\n    const startTimeResources = Date.now();\n    const { images, tables, footnotes } = await this.convertResources(\n      doclingDoc,\n      outputPath,\n    );\n    const resourcesTime = Date.now() - startTimeResources;\n    this.logger.info(\n      `[DocumentProcessor] Resource conversion took ${resourcesTime}ms`,\n    );\n\n    // Check abort after resource conversion\n    this.checkAborted();\n\n    const startTimeChapters = Date.now();\n    const chapters = await this.convertChapters(\n      doclingDoc,\n      tocEntries,\n      pageRangeMap,\n      images,\n      tables,\n      footnotes,\n    );\n    const chaptersTime = Date.now() - startTimeChapters;\n    this.logger.info(\n      `[DocumentProcessor] Chapter conversion took ${chaptersTime}ms`,\n    );\n\n    const startTimeAssemble = Date.now();\n    const processedDoc = this.assembleProcessedDocument(\n      reportId,\n      pageRangeMap,\n      chapters,\n      images,\n 
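// Cancellation sketch (hypothetical call site): the signal is observed at
// the stage boundaries marked by checkAborted(), so an abort stops
// processing at the next checkpoint rather than mid-stage.
//   const ac = new AbortController();
//   const processor = new DocumentProcessor({
//     logger,
//     fallbackModel,
//     textCleanerBatchSize: 20,
//     captionParserBatchSize: 10,
//     captionValidatorBatchSize: 10,
//     abortSignal: ac.signal,
//   });
//   const pending = processor.process(doclingDoc, 'report-001', outputPath);
//   ac.abort(); // pending rejects with an Error named 'AbortError'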
tables,\n footnotes,\n );\n const assembleTime = Date.now() - startTimeAssemble;\n this.logger.info(\n `[DocumentProcessor] Document assembly took ${assembleTime}ms`,\n );\n\n this.logger.info('[DocumentProcessor] Document processing completed');\n\n return {\n document: processedDoc,\n usage: this.usageAggregator.getReport(),\n };\n }\n\n /**\n * Initialize all processors and resolvers\n *\n * Sets up RefResolver, PageRangeParser, TocFinder, and TocExtractor\n */\n private initializeProcessors(\n doclingDoc: DoclingDocument,\n outputPath: string,\n ): void {\n this.logger.info('[DocumentProcessor] Initializing processors...');\n\n this.logger.info('[DocumentProcessor] - RefResolver');\n this.refResolver = new RefResolver(this.logger, doclingDoc);\n\n this.logger.info('[DocumentProcessor] - PageRangeParser');\n this.pageRangeParser = new PageRangeParser(\n this.logger,\n this.pageRangeParserModel,\n outputPath,\n this.maxRetries,\n this.enableFallbackRetry ? this.fallbackModel : undefined,\n this.usageAggregator,\n this.abortSignal,\n );\n\n this.logger.info('[DocumentProcessor] - TocFinder');\n this.tocFinder = new TocFinder(this.logger, this.refResolver);\n\n this.logger.info('[DocumentProcessor] - TocExtractor');\n this.tocExtractor = new TocExtractor(\n this.logger,\n this.tocExtractorModel,\n {\n maxRetries: this.maxRetries,\n },\n this.enableFallbackRetry ? this.fallbackModel : undefined,\n this.abortSignal,\n );\n\n this.logger.info('[DocumentProcessor] - TocContentValidator');\n this.tocContentValidator = new TocContentValidator(\n this.logger,\n this.validatorModel,\n { maxRetries: this.maxRetries, abortSignal: this.abortSignal },\n this.enableFallbackRetry ? this.fallbackModel : undefined,\n this.usageAggregator,\n );\n\n this.logger.info('[DocumentProcessor] - CaptionValidator');\n this.captionValidator = new CaptionValidator(\n this.logger,\n this.validatorModel,\n { maxRetries: this.maxRetries, abortSignal: this.abortSignal },\n this.enableFallbackRetry ? this.fallbackModel : undefined,\n this.usageAggregator,\n );\n\n this.logger.info('[DocumentProcessor] - VisionTocExtractor');\n this.visionTocExtractor = new VisionTocExtractor(\n this.logger,\n this.visionTocExtractorModel,\n outputPath,\n { maxRetries: this.maxRetries, abortSignal: this.abortSignal },\n this.enableFallbackRetry ? this.fallbackModel : undefined,\n this.usageAggregator,\n );\n\n this.logger.info('[DocumentProcessor] - CaptionParser');\n this.captionParser = new CaptionParser(\n this.logger,\n this.captionParserModel,\n { maxRetries: this.maxRetries, abortSignal: this.abortSignal },\n this.enableFallbackRetry ? 
this.fallbackModel : undefined,\n this.usageAggregator,\n );\n\n this.logger.info('[DocumentProcessor] - ChapterConverter');\n this.chapterConverter = new ChapterConverter(this.logger, this.idGenerator);\n\n this.logger.info('[DocumentProcessor] All processors initialized');\n }\n\n /**\n * Normalize and filter texts using TextCleaner\n *\n * Performs basic text normalization (unicode, whitespace, punctuation)\n * and filters out invalid texts (empty, numbers-only, etc.)\n */\n private normalizeAndFilterTexts(doclingDoc: DoclingDocument): string[] {\n this.logger.info('[DocumentProcessor] Normalizing and filtering texts...');\n\n const texts = doclingDoc.texts.map((text) => text.text);\n const filtered = this.textCleaner.normalizeAndFilterBatch(\n texts,\n this.textCleanerBatchSize,\n );\n\n this.logger.info(\n `[DocumentProcessor] Filtered ${filtered.length} texts from ${texts.length} original texts`,\n );\n\n return filtered;\n }\n\n /**\n * Parse page ranges using Vision LLM\n *\n * Extracts actual page numbers from page images and creates mapping.\n * Token usage is automatically tracked by PageRangeParser into the shared aggregator.\n */\n private async parsePageRanges(\n doclingDoc: DoclingDocument,\n ): Promise<Record<number, PageRange>> {\n this.logger.info('[DocumentProcessor] Starting page range parsing...');\n\n const result = await this.pageRangeParser!.parse(doclingDoc);\n\n const pageRangeMap = result.pageRangeMap;\n\n this.logger.info(\n `[DocumentProcessor] Page range map entries: ${Object.keys(pageRangeMap).length}`,\n );\n\n return pageRangeMap;\n }\n\n /**\n * Convert images, tables, and footnotes\n *\n * Runs conversions:\n * - Images conversion (with caption extraction)\n * - Tables conversion (with caption extraction, excluding TOC tables)\n * - Footnotes conversion (synchronous, from text items with label='footnote')\n */\n private async convertResources(\n doclingDoc: DoclingDocument,\n outputPath: string,\n ): Promise<{\n images: ProcessedImage[];\n tables: ProcessedTable[];\n footnotes: ProcessedFootnote[];\n }> {\n this.logger.info(\n '[DocumentProcessor] Converting images, tables, and footnotes...',\n );\n\n const [images, tables] = await Promise.all([\n this.convertImages(doclingDoc, outputPath),\n this.convertTables(doclingDoc),\n ]);\n\n const footnotes = this.convertFootnotes(doclingDoc);\n\n this.logger.info(\n `[DocumentProcessor] Converted ${images.length} images, ${tables.length} tables, and ${footnotes.length} footnotes`,\n );\n\n return { images, tables, footnotes };\n }\n\n /**\n * Convert footnotes\n *\n * Extracts footnotes from DoclingDocument text items with label='footnote'\n */\n private convertFootnotes(doclingDoc: DoclingDocument): ProcessedFootnote[] {\n const footnoteItems = doclingDoc.texts.filter(\n (item) => item.label === 'footnote',\n );\n this.logger.info(\n `[DocumentProcessor] Converting ${footnoteItems.length} footnotes...`,\n );\n\n const footnotes: ProcessedFootnote[] = [];\n\n for (const item of footnoteItems) {\n if (!this.textCleaner.isValidText(item.text)) {\n continue;\n }\n\n const pdfPageNo = item.prov?.[0]?.page_no ?? 
1;\n const footnoteId = this.idGenerator.generateFootnoteId();\n\n footnotes.push({\n id: footnoteId,\n text: this.textCleaner.normalize(item.text),\n pdfPageNo,\n });\n }\n\n this.logger.info(\n `[DocumentProcessor] Converted ${footnotes.length} valid footnotes`,\n );\n\n return footnotes;\n }\n\n /**\n * Assemble the final ProcessedDocument\n *\n * Creates the ProcessedDocument structure with all converted components\n */\n private assembleProcessedDocument(\n reportId: string,\n pageRangeMap: Record<number, PageRange>,\n chapters: Chapter[],\n images: ProcessedImage[],\n tables: ProcessedTable[],\n footnotes: ProcessedFootnote[],\n ): ProcessedDocument {\n this.logger.info('[DocumentProcessor] Assembling ProcessedDocument...');\n\n const processedDoc: ProcessedDocument = {\n reportId,\n pageRangeMap,\n chapters,\n images,\n tables,\n footnotes,\n };\n\n this.logger.info(\n `[DocumentProcessor] Assembled document with ${chapters.length} chapters, ${images.length} images, ${tables.length} tables, ${footnotes.length} footnotes`,\n );\n\n return processedDoc;\n }\n\n /**\n * Extract table of contents (TOC)\n *\n * Uses rule-based extraction with LLM validation and vision fallback:\n * 1. TocFinder - find TOC area in document (rule-based)\n * 2. MarkdownConverter - convert TOC items to Markdown\n * 3. TocContentValidator - validate if content is actually a TOC (LLM)\n * 4. If invalid: VisionTocExtractor - extract from page images (vision LLM fallback)\n * 5. TocExtractor - LLM-based structured extraction\n */\n private async extractTableOfContents(\n doclingDoc: DoclingDocument,\n _filteredTexts: string[],\n ): Promise<TocEntry[]> {\n this.logger.info('[DocumentProcessor] Extracting TOC...');\n\n let markdown: string | null = null;\n\n // Stage 1: Try rule-based extraction\n try {\n const tocArea = this.tocFinder!.find(doclingDoc);\n this.logger.info(\n `[DocumentProcessor] Found TOC area: pages ${tocArea.startPage}-${tocArea.endPage}`,\n );\n\n // Stage 2: Convert to Markdown\n markdown = MarkdownConverter.convert(tocArea.itemRefs, this.refResolver!);\n this.logger.info(\n `[DocumentProcessor] Converted TOC to Markdown (${markdown.length} chars)`,\n );\n\n // Stage 3: Validate with LLM\n const validation = await this.tocContentValidator!.validate(markdown);\n if (!this.tocContentValidator!.isValid(validation)) {\n this.logger.warn(\n `[DocumentProcessor] TOC validation failed: ${validation.reason}`,\n );\n markdown = null;\n } else {\n const validMarkdown =\n this.tocContentValidator!.getValidMarkdown(validation);\n if (validMarkdown) {\n if (validation.contentType === 'mixed') {\n this.logger.info(\n `[DocumentProcessor] Mixed TOC detected, using extracted main TOC (${validMarkdown.length} chars)`,\n );\n }\n markdown = validMarkdown;\n this.logger.info(\n `[DocumentProcessor] TOC validation passed (confidence: ${validation.confidence})`,\n );\n } else {\n markdown = null;\n }\n }\n } catch (error) {\n if (error instanceof TocNotFoundError) {\n this.logger.info(\n '[DocumentProcessor] Rule-based TOC not found, will try vision fallback',\n );\n } else {\n throw error;\n }\n }\n\n // Stage 4: Vision fallback if needed\n if (!markdown) {\n this.logger.info('[DocumentProcessor] Using vision fallback for TOC');\n const totalPages = Object.keys(doclingDoc.pages).length;\n markdown = await this.visionTocExtractor!.extract(totalPages);\n\n if (!markdown) {\n const reason =\n 'Both rule-based search and vision fallback failed to locate TOC';\n this.logger.error(\n `[DocumentProcessor] TOC 
extraction failed: ${reason}`,\n );\n throw new TocNotFoundError(\n `Table of contents not found in the document. ${reason}.`,\n );\n }\n\n this.logger.info(\n `[DocumentProcessor] Vision extracted TOC markdown (${markdown.length} chars)`,\n );\n }\n\n // Stage 5: Extract structure with LLM (with fallback retry)\n const tocResult = await this.tocExtractor!.extract(markdown);\n\n // Track token usage\n this.usageAggregator.track(tocResult.usage);\n\n if (tocResult.entries.length === 0) {\n const reason =\n 'TOC area was detected but LLM could not extract any structured entries';\n this.logger.error(`[DocumentProcessor] TOC extraction failed: ${reason}`);\n throw new TocNotFoundError(`${reason}.`);\n }\n\n this.logger.info(\n `[DocumentProcessor] Extracted ${tocResult.entries.length} top-level TOC entries`,\n );\n\n return tocResult.entries;\n }\n\n /**\n * Process resource captions (for images and tables)\n *\n * Common caption processing pipeline:\n * 1. Parse captions in batch\n * 2. Validate parsed captions\n * 3. Reparse failed captions with fallback model\n *\n * @param captionTexts - Array of caption texts to process\n * @param resourceType - Type of resource for logging (e.g., 'image', 'table')\n * @returns Parsed captions with index mapping\n */\n private async processResourceCaptions(\n captionTexts: Array<string | undefined>,\n resourceType: string,\n ): Promise<Map<number, Caption>> {\n const captionsByIndex: Map<number, Caption> = new Map();\n\n // Build map of valid captions with indices\n const validCaptionData: Array<{\n resourceIndex: number;\n filteredIndex: number;\n text: string;\n }> = [];\n\n for (let i = 0; i < captionTexts.length; i++) {\n const text = captionTexts[i];\n if (text !== undefined) {\n validCaptionData.push({\n resourceIndex: i,\n filteredIndex: validCaptionData.length,\n text,\n });\n }\n }\n\n const validCaptionTexts = validCaptionData.map((item) => item.text);\n\n // Step 1: Parse captions in batch\n const parsedCaptions =\n validCaptionTexts.length > 0\n ? await this.captionParser!.parseBatch(\n validCaptionTexts,\n this.captionParserBatchSize,\n )\n : [];\n\n // Handle length mismatch between parsed results and valid captions\n let finalValidCaptionData = validCaptionData;\n let finalParsedCaptions = parsedCaptions;\n\n if (parsedCaptions.length !== validCaptionData.length) {\n this.logger.warn(\n `[DocumentProcessor] Caption parsing length mismatch for ${resourceType}: ` +\n `expected ${validCaptionData.length}, got ${parsedCaptions.length}. 
` +\n `Attempting recovery by matching fullText...`,\n );\n\n // Create a map of fullText -> parsed caption for O(1) lookup\n const parsedMap = new Map<string, Caption>();\n for (const parsed of parsedCaptions) {\n parsedMap.set(parsed.fullText, parsed);\n }\n\n // Filter validCaptionData to only include items that were successfully parsed\n const recoveredData: typeof validCaptionData = [];\n for (const item of validCaptionData) {\n if (parsedMap.has(item.text)) {\n recoveredData.push(item);\n } else {\n this.logger.warn(\n `[DocumentProcessor] Skipping ${resourceType} caption at index ${item.resourceIndex}: \"${item.text}\" (not found in parsed results)`,\n );\n }\n }\n\n // Re-map parsedCaptions to match the filtered data\n const recoveredCaptions: Caption[] = [];\n for (const item of recoveredData) {\n const caption = parsedMap.get(item.text);\n if (caption) {\n recoveredCaptions.push(caption);\n }\n }\n\n /* c8 ignore start - defensive guard: recoveredData only contains items where parsedMap.has() returned true */\n if (recoveredCaptions.length !== recoveredData.length) {\n throw new Error(\n `[DocumentProcessor] Failed to recover from length mismatch: ` +\n `recovered ${recoveredCaptions.length} captions for ${recoveredData.length} valid items`,\n );\n }\n /* c8 ignore stop */\n\n finalValidCaptionData = recoveredData;\n finalParsedCaptions = recoveredCaptions;\n\n this.logger.info(\n `[DocumentProcessor] Successfully recovered ${finalParsedCaptions.length} ${resourceType} captions after length mismatch`,\n );\n }\n\n // Store parsed captions by resource index\n for (let i = 0; i < finalParsedCaptions.length; i++) {\n const resourceIndex = finalValidCaptionData[i].resourceIndex;\n captionsByIndex.set(resourceIndex, finalParsedCaptions[i]);\n }\n\n // Step 2: Validate parsed captions\n if (finalParsedCaptions.length > 0) {\n const finalValidCaptionTexts = finalValidCaptionData.map(\n (item) => item.text,\n );\n const validationResults = await this.captionValidator!.validateBatch(\n finalParsedCaptions,\n finalValidCaptionTexts,\n this.captionValidatorBatchSize,\n );\n\n // Step 3: Reparse failed captions with fallback model\n const failedIndices = validationResults\n .map((isValid, index) => (isValid ? 
-1 : index))\n .filter((index) => index !== -1);\n\n if (failedIndices.length > 0) {\n for (const filteredIndex of failedIndices) {\n const captionData = finalValidCaptionData[filteredIndex];\n const originalText = captionData.text;\n const parsedNum = finalParsedCaptions[filteredIndex].num;\n const resourceIndex = captionData.resourceIndex;\n this.logger.warn(\n `[DocumentProcessor] Invalid ${resourceType} caption [${resourceIndex}]: \"${originalText}\" | parsed num=\"${parsedNum}\"`,\n );\n }\n\n // Reparse failed captions with fallback model if enabled\n if (this.enableFallbackRetry) {\n this.logger.info(\n `[DocumentProcessor] Reparsing ${failedIndices.length} failed ${resourceType} captions with fallback model...`,\n );\n\n // Collect failed caption texts\n const failedCaptionTexts = failedIndices.map(\n (filteredIndex) => finalValidCaptionData[filteredIndex].text,\n );\n\n // Create a new CaptionParser instance with fallback model for separate token tracking\n const fallbackCaptionParser = new CaptionParser(\n this.logger,\n this.fallbackModel,\n {\n maxRetries: this.maxRetries,\n componentName: 'CaptionParser-fallback',\n abortSignal: this.abortSignal,\n },\n undefined, // no fallback for the fallback\n this.usageAggregator,\n );\n\n // Reparse with fallback model (sequential processing for better accuracy)\n const reparsedCaptions = await fallbackCaptionParser.parseBatch(\n failedCaptionTexts,\n 0, // sequential processing\n );\n\n // Update captionsByIndex with reparsed results\n for (let i = 0; i < failedIndices.length; i++) {\n const filteredIndex = failedIndices[i];\n const resourceIndex =\n finalValidCaptionData[filteredIndex].resourceIndex;\n captionsByIndex.set(resourceIndex, reparsedCaptions[i]);\n }\n\n this.logger.info(\n `[DocumentProcessor] Reparsed ${reparsedCaptions.length} ${resourceType} captions`,\n );\n } else {\n this.logger.warn(\n `[DocumentProcessor] ${failedIndices.length} ${resourceType} captions failed validation (kept as-is, fallback retry disabled)`,\n );\n }\n }\n }\n\n return captionsByIndex;\n }\n\n /**\n * Extract caption text from resource\n *\n * Handles both string references and $ref resolution\n */\n private extractCaptionText(\n captions: Array<string | { $ref: string }> | undefined,\n ): string | undefined {\n if (!captions?.[0]) {\n return undefined;\n }\n\n const captionRef = captions[0];\n if (typeof captionRef === 'string') {\n return captionRef;\n }\n\n if (this.refResolver && '$ref' in captionRef) {\n const resolved = this.refResolver.resolveText(captionRef.$ref);\n return resolved?.text;\n }\n\n return undefined;\n }\n\n /**\n * Convert images\n *\n * Converts pictures from DoclingDocument to ProcessedImage\n */\n private async convertImages(\n doclingDoc: DoclingDocument,\n outputPath: string,\n ): Promise<ProcessedImage[]> {\n this.logger.info(\n `[DocumentProcessor] Converting ${doclingDoc.pictures.length} images...`,\n );\n\n const images: ProcessedImage[] = [];\n const captionTexts: Array<string | undefined> = [];\n\n // Step 1: Collect image data and caption texts\n for (const picture of doclingDoc.pictures) {\n const pdfPageNo = picture.prov?.[0]?.page_no ?? 0;\n const imageId =\n this.idGenerator?.generateImageId() ?? 
`img-${images.length + 1}`;\n\n const captionText = this.extractCaptionText(picture.captions);\n captionTexts.push(captionText);\n\n images.push({\n id: imageId,\n path: `${outputPath}/images/image_${images.length}.png`,\n pdfPageNo,\n // caption will be assigned later\n });\n }\n\n // Step 2: Process captions\n const captionsByIndex = await this.processResourceCaptions(\n captionTexts,\n 'image',\n );\n\n // Step 3: Assign parsed captions to images\n for (let i = 0; i < images.length; i++) {\n if (captionsByIndex.has(i)) {\n images[i].caption = captionsByIndex.get(i);\n }\n }\n\n return images;\n }\n\n /**\n * Convert tables\n *\n * Converts tables from DoclingDocument to ProcessedTable\n */\n private async convertTables(\n doclingDoc: DoclingDocument,\n ): Promise<ProcessedTable[]> {\n this.logger.info(\n `[DocumentProcessor] Converting ${doclingDoc.tables.length} tables...`,\n );\n\n const tables: ProcessedTable[] = [];\n const captionTexts: Array<string | undefined> = [];\n\n // Step 1: Collect table data and caption texts\n for (const table of doclingDoc.tables) {\n const pdfPageNo = table.prov?.[0]?.page_no ?? 0;\n const tableId =\n this.idGenerator?.generateTableId() ?? `tbl-${tables.length + 1}`;\n\n // Convert table cells\n const grid: ProcessedTableCell[][] = table.data.grid.map((row) =>\n row.map((cell) => ({\n text: cell.text,\n rowSpan: cell.row_span ?? 1,\n colSpan: cell.col_span ?? 1,\n isHeader: cell.column_header || cell.row_header || false,\n })),\n );\n\n const captionText = this.extractCaptionText(table.captions);\n captionTexts.push(captionText);\n\n tables.push({\n id: tableId,\n pdfPageNo,\n numRows: grid.length,\n numCols: grid[0]?.length ?? 0,\n grid,\n // caption will be assigned later\n });\n }\n\n // Step 2: Process captions\n const captionsByIndex = await this.processResourceCaptions(\n captionTexts,\n 'table',\n );\n\n // Step 3: Assign parsed captions to tables\n for (let i = 0; i < tables.length; i++) {\n if (captionsByIndex.has(i)) {\n tables[i].caption = captionsByIndex.get(i);\n }\n }\n\n return tables;\n }\n\n /**\n * Convert chapters and link resources\n *\n * Generates chapters based on TOC and links images/tables/footnotes using ChapterConverter.\n * Throws TocNotFoundError if TOC entries are empty (defensive assertion).\n */\n private async convertChapters(\n doclingDoc: DoclingDocument,\n tocEntries: TocEntry[],\n pageRangeMap: Record<number, PageRange>,\n images: ProcessedImage[],\n tables: ProcessedTable[],\n footnotes: ProcessedFootnote[],\n ): Promise<Chapter[]> {\n this.logger.info('[DocumentProcessor] Converting chapters...');\n\n // Defensive assertion - TOC entries should always be present at this point\n if (tocEntries.length === 0) {\n const reason = 'Cannot convert chapters without TOC entries';\n this.logger.error(`[DocumentProcessor] ${reason}`);\n throw new TocNotFoundError(reason);\n }\n\n // Use ChapterConverter for TOC-based conversion\n const chapters = this.chapterConverter!.convert(\n tocEntries,\n doclingDoc.texts,\n pageRangeMap,\n images,\n tables,\n footnotes,\n );\n\n this.logger.info(\n `[DocumentProcessor] Converted ${chapters.length} top-level chapters`,\n );\n\n return chapters;\n 
}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;AGEA,gBAAyD;AFGlD,IAAM,iBAAN,MAAqB;;;;;;;;;;;;;;;EAe1B,OAAO,cAAiB,OAAY,WAA0B;AAC5D,UAAM,UAAiB,CAAC;AACxB,aAAS,IAAI,GAAG,IAAI,MAAM,QAAQ,KAAK,WAAW;AAChD,cAAQ,KAAK,MAAM,MAAM,GAAG,IAAI,SAAS,CAAC;IAC5C;AACA,WAAO;EACT;;;;;;;;;;;;;;;;;;;;;;EAuBA,aAAa,aACX,OACA,WACA,WACc;AACd,UAAM,UAAU,KAAK,cAAc,OAAO,SAAS;AACnD,UAAM,UAAU,MAAM,QAAQ,IAAI,QAAQ,IAAI,CAAC,UAAU,UAAU,KAAK,CAAC,CAAC;AAC1E,WAAO,QAAQ,KAAK;EACtB;;;;;;;;;;;;;;;;;;;;EAqBA,OAAO,iBACL,OACA,WACA,WACK;AACL,UAAM,UAAU,KAAK,cAAc,OAAO,SAAS;AACnD,UAAM,UAAU,QAAQ,IAAI,CAAC,UAAU,UAAU,KAAK,CAAC;AACvD,WAAO,QAAQ,KAAK;EACtB;AACF;AEiFO,IAAM,YAAN,MAAgB;;;;;;EAMrB,OAAe,iBAAiB,OAA8B;AAC5D,UAAM,WAAW;AAGjB,QAAI,OAAO,SAAS,YAAY,SAAU,QAAO,SAAS;AAC1D,QAAI,OAAO,SAAS,OAAO,SAAU,QAAO,SAAS;AACrD,QAAI,OAAO,SAAS,UAAU,SAAU,QAAO,SAAS;AACxD,QAAI,OAAO,SAAS,SAAS,SAAU,QAAO,SAAS;AAGvD,WAAO,OAAO,KAAK;EACrB;;;;EAKA,OAAe,WACb,QACA,WACA,UAOA,cACoB;AACpB,WAAO;MACL,WAAW,OAAO;MAClB,OAAO,OAAO;MACd,OAAO,eAAe,aAAa;MACnC;MACA,aAAa,SAAS,OAAO,eAAe;MAC5C,cAAc,SAAS,OAAO,gBAAgB;MAC9C,aAAa,SAAS,OAAO,eAAe;IAC9C;EACF;;;;;;EAOA,aAAqB,oBACnB,QACA,YAQiC;AACjC,UAAM,mBAAmB,KAAK,iBAAiB,OAAO,YAAY;AAGlE,QAAI;AACF,YAAM,WAAW,MAAM,WAAW,OAAO,YAAY;AAErD,aAAO;QACL,QAAQ,SAAS;QACjB,OAAO,KAAK,WAAW,QAAQ,kBAAkB,UAAU,KAAK;QAChE,cAAc;MAChB;IACF,SAAS,cAAc;AAErB,UAAI,OAAO,aAAa,SAAS;AAC/B,cAAM;MACR;AAGA,UAAI,CAAC,OAAO,eAAe;AACzB,cAAM;MACR;AAGA,YAAM,oBAAoB,KAAK,iBAAiB,OAAO,aAAa;AACpE,YAAM,WAAW,MAAM,WAAW,OAAO,aAAa;AAEtD,aAAO;QACL,QAAQ,SAAS;QACjB,OAAO,KAAK,WAAW,QAAQ,mBAAmB,UAAU,IAAI;QAChE,cAAc;MAChB;IACF;EACF;;;;;;;;;;;;;;EAeA,aAAa,KACX,QACiC;AACjC,WAAO,KAAK;MAAoB;MAAQ,CAAC,cACvC,wBAAa;QACX;QACA,QAAQ,iBAAO,OAAO;UACpB,QAAQ,OAAO;QACjB,CAAC;QACD,QAAQ,OAAO;QACf,QAAQ,OAAO;QACf,aAAa,OAAO;QACpB,YAAY,OAAO;QACnB,aAAa,OAAO;MACtB,CAAC;IACH;EACF;;;;;;;;;;;EAYA,aAAa,WACX,QACiC;AACjC,WAAO,KAAK;MAAoB;MAAQ,CAAC,cACvC,wBAAa;QACX;QACA,QAAQ,iBAAO,OAAO;UACpB,QAAQ,OAAO;QACjB,CAAC;QACD,UAAU,OAAO;QACjB,aAAa,OAAO;QACpB,YAAY,OAAO;QACnB,aAAa,OAAO;MACtB,CAAC;IACH;EACF;AACF;AC7SA,SAAS,aAAa,OAA2B;AAC/C,SAAO,GAAG,MAAM,WAAW,WAAW,MAAM,YAAY,YAAY,MAAM,WAAW;AACvF;AAsFO,IAAM,0BAAN,MAA8B;EAC3B,QAA4C,CAAC;;;;;;EAOrD,MAAM,OAAiC;AAErC,QAAI,CAAC,KAAK,MAAM,MAAM,SAAS,GAAG;AAChC,WAAK,MAAM,MAAM,SAAS,IAAI;QAC5B,WAAW,MAAM;QACjB,QAAQ,CAAC;QACT,OAAO;UACL,aAAa;UACb,cAAc;UACd,aAAa;QACf;MACF;IACF;AAEA,UAAM,YAAY,KAAK,MAAM,MAAM,SAAS;AAG5C,QAAI,CAAC,UAAU,OAAO,MAAM,KAAK,GAAG;AAClC,gBAAU,OAAO,MAAM,KAAK,IAAI;QAC9B,OAAO;UACL,aAAa;UACb,cAAc;UACd,aAAa;QACf;MACF;IACF;AAEA,UAAM,QAAQ,UAAU,OAAO,MAAM,KAAK;AAG1C,QAAI,MAAM,UAAU,WAAW;AAC7B,UAAI,CAAC,MAAM,SAAS;AAClB,cAAM,UAAU;UACd,WAAW,MAAM;UACjB,aAAa;UACb,cAAc;UACd,aAAa;QACf;MACF;AAEA,YAAM,QAAQ,eAAe,MAAM;AACnC,YAAM,QAAQ,gBAAgB,MAAM;AACpC,YAAM,QAAQ,eAAe,MAAM;IACrC,WAAW,MAAM,UAAU,YAAY;AACrC,UAAI,CAAC,MAAM,UAAU;AACnB,cAAM,WAAW;UACf,WAAW,MAAM;UACjB,aAAa;UACb,cAAc;UACd,aAAa;QACf;MACF;AAEA,YAAM,SAAS,eAAe,MAAM;AACpC,YAAM,SAAS,gBAAgB,MAAM;AACrC,YAAM,SAAS,eAAe,MAAM;IACtC;AAGA,UAAM,MAAM,eAAe,MAAM;AACjC,UAAM,MAAM,gBAAgB,MAAM;AAClC,UAAM,MAAM,eAAe,MAAM;AAGjC,cAAU,MAAM,eAAe,MAAM;AACrC,cAAU,MAAM,gBAAgB,MAAM;AACtC,cAAU,MAAM,eAAe,MAAM;EACvC;;;;;;EAOA,iBAAuC;AACrC,WAAO,OAAO,OAAO,KAAK,KAAK;EACjC;;;;;;;;;;EAWA,YA8BE;AACA,UAAM,aA2BD,CAAC;AAEN,eAAW,aAAa,OAAO,OAAO,KAAK,KAAK,GAAG;AACjD,YAAM,SAmBD,CAAC;AAEN,iBAAW,CAAC,WAAW,SAAS,KAAK,OAAO,QAAQ,UAAU,MAAM,GAAG;AACrE,cAAM,cAmBF;UACF,OAAO;UACP,OAAO;YACL,aAAa,UAAU,MAAM;YAC7B,cAAc,UAAU,MAAM;YAC9B,aAAa,UAAU,MAAM;UAC/B;QACF;A
AEA,YAAI,UAAU,SAAS;AACrB,sBAAY,UAAU;YACpB,WAAW,UAAU,QAAQ;YAC7B,aAAa,UAAU,QAAQ;YAC/B,cAAc,UAAU,QAAQ;YAChC,aAAa,UAAU,QAAQ;UACjC;QACF;AAEA,YAAI,UAAU,UAAU;AACtB,sBAAY,WAAW;YACrB,WAAW,UAAU,SAAS;YAC9B,aAAa,UAAU,SAAS;YAChC,cAAc,UAAU,SAAS;YACjC,aAAa,UAAU,SAAS;UAClC;QACF;AAEA,eAAO,KAAK,WAAW;MACzB;AAEA,iBAAW,KAAK;QACd,WAAW,UAAU;QACrB;QACA,OAAO;UACL,aAAa,UAAU,MAAM;UAC7B,cAAc,UAAU,MAAM;UAC9B,aAAa,UAAU,MAAM;QAC/B;MACF,CAAC;IACH;AAEA,UAAM,aAAa,KAAK,cAAc;AAEtC,WAAO;MACL;MACA,OAAO;QACL,aAAa,WAAW;QACxB,cAAc,WAAW;QACzB,aAAa,WAAW;MAC1B;IACF;EACF;;;;;;EAOA,gBAA4B;AAC1B,QAAI,aAAa;AACjB,QAAI,cAAc;AAClB,QAAI,cAAc;AAElB,eAAW,aAAa,OAAO,OAAO,KAAK,KAAK,GAAG;AACjD,oBAAc,UAAU,MAAM;AAC9B,qBAAe,UAAU,MAAM;AAC/B,qBAAe,UAAU,MAAM;IACjC;AAEA,WAAO;MACL,aAAa;MACb,cAAc;MACd;IACF;EACF;;;;;;;;;;EAWA,WAAW,QAA6B;AACtC,UAAM,aAAa,KAAK,eAAe;AAEvC,QAAI,WAAW,WAAW,GAAG;AAC3B,aAAO,KAAK,8CAA8C;AAC1D;IACF;AAEA,WAAO,KAAK,0CAA0C;AACtD,WAAO,KAAK,EAAE;AAEd,QAAI,mBAAmB;AACvB,QAAI,oBAAoB;AACxB,QAAI,mBAAmB;AACvB,QAAI,0BAA0B;AAC9B,QAAI,2BAA2B;AAC/B,QAAI,0BAA0B;AAC9B,QAAI,2BAA2B;AAC/B,QAAI,4BAA4B;AAChC,QAAI,2BAA2B;AAE/B,eAAW,aAAa,YAAY;AAClC,aAAO,KAAK,GAAG,UAAU,SAAS,GAAG;AAErC,iBAAW,CAAC,OAAO,SAAS,KAAK,OAAO,QAAQ,UAAU,MAAM,GAAG;AACjE,eAAO,KAAK,OAAO,KAAK,GAAG;AAG3B,YAAI,UAAU,SAAS;AACrB,iBAAO;YACL,kBAAkB,UAAU,QAAQ,SAAS,MAAM,aAAa,UAAU,OAAO,CAAC;UACpF;AACA,qCAA2B,UAAU,QAAQ;AAC7C,sCAA4B,UAAU,QAAQ;AAC9C,qCAA2B,UAAU,QAAQ;QAC/C;AAGA,YAAI,UAAU,UAAU;AACtB,iBAAO;YACL,mBAAmB,UAAU,SAAS,SAAS,MAAM,aAAa,UAAU,QAAQ,CAAC;UACvF;AACA,sCAA4B,UAAU,SAAS;AAC/C,uCAA6B,UAAU,SAAS;AAChD,sCAA4B,UAAU,SAAS;QACjD;AAGA,eAAO,KAAK,mBAAmB,aAAa,UAAU,KAAK,CAAC,EAAE;MAChE;AAEA,aAAO;QACL,KAAK,UAAU,SAAS,WAAW,aAAa,UAAU,KAAK,CAAC;MAClE;AACA,aAAO,KAAK,EAAE;AAEd,0BAAoB,UAAU,MAAM;AACpC,2BAAqB,UAAU,MAAM;AACrC,0BAAoB,UAAU,MAAM;IACtC;AAGA,WAAO,KAAK,iBAAiB;AAC7B,QAAI,0BAA0B,GAAG;AAC/B,aAAO;QACL,kBAAkB,aAAa;UAC7B,aAAa;UACb,cAAc;UACd,aAAa;QACf,CAAC,CAAC;MACJ;IACF;AACA,QAAI,2BAA2B,GAAG;AAChC,aAAO;QACL,mBAAmB,aAAa;UAC9B,aAAa;UACb,cAAc;UACd,aAAa;QACf,CAAC,CAAC;MACJ;IACF;AACA,WAAO;MACL,gBAAgB,aAAa;QAC3B,aAAa;QACb,cAAc;QACd,aAAa;MACf,CAAC,CAAC;IACJ;EACF;;;;;;EAOA,QAAc;AACZ,SAAK,QAAQ,CAAC;EAChB;AACF;;;ACxdO,IAAM,cAAN,MAAkB;AAAA,EACN;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAEjB,YAAY,QAAuB,KAAsB;AACvD,SAAK,SAAS;AACd,SAAK,OAAO,KAAK,kDAAkD;AAEnE,SAAK,UAAU,KAAK,WAAW,IAAI,OAAO,OAAO;AACjD,SAAK,aAAa,KAAK,WAAW,IAAI,UAAU,UAAU;AAC1D,SAAK,WAAW,KAAK,WAAW,IAAI,QAAQ,QAAQ;AACpD,SAAK,WAAW,KAAK,WAAW,IAAI,QAAQ,QAAQ;AAEpD,SAAK,OAAO;AAAA,MACV,yBAAyB,KAAK,QAAQ,IAAI,WAAW,KAAK,WAAW,IAAI,cAAc,KAAK,SAAS,IAAI,YAAY,KAAK,SAAS,IAAI;AAAA,IACzI;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,WACN,OACA,SACgB;AAChB,UAAM,MAAM,oBAAI,IAAe;AAC/B,eAAW,QAAQ,OAAO;AACxB,UAAI,IAAI,KAAK,UAAU,IAAI;AAAA,IAC7B;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,QACE,KAMO;AAGP,UAAM,QAAQ,IAAI,MAAM,aAAa;AACrC,QAAI,CAAC,OAAO;AACV,WAAK,OAAO,KAAK,2CAA2C,GAAG,EAAE;AACjE,aAAO;AAAA,IACT;AAEA,UAAM,aAAa,MAAM,CAAC;AAE1B,QAAI,eAAe,SAAS;AAC1B,YAAM,SAAS,KAAK,QAAQ,IAAI,GAAG,KAAK;AACxC,UAAI,CAAC,QAAQ;AACX,aAAK,OAAO,KAAK,2CAA2C,GAAG,EAAE;AAAA,MACnE;AACA,aAAO;AAAA,IACT;AACA,QAAI,eAAe,YAAY;AAC7B,YAAM,SAAS,KAAK,WAAW,IAAI,GAAG,KAAK;AAC3C,UAAI,CAAC,QAAQ;AACX,aAAK,OAAO,KAAK,8CAA8C,GAAG,EAAE;AAAA,MACtE;AACA,aAAO;AAAA,IACT;AACA,QAAI,eAAe,UAAU;AAC3B,YAAM,SAAS,KAAK,SAAS,IAAI,GAAG,KAAK;AACzC,UAAI,CAAC,QAAQ;AACX,aAAK,OAAO,KAAK,4CAA4C,GAAG,EAAE;AAAA,MACpE;AACA,aAAO;AAAA,IACT;AACA,QAAI,eAAe,UAAU;AAC3B,YAAM,SAAS,KAAK,SAAS,IAAI,GAAG,KAAK;AACzC,UAAI,CAAC,QAAQ;AACX,aAAK,OAAO,KAAK,4CAA4C,GAAG,EAAE;AAAA,MACpE;AACA,aAAO;AAAA,IACT;AAEA,SAAK,OAAO,KAAK,0CAA0C,UAAU,EAAE;AACvE,WAAO;AAAA,EACT;AAAA;AAAA;A
AAA;AAAA;AAAA;AAAA,EAOA,YAAY,KAAqC;AAC/C,WAAO,KAAK,QAAQ,IAAI,GAAG,KAAK;AAAA,EAClC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,eAAe,KAAwC;AACrD,WAAO,KAAK,WAAW,IAAI,GAAG,KAAK;AAAA,EACrC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,aAAa,KAAsC;AACjD,WAAO,KAAK,SAAS,IAAI,GAAG,KAAK;AAAA,EACnC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,aAAa,KAAsC;AACjD,WAAO,KAAK,SAAS,IAAI,GAAG,KAAK;AAAA,EACnC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,YACE,MAOA;AACA,WAAO,KAAK,IAAI,CAAC,QAAQ,KAAK,QAAQ,IAAI,IAAI,CAAC;AAAA,EACjD;AACF;;;ACpJO,IAAM,cAAN,MAAkB;AAAA,EACf,iBAAiB;AAAA,EACjB,eAAe;AAAA,EACf,eAAe;AAAA,EACf,kBAAkB;AAAA;AAAA;AAAA;AAAA;AAAA,EAM1B,oBAA4B;AAC1B,SAAK;AACL,WAAO,MAAM,KAAK,UAAU,KAAK,cAAc,CAAC;AAAA,EAClD;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,kBAA0B;AACxB,SAAK;AACL,WAAO,OAAO,KAAK,UAAU,KAAK,YAAY,CAAC;AAAA,EACjD;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,kBAA0B;AACxB,SAAK;AACL,WAAO,OAAO,KAAK,UAAU,KAAK,YAAY,CAAC;AAAA,EACjD;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,qBAA6B;AAC3B,SAAK;AACL,WAAO,OAAO,KAAK,UAAU,KAAK,eAAe,CAAC;AAAA,EACpD;AAAA;AAAA;AAAA;AAAA,EAKA,QAAc;AACZ,SAAK,iBAAiB;AACtB,SAAK,eAAe;AACpB,SAAK,eAAe;AACpB,SAAK,kBAAkB;AAAA,EACzB;AAAA;AAAA;AAAA;AAAA,EAKA,cAKE;AACA,WAAO;AAAA,MACL,SAAS,KAAK;AAAA,MACd,OAAO,KAAK;AAAA,MACZ,OAAO,KAAK;AAAA,MACZ,UAAU,KAAK;AAAA,IACjB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,UAAU,KAAqB;AACrC,WAAO,IAAI,SAAS,EAAE,SAAS,GAAG,GAAG;AAAA,EACvC;AACF;;;AC1EO,IAAM,cAAN,MAAkB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOvB,OAAO,UAAU,MAAsB;AACrC,QAAI,CAAC,KAAM,QAAO;AAGlB,QAAI,aAAa,KAAK,UAAU,KAAK;AAGrC,iBAAa,WAAW,QAAQ,4BAA4B,GAAG;AAG/D,iBAAa,WAAW,QAAQ,YAAY,GAAG;AAG/C,iBAAa,WAAW,QAAQ,QAAQ,GAAG;AAG3C,iBAAa,WAAW,KAAK;AAE7B,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,OAAO,iBAAiB,MAAsB;AAC5C,QAAI,CAAC,KAAM,QAAO;AAGlB,QAAI,UAAU,KAAK,QAAQ,iBAAiB,EAAE;AAG9C,cAAU,QAAQ,QAAQ,iBAAiB,EAAE;AAE7C,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKA,OAAO,YAAY,MAAuB;AACxC,QAAI,CAAC,KAAM,QAAO;AAClB,UAAM,UAAU,KAAK,UAAU,IAAI;AAEnC,WAAO,CAAC,eAAe,KAAK,OAAO;AAAA,EACrC;AAAA;AAAA;AAAA;AAAA,EAKA,OAAO,eAAe,OAA2B;AAC/C,WAAO,MAAM,IAAI,CAAC,SAAS,KAAK,UAAU,IAAI,CAAC;AAAA,EACjD;AAAA;AAAA;AAAA;AAAA,EAKA,OAAO,iBAAiB,OAA2B;AACjD,WAAO,MAAM,OAAO,CAAC,SAAS,KAAK,YAAY,IAAI,CAAC;AAAA,EACtD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAyBA,OAAO,wBACL,OACA,YAAoB,IACV;AACV,QAAI,cAAc,GAAG;AAEnB,YAAM,UAAoB,CAAC;AAC3B,iBAAW,QAAQ,OAAO;AACxB,cAAM,aAAa,KAAK,UAAU,IAAI;AACtC,YAAI,KAAK,YAAY,UAAU,GAAG;AAChC,kBAAQ,KAAK,UAAU;AAAA,QACzB;AAAA,MACF;AACA,aAAO;AAAA,IACT;AAGA,WAAO,eAAe,iBAAiB,OAAO,WAAW,CAAC,UAAU;AAElE,YAAM,aAAa,KAAK,eAAe,KAAK;AAE5C,aAAO,KAAK,iBAAiB,UAAU;AAAA,IACzC,CAAC;AAAA,EACH;AACF;;;ACjHO,IAAM,oBAAN,MAAM,mBAAkB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQ7B,OAAO,QAAQ,MAAgB,aAAkC;AAC/D,QAAI,KAAK,WAAW,GAAG;AACrB,aAAO;AAAA,IACT;AAEA,UAAM,QAAkB,CAAC;AAEzB,eAAW,OAAO,MAAM;AACtB,YAAM,OAAO,YAAY,QAAQ,GAAG;AACpC,UAAI,CAAC,MAAM;AACT;AAAA,MACF;AAGA,UAAI,UAAU,SAAS,KAAK,SAAS,UAAU,KAAK,SAAS,UAAU;AACrE,cAAM,gBAAgB,mBAAkB;AAAA,UACtC;AAAA,UACA;AAAA,UACA;AAAA,QACF;AACA,YAAI,eAAe;AACjB,gBAAM,KAAK,aAAa;AAAA,QAC1B;AAAA,MACF,WAES,UAAU,QAAQ,UAAW,KAA0B,MAAM;AACpE,cAAM,gBAAgB,mBAAkB;AAAA,UACtC;AAAA,QACF;AACA,YAAI,eAAe;AACjB,gBAAM,KAAK,aAAa;AAAA,QAC1B;AAAA,MACF,WAES,UAAU,QAAQ,UAAU,MAAM;AACzC,cAAM,eAAe,mBAAkB;AAAA,UACrC;AAAA,UACA;AAAA,QACF;AACA,YAAI,cAAc;AAChB,gBAAM,KAAK,YAAY;AAAA,QACzB;AAAA,MACF;AAAA,IACF;AAEA,WAAO,MAAM,KAAK,MAAM;AAAA,EAC1B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,OAAO,gBACL,OACA,aACA,cAAc,GACN;AACR,UAAM,QAAkB,CAAC;AAEzB,eAAW,YAAY,MAAM,UAAU;AACrC,YAAM,QAAQ,YAAY,QAAQ,SAAS,IAAI;AAC/C,UAAI,CAAC,OAAO;AACV;AAAA,MACF;AAGA,UACE,U
AAU,UACT,MAAM,SAAS,UAAU,MAAM,SAAS,UACzC;AACA,cAAM,iBAAiB,mBAAkB;AAAA,UACvC;AAAA,UACA;AAAA,UACA,cAAc;AAAA,QAChB;AACA,YAAI,gBAAgB;AAClB,gBAAM,KAAK,cAAc;AAAA,QAC3B;AAAA,MACF,WAES,UAAU,SAAS,UAAU,OAAO;AAC3C,cAAM,eAAe,mBAAkB;AAAA,UACrC;AAAA,UACA;AAAA,QACF;AACA,YAAI,cAAc;AAChB,gBAAM,KAAK,YAAY;AAAA,QACzB;AAAA,MACF;AAAA,IACF;AAEA,WAAO,MAAM,KAAK,IAAI;AAAA,EACxB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYA,OAAO,gBAAgB,OAAiC;AACtD,UAAM,EAAE,KAAK,IAAI,MAAM;AACvB,QAAI,CAAC,QAAQ,KAAK,WAAW,GAAG;AAC9B,aAAO;AAAA,IACT;AAEA,UAAM,QAAkB,CAAC;AAGzB,aAAS,SAAS,GAAG,SAAS,KAAK,QAAQ,UAAU;AACnD,YAAM,MAAM,KAAK,MAAM;AACvB,UAAI,CAAC,OAAO,IAAI,WAAW,GAAG;AAC5B;AAAA,MACF;AAEA,YAAM,QAAQ,IAAI;AAAA,QAAI,CAAC,SACrB,mBAAkB,gBAAgB,KAAK,IAAI;AAAA,MAC7C;AACA,YAAM,KAAK,KAAK,MAAM,KAAK,KAAK,CAAC,IAAI;AAGrC,UAAI,WAAW,GAAG;AAChB,cAAM,YAAY,IAAI,IAAI,MAAM,KAAK,EAAE,KAAK,KAAK;AACjD,cAAM,KAAK,KAAK,SAAS,IAAI;AAAA,MAC/B;AAAA,IACF;AAEA,WAAO,MAAM,KAAK,IAAI;AAAA,EACxB;AAAA;AAAA;AAAA;AAAA,EAKA,OAAO,eAAe,MAAuB,cAAc,GAAW;AACpE,UAAM,UAAU,KAAK,KAAK,KAAK;AAC/B,QAAI,CAAC,SAAS;AACZ,aAAO;AAAA,IACT;AAEA,UAAM,SAAS,mBAAkB,UAAU,WAAW;AACtD,UAAM,SAAS,mBAAkB;AAAA,MAC/B,KAAK;AAAA,MACL,KAAK;AAAA,IACP;AAEA,WAAO,GAAG,MAAM,GAAG,MAAM,GAAG,OAAO;AAAA,EACrC;AAAA;AAAA;AAAA;AAAA,EAKA,OAAe,cAAc,YAAsB,QAAyB;AAC1E,QAAI,QAAQ;AACV,aAAO,GAAG,MAAM;AAAA,IAClB;AACA,QAAI,eAAe,MAAM;AACvB,aAAO;AAAA,IACT;AACA,QAAI,eAAe,OAAO;AACxB,aAAO;AAAA,IACT;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKA,OAAe,UAAU,OAAuB;AAC9C,WAAO,KAAK,OAAO,KAAK;AAAA,EAC1B;AAAA;AAAA;AAAA;AAAA,EAKA,OAAe,gBAAgB,MAAsB;AACnD,WAAO,KAAK,QAAQ,OAAO,KAAK,EAAE,QAAQ,OAAO,GAAG,EAAE,KAAK;AAAA,EAC7D;AACF;;;AC5JO,IAAM,mBAAN,MAAM,kBAAiB;AAAA,EAC5B,OAAwB,kBAAkB;AAAA,EAC1C,OAAwB,qBAAqB;AAAA,EAE5B;AAAA,EACA;AAAA,EAEjB,YAAY,QAAuB,aAA0B;AAC3D,SAAK,SAAS;AACd,SAAK,cAAc;AAAA,EACrB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAaA,QACE,YACA,WACA,cACA,QACA,QACA,WACW;AACX,SAAK,OAAO,KAAK,mDAAmD;AAGpE,UAAM,cAAc,KAAK,yBAAyB;AAGlD,UAAM,cAAc,KAAK,iBAAiB,UAAU;AACpD,SAAK,OAAO;AAAA,MACV,4BAA4B,YAAY,MAAM;AAAA,IAChD;AAGA,UAAM,cAAc,CAAC,aAAa,GAAG,WAAW;AAGhD,UAAM,eAAe,KAAK,gBAAgB,WAAW;AACrD,UAAM,gBAAgB,KAAK,oBAAoB,cAAc,UAAU;AACvE,SAAK,OAAO;AAAA,MACV,4CAA4C,cAAc,IAAI;AAAA,IAChE;AAGA,UAAM,aAAa,KAAK,kBAAkB,WAAW,YAAY;AACjE,SAAK,iBAAiB,aAAa,YAAY,eAAe,YAAY;AAC1E,SAAK,OAAO;AAAA,MACV,+BAA+B,WAAW,MAAM;AAAA,IAClD;AAGA,SAAK;AAAA,MACH;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AACA,SAAK,OAAO;AAAA,MACV,6BAA6B,OAAO,MAAM,YAAY,OAAO,MAAM,gBAAgB,UAAU,MAAM;AAAA,IACrG;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKQ,2BAAoC;AAC1C,WAAO;AAAA,MACL,IAAI,kBAAiB;AAAA,MACrB,aAAa,kBAAiB;AAAA,MAC9B,OAAO,kBAAiB;AAAA,MACxB,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,YAAY,CAAC;AAAA,MACb,UAAU,CAAC;AAAA,MACX,UAAU,CAAC;AAAA,MACX,aAAa,CAAC;AAAA,IAChB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA,EAMQ,iBAAiB,SAAgC;AACvD,WAAO,QAAQ,IAAI,CAAC,UAAU;AAC5B,YAAM,YAAY,KAAK,YAAY,kBAAkB;AAErD,YAAM,UAAmB;AAAA,QACvB,IAAI;AAAA,QACJ,aAAa,MAAM;AAAA,QACnB,OAAO,YAAY,UAAU,MAAM,KAAK;AAAA,QACxC,QAAQ,MAAM;AAAA,QACd,OAAO,MAAM;AAAA,QACb,YAAY,CAAC;AAAA,QACb,UAAU,CAAC;AAAA,QACX,UAAU,CAAC;AAAA,QACX,aAAa,CAAC;AAAA,MAChB;AAEA,UAAI,MAAM,YAAY,MAAM,SAAS,SAAS,GAAG;AAC/C,gBAAQ,WAAW,KAAK,iBAAiB,MAAM,QAAQ;AAAA,MACzD;AAEA,aAAO;AAAA,IACT,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA,EAMQ,gBAAgB,UAAoC;AAC1D,UAAM,SAAwB,CAAC;AAE/B,UAAM,UAAU,CAAC,gBAAiC;AAChD,iBAAW,WAAW,aAAa;AACjC,eAAO,KAAK;AAAA,UACV;AAAA,UACA,WAAW,QAAQ;AAAA,QACrB,CAAC;AAED,YAAI,QAAQ,YAAY,QAAQ,SAAS,SAAS,GAAG;AACnD,kBAAQ,QAAQ,QAAQ;AAAA,QAC1B;AAAA,MACF;AAAA,IACF;AAEA,YAAQ,QAAQ;AAChB,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUQ,oBACN,cACA
,YAC2B;AAC3B,UAAM,SAAS,oBAAI,IAA0B;AAE7C,QAAI,aAAa,WAAW,GAAG;AAC7B,aAAO;AAAA,IACT;AAGA,UAAM,eACJ,WAAW,SAAS,IAChB,KAAK,IAAI,GAAG,WAAW,IAAI,CAAC,MAAM,EAAE,MAAM,CAAC,IAC3C,OAAO;AAGb,UAAM,cAAc,aAAa;AAAA,MAC/B,CAAC,OAAO,GAAG,QAAQ,OAAO,kBAAiB;AAAA,IAC7C;AAGA,UAAM,SAAS,CAAC,GAAG,WAAW,EAAE,KAAK,CAAC,GAAG,MAAM,EAAE,YAAY,EAAE,SAAS;AAGxE,WAAO,IAAI,kBAAiB,iBAAiB;AAAA,MAC3C,WAAW;AAAA,MACX,SAAS,eAAe;AAAA,IAC1B,CAAC;AAGD,aAAS,IAAI,GAAG,IAAI,OAAO,QAAQ,KAAK;AACtC,YAAM,UAAU,OAAO,CAAC;AACxB,YAAM,OAAO,OAAO,IAAI,CAAC;AAEzB,aAAO,IAAI,QAAQ,QAAQ,IAAI;AAAA,QAC7B,WAAW,QAAQ;AAAA,QACnB,SAAS,OAAO,KAAK,YAAY,IAAI,OAAO;AAAA,MAC9C,CAAC;AAAA,IACH;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,OAAwB,oBAAoB,oBAAI,IAAI;AAAA,IAClD;AAAA,IACA;AAAA,IACA;AAAA,EACF,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA,EAMD,OAAe,iBAAiB,MAAgC;AAC9D,UAAM,YAAY,KAAK,QAAQ;AAC/B,WAAO,OAAO,cAAc,YAAY,UAAU,WAAW,aAAa;AAAA,EAC5E;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOQ,kBACN,WACA,eACa;AACb,WAAO,UACJ;AAAA,MACC,CAAC,SACC,kBAAiB,kBAAkB,IAAI,KAAK,KAAK,KACjD,CAAC,kBAAiB,iBAAiB,IAAI,KACvC,YAAY,YAAY,KAAK,IAAI;AAAA,IACrC,EACC,IAAI,CAAC,SAAS;AACb,YAAM,YAAY,KAAK,OAAO,CAAC,GAAG,WAAW;AAC7C,aAAO;AAAA,QACL,MAAM,YAAY,UAAU,KAAK,IAAI;AAAA,QACrC;AAAA,MACF;AAAA,IACF,CAAC;AAAA,EACL;AAAA;AAAA;AAAA;AAAA;AAAA,EAMQ,oBACN,WACA,cACQ;AACR,UAAM,QAAQ,aAAa,SAAS;AACpC,QAAI,CAAC,OAAO;AAEV,aAAO;AAAA,IACT;AAEA,WAAO,MAAM;AAAA,EACf;AAAA;AAAA;AAAA;AAAA;AAAA,EAMQ,mBACN,cACA,eACe;AACf,QAAI,YAA2B;AAC/B,QAAI,gBAAgB;AAEpB,eAAW,CAAC,WAAW,KAAK,KAAK,eAAe;AAE9C,UAAI,gBAAgB,MAAM,aAAa,gBAAgB,MAAM,SAAS;AAEpE,YAAI,MAAM,YAAY,eAAe;AACnC,0BAAgB,MAAM;AACtB,sBAAY;AAAA,QACd;AAAA,MACF;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKQ,iBACN,UACA,YACA,eACA,cACM;AAEN,UAAM,aAAa,KAAK,gBAAgB,QAAQ;AAEhD,eAAW,aAAa,YAAY;AAClC,YAAM,eAAe,KAAK;AAAA,QACxB,UAAU;AAAA,QACV;AAAA,MACF;AACA,YAAM,YAAY,KAAK,mBAAmB,cAAc,aAAa;AAErE,UAAI,aAAa,WAAW,IAAI,SAAS,GAAG;AAC1C,mBAAW,IAAI,SAAS,EAAG,WAAW,KAAK,SAAS;AAAA,MACtD;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,cACN,UACA,QACA,QACA,WACA,eACA,cACM;AAEN,UAAM,aAAa,KAAK,gBAAgB,QAAQ;AAGhD,eAAW,SAAS,QAAQ;AAC1B,YAAM,eAAe,KAAK;AAAA,QACxB,MAAM;AAAA,QACN;AAAA,MACF;AACA,YAAM,YAAY,KAAK,mBAAmB,cAAc,aAAa;AAErE,UAAI,aAAa,WAAW,IAAI,SAAS,GAAG;AAC1C,mBAAW,IAAI,SAAS,EAAG,SAAS,KAAK,MAAM,EAAE;AAAA,MACnD;AAAA,IACF;AAGA,eAAW,SAAS,QAAQ;AAC1B,YAAM,eAAe,KAAK;AAAA,QACxB,MAAM;AAAA,QACN;AAAA,MACF;AACA,YAAM,YAAY,KAAK,mBAAmB,cAAc,aAAa;AAErE,UAAI,aAAa,WAAW,IAAI,SAAS,GAAG;AAC1C,mBAAW,IAAI,SAAS,EAAG,SAAS,KAAK,MAAM,EAAE;AAAA,MACnD;AAAA,IACF;AAGA,eAAW,YAAY,WAAW;AAChC,YAAM,eAAe,KAAK;AAAA,QACxB,SAAS;AAAA,QACT;AAAA,MACF;AACA,YAAM,YAAY,KAAK,mBAAmB,cAAc,aAAa;AAErE,UAAI,aAAa,WAAW,IAAI,SAAS,GAAG;AAC1C,mBAAW,IAAI,SAAS,EAAG,YAAY,KAAK,SAAS,EAAE;AAAA,MACzD;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,gBAAgB,UAA2C;AACjE,UAAM,MAAM,oBAAI,IAAqB;AAErC,UAAM,WAAW,CAAC,gBAAiC;AACjD,iBAAW,WAAW,aAAa;AACjC,YAAI,IAAI,QAAQ,IAAI,OAAO;AAE3B,YAAI,QAAQ,YAAY,QAAQ,SAAS,SAAS,GAAG;AACnD,mBAAS,QAAQ,QAAQ;AAAA,QAC3B;AAAA,MACF;AAAA,IACF;AAEA,aAAS,QAAQ;AACjB,WAAO;AAAA,EACT;AACF;;;AC/XO,IAAM,kBAAN,MAAM,yBAAwB,MAAM;AAAA,EACzC,YAAY,SAAiB,SAAwB;AACnD,UAAM,SAAS,OAAO;AACtB,SAAK,OAAO;AAAA,EACd;AAAA;AAAA;AAAA;AAAA,EAKA,OAAO,gBAAgB,OAAwB;AAC7C,WAAO,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AAAA,EAC9D;AAAA;AAAA;AAAA;AAAA,EAKA,OAAO,UAAU,SAAiB,OAAiC;AACjE,WAAO,IAAI;AAAA,MACT,GAAG,OAAO,KAAK,iBAAgB,gBAAgB,KAAK,CAAC;AAAA,MACrD,EAAE,OAAO,MAAM;AAAA,IACjB;AAAA,EACF;AACF;AAOO,IAAM,mBAAN,cAA+B,gBAAgB;AAAA,EACpD,YAAY,UAAU,+CAA+C;AACnE,UAAM,OAAO;AACb,SAAK,OAAO;AAAA,EACd;AACF;AAOO,IAAM,gBAAN,cAA4B,gBAAgB;AAAA,EACjD,YAAY,SAAiB,SAAwB;AACnD,UAAM,SAAS,OAAO;AACtB,SAAK,OAAO;AAAA,EACd;AACF;AAQO,IAAM,qBAAN,cAAi
C,gBAAgB;AAAA;AAAA;AAAA;AAAA,EAI7C;AAAA,EAET,YAAY,SAAiB,kBAAuC;AAClE,UAAM,OAAO;AACb,SAAK,OAAO;AACZ,SAAK,mBAAmB;AAAA,EAC1B;AAAA;AAAA;AAAA;AAAA,EAKA,aAAqB;AACnB,UAAM,EAAE,YAAY,OAAO,IAAI,KAAK;AACpC,UAAM,QAAQ;AAAA,MACZ,0BAA0B,UAAU;AAAA,MACpC;AAAA,MACA;AAAA,IACF;AAEA,eAAW,SAAS,QAAQ;AAC1B,YAAM,KAAK,MAAM,MAAM,IAAI,KAAK,MAAM,OAAO,EAAE;AAC/C,YAAM,KAAK,aAAa,MAAM,IAAI,EAAE;AACpC,YAAM;AAAA,QACJ,eAAe,MAAM,MAAM,KAAK,WAAW,MAAM,MAAM,MAAM;AAAA,MAC/D;AAAA,IACF;AAEA,WAAO,MAAM,KAAK,IAAI;AAAA,EACxB;AACF;;;AChHA,IAAM,kBAAkD;AAAA,EACtD,YAAY;AAAA,EACZ,gBAAgB;AAClB;AAQO,IAAM,eAAN,MAAmB;AAAA,EACP;AAAA,EACT;AAAA,EAER,YAAY,SAAgC;AAC1C,SAAK,UAAU;AAAA,MACb,GAAG;AAAA,MACH,GAAG;AAAA,IACL;AACA,SAAK,SAAS,CAAC;AAAA,EACjB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,SAAS,SAA0C;AACjD,SAAK,SAAS,CAAC;AAGf,SAAK,gBAAgB,SAAS,IAAI,MAAM,oBAAI,IAAY,CAAC;AAEzD,UAAM,aAAa,KAAK,OAAO;AAE/B,WAAO;AAAA,MACL,OAAO,eAAe;AAAA,MACtB,QAAQ,CAAC,GAAG,KAAK,MAAM;AAAA,MACvB;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,gBAAgB,SAA2B;AACzC,UAAM,SAAS,KAAK,SAAS,OAAO;AAEpC,QAAI,CAAC,OAAO,OAAO;AACjB,YAAM,IAAI;AAAA,QACR,8BAA8B,OAAO,UAAU;AAAA,QAC/C;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,gBACN,SACA,YACA,aACA,UACM;AACN,QAAI,aAAa,aAAa,UAAU;AAExC,aAAS,IAAI,GAAG,IAAI,QAAQ,QAAQ,KAAK;AACvC,YAAM,QAAQ,QAAQ,CAAC;AACvB,YAAMA,QAAO,aAAa,GAAG,UAAU,aAAa,CAAC,MAAM,IAAI,CAAC;AAGhE,WAAK,cAAc,OAAOA,KAAI;AAG9B,WAAK,oBAAoB,OAAOA,KAAI;AAGpC,WAAK,kBAAkB,OAAOA,KAAI;AAGlC,WAAK,kBAAkB,OAAOA,OAAM,UAAU;AAC9C,mBAAa,MAAM;AAGnB,UAAI,aAAa;AACf,aAAK,wBAAwB,OAAOA,OAAM,WAAW;AAAA,MACvD;AAGA,YAAM,MAAM,GAAG,MAAM,KAAK,IAAI,MAAM,MAAM;AAC1C,WAAK,kBAAkB,OAAOA,OAAM,KAAK,QAAQ;AACjD,eAAS,IAAI,GAAG;AAGhB,UAAI,MAAM,YAAY,MAAM,SAAS,SAAS,GAAG;AAC/C,aAAK,gBAAgB,MAAM,UAAUA,OAAM,OAAO,QAAQ;AAAA,MAC5D;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,cAAc,OAAiBA,OAAoB;AACzD,QAAI,CAAC,MAAM,SAAS,MAAM,MAAM,KAAK,MAAM,IAAI;AAC7C,WAAK,SAAS;AAAA,QACZ,MAAM;AAAA,QACN,SAAS;AAAA,QACT,MAAAA;AAAA,QACA;AAAA,MACF,CAAC;AAAA,IACH;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,oBAAoB,OAAiBA,OAAoB;AAC/D,QAAI,MAAM,MAAM,SAAS,KAAK,QAAQ,gBAAgB;AACpD,WAAK,SAAS;AAAA,QACZ,MAAM;AAAA,QACN,SAAS,iBAAiB,KAAK,QAAQ,cAAc,gBAAgB,MAAM,MAAM,MAAM;AAAA,QACvF,MAAAA;AAAA,QACA;AAAA,MACF,CAAC;AAAA,IACH;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,kBAAkB,OAAiBA,OAAoB;AAC7D,QAAI,MAAM,SAAS,GAAG;AACpB,WAAK,SAAS;AAAA,QACZ,MAAM;AAAA,QACN,SAAS,iCAAiC,MAAM,MAAM;AAAA,QACtD,MAAAA;AAAA,QACA;AAAA,MACF,CAAC;AAAA,IACH;AAEA,QAAI,MAAM,SAAS,KAAK,QAAQ,YAAY;AAC1C,WAAK,SAAS;AAAA,QACZ,MAAM;AAAA,QACN,SAAS,eAAe,MAAM,MAAM,kCAAkC,KAAK,QAAQ,UAAU;AAAA,QAC7F,MAAAA;AAAA,QACA;AAAA,MACF,CAAC;AAAA,IACH;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,kBACN,OACAA,OACA,YACM;AACN,QAAI,MAAM,SAAS,YAAY;AAC7B,WAAK,SAAS;AAAA,QACZ,MAAM;AAAA,QACN,SAAS,8BAA8B,UAAU,OAAO,MAAM,MAAM;AAAA,QACpE,MAAAA;AAAA,QACA;AAAA,MACF,CAAC;AAAA,IACH;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,wBACN,OACAA,OACA,QACM;AACN,QAAI,MAAM,SAAS,OAAO,QAAQ;AAChC,WAAK,SAAS;AAAA,QACZ,MAAM;AAAA,QACN,SAAS,eAAe,MAAM,MAAM,4BAA4B,OAAO,MAAM;AAAA,QAC7E,MAAAA;AAAA,QACA;AAAA,MACF,CAAC;AAAA,IACH;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,kBACN,OACAA,OACA,KACA,UACM;AACN,QAAI,SAAS,IAAI,GAAG,GAAG;AACrB,WAAK,SAAS;AAAA,QACZ,MAAM;AAAA,QACN,SAAS,qBAAqB,MAAM,KAAK,aAAa,MAAM,MAAM;AAAA,QAClE,MAAAA;AAAA,QACA;AAAA,MACF,CAAC;AAAA,IACH;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,SAAS,OAAiC;AAChD,SAAK,OAAO,KAAK,KAAK;AAAA,EACxB;AACF;;;ACjOO,IAAM,eAAe;AAAA,EAC1B;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF;AASO,IAAM,uBAAuB;AAAA,EAClC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;A
AAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF;AAMO,IAAM,sBAAsB;AAyB5B,IAAM,YAAN,MAAgB;AAAA,EAIrB,YACmB,QACA,aACjB,SACA;AAHiB;AACA;AAGjB,SAAK,iBAAiB,SAAS,kBAAkB;AACjD,SAAK,WAAW,CAAC,GAAG,cAAc,GAAI,SAAS,sBAAsB,CAAC,CAAE;AAAA,EAC1E;AAAA,EAViB;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAgBjB,KAAK,KAAqC;AACxC,SAAK,OAAO,KAAK,oCAAoC;AAGrD,UAAM,gBAAgB,KAAK,eAAe,GAAG;AAC7C,QAAI,eAAe;AACjB,WAAK,OAAO;AAAA,QACV,kDAAkD,cAAc,SAAS,IAAI,cAAc,OAAO;AAAA,MACpG;AACA,aAAO;AAAA,IACT;AAGA,UAAM,kBAAkB,KAAK,gBAAgB,GAAG;AAChD,QAAI,iBAAiB;AACnB,WAAK,OAAO;AAAA,QACV,sDAAsD,gBAAgB,SAAS,IAAI,gBAAgB,OAAO;AAAA,MAC5G;AACA,aAAO;AAAA,IACT;AAEA,SAAK,OAAO,KAAK,sCAAsC;AACvD,UAAM,IAAI,iBAAiB;AAAA,EAC7B;AAAA;AAAA;AAAA;AAAA,EAKQ,eAAe,KAA4C;AAEjE,eAAW,QAAQ,IAAI,OAAO;AAC5B,UAAI,CAAC,KAAK,mBAAmB,KAAK,IAAI,GAAG;AACvC;AAAA,MACF;AAEA,YAAM,SAAS,KAAK,KAAK,CAAC,GAAG;AAC7B,UAAI,WAAW,UAAa,SAAS,KAAK,gBAAgB;AACxD;AAAA,MACF;AAEA,WAAK,OAAO;AAAA,QACV,kCAAkC,KAAK,IAAI,aAAa,MAAM;AAAA,MAChE;AAGA,YAAM,YAAY,KAAK,QAAQ;AAC/B,UAAI,CAAC,WAAW;AAEd,eAAO;AAAA,UACL,UAAU,CAAC,KAAK,QAAQ;AAAA,UACxB,WAAW;AAAA,UACX,SAAS;AAAA,QACX;AAAA,MACF;AAGA,YAAM,SAAS,KAAK,iBAAiB,KAAK,WAAW,MAAM;AAC3D,UAAI,QAAQ;AACV,eAAO,KAAK,yBAAyB,QAAQ,GAAG;AAAA,MAClD;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKQ,gBAAgB,KAA4C;AAClE,UAAM,aAGD,CAAC;AAGN,eAAW,SAAS,IAAI,QAAQ;AAC9B,YAAM,SAAS,KAAK,kBAAkB,KAAK;AAC3C,UAAI,WAAW,UAAa,SAAS,KAAK,gBAAgB;AACxD;AAAA,MACF;AAEA,UAAI,KAAK,eAAe,OAAO,GAAG,GAAG;AACnC,cAAM,QAAQ,KAAK,eAAe,OAAO,MAAM;AAC/C,mBAAW,KAAK;AAAA,UACd,QAAQ;AAAA,YACN,UAAU,CAAC,MAAM,QAAQ;AAAA,YACzB,WAAW;AAAA,YACX,SAAS;AAAA,UACX;AAAA,UACA;AAAA,QACF,CAAC;AAAA,MACH;AAAA,IACF;AAGA,eAAW,SAAS,IAAI,QAAQ;AAC9B,YAAM,SAAS,MAAM,KAAK,CAAC,GAAG;AAC9B,UAAI,WAAW,UAAa,SAAS,KAAK,gBAAgB;AACxD;AAAA,MACF;AAEA,UAAI,KAAK,eAAe,KAAK,GAAG;AAC9B,cAAM,QAAQ,KAAK,oBAAoB,OAAO,MAAM;AACpD,mBAAW,KAAK;AAAA,UACd,QAAQ;AAAA,YACN,UAAU,CAAC,MAAM,QAAQ;AAAA,YACzB,WAAW;AAAA,YACX,SAAS;AAAA,UACX;AAAA,UACA;AAAA,QACF,CAAC;AAAA,MACH;AAAA,IACF;AAEA,QAAI,WAAW,WAAW,GAAG;AAC3B,aAAO;AAAA,IACT;AAGA,eAAW,KAAK,CAAC,GAAG,MAAM,EAAE,QAAQ,EAAE,KAAK;AAC3C,UAAM,OAAO,WAAW,CAAC;AAEzB,WAAO,KAAK,yBAAyB,KAAK,QAAQ,GAAG;AAAA,EACvD;AAAA;AAAA;AAAA;AAAA,EAKQ,iBACN,KACA,WACA,QACsB;AAEtB,UAAM,QAAQ,KAAK,YAAY,aAAa,SAAS;AACrD,QAAI,OAAO;AACT,aAAO;AAAA,QACL,UAAU,CAAC,MAAM,QAAQ;AAAA,QACzB,WAAW;AAAA,QACX,SAAS;AAAA,MACX;AAAA,IACF;AAGA,UAAM,QAAQ,KAAK,YAAY,aAAa,SAAS;AACrD,QAAI,OAAO;AACT,aAAO;AAAA,QACL,UAAU,CAAC,MAAM,QAAQ;AAAA,QACzB,WAAW;AAAA,QACX,SAAS;AAAA,MACX;AAAA,IACF;AAGA,UAAM,OAAO,KAAK,YAAY,QAAQ,SAAS;AAC/C,QAAI,QAAQ,KAAK,QAAQ,MAAM;AAC7B,aAAO,KAAK,iBAAiB,KAAK,KAAK,OAAO,MAAM,MAAM;AAAA,IAC5D;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKQ,eACN,OACA,MACS;AACT,QAAI,MAAM,SAAS,UAAU,MAAM,SAAS,SAAS;AACnD,aAAO;AAAA,IACT;AAGA,QAAI,kBAAkB;AACtB,UAAM,WAAW,KAAK,YAAY,YAAY,MAAM,QAAQ;AAE5D,eAAW,SAAS,UAAU;AAC5B,UAAI,CAAC,MAAO;AAGZ,UAAI,UAAU,SAAS,UAAU,OAAO;AACtC,cAAM,WAAW;AACjB,YAAI,oBAAoB,KAAK,SAAS,IAAI,GAAG;AAC3C;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAIA,UAAM,QAAQ,SAAS,OAAO,CAAC,MAAM,MAAM,IAAI,EAAE;AACjD,WAAO,mBAAmB,KAAM,QAAQ,KAAK,kBAAkB,QAAQ;AAAA,EACzE;AAAA;AAAA;AAAA;AAAA,EAKQ,eAAe,OAAkC;AAEvD,QAAI,MAAM,UAAU,kBAAkB;AACpC,aAAO;AAAA,IACT;AAEA,UAAM,EAAE,MAAM,UAAU,SAAS,IAAI,MAAM;AAG3C,QAAI,WAAW,KAAK,WAAW,GAAG;AAChC,aAAO;AAAA,IACT;AAGA,QAAI,cAAc;AAClB,aAAS,MAAM,GAAG,MAAM,KAAK,QAAQ,OAAO;AAC1C,YAAM,WAAW,KAAK,GAAG,IAAI,WAAW,CAAC;AACzC,UAAI,YAAY,QAAQ,KAAK,SAAS,KAAK,KAAK,CAAC,GAAG;AAClD;AAAA,MACF;AAAA,IACF;AAGA,WAAO,cAAc,KAAK,eAAe,WAAW,KAAK;AAAA,EAC3D;AAAA;AAAA;AAAA;AAAA,EAKQ,yBACN,SACA,KACe;AACf,UAAM,WAAW,CAAC,GAAG,QAAQ,QAAQ;AACrC,QAAI,UAAU,QAAQ;AAGtB,aACM,SAAS,QAAQ,UAAU,GAC/B,UAAU,KAAK,gBACf,UACA;AAC
A,YAAM,oBAAoB,KAAK,uBAAuB,KAAK,MAAM;AACjE,UAAI,kBAAkB,WAAW,GAAG;AAClC;AAAA,MACF;AAEA,eAAS,KAAK,GAAG,iBAAiB;AAClC,gBAAU;AAAA,IACZ;AAEA,WAAO;AAAA,MACL;AAAA,MACA,WAAW,QAAQ;AAAA,MACnB;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,uBACN,KACA,QACU;AACV,UAAM,OAAiB,CAAC;AAGxB,eAAW,QAAQ,IAAI,OAAO;AAC5B,UAAI,KAAK,KAAK,CAAC,GAAG,YAAY,QAAQ;AACpC;AAAA,MACF;AAEA,UAAI,KAAK,sBAAsB,KAAK,IAAI,GAAG;AACzC,cAAM,YAAY,KAAK,QAAQ;AAC/B,YAAI,WAAW;AACb,gBAAM,QAAQ,KAAK,YAAY,aAAa,SAAS;AACrD,cAAI,OAAO;AACT,iBAAK,KAAK,MAAM,QAAQ;AAAA,UAC1B;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAGA,eAAW,SAAS,IAAI,QAAQ;AAC9B,YAAM,YAAY,KAAK,kBAAkB,KAAK;AAC9C,UAAI,cAAc,QAAQ;AACxB;AAAA,MACF;AAEA,UAAI,KAAK,eAAe,OAAO,GAAG,KAAK,CAAC,KAAK,SAAS,MAAM,QAAQ,GAAG;AACrE,aAAK,KAAK,MAAM,QAAQ;AAAA,MAC1B;AAAA,IACF;AAGA,eAAW,SAAS,IAAI,QAAQ;AAC9B,UAAI,MAAM,KAAK,CAAC,GAAG,YAAY,QAAQ;AACrC;AAAA,MACF;AAEA,UAAI,KAAK,eAAe,KAAK,KAAK,CAAC,KAAK,SAAS,MAAM,QAAQ,GAAG;AAChE,aAAK,KAAK,MAAM,QAAQ;AAAA,MAC1B;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKQ,mBAAmB,MAAuB;AAChD,UAAM,iBAAiB,KAAK,KAAK,EAAE,YAAY;AAC/C,WAAO,KAAK,SAAS;AAAA,MAAK,CAAC,YACzB,eAAe,SAAS,QAAQ,YAAY,CAAC;AAAA,IAC/C;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,sBAAsB,MAAuB;AACnD,UAAM,iBAAiB,KAAK,KAAK,EAAE,YAAY;AAC/C,WAAO,qBAAqB;AAAA,MAAK,CAAC,WAChC,eAAe,SAAS,OAAO,YAAY,CAAC;AAAA,IAC9C;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,kBAAkB,OAA6C;AACrE,eAAW,YAAY,MAAM,UAAU;AACrC,YAAM,QAAQ,KAAK,YAAY,QAAQ,SAAS,IAAI;AACpD,UAAI,SAAS,UAAU,OAAO;AAC5B,cAAM,OAAQ,MAA0B;AACxC,YAAI,QAAQ,KAAK,CAAC,GAAG,YAAY,QAAW;AAC1C,iBAAO,KAAK,CAAC,EAAE;AAAA,QACjB;AAAA,MACF;AAAA,IACF;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA,EAMQ,eAAe,OAAyB,QAAwB;AACtE,QAAI,QAAQ;AAGZ,cAAU,KAAK,iBAAiB,SAAS,KAAK;AAG9C,aAAS,MAAM,SAAS,SAAS;AAGjC,UAAM,WAAW,KAAK,YAAY,YAAY,MAAM,QAAQ;AAC5D,eAAW,SAAS,UAAU;AAC5B,UAAI,SAAS,UAAU,OAAO;AAC5B,cAAM,WAAW;AACjB,YAAI,oBAAoB,KAAK,SAAS,IAAI,GAAG;AAC3C,mBAAS;AAAA,QACX;AAAA,MACF;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKQ,oBAAoB,OAAyB,QAAwB;AAC3E,QAAI,QAAQ;AAGZ,cAAU,KAAK,iBAAiB,SAAS,KAAK;AAG9C,aAAS,MAAM,KAAK,WAAW;AAG/B,QAAI,MAAM,UAAU,kBAAkB;AACpC,eAAS;AAAA,IACX;AAEA,WAAO;AAAA,EACT;AACF;;;AC1eA,iBAAkB;;;AC8BX,IAAe,mBAAf,MAAgC;AAAA,EAClB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYnB,YACE,QACA,OACA,eACA,SACA,eACA,YACA;AACA,SAAK,SAAS;AACd,SAAK,QAAQ;AACb,SAAK,gBAAgB;AACrB,SAAK,aAAa,SAAS,cAAc;AACzC,SAAK,cAAc,SAAS,eAAe;AAC3C,SAAK,gBAAgB;AACrB,SAAK,aAAa;AAClB,SAAK,cAAc,SAAS;AAAA,EAC9B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASU,IACR,OACA,YACG,MACG;AACN,UAAM,mBAAmB,IAAI,KAAK,aAAa,KAAK,OAAO;AAC3D,SAAK,OAAO,KAAK,EAAE,kBAAkB,GAAG,IAAI;AAAA,EAC9C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOU,WAAW,OAAiC;AACpD,QAAI,KAAK,YAAY;AACnB,WAAK,WAAW,MAAM,KAAK;AAAA,IAC7B;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQU,iBAAiB,OAAmC;AAC5D,WAAO;AAAA,MACL,WAAW,KAAK;AAAA,MAChB;AAAA,MACA,OAAO;AAAA,MACP,WAAW;AAAA,MACX,aAAa;AAAA,MACb,cAAc;AAAA,MACd,aAAa;AAAA,IACf;AAAA,EACF;AAeF;;;AC5GO,IAAe,mBAAf,cAAwC,iBAAiB;AAAA,EAC9D,YACE,QACA,OACA,eACA,SACA,eACA,YACA;AACA,UAAM,QAAQ,OAAO,eAAe,SAAS,eAAe,UAAU;AAAA,EACxE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYA,MAAgB,YACd,QACA,cACA,YACA,OACkE;AAClE,UAAM,SAAS,MAAM,UAAU,KAAK;AAAA,MAClC;AAAA,MACA;AAAA,MACA;AAAA,MACA,cAAc,KAAK;AAAA,MACnB,eAAe,KAAK;AAAA,MACpB,YAAY,KAAK;AAAA,MACjB,aAAa,KAAK;AAAA,MAClB,aAAa,KAAK;AAAA,MAClB,WAAW,KAAK;AAAA,MAChB;AAAA,IACF,CAAC;AAED,SAAK,WAAW,OAAO,KAAK;AAE5B,WAAO;AAAA,MACL,QAAQ,OAAO;AAAA,MACf,OAAO,OAAO;AAAA,IAChB;AAAA,EACF;AACF;;;AFtDO,IAAM,iBAAsC,aAAE;AAAA,EAAK,MACxD,aAAE,OAAO;AAAA,IACP,OAAO,aAAE,OAAO,EAAE,SAAS,0BAA0B;A
AAA,IACrD,OAAO,aAAE,OAAO,EAAE,IAAI,EAAE,IAAI,CAAC,EAAE,SAAS,iCAAiC;AAAA,IACzE,QAAQ,aAAE,OAAO,EAAE,IAAI,EAAE,IAAI,CAAC,EAAE,SAAS,sBAAsB;AAAA,IAC/D,UAAU,aAAE,MAAM,cAAc,EAAE,SAAS,EAAE,SAAS,gBAAgB;AAAA,EACxE,CAAC;AACH;AAKO,IAAM,oBAAoB,aAAE,OAAO;AAAA,EACxC,SAAS,aAAE,MAAM,cAAc,EAAE,SAAS,uBAAuB;AACnE,CAAC;AA0BM,IAAM,eAAN,cAA2B,iBAAiB;AAAA,EAChC;AAAA,EACA;AAAA,EAEjB,YACE,QACA,OACA,SACA,eACA,aACA;AACA;AAAA,MACE;AAAA,MACA;AAAA,MACA;AAAA,MACA,EAAE,GAAG,SAAS,YAAY;AAAA,MAC1B;AAAA,IACF;AACA,SAAK,oBAAoB,SAAS;AAClC,SAAK,iBAAiB,SAAS,kBAAkB;AAAA,EACnD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,MAAM,QACJ,UAC6D;AAC7D,SAAK,IAAI,QAAQ,4BAA4B,SAAS,MAAM,SAAS;AAErE,QAAI,CAAC,SAAS,KAAK,GAAG;AACpB,WAAK,IAAI,SAAS,gDAAgD;AAClE,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,QAAI;AACF,YAAM,SAAS,MAAM,KAAK;AAAA,QACxB;AAAA,QACA,KAAK,kBAAkB;AAAA,QACvB,KAAK,gBAAgB,QAAQ;AAAA,QAC7B;AAAA,MACF;AAEA,YAAM,UAAU,KAAK,iBAAiB,OAAO,OAAO,OAAO;AAG3D,UAAI,CAAC,KAAK,gBAAgB;AACxB,aAAK,gBAAgB,OAAO;AAAA,MAC9B;AAEA,WAAK;AAAA,QACH;AAAA,QACA,yBAAyB,QAAQ,MAAM;AAAA,MACzC;AAEA,aAAO,EAAE,SAAS,OAAO,OAAO,MAAM;AAAA,IACxC,SAAS,OAAO;AAEd,UAAI,iBAAiB,oBAAoB;AACvC,aAAK,IAAI,SAAS,sBAAsB,MAAM,OAAO,EAAE;AACvD,cAAM;AAAA,MACR;AAEA,YAAM,UAAU,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AACrE,WAAK,IAAI,SAAS,sBAAsB,OAAO,EAAE;AACjD,YAAM,IAAI,cAAc,oCAAoC,OAAO,IAAI;AAAA,QACrE,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOQ,gBAAgB,SAA2B;AACjD,QAAI,QAAQ,WAAW,GAAG;AACxB;AAAA,IACF;AAEA,UAAM,YAAY,IAAI,aAAa,KAAK,iBAAiB;AACzD,cAAU,gBAAgB,OAAO;AAAA,EACnC;AAAA;AAAA;AAAA;AAAA,EAKU,oBAA4B;AACpC,WAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkDT;AAAA;AAAA;AAAA;AAAA,EAKU,gBAAgB,UAA0B;AAClD,WAAO;AAAA;AAAA,EAET,QAAQ;AAAA,EACR;AAAA;AAAA;AAAA;AAAA,EAKQ,iBAAiB,SAAiC;AACxD,QAAI,QAAQ,WAAW,GAAG;AACxB,aAAO,CAAC;AAAA,IACV;AAGA,WAAO,KAAK,eAAe,SAAS,CAAC;AAAA,EACvC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOQ,eACN,SACA,eACY;AACZ,WAAO,QAAQ,IAAI,CAAC,UAAU;AAC5B,YAAM,kBAA4B;AAAA,QAChC,OAAO,MAAM,MAAM,KAAK;AAAA,QACxB,OAAO;AAAA,QACP,QAAQ,MAAM;AAAA,MAChB;AAEA,UAAI,MAAM,YAAY,MAAM,SAAS,SAAS,GAAG;AAC/C,wBAAgB,WAAW,KAAK;AAAA,UAC9B,MAAM;AAAA,UACN,gBAAgB;AAAA,QAClB;AAAA,MACF;AAEA,aAAO;AAAA,IACT,CAAC;AAAA,EACH;AACF;;;AGtPA,IAAAC,MAAoB;AACpB,IAAAC,QAAsB;AACtB,IAAAC,cAAkB;;;ACDlB,SAAoB;AACpB,WAAsB;AA8Bf,IAAe,qBAAf,cAA0C,iBAAiB;AAAA,EAC7C;AAAA,EAEnB,YACE,QACA,OACA,eACA,YACA,SACA,eACA,YACA;AACA,UAAM,QAAQ,OAAO,eAAe,SAAS,eAAe,UAAU;AACtE,SAAK,aAAa;AAAA,EACpB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAgB,cACd,QACA,UAIA,OACkE;AAClE,UAAM,SAAS,MAAM,UAAU,WAAW;AAAA,MACxC;AAAA,MACA;AAAA,MACA,cAAc,KAAK;AAAA,MACnB,eAAe,KAAK;AAAA,MACpB,YAAY,KAAK;AAAA,MACjB,aAAa,KAAK;AAAA,MAClB,aAAa,KAAK;AAAA,MAClB,WAAW,KAAK;AAAA,MAChB;AAAA,IACF,CAAC;AAED,SAAK,WAAW,OAAO,KAAK;AAE5B,WAAO;AAAA,MACL,QAAQ,OAAO;AAAA,MACf,OAAO,OAAO;AAAA,IAChB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQU,kBAAkB,WAA2B;AACrD,UAAM,cAAiB,gBAAa,SAAS;AAC7C,WAAO,YAAY,SAAS,QAAQ;AAAA,EACtC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASU,kBACR,WACA,WAAmB,aACL;AACd,UAAM,eAAoB,gBAAW,SAAS,IAC1C,YACK,aAAQ,KAAK,YAAY,SAAS;AAC3C,UAAM,cAAc,KAAK,kBAAkB,YAAY;AACvD,WAAO;AAAA,MACL,MAAM;AAAA,MACN,OAAO,QAAQ,QAAQ,WAAW,WAAW;AAAA,IAC/C;AAAA,EACF;AACF;;;ADxGO,IAAM,4BAA4B,cAAE,OAAO;AAAA,EAChD,QAAQ,cAAE,QAAQ,EAAE,SAAS,yCAAyC;AAAA,EACtE,aAAa,cACV,OAAO,EACP,SAAS,EACT,SAAS,qDAAqD;AAAA,EACjE,qBAAqB,cAClB,QAAQ,EACR,SAAS,0CAA0C;AACxD,CAAC;AA8BM,IAAM,qBAAN,cAAiC,mBAAmB;AA
AA,EACxC;AAAA,EACA;AAAA,EAEjB,YACE,QACA,OACA,YACA,SACA,eACA,YACA;AACA;AAAA,MACE;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,cAAc,IAAI,wBAA6B;AAAA,IACjD;AACA,SAAK,iBAAiB,SAAS,kBAAkB;AACjD,SAAK,kBAAkB,SAAS,mBAAmB;AAAA,EACrD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,MAAM,QAAQ,YAA4C;AACxD,SAAK,IAAI,QAAQ,gCAAgC,UAAU,QAAQ;AAEnE,QAAI,eAAe,GAAG;AACpB,WAAK,IAAI,QAAQ,oBAAoB;AACrC,aAAO;AAAA,IACT;AAGA,UAAM,gBAAgB,KAAK,IAAI,KAAK,gBAAgB,UAAU;AAC9D,SAAK,IAAI,QAAQ,kCAAkC,aAAa,EAAE;AAElE,UAAM,cAAc,MAAM,KAAK,iBAAiB,GAAG,aAAa;AAEhE,QAAI,YAAY,UAAU,YAAY,aAAa;AAEjD,UAAI,YAAY,uBAAuB,gBAAgB,YAAY;AACjE,aAAK,IAAI,QAAQ,8CAA8C;AAC/D,cAAM,kBAAkB,KAAK;AAAA,UAC3B,gBAAgB,KAAK;AAAA,UACrB;AAAA,QACF;AACA,cAAM,qBAAqB,MAAM,KAAK;AAAA,UACpC,gBAAgB;AAAA,UAChB;AAAA,QACF;AAEA,YAAI,mBAAmB,UAAU,mBAAmB,aAAa;AAC/D,gBAAM,SAAS,KAAK;AAAA,YAClB,YAAY;AAAA,YACZ,mBAAmB;AAAA,UACrB;AACA,eAAK,WAAY,WAAW,KAAK,MAAM;AACvC,eAAK;AAAA,YACH;AAAA,YACA,oCAAoC,OAAO,MAAM;AAAA,UACnD;AACA,iBAAO;AAAA,QACT;AAAA,MACF;AAEA,WAAK,WAAY,WAAW,KAAK,MAAM;AACvC,WAAK;AAAA,QACH;AAAA,QACA,6BAA6B,YAAY,YAAY,MAAM;AAAA,MAC7D;AACA,aAAO,YAAY;AAAA,IACrB;AAGA,QAAI,gBAAgB,YAAY;AAC9B,YAAM,mBAAmB,gBAAgB;AACzC,YAAM,iBAAiB,KAAK;AAAA,QAC1B,gBAAgB,KAAK;AAAA,QACrB;AAAA,MACF;AAEA,WAAK;AAAA,QACH;AAAA,QACA,iCAAiC,gBAAgB,IAAI,cAAc;AAAA,MACrE;AAEA,YAAM,eAAe,MAAM,KAAK;AAAA,QAC9B;AAAA,QACA;AAAA,MACF;AAEA,UAAI,aAAa,UAAU,aAAa,aAAa;AACnD,aAAK,WAAY,WAAW,KAAK,MAAM;AACvC,aAAK;AAAA,UACH;AAAA,UACA,8BAA8B,aAAa,YAAY,MAAM;AAAA,QAC/D;AACA,eAAO,aAAa;AAAA,MACtB;AAAA,IACF;AAEA,SAAK,WAAY,WAAW,KAAK,MAAM;AACvC,SAAK,IAAI,QAAQ,4BAA4B;AAC7C,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKA,MAAc,iBACZ,WACA,SACoC;AACpC,SAAK,IAAI,QAAQ,yBAAyB,SAAS,IAAI,OAAO,EAAE;AAEhE,SAAK;AAAA,MACH;AAAA,MACA;AAAA,IACF;AACA,UAAM,gBAAgB,KAAK,eAAe,WAAW,OAAO;AAE5D,SAAK;AAAA,MACH;AAAA,MACA,gDAAgD,SAAS,IAAI,OAAO;AAAA,IACtE;AACA,UAAM,SAAS,MAAM,UAAU,WAAW;AAAA,MACxC,QAAQ;AAAA,MACR,UAAU;AAAA,QACR;AAAA,UACE,MAAM;AAAA,UACN,SAAS;AAAA,YACP;AAAA,cACE,MAAM;AAAA,cACN,MAAM,KAAK,gBAAgB,WAAW,OAAO;AAAA,YAC/C;AAAA,YACA,GAAG;AAAA,UACL;AAAA,QACF;AAAA,MACF;AAAA,MACA,cAAc,KAAK;AAAA,MACnB,eAAe,KAAK;AAAA,MACpB,YAAY,KAAK;AAAA,MACjB,aAAa,KAAK;AAAA,MAClB,aAAa,KAAK;AAAA,MAClB,WAAW;AAAA,MACX,OAAO;AAAA,IACT,CAAC;AACD,SAAK;AAAA,MACH;AAAA,MACA,oCAAoC,SAAS,IAAI,OAAO;AAAA,IAC1D;AAEA,SAAK,WAAW,OAAO,KAAK;AAE5B,WAAO,OAAO;AAAA,EAChB;AAAA;AAAA;AAAA;AAAA,EAKQ,eACN,WACA,SACyC;AACzC,UAAM,gBAAyD,CAAC;AAEhE,aAAS,SAAS,WAAW,UAAU,SAAS,UAAU;AAExD,YAAM,YAAiB;AAAA,QACrB,KAAK;AAAA,QACL,cAAc,SAAS,CAAC;AAAA,MAC1B;AACA,YAAM,cAAiB,iBAAa,SAAS;AAC7C,YAAM,cAAc,YAAY,SAAS,QAAQ;AAEjD,oBAAc,KAAK;AAAA,QACjB,MAAM;AAAA,QACN,OAAO,yBAAyB,WAAW;AAAA,MAC7C,CAAC;AAAA,IACH;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKQ,cAAc,OAAe,cAA8B;AACjE,WAAO,GAAG,MAAM,KAAK,CAAC;AAAA,EAAK,aAAa,KAAK,CAAC;AAAA,EAChD;AAAA;AAAA;AAAA;AAAA,EAKU,oBAA4B;AACpC,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKU,gBAAgB,WAAmB,SAAyB;AACpE,UAAM,YAAY,UAAU,YAAY;AACxC,WAAO;AAAA;AAAA,iBAEM,SAAS,gCAAgC,SAAS,IAAI,OAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAwC5E;AACF;;;AEhTA,IAAAC,cAAkB;AAqBlB,IAAM,sBAAsB,cAAE,OAAO;AAAA,EACnC,KAAK,cACF,OAAO,EACP,SAAS,EACT,SAAS,wEAA8D;AAC5E,CAAC;AAKD,IAAM,0BAA0B,cAAE,OAAO;AAAA,EACvC,OAAO,cAAE,OAAO,EAAE,IAAI,EAAE,SAAS,yCAAyC;AAAA,EAC1E,KAAK,cACF,OAAO,EACP,SAAS,EACT,SAAS,wEAA8D;AAC5E,CAAC;AAKD,IAAM,qBAAqB,cAAE,OAAO;AAAA,EAClC,SAAS,cAAE,MAAM,uBAAuB;AAC1C,CAAC;AAgBM,IAAM,gBAAN,cAA4B,iBAAiB;AAAA,EAClD,YACE,QACA,OACA,SACA,eACA,YACA;AACA;AAAA,MACE;AAAA,MACA;A
AAA,MACA,SAAS,iBAAiB;AAAA,MAC1B;AAAA,MACA;AAAA,MACA,cAAc,IAAI,wBAA6B;AAAA,IACjD;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,MAAM,WACJ,UACA,WACA,eACoB;AACpB,UAAM,iBAAiB,iBAAiB,KAAK;AAC7C,UAAM,aAAa,kBAAkB;AACrC,UAAM,YACH,eAAwC,WACxC,eAAmC,MACpC;AACF,SAAK;AAAA,MACH;AAAA,MACA,gCAAgC,SAAS,MAAM,kBAAkB,aAAa,cAAc,EAAE,UAAU,SAAS;AAAA,IACnH;AAEA,QAAI,SAAS,WAAW,GAAG;AACzB,WAAK,IAAI,QAAQ,sBAAsB;AACvC,aAAO,CAAC;AAAA,IACV;AAEA,QAAI;AACF,UAAI,cAAc,GAAG;AAEnB,aAAK,IAAI,QAAQ,2CAA2C;AAC5D,cAAMC,WAAqB,CAAC;AAE5B,iBAAS,IAAI,GAAG,IAAI,SAAS,QAAQ,KAAK;AACxC,gBAAM,WAAW,SAAS,CAAC;AAG3B,eAAK,IAAI,QAAQ,cAAc,IAAI,CAAC,MAAM,SAAS,MAAM,KAAK;AAE9D,gBAAM,SAAS,MAAM,UAAU,KAAK;AAAA,YAClC,QAAQ;AAAA,YACR,cAAc,KAAK,kBAAkB,QAAQ;AAAA,YAC7C,YAAY,KAAK,sBAAsB,QAAQ;AAAA,YAC/C,cAAc;AAAA,YACd,eAAe,KAAK;AAAA,YACpB,YAAY,KAAK;AAAA,YACjB,aAAa,KAAK;AAAA,YAClB,aAAa,KAAK;AAAA,YAClB,WAAW,KAAK;AAAA,YAChB,OAAO;AAAA,UACT,CAAC;AAED,eAAK,WAAW,OAAO,KAAK;AAE5B,gBAAM,WAAW,KAAK;AAAA,YACpB;AAAA,YACA,OAAO,OAAO;AAAA,UAChB;AACA,UAAAA,SAAQ,KAAK,EAAE,UAAU,KAAK,SAAS,CAAC;AAAA,QAC1C;AAGA,aAAK,WAAY,WAAW,KAAK,MAAM;AAEvC,aAAK;AAAA,UACH;AAAA,UACA,cAAcA,SAAQ,MAAM,qBAAqBA,SAAQ,OAAO,CAAC,MAAM,EAAE,GAAG,EAAE,MAAM;AAAA,QACtF;AAEA,eAAOA;AAAA,MACT;AAGA,YAAM,kBAAkB,SAAS,IAAI,CAAC,MAAM,WAAW,EAAE,OAAO,KAAK,EAAE;AAGvE,YAAM,eAAe,MAAM,eAAe;AAAA,QACxC;AAAA,QACA;AAAA,QACA,OAAO,UAAU,KAAK,mBAAmB,OAAO,cAAc;AAAA,MAChE;AAGA,mBAAa,KAAK,CAAC,GAAG,MAAM,EAAE,QAAQ,EAAE,KAAK;AAC7C,YAAM,UAAU,aAAa,IAAI,CAAC,MAAM,EAAE,OAAO;AAGjD,WAAK,WAAY,WAAW,KAAK,MAAM;AAEvC,WAAK;AAAA,QACH;AAAA,QACA,cAAc,QAAQ,MAAM,qBAAqB,QAAQ,OAAO,CAAC,MAAM,EAAE,GAAG,EAAE,MAAM;AAAA,MACtF;AAEA,aAAO;AAAA,IACT,SAAS,OAAO;AACd,YAAM,UAAU,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AACrE,WAAK,IAAI,SAAS,mBAAmB,OAAO,EAAE;AAC9C,YAAM,IAAI,kBAAkB,6BAA6B,OAAO,IAAI;AAAA,QAClE,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,MAAc,mBACZ,UACA,OACqD;AACrD,UAAM,SAAS,MAAM,UAAU,KAAK;AAAA,MAClC,QAAQ;AAAA,MACR,cAAc,KAAK,kBAAkB;AAAA,MACrC,YAAY,KAAK,gBAAgB,QAAQ;AAAA,MACzC,cAAc;AAAA,MACd,eAAe,KAAK;AAAA,MACpB,YAAY,KAAK;AAAA,MACjB,aAAa,KAAK;AAAA,MAClB,aAAa,KAAK;AAAA,MAClB,WAAW,KAAK;AAAA,MAChB,OAAO;AAAA,IACT,CAAC;AAGD,SAAK,WAAW,OAAO,KAAK;AAG5B,QAAI,OAAO,OAAO,QAAQ,WAAW,SAAS,QAAQ;AACpD,WAAK;AAAA,QACH;AAAA,QACA,gBAAgB,OAAO,OAAO,QAAQ,MAAM,gBAAgB,SAAS,MAAM;AAAA,MAE7E;AAAA,IACF;AAGA,UAAM,aAAa,IAAI,IAAI,SAAS,IAAI,CAAC,MAAM,CAAC,EAAE,OAAO,EAAE,IAAI,CAAC,CAAC;AAEjE,WAAO,OAAO,OAAO,QAAQ,IAAI,CAAC,eAAe;AAG/C,YAAM,kBAAkB,SAAS,WAAW,KAAK;AACjD,YAAM,gBAAgB,iBAAiB,SAAS,WAAW;AAC3D,YAAM,WAAW,WAAW,IAAI,aAAa,KAAK;AAClD,YAAM,WAAW,KAAK,uBAAuB,UAAU,WAAW,GAAG;AAErE,aAAO;AAAA,QACL,OAAO;AAAA,QACP,SAAS;AAAA,UACP;AAAA,UACA,KAAK;AAAA,QACP;AAAA,MACF;AAAA,IACF,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYQ,uBACN,UACA,cACoB;AACpB,QAAI,CAAC,aAAc,QAAO;AAE1B,QAAI,aAAa,SAAS,QAAQ,YAAY;AAE9C,QAAI,eAAe,IAAI;AAErB,YAAM,gBAAgB,SAAS,YAAY;AAC3C,YAAM,WAAW,aAAa,YAAY;AAC1C,mBAAa,cAAc,QAAQ,QAAQ;AAE3C,UAAI,eAAe,IAAI;AAErB,eAAO,SAAS,UAAU,YAAY,aAAa,aAAa,MAAM;AAAA,MACxE;AAEA,aAAO;AAAA,IACT;AAGA,WAAO,SAAS,UAAU,YAAY,aAAa,aAAa,MAAM;AAAA,EACxE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOU,kBAAkB,OAA2B,SAAiB;AACtE,UAAM,QACJ,SAAS,UACL,qLACA;AAEN,WAAO;AAAA;AAAA,EAET,KAAK;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA4CL;AAAA;AAAA;AAAA;AAAA,EAKU,gBACR,UACQ;AACR,UAAM,cAAc,SACjB,IAAI,CAAC,MAAM,IAAI,EAAE,KAAK,KAAK,EAAE,IAAI,EAAE,EACnC,KAAK,IAAI;AAEZ,WAAO;AAAA;AAAA,EAET,WAAW;AAAA;AAAA;AAA
A;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUX;AAAA;AAAA;AAAA;AAAA,EAKQ,sBAAsB,SAAyB;AACrD,WAAO;AAAA;AAAA,GAER,OAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAqBR;AACF;AAKO,IAAM,oBAAN,cAAgC,MAAM;AAAA,EAC3C,YAAY,SAAiB,SAAwB;AACnD,UAAM,SAAS,OAAO;AACtB,SAAK,OAAO;AAAA,EACd;AACF;;;ACnZO,IAAM,sBAAN,MAAM,6BAA4B,MAAM;AAAA,EAC7C,YAAY,SAAiB,SAAwB;AACnD,UAAM,SAAS,OAAO;AACtB,SAAK,OAAO;AAAA,EACd;AAAA;AAAA;AAAA;AAAA,EAKA,OAAO,gBAAgB,OAAwB;AAC7C,WAAO,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AAAA,EAC9D;AAAA;AAAA;AAAA;AAAA,EAKA,OAAO,UAAU,SAAiB,OAAqC;AACrE,WAAO,IAAI;AAAA,MACT,GAAG,OAAO,KAAK,qBAAoB,gBAAgB,KAAK,CAAC;AAAA,MACzD,EAAE,OAAO,MAAM;AAAA,IACjB;AAAA,EACF;AACF;;;ACbA,IAAAC,MAAoB;AACpB,IAAAC,QAAsB;AACtB,IAAAC,cAAkB;AAQX,IAAK,cAAL,kBAAKC,iBAAL;AAEL,EAAAA,aAAA,sBAAmB;AAEnB,EAAAA,aAAA,kBAAe;AAEf,EAAAA,aAAA,YAAS;AAET,EAAAA,aAAA,aAAU;AARA,SAAAA;AAAA,GAAA;AA4CL,IAAM,kBAAN,cAA8B,mBAAmB;AAAA;AAAA,EAErC,cAAc;AAAA,EACd,sBAAsB;AAAA,EACtB,iBAAiB;AAAA,EAElC,YACE,QACA,OACA,YACA,aAAqB,GACrB,eACA,YACA,aACA;AACA;AAAA,MACE;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,EAAE,YAAY,YAAY;AAAA,MAC1B;AAAA,MACA,cAAc,IAAI,wBAA6B;AAAA,IACjD;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,MAAM,YAGT;AACD,SAAK,IAAI,QAAQ,gCAAgC;AAGjD,UAAM,QAAQ,KAAK,aAAa,UAAU;AAC1C,QAAI,MAAM,WAAW,GAAG;AACtB,WAAK,IAAI,QAAQ,gBAAgB;AACjC,YAAM,aAAa,KAAK,iBAAiB,UAAU;AACnD,WAAK,WAAW,UAAU;AAC1B,aAAO;AAAA,QACL,cAAc,CAAC;AAAA,QACf,OAAO,CAAC,UAAU;AAAA,MACpB;AAAA,IACF;AAEA,UAAM,aAAa,KAAK,aAAa,KAAK;AAC1C,SAAK;AAAA,MACH;AAAA,MACA,SAAS,WAAW,MAAM,yBAAyB,MAAM,MAAM;AAAA,IACjE;AAGA,UAAM,eAA0C,CAAC;AACjD,UAAM,YAAkC,CAAC;AAEzC,aAAS,IAAI,GAAG,IAAI,WAAW,QAAQ,KAAK;AAC1C,YAAM,QAAQ,WAAW,CAAC;AAC1B,WAAK;AAAA,QACH;AAAA,QACA,oBAAoB,IAAI,CAAC,IAAI,WAAW,MAAM,KAAK,MAAM,QAAQ,MAAM;AAAA,MACzE;AAEA,YAAM,cAAc,MAAM,KAAK,aAAa,OAAO,OAAO,KAAK,KAAK;AACpE,aAAO,OAAO,cAAc,YAAY,YAAY;AACpD,gBAAU,KAAK,GAAG,YAAY,KAAK;AAAA,IACrC;AAGA,eAAW,SAAS,WAAW;AAC7B,WAAK,WAAW,KAAK;AAAA,IACvB;AAGA,SAAK,YAAY,YAAY;AAE7B,SAAK;AAAA,MACH;AAAA,MACA,cAAc,OAAO,KAAK,YAAY,EAAE,MAAM;AAAA,IAChD;AAEA,WAAO,EAAE,cAAc,OAAO,UAAU;AAAA,EAC1C;AAAA;AAAA;AAAA;AAAA,EAKQ,aAAa,YAA4C;AAC/D,UAAM,WAAW,OAAO,KAAK,WAAW,KAAK,EAC1C,IAAI,MAAM,EACV,OAAO,CAAC,MAAM,CAAC,OAAO,MAAM,CAAC,CAAC,EAC9B,KAAK,CAAC,GAAG,MAAM,IAAI,CAAC;AAEvB,WAAO,SAAS,IAAI,CAAC,QAAQ,WAAW,MAAM,OAAO,GAAG,CAAC,CAAC;AAAA,EAC5D;AAAA;AAAA;AAAA;AAAA,EAKQ,aAAa,OAAuC;AAC1D,UAAM,SAA0B,CAAC;AACjC,QAAI,eAAqC;AAEzC,eAAW,QAAQ,OAAO;AACxB,YAAM,UAAU,KAAK,cAAc,KAAK,KAAK,OAAO,KAAK,KAAK,MAAM;AAEpE,UAAI,CAAC,gBAAgB,aAAa,YAAY,SAAS;AAErD,uBAAe,EAAE,SAAS,SAAS,CAAC,KAAK,OAAO,EAAE;AAClD,eAAO,KAAK,YAAY;AAAA,MAC1B,OAAO;AAEL,qBAAa,QAAQ,KAAK,KAAK,OAAO;AAAA,MACxC;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKQ,cAAc,OAAe,QAAwB;AAC3D,UAAM,eAAe,KAAK,MAAM,QAAQ,KAAK,cAAc;AAC3D,UAAM,gBAAgB,KAAK,MAAM,SAAS,KAAK,cAAc;AAC7D,WAAO,GAAG,YAAY,IAAI,aAAa;AAAA,EACzC;AAAA;AAAA;AAAA;AAAA,EAKA,MAAc,aACZ,OACA,OACA,OAIC;AACD,UAAM,EAAE,QAAQ,IAAI;AACpB,UAAM,YAAkC,CAAC;AAGzC,QAAI,QAAQ,UAAU,KAAK,aAAa;AACtC,WAAK;AAAA,QACH;AAAA,QACA,gBAAgB,QAAQ,MAAM;AAAA,MAChC;AACA,YAAM,SAAS,MAAM,KAAK,qBAAqB,OAAO,SAAS,KAAK;AACpE,gBAAU,KAAK,OAAO,KAAK;AAC3B,aAAO;AAAA,QACL,cAAc,KAAK,aAAa,OAAO,OAAO;AAAA,QAC9C,OAAO;AAAA,MACT;AAAA,IACF;AAGA,UAAM,eAAe,oBAAI,IAAY;AAErC,aAAS,UAAU,GAAG,WAAW,KAAK,qBAAqB,WAAW;AAEpE,YAAM,gBAAgB,KAAK;AAAA,QACzB;AAAA,QACA,KAAK;AAAA,QACL;AAAA,MACF;AAGA,iBAAW,KAAK,eAAe;AAC7B,qBAAa,IAAI,CAAC;AAAA,MACpB;AAEA,WAAK;AAAA,QACH;AAAA,QACA,WAAW,UAAU,CAAC,IAAI,KAAK,sBAAsB,CAAC,oBAAoB,cAAc,KAAK,IAAI,CAAC;AAAA,MACpG;AAGA,YAAM,SAAS,MAAM,KAAK;AAAA,QACxB;AAAA,QACA;AAAA,QACA;AAAA
,MACF;AACA,gBAAU,KAAK,OAAO,KAAK;AAC3B,YAAM,UAAU,OAAO;AAGvB,YAAM,UAAU,KAAK,cAAc,OAAO;AAE1C,UAAI,QAAQ,YAAY,yBAAqB;AAE3C,aAAK;AAAA,UACH;AAAA,UACA,qBAAqB,QAAQ,OAAO,YAAY,QAAQ,MAAM,eAAe,QAAQ,SAAS;AAAA,QAChG;AACA,eAAO;AAAA,UACL,cAAc,KAAK,aAAa,SAAS,OAAO;AAAA,UAChD,OAAO;AAAA,QACT;AAAA,MACF;AAGA,WAAK;AAAA,QACH;AAAA,QACA,qCAAqC,UAAU,CAAC,IAAI,KAAK,sBAAsB,CAAC;AAAA,MAClF;AAAA,IACF;AAGA,UAAM,IAAI;AAAA,MACR,uCAAuC,KAAK,sBAAsB,CAAC,iCAAiC,QAAQ,MAAM;AAAA,IACpH;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,oBACN,SACA,OACA,UAAuB,oBAAI,IAAI,GACrB;AAEV,UAAM,YAAY,QAAQ,OAAO,CAAC,MAAM,CAAC,QAAQ,IAAI,CAAC,CAAC;AAGvD,UAAM,OAAO,UAAU,UAAU,QAAQ,YAAY;AAGrD,UAAM,WAAW,CAAC,GAAG,IAAI;AACzB,aAAS,IAAI,SAAS,SAAS,GAAG,IAAI,GAAG,KAAK;AAC5C,YAAM,IAAI,KAAK,MAAM,KAAK,OAAO,KAAK,IAAI,EAAE;AAC5C,OAAC,SAAS,CAAC,GAAG,SAAS,CAAC,CAAC,IAAI,CAAC,SAAS,CAAC,GAAG,SAAS,CAAC,CAAC;AAAA,IACxD;AAGA,WAAO,SAAS,MAAM,GAAG,KAAK,EAAE,KAAK,CAAC,GAAG,MAAM,IAAI,CAAC;AAAA,EACtD;AAAA;AAAA;AAAA;AAAA,EAKA,MAAc,qBACZ,OACA,SACA,OACiE;AACjE,SAAK,IAAI,QAAQ,cAAc,QAAQ,MAAM,2BAA2B;AAGxE,UAAM,gBAAyD,CAAC;AAEhE,eAAW,UAAU,SAAS;AAC5B,YAAM,OAAO,MAAM,SAAS,CAAC;AAC7B,YAAM,YAAiB,cAAQ,KAAK,YAAY,KAAK,MAAM,GAAG;AAC9D,YAAM,cAAiB,iBAAa,SAAS;AAC7C,YAAM,cAAc,YAAY,SAAS,QAAQ;AACjD,YAAM,WAAW,KAAK,MAAM,YAAY;AAExC,oBAAc,KAAK;AAAA,QACjB,MAAM;AAAA,QACN,OAAO,QAAQ,QAAQ,WAAW,WAAW;AAAA,MAC/C,CAAC;AAAA,IACH;AAGA,UAAM,SAAS,cAAE,OAAO;AAAA,MACtB,OAAO,cACJ;AAAA,QACC,cAAE,OAAO;AAAA,UACP,YAAY,cACT,OAAO,EACP,SAAS,2CAA2C;AAAA,UACvD,aAAa,cACV,OAAO,EACP,SAAS,EACT,SAAS,uCAAuC;AAAA,UACnD,WAAW,cACR,OAAO,EACP,SAAS,EACT;AAAA,YACC;AAAA,UACF;AAAA,QACJ,CAAC;AAAA,MACH,EACC,SAAS,uCAAuC;AAAA,IACrD,CAAC;AAED,QAAI;AACF,YAAM,SAAS,MAAM,UAAU,WAAW;AAAA,QACxC;AAAA,QACA,UAAU;AAAA,UACR;AAAA,YACE,MAAM;AAAA,YACN,SAAS;AAAA,cACP,EAAE,MAAM,QAAQ,MAAM,KAAK,gBAAgB,OAAO,EAAE;AAAA,cACpD,GAAG;AAAA,YACL;AAAA,UACF;AAAA,QACF;AAAA,QACA,cAAc;AAAA,QACd,eAAe,KAAK;AAAA,QACpB,YAAY,KAAK;AAAA,QACjB,aAAa;AAAA,QACb,aAAa,KAAK;AAAA,QAClB,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAGD,YAAM,UAAU,OAAO,OAAO,MAAM,IAAI,CAAC,OAAO;AAAA,QAC9C,WAAW,QAAQ,EAAE,UAAU;AAAA,QAC/B,aAAa,EAAE;AAAA,QACf,WAAW,EAAE;AAAA,MACf,EAAE;AAEF,aAAO,EAAE,SAAS,OAAO,OAAO,MAAM;AAAA,IACxC,SAAS,OAAO;AACd,WAAK,IAAI,SAAS,kCAAkC,KAAK;AACzD,YAAM,oBAAoB;AAAA,QACxB;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,cAAc,SAA0C;AAE9D,UAAM,eAAe,QAAQ,OAAO,CAAC,MAAM,EAAE,gBAAgB,IAAI;AAEjE,QAAI,aAAa,SAAS,GAAG;AAC3B,aAAO,EAAE,SAAS,yBAAqB,QAAQ,GAAG,WAAW,EAAE;AAAA,IACjE;AAGA,iBAAa,KAAK,CAAC,GAAG,MAAM,EAAE,YAAY,EAAE,SAAS;AAGrD,UAAM,WAAW,aAAa,MAAM,CAAC,GAAG,MAAM;AAE5C,UAAI,EAAE,cAAc,QAAQ,EAAE,gBAAgB,EAAE,UAAW,QAAO;AAClE,UAAI,MAAM,EAAG,QAAO;AACpB,YAAM,OAAO,aAAa,IAAI,CAAC;AAC/B,YAAM,mBAAmB,EAAE,YAAY,KAAK;AAC5C,aAAO,EAAE,gBAAgB,KAAK,cAAe;AAAA,IAC/C,CAAC;AAED,QAAI,UAAU;AACZ,YAAM,cAAc,aAAa,CAAC;AAClC,YAAM,SAAS,YAAY,cAAe,YAAY;AACtD,aAAO,EAAE,SAAS,2CAA8B,QAAQ,WAAW,EAAE;AAAA,IACvE;AAKA,UAAM,gBAAgB,aAAa,MAAM,CAAC,GAAG,MAAM;AAEjD,UAAI,EAAE,cAAc,KAAM,QAAO;AACjC,UAAI,EAAE,cAAc,EAAE,cAAe,EAAG,QAAO;AAC/C,UAAI,MAAM,EAAG,QAAO;AAIpB,YAAM,OAAO,aAAa,IAAI,CAAC;AAC/B,YAAM,UAAU,EAAE,YAAY,KAAK;AACnC,YAAM,oBAAoB,UAAU;AACpC,YAAM,kBAAkB,EAAE,cAAe,KAAK;AAC9C,aAAO,oBAAoB;AAAA,IAC7B,CAAC;AAED,QAAI,eAAe;AACjB,YAAM,cAAc,aAAa,CAAC;AAClC,YAAM,SAAS,YAAY,cAAe,YAAY,YAAY;AAClE,aAAO,EAAE,SAAS,mCAA0B,QAAQ,WAAW,EAAE;AAAA,IACnE;AAGA,UAAM,UAAU,aAAa,IAAI,CAAC,MAAM,EAAE,cAAe,EAAE,SAAS;AACpE,UAAM,YAAY,KAAK;AAAA,MACrB,QAAQ,OAAO,CAAC,GAAG,MAAM,IAAI,GAAG,CAAC,IAAI,QAAQ;AAAA,IAC/C;AACA,UAAM,qBAAqB,QAAQ;AAAA,MACjC,CAAC,MAAM,KAAK,IAAI,IAAI,SAAS,KAAK;AAAA,IACpC;AAEA,QAAI,oBAAoB;AACtB,aAAO,EAAE,SAAS,uBAAoB,QAAQ,WAAW,WAAW,EAAE;AAAA,IACxE;AAEA,WAAO,EAAE,SAAS
,yBAAqB,QAAQ,GAAG,WAAW,EAAE;AAAA,EACjE;AAAA;AAAA;AAAA;AAAA,EAKQ,aACN,SACA,SAC2B;AAC3B,UAAM,SAAoC,CAAC;AAE3C,eAAW,aAAa,SAAS;AAC/B,cAAQ,QAAQ,SAAS;AAAA,QACvB,KAAK;AAAA,QACL,KAAK,uBAAoB;AACvB,gBAAM,SAAS,YAAY,QAAQ;AACnC,iBAAO,SAAS,IAAI;AAAA,YAClB,aAAa;AAAA,YACb,WAAW;AAAA,UACb;AACA;AAAA,QACF;AAAA,QAEA,KAAK,mCAA0B;AAC7B,gBAAM,QAAQ,YAAY,IAAI,QAAQ;AACtC,iBAAO,SAAS,IAAI;AAAA,YAClB,aAAa;AAAA,YACb,WAAW,QAAQ;AAAA,UACrB;AACA;AAAA,QACF;AAAA,QAEA;AACE,iBAAO,SAAS,IAAI,EAAE,aAAa,GAAG,WAAW,EAAE;AAAA,MACvD;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKQ,aAAa,SAAoD;AACvE,UAAM,SAAoC,CAAC;AAE3C,eAAW,UAAU,SAAS;AAC5B,UAAI,OAAO,gBAAgB,MAAM;AAC/B,eAAO,OAAO,SAAS,IAAI;AAAA,UACzB,aAAa,OAAO;AAAA,UACpB,WAAW,OAAO,aAAa,OAAO;AAAA,QACxC;AAAA,MACF,OAAO;AACL,eAAO,OAAO,SAAS,IAAI,EAAE,aAAa,GAAG,WAAW,EAAE;AAAA,MAC5D;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKQ,YAAY,cAA+C;AAMjE,SAAK,wBAAwB,YAAY;AACzC,SAAK,qBAAqB,YAAY;AACtC,SAAK,mBAAmB,YAAY;AACpC,SAAK,oBAAoB,YAAY;AAAA,EACvC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYQ,wBACN,cACM;AACN,UAAM,WAAW,OAAO,KAAK,YAAY,EACtC,IAAI,MAAM,EACV,KAAK,CAAC,GAAG,MAAM,IAAI,CAAC;AAEvB,QAAI,SAAS,SAAS,EAAG;AAGzB,UAAM,sBAAsB,KAAK;AAAA,MAC/B;AAAA,MACA;AAAA,IACF;AAEA,QAAI,wBAAwB,QAAQ,uBAAuB,EAAG;AAE9D,UAAM,qBAAqB,SAAS,mBAAmB;AACvD,UAAM,oBAAoB,aAAa,kBAAkB,EAAE;AAI3D,QAAI,cAAc;AAClB,aAAS,IAAI,GAAG,IAAI,qBAAqB,KAAK;AAC5C,YAAM,UAAU,SAAS,CAAC;AAC1B,YAAM,SAAS,aAAa,OAAO,EAAE;AAErC,UAAI,WAAW,EAAG;AAGlB,YAAM,UAAU,qBAAqB;AAGrC,YAAM,gBAAgB,KAAK;AAAA,QACzB,aAAa,kBAAkB;AAAA,MACjC;AACA,YAAM,iBAAiB,gBACnB,oBAAoB,UAAU,IAC9B,oBAAoB;AAIxB,UAAI,SAAS,iBAAiB,IAAI;AAChC,aAAK;AAAA,UACH;AAAA,UACA,yBAAyB,OAAO,IAAI,MAAM,eAAe,cAAc;AAAA,QACzE;AACA,qBAAa,OAAO,IAAI,EAAE,aAAa,GAAG,WAAW,EAAE;AACvD,sBAAc;AAAA,MAChB;AAAA,IACF;AAEA,QAAI,aAAa;AACf,WAAK,IAAI,QAAQ,qDAAqD;AAAA,IACxE;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWQ,wBACN,cACA,UACe;AACf,UAAM,sBAAsB;AAE5B,aACM,WAAW,GACf,YAAY,SAAS,SAAS,qBAC9B,YACA;AACA,UAAI,kBAAkB;AACtB,UAAI,oBAAmC;AAEvC,eAAS,IAAI,GAAG,IAAI,sBAAsB,GAAG,KAAK;AAChD,cAAM,cAAc,SAAS,WAAW,CAAC;AACzC,cAAM,cAAc,SAAS,WAAW,IAAI,CAAC;AAC7C,cAAM,YAAY,aAAa,WAAW;AAC1C,cAAM,YAAY,aAAa,WAAW;AAG1C,YAAI,UAAU,gBAAgB,KAAK,UAAU,gBAAgB,GAAG;AAC9D,4BAAkB;AAClB;AAAA,QACF;AAGA,cAAM,gBAAgB,UAAU,cAAc,UAAU;AACxD,cAAM,eAAe,cAAc;AAGnC,cAAM,gBAAgB,KAAK,mBAAmB,SAAS;AACvD,cAAM,0BAA0B,gBAAgB,IAAI;AACpD,cAAM,WAAW,eAAe;AAEhC,YAAI,sBAAsB,MAAM;AAC9B,8BAAoB;AAAA,QACtB;AAGA,YAAI,kBAAkB,UAAU;AAC9B,4BAAkB;AAClB;AAAA,QACF;AAAA,MACF;AAEA,UAAI,iBAAiB;AACnB,eAAO;AAAA,MACT;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKQ,mBAAmB,OAA2B;AACpD,WACE,MAAM,cAAc,QACpB,MAAM,cAAc,MAAM,eAC1B,MAAM,cAAc,MAAM,cAAc;AAAA,EAE5C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQQ,qBAAqB,cAA+C;AAC1E,UAAM,WAAW,OAAO,KAAK,YAAY,EACtC,IAAI,MAAM,EACV,KAAK,CAAC,GAAG,MAAM,IAAI,CAAC;AAEvB,QAAI,SAAS,SAAS,EAAG;AAEzB,aAAS,IAAI,GAAG,IAAI,SAAS,QAAQ,KAAK;AACxC,YAAM,cAAc,SAAS,IAAI,CAAC;AAClC,YAAM,cAAc,SAAS,CAAC;AAC9B,YAAM,aAAa,aAAa,WAAW,EAAE;AAC7C,YAAM,aAAa,aAAa,WAAW,EAAE;AAG7C,UAAI,eAAe,KAAK,eAAe,EAAG;AAG1C,UACE,aAAa,KACb,aAAa,cACb,aAAa,aAAa,GAC1B;AACA,aAAK;AAAA,UACH;AAAA,UACA,2BAA2B,WAAW,IAAI,UAAU,WAAW,WAAW,IAAI,UAAU;AAAA,QAC1F;AAGA,cAAM,gBAAgB,KAAK;AAAA,UACzB,aAAa,WAAW;AAAA,QAC1B;AAGA,iBAAS,IAAI,IAAI,GAAG,KAAK,GAAG,KAAK;AAC/B,gBAAM,UAAU,SAAS,CAAC;AAC1B,gBAAM,WAAW,cAAc;AAE/B,cAAI,eAAe;AAEjB,kBAAM,sBAAsB,aAAa,WAAW;AAEpD,gBAAI,sBAAsB,GAAG;AAC3B,2BAAa,OAAO,IAAI,EAAE,aAAa,GAAG,WAAW,EAAE;AAAA,YACzD,OAAO;AACL,2BAAa,OAAO,IAAI;AAAA,gBACtB,aAAa;AAAA,gBACb,WAAW,sBAAsB;AAAA,cACnC;AAAA,YACF;AAAA,UACF,OAAO;AAEL,kBAAM,iBAAiB,aAAa;AAEpC,gBAAI,iBAAiB,GAAG;AACtB,2BAAa,OAA
O,IAAI,EAAE,aAAa,GAAG,WAAW,EAAE;AAAA,YACzD,OAAO;AACL,2BAAa,OAAO,IAAI;AAAA,gBACtB,aAAa;AAAA,gBACb,WAAW;AAAA,cACb;AAAA,YACF;AAAA,UACF;AACA,eAAK;AAAA,YACH;AAAA,YACA,oBAAoB,OAAO,OAAO,aAAa,OAAO,EAAE,WAAW;AAAA,UACrE;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,mBAAmB,cAA+C;AACxE,eAAW,CAAC,YAAY,KAAK,KAAK,OAAO,QAAQ,YAAY,GAAG;AAC9D,UAAI,MAAM,cAAc,KAAK,MAAM,YAAY,GAAG;AAChD,aAAK,IAAI,QAAQ,6BAA6B,UAAU,OAAO;AAC/D,qBAAa,OAAO,UAAU,CAAC,IAAI,EAAE,aAAa,GAAG,WAAW,EAAE;AAAA,MACpE;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,oBAAoB,cAA+C;AACzE,UAAM,WAAW,OAAO,KAAK,YAAY,EACtC,IAAI,MAAM,EACV,KAAK,CAAC,GAAG,MAAM,IAAI,CAAC;AAGvB,UAAM,cAAc,SAAS;AAAA,MAC3B,CAAC,MAAM,aAAa,CAAC,EAAE,gBAAgB;AAAA,IACzC;AACA,QAAI,YAAY,WAAW,EAAG;AAG9B,UAAM,kBAAkB,SACrB,OAAO,CAAC,MAAM,aAAa,CAAC,EAAE,cAAc,CAAC,EAC7C,IAAI,CAAC,OAAO;AAAA,MACX,SAAS;AAAA,MACT,QAAQ,aAAa,CAAC,EAAE;AAAA,MACxB,eAAe,KAAK,mBAAmB,aAAa,CAAC,CAAC;AAAA,IACxD,EAAE;AAEJ,QAAI,gBAAgB,SAAS,GAAG;AAC9B,WAAK,IAAI,QAAQ,0CAA0C;AAC3D;AAAA,IACF;AAGA,UAAM,mBAAmB,gBAAgB;AAAA,MACvC,CAAC,MAAM,EAAE;AAAA,IACX,EAAE;AACF,UAAM,gBAAgB,mBAAmB,gBAAgB,SAAS;AAElE,QAAI,eAAe;AAEjB,YAAM,UAAU,gBAAgB,IAAI,CAAC,MAAM,EAAE,SAAS,EAAE,UAAU,CAAC;AACnE,YAAM,YAAY,KAAK;AAAA,QACrB,QAAQ,OAAO,CAAC,GAAG,MAAM,IAAI,GAAG,CAAC,IAAI,QAAQ;AAAA,MAC/C;AAEA,WAAK;AAAA,QACH;AAAA,QACA,eAAe,YAAY,MAAM,4CAA4C,SAAS;AAAA,MACxF;AAEA,iBAAW,WAAW,aAAa;AACjC,cAAM,sBAAsB,UAAU,IAAI;AAE1C,YAAI,sBAAsB,GAAG;AAC3B,eAAK;AAAA,YACH;AAAA,YACA,4BAA4B,OAAO,cAAc,mBAAmB;AAAA,UACtE;AAEA;AAAA,QACF;AAEA,aAAK;AAAA,UACH;AAAA,UACA,gBAAgB,OAAO,UAAU,mBAAmB,IAAI,sBAAsB,CAAC;AAAA,QACjF;AACA,qBAAa,OAAO,IAAI;AAAA,UACtB,aAAa;AAAA,UACb,WAAW,sBAAsB;AAAA,QACnC;AAAA,MACF;AAAA,IACF,OAAO;AAEL,YAAM,UAAU,gBAAgB,IAAI,CAAC,MAAM,EAAE,SAAS,EAAE,OAAO;AAC/D,YAAM,YAAY,KAAK;AAAA,QACrB,QAAQ,OAAO,CAAC,GAAG,MAAM,IAAI,GAAG,CAAC,IAAI,QAAQ;AAAA,MAC/C;AAEA,WAAK;AAAA,QACH;AAAA,QACA,eAAe,YAAY,MAAM,sBAAsB,SAAS;AAAA,MAClE;AAEA,iBAAW,WAAW,aAAa;AACjC,cAAM,iBAAiB,UAAU;AAEjC,YAAI,iBAAiB,GAAG;AACtB,eAAK;AAAA,YACH;AAAA,YACA,4BAA4B,OAAO,cAAc,cAAc;AAAA,UACjE;AACA;AAAA,QACF;AAEA,aAAK,IAAI,QAAQ,gBAAgB,OAAO,UAAU,cAAc,EAAE;AAClE,qBAAa,OAAO,IAAI;AAAA,UACtB,aAAa;AAAA,UACb,WAAW;AAAA,QACb;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKU,oBAA4B;AACpC,WAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAyBT;AAAA;AAAA;AAAA;AAAA,EAKU,gBAAgB,SAA2B;AACnD,WAAO,kBAAkB,QAAQ,MAAM;AAAA,uBACpB,QAAQ,KAAK,IAAI,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAMvC;AACF;;;ACh3BO,IAAe,gBAAf,cAGG,iBAAiB;AAAA;AAAA;AAAA;AAAA,EAIN;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYnB,YACE,QACA,OACA,eACA,SACA,eACA,YACA;AACA,UAAM,QAAQ,OAAO,eAAe,SAAS,eAAe,UAAU;AACtE,SAAK,gBAAgB;AAAA,EACvB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAeA,MAAgB,QACd,QACA,cACA,YACA,OACA,YACyD;AACzD,UAAM,SAAS,MAAM,UAAU,KAAK;AAAA,MAClC;AAAA,MACA;AAAA,MACA;AAAA,MACA,cAAc,KAAK;AAAA,MACnB,eAAe,KAAK;AAAA,MACpB,YAAY,KAAK;AAAA,MACjB,aAAa,KAAK;AAAA,MAClB,aAAa,KAAK;AAAA,MAClB,WAAW,KAAK;AAAA,MAChB;AAAA,IACF,CAAC;AAGD,QAAI,YAAY;AACd,iBAAW,MAAM,OAAO,KAAK;AAAA,IAC/B,OAAO;AACL,WAAK,WAAW,OAAO,KAAK;AAAA,IAC9B;AAEA,WAAO;AAAA,MACL,QAAQ,OAAO;AAAA,MACf,OAAO,OAAO;AAAA,IAChB;AAAA,EACF;AACF;;;AC1GA,IAAAC,cAAkB;AAYX,IAAM,6BAA6B,cAAE,OAAO;AAAA,EACjD,SAAS,cAAE,QAAQ,EAAE,SAAS,2CAA2C;AAAA,EACzE,YAAY,cACT,OAAO,EACP,IAAI,CAAC,EACL,IAAI,CAAC,EACL,SAAS,kCAAkC;AAAA,EAC9C,aAAa,cACV,KAAK,CAAC,YAAY,SAAS,iBAAiB,SAAS,CAAC,EACtD,SAAS,0BAA0B;AAAA,EACtC,sBAAsB,cACnB,OAAO,EACP,SAAS,EACT,SAAS,wDAAwD;AAAA,EACpE,QAAQ,cAAE,OAAO,EAAE,SAAS,8BAA8B;AAC5D,CAAC;AAkCM,IAAM,sBAAN
,cAAkC,cAGvC;AAAA,EACiB;AAAA,EAEjB,YACE,QACA,OACA,SACA,eACA,YACA;AACA;AAAA,MACE;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AACA,SAAK,sBAAsB,SAAS,uBAAuB;AAAA,EAC7D;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,SAAS,UAAgD;AAC7D,SAAK,OAAO;AAAA,MACV,6CAA6C,SAAS,MAAM;AAAA,IAC9D;AAEA,QAAI,CAAC,SAAS,KAAK,GAAG;AACpB,WAAK,OAAO;AAAA,QACV;AAAA,MACF;AACA,aAAO;AAAA,QACL,SAAS;AAAA,QACT,YAAY;AAAA,QACZ,aAAa;AAAA,QACb,kBAAkB;AAAA,QAClB,QAAQ;AAAA,MACV;AAAA,IACF;AAEA,UAAM,EAAE,QAAQ,OAAO,IAAI,MAAM,KAAK;AAAA,MACpC;AAAA,MACA,KAAK,kBAAkB;AAAA,MACvB,KAAK,gBAAgB,QAAQ;AAAA,MAC7B;AAAA,MACA,KAAK;AAAA,IACP;AAEA,SAAK,OAAO;AAAA,MACV,yCAAyC,OAAO,OAAO,iBAAiB,OAAO,WAAW,gBAAgB,OAAO,UAAU;AAAA,IAC7H;AAGA,QAAI,mBAAkC;AACtC,QAAI,OAAO,WAAW,OAAO,cAAc,KAAK,qBAAqB;AACnE,UAAI,OAAO,gBAAgB,YAAY;AACrC,2BAAmB;AAAA,MACrB,WACE,OAAO,gBAAgB,WACvB,OAAO,sBACP;AACA,2BAAmB,OAAO;AAAA,MAC5B;AAAA,IACF;AAEA,WAAO;AAAA,MACL,SAAS,OAAO;AAAA,MAChB,YAAY,OAAO;AAAA,MACnB,aAAa,OAAO;AAAA,MACpB;AAAA,MACA,QAAQ,OAAO;AAAA,IACjB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,QAAQ,QAAsC;AAC5C,WAAO,OAAO,WAAW,OAAO,cAAc,KAAK;AAAA,EACrD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,iBAAiB,QAA4C;AAC3D,WAAO,OAAO;AAAA,EAChB;AAAA;AAAA;AAAA;AAAA,EAKU,oBAA4B;AACpC,WAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAsDT;AAAA;AAAA;AAAA;AAAA,EAKU,gBAAgB,UAA0B;AAClD,WAAO;AAAA;AAAA,EAET,QAAQ;AAAA,EACR;AACF;;;ACvOA,IAAAC,cAAkB;AAOlB,IAAM,8BAA8B,cAAE,OAAO;AAAA,EAC3C,OAAO,cAAE,OAAO,EAAE,IAAI,EAAE,SAAS,yCAAyC;AAAA,EAC1E,SAAS,cAAE,QAAQ,EAAE,SAAS,uCAAuC;AAAA,EACrE,QAAQ,cACL,OAAO,EACP,SAAS,EACT,SAAS,6CAA6C;AAC3D,CAAC;AAKD,IAAM,+BAA+B,cAAE,OAAO;AAAA,EAC5C,SAAS,cAAE,MAAM,2BAA2B;AAC9C,CAAC;AAqCM,IAAM,mBAAN,cAA+B,cAGpC;AAAA,EACA,YACE,QACA,OACA,SACA,eACA,YACA;AACA;AAAA,MACE;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,MAAM,cACJ,UACA,eACA,WACoB;AACpB,SAAK,OAAO;AAAA,MACV,iCAAiC,SAAS,MAAM,6BAA6B,SAAS;AAAA,IACxF;AAEA,QAAI,SAAS,WAAW,cAAc,QAAQ;AAC5C,YAAM,IAAI;AAAA,QACR,kEAAkE,SAAS,MAAM,OAAO,cAAc,MAAM;AAAA,MAC9G;AAAA,IACF;AAEA,QAAI,SAAS,WAAW,GAAG;AACzB,WAAK,OAAO,KAAK,4CAA4C;AAC7D,aAAO,CAAC;AAAA,IACV;AAEA,QAAI,cAAc,GAAG;AAEnB,WAAK,OAAO;AAAA,QACV;AAAA,MACF;AACA,aAAO,IAAI,MAAM,SAAS,MAAM,EAAE,KAAK,IAAI;AAAA,IAC7C;AAEA,QAAI;AAEF,YAAM,eAAe,SAAS,IAAI,CAAC,SAAS,WAAW;AAAA,QACrD;AAAA,QACA;AAAA,QACA,cAAc,cAAc,KAAK;AAAA,MACnC,EAAE;AAGF,YAAM,eAAe,MAAM,eAAe;AAAA,QACxC;AAAA,QACA;AAAA,QACA,OAAO,UAAU,KAAK,sBAAsB,OAAO,KAAK,KAAK;AAAA,MAC/D;AAGA,mBAAa,KAAK,CAAC,GAAG,MAAM,EAAE,QAAQ,EAAE,KAAK;AAC7C,YAAM,UAAU,aAAa,IAAI,CAAC,MAAM,EAAE,OAAO;AAEjD,YAAM,aAAa,QAAQ,OAAO,CAAC,MAAM,CAAC,EAAE;AAC5C,WAAK,OAAO;AAAA,QACV,iCAAiC,UAAU,IAAI,QAAQ,MAAM;AAAA,MAC/D;AAGA,UAAI,KAAK,YAAY;AACnB,aAAK,WAAW,WAAW,KAAK,MAAM;AAAA,MACxC;AAEA,aAAO;AAAA,IACT,SAAS,OAAO;AACd,YAAM,UAAU,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AACrE,WAAK,OAAO,MAAM,yCAAyC,OAAO,EAAE;AACpE,YAAM,IAAI;AAAA,QACR,gCAAgC,OAAO;AAAA,QACvC,EAAE,OAAO,MAAM;AAAA,MACjB;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,MAAc,sBACZ,OACA,OACqD;AACrD,UAAM,SAAS,MAAM,UAAU,KAAK;AAAA,MAClC,QAAQ;AAAA,MACR,cAAc,KAAK,kBAAkB;AAAA,MACrC,YAAY,KAAK,gBAAgB,KAAK;AAAA,MACtC,cAAc;AAAA,MACd,eAAe,KAAK;AAAA,MACpB,YAAY,KAAK;AAAA,MACjB,aAAa,KAAK;AAAA,MAClB,aAAa,KAAK;AAAA,MAClB,WAAW;AAAA,MACX,OAAO;AAAA,IACT,CAAC;AAGD,QAAI,KAAK,YAAY;AACnB,WAAK,WAAW,MAAM,OAAO,KAAK;AAAA,IACpC;AAGA,WAAO,OAAO,OAAO,QAAQ,IA
AI,CAAC,UAAU;AAAA,MAC1C,OAAO,KAAK;AAAA,MACZ,SAAS,KAAK;AAAA,IAChB,EAAE;AAAA,EACJ;AAAA,EAEU,oBAA4B;AACpC,WAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAwET;AAAA,EAEU,gBACR,OACQ;AACR,UAAM,cAAc,MACjB;AAAA,MACC,CAAC,SACC,IAAI,KAAK,KAAK,gBAAgB,KAAK,YAAY,mBAAmB,KAAK,QAAQ,QAAQ,SAAY,IAAI,KAAK,QAAQ,GAAG,MAAM,MAAM;AAAA,IACvI,EACC,KAAK,IAAI;AAEZ,WAAO;AAAA;AAAA,EAET,WAAW;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYX;AACF;AAKO,IAAM,yBAAN,cAAqC,MAAM;AAAA,EAChD,YAAY,SAAiB,SAAwB;AACnD,UAAM,SAAS,OAAO;AACtB,SAAK,OAAO;AAAA,EACd;AACF;;;AC3IO,IAAM,oBAAN,MAAwB;AAAA,EACZ;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACT,cAAc,IAAI,YAAY;AAAA,EAC9B;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA,cAAc;AAAA,EACL,kBAAkB,IAAI,wBAAwB;AAAA,EAE/D,YAAY,SAAmC;AAC7C,SAAK,SAAS,QAAQ;AACtB,SAAK,gBAAgB,QAAQ;AAC7B,SAAK,uBACH,QAAQ,wBAAwB,QAAQ;AAC1C,SAAK,oBAAoB,QAAQ,qBAAqB,QAAQ;AAC9D,SAAK,iBAAiB,QAAQ,kBAAkB,QAAQ;AACxD,SAAK,0BACH,QAAQ,2BAA2B,QAAQ;AAC7C,SAAK,qBACH,QAAQ,sBAAsB,QAAQ;AACxC,SAAK,uBAAuB,QAAQ;AACpC,SAAK,yBAAyB,QAAQ;AACtC,SAAK,4BAA4B,QAAQ;AACzC,SAAK,aAAa,QAAQ,cAAc;AACxC,SAAK,sBAAsB,QAAQ,uBAAuB;AAC1D,SAAK,cAAc,QAAQ;AAAA,EAC7B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOQ,eAAqB;AAC3B,QAAI,KAAK,aAAa,SAAS;AAC7B,YAAM,QAAQ,IAAI,MAAM,iCAAiC;AACzD,YAAM,OAAO;AACb,YAAM;AAAA,IACR;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAwBA,MAAM,QACJ,YACA,UACA,YACgC;AAChC,SAAK,OAAO,KAAK,qDAAqD;AACtE,SAAK,OAAO,KAAK,kCAAkC,QAAQ;AAG3D,SAAK,gBAAgB,MAAM;AAG3B,SAAK,aAAa;AAElB,SAAK,qBAAqB,YAAY,UAAU;AAEhD,UAAM,kBAAkB,KAAK,IAAI;AACjC,UAAM,WAAW,KAAK,wBAAwB,UAAU;AACxD,UAAM,gBAAgB,KAAK,IAAI,IAAI;AACnC,SAAK,OAAO;AAAA,MACV,2CAA2C,aAAa;AAAA,IAC1D;AAGA,SAAK,aAAa;AAElB,UAAM,qBAAqB,KAAK,IAAI;AACpC,UAAM,eAAe,MAAM,KAAK,gBAAgB,UAAU;AAC1D,UAAM,gBAAgB,KAAK,IAAI,IAAI;AACnC,SAAK,OAAO;AAAA,MACV,+CAA+C,aAAa;AAAA,IAC9D;AAGA,SAAK,aAAa;AAElB,UAAM,eAAe,KAAK,IAAI;AAC9B,UAAM,aAAa,MAAM,KAAK,uBAAuB,YAAY,QAAQ;AACzE,UAAM,UAAU,KAAK,IAAI,IAAI;AAC7B,SAAK,OAAO,KAAK,2CAA2C,OAAO,IAAI;AAGvE,SAAK,aAAa;AAElB,UAAM,qBAAqB,KAAK,IAAI;AACpC,UAAM,EAAE,QAAQ,QAAQ,UAAU,IAAI,MAAM,KAAK;AAAA,MAC/C;AAAA,MACA;AAAA,IACF;AACA,UAAM,gBAAgB,KAAK,IAAI,IAAI;AACnC,SAAK,OAAO;AAAA,MACV,gDAAgD,aAAa;AAAA,IAC/D;AAGA,SAAK,aAAa;AAElB,UAAM,oBAAoB,KAAK,IAAI;AACnC,UAAM,WAAW,MAAM,KAAK;AAAA,MAC1B;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AACA,UAAM,eAAe,KAAK,IAAI,IAAI;AAClC,SAAK,OAAO;AAAA,MACV,+CAA+C,YAAY;AAAA,IAC7D;AAEA,UAAM,oBAAoB,KAAK,IAAI;AACnC,UAAM,eAAe,KAAK;AAAA,MACxB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AACA,UAAM,eAAe,KAAK,IAAI,IAAI;AAClC,SAAK,OAAO;AAAA,MACV,8CAA8C,YAAY;AAAA,IAC5D;AAEA,SAAK,OAAO,KAAK,mDAAmD;AAEpE,WAAO;AAAA,MACL,UAAU;AAAA,MACV,OAAO,KAAK,gBAAgB,UAAU;AAAA,IACxC;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOQ,qBACN,YACA,YACM;AACN,SAAK,OAAO,KAAK,gDAAgD;AAEjE,SAAK,OAAO,KAAK,mCAAmC;AACpD,SAAK,cAAc,IAAI,YAAY,KAAK,QAAQ,UAAU;AAE1D,SAAK,OAAO,KAAK,uCAAuC;AACxD,SAAK,kBAAkB,IAAI;AAAA,MACzB,KAAK;AAAA,MACL,KAAK;AAAA,MACL;AAAA,MACA,KAAK;AAAA,MACL,KAAK,sBAAsB,KAAK,gBAAgB;AAAA,MAChD,KAAK;AAAA,MACL,KAAK;AAAA,IACP;AAEA,SAAK,OAAO,KAAK,iCAAiC;AAClD,SAAK,YAAY,IAAI,UAAU,
KAAK,QAAQ,KAAK,WAAW;AAE5D,SAAK,OAAO,KAAK,oCAAoC;AACrD,SAAK,eAAe,IAAI;AAAA,MACtB,KAAK;AAAA,MACL,KAAK;AAAA,MACL;AAAA,QACE,YAAY,KAAK;AAAA,MACnB;AAAA,MACA,KAAK,sBAAsB,KAAK,gBAAgB;AAAA,MAChD,KAAK;AAAA,IACP;AAEA,SAAK,OAAO,KAAK,2CAA2C;AAC5D,SAAK,sBAAsB,IAAI;AAAA,MAC7B,KAAK;AAAA,MACL,KAAK;AAAA,MACL,EAAE,YAAY,KAAK,YAAY,aAAa,KAAK,YAAY;AAAA,MAC7D,KAAK,sBAAsB,KAAK,gBAAgB;AAAA,MAChD,KAAK;AAAA,IACP;AAEA,SAAK,OAAO,KAAK,wCAAwC;AACzD,SAAK,mBAAmB,IAAI;AAAA,MAC1B,KAAK;AAAA,MACL,KAAK;AAAA,MACL,EAAE,YAAY,KAAK,YAAY,aAAa,KAAK,YAAY;AAAA,MAC7D,KAAK,sBAAsB,KAAK,gBAAgB;AAAA,MAChD,KAAK;AAAA,IACP;AAEA,SAAK,OAAO,KAAK,0CAA0C;AAC3D,SAAK,qBAAqB,IAAI;AAAA,MAC5B,KAAK;AAAA,MACL,KAAK;AAAA,MACL;AAAA,MACA,EAAE,YAAY,KAAK,YAAY,aAAa,KAAK,YAAY;AAAA,MAC7D,KAAK,sBAAsB,KAAK,gBAAgB;AAAA,MAChD,KAAK;AAAA,IACP;AAEA,SAAK,OAAO,KAAK,qCAAqC;AACtD,SAAK,gBAAgB,IAAI;AAAA,MACvB,KAAK;AAAA,MACL,KAAK;AAAA,MACL,EAAE,YAAY,KAAK,YAAY,aAAa,KAAK,YAAY;AAAA,MAC7D,KAAK,sBAAsB,KAAK,gBAAgB;AAAA,MAChD,KAAK;AAAA,IACP;AAEA,SAAK,OAAO,KAAK,wCAAwC;AACzD,SAAK,mBAAmB,IAAI,iBAAiB,KAAK,QAAQ,KAAK,WAAW;AAE1E,SAAK,OAAO,KAAK,gDAAgD;AAAA,EACnE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQQ,wBAAwB,YAAuC;AACrE,SAAK,OAAO,KAAK,wDAAwD;AAEzE,UAAM,QAAQ,WAAW,MAAM,IAAI,CAAC,SAAS,KAAK,IAAI;AACtD,UAAM,WAAW,KAAK,YAAY;AAAA,MAChC;AAAA,MACA,KAAK;AAAA,IACP;AAEA,SAAK,OAAO;AAAA,MACV,gCAAgC,SAAS,MAAM,eAAe,MAAM,MAAM;AAAA,IAC5E;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAc,gBACZ,YACoC;AACpC,SAAK,OAAO,KAAK,oDAAoD;AAErE,UAAM,SAAS,MAAM,KAAK,gBAAiB,MAAM,UAAU;AAE3D,UAAM,eAAe,OAAO;AAE5B,SAAK,OAAO;AAAA,MACV,+CAA+C,OAAO,KAAK,YAAY,EAAE,MAAM;AAAA,IACjF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,MAAc,iBACZ,YACA,YAKC;AACD,SAAK,OAAO;AAAA,MACV;AAAA,IACF;AAEA,UAAM,CAAC,QAAQ,MAAM,IAAI,MAAM,QAAQ,IAAI;AAAA,MACzC,KAAK,cAAc,YAAY,UAAU;AAAA,MACzC,KAAK,cAAc,UAAU;AAAA,IAC/B,CAAC;AAED,UAAM,YAAY,KAAK,iBAAiB,UAAU;AAElD,SAAK,OAAO;AAAA,MACV,iCAAiC,OAAO,MAAM,YAAY,OAAO,MAAM,gBAAgB,UAAU,MAAM;AAAA,IACzG;AAEA,WAAO,EAAE,QAAQ,QAAQ,UAAU;AAAA,EACrC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOQ,iBAAiB,YAAkD;AACzE,UAAM,gBAAgB,WAAW,MAAM;AAAA,MACrC,CAAC,SAAS,KAAK,UAAU;AAAA,IAC3B;AACA,SAAK,OAAO;AAAA,MACV,kCAAkC,cAAc,MAAM;AAAA,IACxD;AAEA,UAAM,YAAiC,CAAC;AAExC,eAAW,QAAQ,eAAe;AAChC,UAAI,CAAC,KAAK,YAAY,YAAY,KAAK,IAAI,GAAG;AAC5C;AAAA,MACF;AAEA,YAAM,YAAY,KAAK,OAAO,CAAC,GAAG,WAAW;AAC7C,YAAM,aAAa,KAAK,YAAY,mBAAmB;AAEvD,gBAAU,KAAK;AAAA,QACb,IAAI;AAAA,QACJ,MAAM,KAAK,YAAY,UAAU,KAAK,IAAI;AAAA,QAC1C;AAAA,MACF,CAAC;AAAA,IACH;AAEA,SAAK,OAAO;AAAA,MACV,iCAAiC,UAAU,MAAM;AAAA,IACnD;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOQ,0BACN,UACA,cACA,UACA,QACA,QACA,WACmB;AACnB,SAAK,OAAO,KAAK,qDAAqD;AAEtE,UAAM,eAAkC;AAAA,MACtC;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAEA,SAAK,OAAO;AAAA,MACV,+CAA+C,SAAS,MAAM,cAAc,OAAO,MAAM,YAAY,OAAO,MAAM,YAAY,UAAU,MAAM;AAAA,IAChJ;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYA,MAAc,uBACZ,YACA,gBACqB;AACrB,SAAK,OAAO,KAAK,uCAAuC;AAExD,QAAI,WAA0B;AAG9B,QAAI;AACF,YAAM,UAAU,KAAK,UAAW,KAAK,UAAU;AAC/C,WAAK,OAAO;AAAA,QACV,6CAA6C,QAAQ,SAAS,IAAI,QAAQ,OAAO;AAAA,MACnF;AAGA,iBAAW,kBAAkB,QAAQ,QAAQ,UAAU,KAAK,WAAY;AACxE,WAAK,OAAO;AAAA,QACV,kDAAkD,SAAS,MAAM;AAAA,MACnE;AAGA,YAAM,aAAa,MAAM,KAAK,oBAAqB,SAAS,QAAQ;AACpE,UAAI,CAAC,KAAK,oBAAqB,QAAQ,UAAU,GAAG;AAClD,aAAK,OAAO;AAAA,UACV,8CAA8C,WAAW,MAAM;AAAA,QACjE;AACA,mBAAW;AAAA,MACb,OAAO;AACL,cAAM,gBACJ,KAAK,oBAAqB,iBAAiB,UAAU;AACvD,YAAI,eAAe;AACjB,cAAI,WAAW,gBAAgB,SAAS;AACtC,iBAAK,OAAO;AAAA,cACV,qEAAqE,cAAc,MAAM;AAAA,YAC3F;AAAA,UACF;AACA,qBAAW;AACX,eAAK,OAAO;AAAA,YACV,0DAA0D,WAAW,UAAU;AAAA,UACjF;AAAA,QACF,OAAO;AACL,qBAA
W;AAAA,QACb;AAAA,MACF;AAAA,IACF,SAAS,OAAO;AACd,UAAI,iBAAiB,kBAAkB;AACrC,aAAK,OAAO;AAAA,UACV;AAAA,QACF;AAAA,MACF,OAAO;AACL,cAAM;AAAA,MACR;AAAA,IACF;AAGA,QAAI,CAAC,UAAU;AACb,WAAK,OAAO,KAAK,mDAAmD;AACpE,YAAM,aAAa,OAAO,KAAK,WAAW,KAAK,EAAE;AACjD,iBAAW,MAAM,KAAK,mBAAoB,QAAQ,UAAU;AAE5D,UAAI,CAAC,UAAU;AACb,cAAM,SACJ;AACF,aAAK,OAAO;AAAA,UACV,8CAA8C,MAAM;AAAA,QACtD;AACA,cAAM,IAAI;AAAA,UACR,gDAAgD,MAAM;AAAA,QACxD;AAAA,MACF;AAEA,WAAK,OAAO;AAAA,QACV,sDAAsD,SAAS,MAAM;AAAA,MACvE;AAAA,IACF;AAGA,UAAM,YAAY,MAAM,KAAK,aAAc,QAAQ,QAAQ;AAG3D,SAAK,gBAAgB,MAAM,UAAU,KAAK;AAE1C,QAAI,UAAU,QAAQ,WAAW,GAAG;AAClC,YAAM,SACJ;AACF,WAAK,OAAO,MAAM,8CAA8C,MAAM,EAAE;AACxE,YAAM,IAAI,iBAAiB,GAAG,MAAM,GAAG;AAAA,IACzC;AAEA,SAAK,OAAO;AAAA,MACV,iCAAiC,UAAU,QAAQ,MAAM;AAAA,IAC3D;AAEA,WAAO,UAAU;AAAA,EACnB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,MAAc,wBACZ,cACA,cAC+B;AAC/B,UAAM,kBAAwC,oBAAI,IAAI;AAGtD,UAAM,mBAID,CAAC;AAEN,aAAS,IAAI,GAAG,IAAI,aAAa,QAAQ,KAAK;AAC5C,YAAM,OAAO,aAAa,CAAC;AAC3B,UAAI,SAAS,QAAW;AACtB,yBAAiB,KAAK;AAAA,UACpB,eAAe;AAAA,UACf,eAAe,iBAAiB;AAAA,UAChC;AAAA,QACF,CAAC;AAAA,MACH;AAAA,IACF;AAEA,UAAM,oBAAoB,iBAAiB,IAAI,CAAC,SAAS,KAAK,IAAI;AAGlE,UAAM,iBACJ,kBAAkB,SAAS,IACvB,MAAM,KAAK,cAAe;AAAA,MACxB;AAAA,MACA,KAAK;AAAA,IACP,IACA,CAAC;AAGP,QAAI,wBAAwB;AAC5B,QAAI,sBAAsB;AAE1B,QAAI,eAAe,WAAW,iBAAiB,QAAQ;AACrD,WAAK,OAAO;AAAA,QACV,2DAA2D,YAAY,cACzD,iBAAiB,MAAM,SAAS,eAAe,MAAM;AAAA,MAErE;AAGA,YAAM,YAAY,oBAAI,IAAqB;AAC3C,iBAAW,UAAU,gBAAgB;AACnC,kBAAU,IAAI,OAAO,UAAU,MAAM;AAAA,MACvC;AAGA,YAAM,gBAAyC,CAAC;AAChD,iBAAW,QAAQ,kBAAkB;AACnC,YAAI,UAAU,IAAI,KAAK,IAAI,GAAG;AAC5B,wBAAc,KAAK,IAAI;AAAA,QACzB,OAAO;AACL,eAAK,OAAO;AAAA,YACV,gCAAgC,YAAY,qBAAqB,KAAK,aAAa,MAAM,KAAK,IAAI;AAAA,UACpG;AAAA,QACF;AAAA,MACF;AAGA,YAAM,oBAA+B,CAAC;AACtC,iBAAW,QAAQ,eAAe;AAChC,cAAM,UAAU,UAAU,IAAI,KAAK,IAAI;AACvC,YAAI,SAAS;AACX,4BAAkB,KAAK,OAAO;AAAA,QAChC;AAAA,MACF;AAGA,UAAI,kBAAkB,WAAW,cAAc,QAAQ;AACrD,cAAM,IAAI;AAAA,UACR,yEACe,kBAAkB,MAAM,iBAAiB,cAAc,MAAM;AAAA,QAC9E;AAAA,MACF;AAGA,8BAAwB;AACxB,4BAAsB;AAEtB,WAAK,OAAO;AAAA,QACV,8CAA8C,oBAAoB,MAAM,IAAI,YAAY;AAAA,MAC1F;AAAA,IACF;AAGA,aAAS,IAAI,GAAG,IAAI,oBAAoB,QAAQ,KAAK;AACnD,YAAM,gBAAgB,sBAAsB,CAAC,EAAE;AAC/C,sBAAgB,IAAI,eAAe,oBAAoB,CAAC,CAAC;AAAA,IAC3D;AAGA,QAAI,oBAAoB,SAAS,GAAG;AAClC,YAAM,yBAAyB,sBAAsB;AAAA,QACnD,CAAC,SAAS,KAAK;AAAA,MACjB;AACA,YAAM,oBAAoB,MAAM,KAAK,iBAAkB;AAAA,QACrD;AAAA,QACA;AAAA,QACA,KAAK;AAAA,MACP;AAGA,YAAM,gBAAgB,kBACnB,IAAI,CAAC,SAAS,UAAW,UAAU,KAAK,KAAM,EAC9C,OAAO,CAAC,UAAU,UAAU,EAAE;AAEjC,UAAI,cAAc,SAAS,GAAG;AAC5B,mBAAW,iBAAiB,eAAe;AACzC,gBAAM,cAAc,sBAAsB,aAAa;AACvD,gBAAM,eAAe,YAAY;AACjC,gBAAM,YAAY,oBAAoB,aAAa,EAAE;AACrD,gBAAM,gBAAgB,YAAY;AAClC,eAAK,OAAO;AAAA,YACV,+BAA+B,YAAY,aAAa,aAAa,OAAO,YAAY,mBAAmB,SAAS;AAAA,UACtH;AAAA,QACF;AAGA,YAAI,KAAK,qBAAqB;AAC5B,eAAK,OAAO;AAAA,YACV,iCAAiC,cAAc,MAAM,WAAW,YAAY;AAAA,UAC9E;AAGA,gBAAM,qBAAqB,cAAc;AAAA,YACvC,CAAC,kBAAkB,sBAAsB,aAAa,EAAE;AAAA,UAC1D;AAGA,gBAAM,wBAAwB,IAAI;AAAA,YAChC,KAAK;AAAA,YACL,KAAK;AAAA,YACL;AAAA,cACE,YAAY,KAAK;AAAA,cACjB,eAAe;AAAA,cACf,aAAa,KAAK;AAAA,YACpB;AAAA,YACA;AAAA;AAAA,YACA,KAAK;AAAA,UACP;AAGA,gBAAM,mBAAmB,MAAM,sBAAsB;AAAA,YACnD;AAAA,YACA;AAAA;AAAA,UACF;AAGA,mBAAS,IAAI,GAAG,IAAI,cAAc,QAAQ,KAAK;AAC7C,kBAAM,gBAAgB,cAAc,CAAC;AACrC,kBAAM,gBACJ,sBAAsB,aAAa,EAAE;AACvC,4BAAgB,IAAI,eAAe,iBAAiB,CAAC,CAAC;AAAA,UACxD;AAEA,eAAK,OAAO;AAAA,YACV,gCAAgC,iBAAiB,MAAM,IAAI,YAAY;AAAA,UACzE;AAAA,QACF,OAAO;AACL,eAAK,OAAO;AAAA,YACV,uBAAuB,cAAc,MAAM,IAAI,YAAY;AAAA,UAC7D;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOQ,mBACN,UACoB;AACpB,QAAI,CAAC,WAAW,CAAC,GAAG;AAClB,aAAO;AAAA,IACT;AAEA,UAAM
,aAAa,SAAS,CAAC;AAC7B,QAAI,OAAO,eAAe,UAAU;AAClC,aAAO;AAAA,IACT;AAEA,QAAI,KAAK,eAAe,UAAU,YAAY;AAC5C,YAAM,WAAW,KAAK,YAAY,YAAY,WAAW,IAAI;AAC7D,aAAO,UAAU;AAAA,IACnB;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAc,cACZ,YACA,YAC2B;AAC3B,SAAK,OAAO;AAAA,MACV,kCAAkC,WAAW,SAAS,MAAM;AAAA,IAC9D;AAEA,UAAM,SAA2B,CAAC;AAClC,UAAM,eAA0C,CAAC;AAGjD,eAAW,WAAW,WAAW,UAAU;AACzC,YAAM,YAAY,QAAQ,OAAO,CAAC,GAAG,WAAW;AAChD,YAAM,UACJ,KAAK,aAAa,gBAAgB,KAAK,OAAO,OAAO,SAAS,CAAC;AAEjE,YAAM,cAAc,KAAK,mBAAmB,QAAQ,QAAQ;AAC5D,mBAAa,KAAK,WAAW;AAE7B,aAAO,KAAK;AAAA,QACV,IAAI;AAAA,QACJ,MAAM,GAAG,UAAU,iBAAiB,OAAO,MAAM;AAAA,QACjD;AAAA;AAAA,MAEF,CAAC;AAAA,IACH;AAGA,UAAM,kBAAkB,MAAM,KAAK;AAAA,MACjC;AAAA,MACA;AAAA,IACF;AAGA,aAAS,IAAI,GAAG,IAAI,OAAO,QAAQ,KAAK;AACtC,UAAI,gBAAgB,IAAI,CAAC,GAAG;AAC1B,eAAO,CAAC,EAAE,UAAU,gBAAgB,IAAI,CAAC;AAAA,MAC3C;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,MAAc,cACZ,YAC2B;AAC3B,SAAK,OAAO;AAAA,MACV,kCAAkC,WAAW,OAAO,MAAM;AAAA,IAC5D;AAEA,UAAM,SAA2B,CAAC;AAClC,UAAM,eAA0C,CAAC;AAGjD,eAAW,SAAS,WAAW,QAAQ;AACrC,YAAM,YAAY,MAAM,OAAO,CAAC,GAAG,WAAW;AAC9C,YAAM,UACJ,KAAK,aAAa,gBAAgB,KAAK,OAAO,OAAO,SAAS,CAAC;AAGjE,YAAM,OAA+B,MAAM,KAAK,KAAK;AAAA,QAAI,CAAC,QACxD,IAAI,IAAI,CAAC,UAAU;AAAA,UACjB,MAAM,KAAK;AAAA,UACX,SAAS,KAAK,YAAY;AAAA,UAC1B,SAAS,KAAK,YAAY;AAAA,UAC1B,UAAU,KAAK,iBAAiB,KAAK,cAAc;AAAA,QACrD,EAAE;AAAA,MACJ;AAEA,YAAM,cAAc,KAAK,mBAAmB,MAAM,QAAQ;AAC1D,mBAAa,KAAK,WAAW;AAE7B,aAAO,KAAK;AAAA,QACV,IAAI;AAAA,QACJ;AAAA,QACA,SAAS,KAAK;AAAA,QACd,SAAS,KAAK,CAAC,GAAG,UAAU;AAAA,QAC5B;AAAA;AAAA,MAEF,CAAC;AAAA,IACH;AAGA,UAAM,kBAAkB,MAAM,KAAK;AAAA,MACjC;AAAA,MACA;AAAA,IACF;AAGA,aAAS,IAAI,GAAG,IAAI,OAAO,QAAQ,KAAK;AACtC,UAAI,gBAAgB,IAAI,CAAC,GAAG;AAC1B,eAAO,CAAC,EAAE,UAAU,gBAAgB,IAAI,CAAC;AAAA,MAC3C;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAc,gBACZ,YACA,YACA,cACA,QACA,QACA,WACoB;AACpB,SAAK,OAAO,KAAK,4CAA4C;AAG7D,QAAI,WAAW,WAAW,GAAG;AAC3B,YAAM,SAAS;AACf,WAAK,OAAO,MAAM,uBAAuB,MAAM,EAAE;AACjD,YAAM,IAAI,iBAAiB,MAAM;AAAA,IACnC;AAGA,UAAM,WAAW,KAAK,iBAAkB;AAAA,MACtC;AAAA,MACA,WAAW;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAEA,SAAK,OAAO;AAAA,MACV,iCAAiC,SAAS,MAAM;AAAA,IAClD;AAEA,WAAO;AAAA,EACT;AACF;","names":["path","fs","path","import_zod","import_zod","results","fs","path","import_zod","PagePattern","import_zod","import_zod"]}
|
|
1
|
+
{"version":3,"sources":["../src/index.ts","../../shared/src/utils/batch-processor.ts","../../shared/src/utils/spawn-utils.ts","../../shared/src/utils/llm-caller.ts","../../shared/src/utils/llm-token-usage-aggregator.ts","../src/utils/ref-resolver.ts","../src/utils/id-generator.ts","../src/utils/text-cleaner.ts","../src/utils/markdown-converter.ts","../src/converters/chapter-converter.ts","../src/extractors/toc-extract-error.ts","../src/extractors/toc-validator.ts","../src/extractors/toc-finder.ts","../src/extractors/toc-extractor.ts","../src/core/base-llm-component.ts","../src/core/text-llm-component.ts","../src/extractors/vision-toc-extractor.ts","../src/core/vision-llm-component.ts","../src/parsers/caption-parser.ts","../src/parsers/page-range-parse-error.ts","../src/parsers/page-range-parser.ts","../src/validators/base-validator.ts","../src/validators/toc-content-validator.ts","../src/validators/caption-validator.ts","../src/document-processor.ts"],"sourcesContent":["/**\n * @heripo/document-processor\n *\n * Document preprocessing package that converts DoclingDocument to ProcessedDocument.\n *\n * ## Key Features\n *\n * - TOC extraction and structuring (LLM-based)\n * - Page range mapping (Vision LLM)\n * - Text cleaning and sentence merging (lightweight LLM)\n * - Caption parsing (lightweight LLM)\n * - Chapter tree construction\n * - Image/table conversion\n *\n * @packageDocumentation\n */\n\nexport { DocumentProcessor } from './document-processor';\nexport { BaseLLMComponent, TextLLMComponent, VisionLLMComponent } from './core';\nexport type {\n BaseLLMComponentOptions,\n VisionLLMComponentOptions,\n ImageContent,\n} from './core';\nexport type { DocumentProcessorOptions } from './document-processor';\nexport type { TocEntry, TocAreaResult, PageSizeGroup } from './types';\nexport {\n CaptionParser,\n CaptionParseError,\n PageRangeParser,\n PagePattern,\n PageRangeParseError,\n} from './parsers';\nexport type { CaptionParserOptions } from './parsers';\nexport {\n TocFinder,\n TocExtractor,\n TocExtractError,\n TocNotFoundError,\n TocParseError,\n TOC_KEYWORDS,\n CONTINUATION_MARKERS,\n PAGE_NUMBER_PATTERN,\n TocEntrySchema,\n TocResponseSchema,\n VisionTocExtractor,\n VisionTocExtractionSchema,\n} from './extractors';\nexport type {\n TocFinderOptions,\n TocExtractorOptions,\n TocResponse,\n VisionTocExtractorOptions,\n VisionTocExtractionResult,\n} from './extractors';\nexport {\n BaseValidator,\n TocContentValidator,\n TocContentValidationSchema,\n CaptionValidator,\n CaptionValidationError,\n} from './validators';\nexport type {\n BaseValidatorOptions,\n TocContentValidatorOptions,\n TocContentValidationResult,\n CaptionValidatorOptions,\n} from './validators';\nexport { ChapterConverter } from './converters';\n","/**\n * BatchProcessor - Batch processing utility\n *\n * Provides functionality to split large arrays into batches for parallel processing.\n */\nexport class BatchProcessor {\n /**\n * Splits an array into batches of specified size.\n *\n * @param items - Array to split\n * @param batchSize - Size of each batch\n * @returns Array of batches\n *\n * @example\n * ```typescript\n * const items = [1, 2, 3, 4, 5];\n * const batches = BatchProcessor.createBatches(items, 2);\n * // [[1, 2], [3, 4], [5]]\n * ```\n */\n static createBatches<T>(items: T[], batchSize: number): T[][] {\n const batches: T[][] = [];\n for (let i = 0; i < items.length; i += batchSize) {\n batches.push(items.slice(i, i + batchSize));\n }\n return batches;\n }\n\n /**\n * Splits an array into batches 
and executes async function in parallel.\n *\n * @param items - Array to process\n * @param batchSize - Size of each batch\n * @param processFn - Async function to process each batch\n * @returns Flattened array of processed results\n *\n * @example\n * ```typescript\n * const texts = ['a', 'b', 'c', 'd', 'e'];\n * const results = await BatchProcessor.processBatch(\n * texts,\n * 2,\n * async (batch) => {\n * return batch.map(t => t.toUpperCase());\n * }\n * );\n * // ['A', 'B', 'C', 'D', 'E']\n * ```\n */\n static async processBatch<T, R>(\n items: T[],\n batchSize: number,\n processFn: (batch: T[]) => Promise<R[]>,\n ): Promise<R[]> {\n const batches = this.createBatches(items, batchSize);\n const results = await Promise.all(batches.map((batch) => processFn(batch)));\n return results.flat();\n }\n\n /**\n * Splits an array into batches and executes sync function in parallel.\n *\n * @param items - Array to process\n * @param batchSize - Size of each batch\n * @param processFn - Sync function to process each batch\n * @returns Flattened array of processed results\n *\n * @example\n * ```typescript\n * const numbers = [1, 2, 3, 4, 5];\n * const results = BatchProcessor.processBatchSync(\n * numbers,\n * 2,\n * (batch) => batch.map(n => n * 2)\n * );\n * // [2, 4, 6, 8, 10]\n * ```\n */\n static processBatchSync<T, R>(\n items: T[],\n batchSize: number,\n processFn: (batch: T[]) => R[],\n ): R[] {\n const batches = this.createBatches(items, batchSize);\n const results = batches.map((batch) => processFn(batch));\n return results.flat();\n }\n}\n","import type { SpawnOptions } from 'node:child_process';\n\nimport { spawn } from 'node:child_process';\n\n/**\n * Result of a spawn operation\n */\nexport interface SpawnResult {\n stdout: string;\n stderr: string;\n code: number;\n}\n\n/**\n * Extended spawn options with output capture control\n */\nexport interface SpawnAsyncOptions extends SpawnOptions {\n /**\n * Whether to capture stdout (default: true)\n */\n captureStdout?: boolean;\n\n /**\n * Whether to capture stderr (default: true)\n */\n captureStderr?: boolean;\n}\n\n/**\n * Execute a command asynchronously and return the result\n *\n * Eliminates the repetitive Promise wrapper pattern used throughout\n * DoclingEnvironment for spawn operations.\n *\n * @param command - The command to execute\n * @param args - Arguments to pass to the command\n * @param options - Spawn options with optional output capture control\n * @returns Promise resolving to stdout, stderr, and exit code\n *\n * @example\n * ```typescript\n * // Simple usage\n * const result = await spawnAsync('python3', ['--version']);\n * console.log(result.stdout); // \"Python 3.12.0\"\n *\n * // With options\n * const result = await spawnAsync('pip', ['install', 'package'], {\n * cwd: '/path/to/venv',\n * captureStderr: true,\n * });\n * ```\n */\nexport function spawnAsync(\n command: string,\n args: string[],\n options: SpawnAsyncOptions = {},\n): Promise<SpawnResult> {\n const {\n captureStdout = true,\n captureStderr = true,\n ...spawnOptions\n } = options;\n\n return new Promise((resolve, reject) => {\n const proc = spawn(command, args, spawnOptions);\n\n let stdout = '';\n let stderr = '';\n\n if (captureStdout && proc.stdout) {\n proc.stdout.on('data', (data) => {\n stdout += data.toString();\n });\n }\n\n if (captureStderr && proc.stderr) {\n proc.stderr.on('data', (data) => {\n stderr += data.toString();\n });\n }\n\n proc.on('close', (code) => {\n resolve({ stdout, stderr, code: code ?? 
0 });\n });\n\n proc.on('error', reject);\n });\n}\n","import type { z } from 'zod';\n\nimport { type LanguageModel, Output, generateText } from 'ai';\n\n/**\n * Configuration for LLM API call with retry and fallback support\n */\nexport interface LLMCallConfig<TSchema extends z.ZodType> {\n /**\n * Zod schema for response validation\n */\n schema: TSchema;\n\n /**\n * System prompt for LLM\n */\n systemPrompt: string;\n\n /**\n * User prompt for LLM\n */\n userPrompt: string;\n\n /**\n * Primary model for the call (required)\n */\n primaryModel: LanguageModel;\n\n /**\n * Fallback model for retry after primary model exhausts maxRetries (optional)\n */\n fallbackModel?: LanguageModel;\n\n /**\n * Maximum retry count per model (default: 3)\n */\n maxRetries: number;\n\n /**\n * Temperature for generation (optional, 0-1)\n */\n temperature?: number;\n\n /**\n * Abort signal for cancellation support\n */\n abortSignal?: AbortSignal;\n\n /**\n * Component name for tracking (e.g., 'TocExtractor', 'PageRangeParser')\n */\n component: string;\n\n /**\n * Phase name for tracking (e.g., 'extraction', 'validation', 'sampling')\n */\n phase: string;\n}\n\n/**\n * Configuration for LLM vision call with message format\n */\nexport interface LLMVisionCallConfig<TSchema extends z.ZodType> {\n /**\n * Zod schema for response validation\n */\n schema: TSchema;\n\n /**\n * Messages array for vision LLM (instead of systemPrompt/userPrompt)\n */\n messages: Array<{ role: 'user' | 'assistant'; content: any[] | string }>;\n\n /**\n * Primary model for the call (required)\n */\n primaryModel: LanguageModel;\n\n /**\n * Fallback model for retry after primary model exhausts maxRetries (optional)\n */\n fallbackModel?: LanguageModel;\n\n /**\n * Maximum retry count per model (default: 3)\n */\n maxRetries: number;\n\n /**\n * Temperature for generation (optional, 0-1)\n */\n temperature?: number;\n\n /**\n * Abort signal for cancellation support\n */\n abortSignal?: AbortSignal;\n\n /**\n * Component name for tracking (e.g., 'TocExtractor', 'PageRangeParser')\n */\n component: string;\n\n /**\n * Phase name for tracking (e.g., 'extraction', 'validation', 'sampling')\n */\n phase: string;\n}\n\n/**\n * Token usage information with model tracking\n */\nexport interface ExtendedTokenUsage {\n component: string;\n phase: string;\n model: 'primary' | 'fallback';\n modelName: string;\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n}\n\n/**\n * Result of LLM call including usage information\n */\nexport interface LLMCallResult<T> {\n output: T;\n usage: ExtendedTokenUsage;\n usedFallback: boolean;\n}\n\n/**\n * Base execution configuration for LLM calls\n */\ninterface ExecutionConfig {\n primaryModel: LanguageModel;\n fallbackModel?: LanguageModel;\n abortSignal?: AbortSignal;\n component: string;\n phase: string;\n}\n\n/**\n * LLMCaller - Centralized LLM API caller with retry and fallback support\n *\n * Wraps AI SDK's generateText with enhanced retry strategy:\n * 1. Try primary model with maxRetries\n * 2. If all attempts fail and fallbackModel provided, try fallback with maxRetries\n * 3. 
Return usage data with model type indicator\n *\n * @example\n * ```typescript\n * const result = await LLMCaller.call({\n * schema: MyZodSchema,\n * systemPrompt: 'You are a helpful assistant',\n * userPrompt: 'Extract the TOC from this markdown',\n * primaryModel: openai('gpt-5'),\n * fallbackModel: anthropic('claude-opus-4-5'),\n * maxRetries: 3,\n * component: 'TocExtractor',\n * phase: 'extraction',\n * });\n *\n * console.log(result.output); // Parsed result\n * console.log(result.usage); // Token usage with model info\n * console.log(result.usedFallback); // Whether fallback was used\n * ```\n */\nexport class LLMCaller {\n /**\n * Extract model name from LanguageModel object\n *\n * Attempts to get model ID from various possible fields in the LanguageModel object.\n */\n private static extractModelName(model: LanguageModel): string {\n const modelObj = model as Record<string, unknown>;\n\n // Try common field names\n if (typeof modelObj.modelId === 'string') return modelObj.modelId;\n if (typeof modelObj.id === 'string') return modelObj.id;\n if (typeof modelObj.model === 'string') return modelObj.model;\n if (typeof modelObj.name === 'string') return modelObj.name;\n\n // Fallback: return object representation\n return String(model);\n }\n\n /**\n * Build usage information from response\n */\n private static buildUsage(\n config: ExecutionConfig,\n modelName: string,\n response: {\n usage?: {\n inputTokens?: number;\n outputTokens?: number;\n totalTokens?: number;\n };\n },\n usedFallback: boolean,\n ): ExtendedTokenUsage {\n return {\n component: config.component,\n phase: config.phase,\n model: usedFallback ? 'fallback' : 'primary',\n modelName,\n inputTokens: response.usage?.inputTokens ?? 0,\n outputTokens: response.usage?.outputTokens ?? 0,\n totalTokens: response.usage?.totalTokens ?? 0,\n };\n }\n\n /**\n * Execute LLM call with fallback support\n *\n * Common execution logic for both text and vision calls.\n */\n private static async executeWithFallback<TOutput>(\n config: ExecutionConfig,\n generateFn: (model: LanguageModel) => Promise<{\n output: TOutput;\n usage?: {\n inputTokens?: number;\n outputTokens?: number;\n totalTokens?: number;\n };\n }>,\n ): Promise<LLMCallResult<TOutput>> {\n const primaryModelName = this.extractModelName(config.primaryModel);\n\n // Attempt 1: Try primary model\n try {\n const response = await generateFn(config.primaryModel);\n\n return {\n output: response.output,\n usage: this.buildUsage(config, primaryModelName, response, false),\n usedFallback: false,\n };\n } catch (primaryError) {\n // If aborted, don't try fallback - re-throw immediately\n if (config.abortSignal?.aborted) {\n throw primaryError;\n }\n\n // If no fallback model, throw immediately\n if (!config.fallbackModel) {\n throw primaryError;\n }\n\n // Attempt 2: Try fallback model\n const fallbackModelName = this.extractModelName(config.fallbackModel);\n const response = await generateFn(config.fallbackModel);\n\n return {\n output: response.output,\n usage: this.buildUsage(config, fallbackModelName, response, true),\n usedFallback: true,\n };\n }\n }\n\n /**\n * Call LLM with retry and fallback support\n *\n * Retry Strategy:\n * 1. Try primary model up to maxRetries times\n * 2. If all fail and fallbackModel provided, try fallback up to maxRetries times\n * 3. 
Throw error if all attempts exhausted\n *\n * @template TOutput - Output type from schema validation\n * @param config - LLM call configuration\n * @returns Result with parsed object and usage information\n * @throws Error if all retry attempts fail\n */\n static async call<TOutput = unknown>(\n config: LLMCallConfig<z.ZodType<TOutput>>,\n ): Promise<LLMCallResult<TOutput>> {\n return this.executeWithFallback(config, (model) =>\n generateText({\n model,\n output: Output.object({\n schema: config.schema,\n }),\n system: config.systemPrompt,\n prompt: config.userPrompt,\n temperature: config.temperature,\n maxRetries: config.maxRetries,\n abortSignal: config.abortSignal,\n }),\n );\n }\n\n /**\n * Call LLM for vision tasks with message format support\n *\n * Same retry and fallback logic as call(), but using message format instead of system/user prompts.\n *\n * @template TOutput - Output type from schema validation\n * @param config - LLM vision call configuration\n * @returns Result with parsed object and usage information\n * @throws Error if all retry attempts fail\n */\n static async callVision<TOutput = unknown>(\n config: LLMVisionCallConfig<z.ZodType<TOutput>>,\n ): Promise<LLMCallResult<TOutput>> {\n return this.executeWithFallback(config, (model) =>\n generateText({\n model,\n output: Output.object({\n schema: config.schema,\n }),\n messages: config.messages,\n temperature: config.temperature,\n maxRetries: config.maxRetries,\n abortSignal: config.abortSignal,\n }),\n );\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\n\nimport type { ExtendedTokenUsage } from './llm-caller';\n\n/**\n * Token usage totals\n */\nexport interface TokenUsage {\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n}\n\n/**\n * Format token usage as a human-readable string\n *\n * @param usage - Token usage object with input, output, and total counts\n * @returns Formatted string like \"1500 input, 300 output, 1800 total\"\n */\nfunction formatTokens(usage: TokenUsage): string {\n return `${usage.inputTokens} input, ${usage.outputTokens} output, ${usage.totalTokens} total`;\n}\n\n/**\n * Aggregated token usage for a specific component\n */\ninterface ComponentAggregate {\n component: string;\n phases: Record<\n string,\n {\n primary?: {\n modelName: string;\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n fallback?: {\n modelName: string;\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n total: {\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n }\n >;\n total: {\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n}\n\n/**\n * LLMTokenUsageAggregator - Aggregates token usage across all LLM calls\n *\n * Unlike LLMTokenUsageTracker which logs immediately after each component,\n * this aggregator collects usage data from all components and logs a comprehensive\n * summary at the end of document processing.\n *\n * Tracks usage by:\n * - Component (TocExtractor, PageRangeParser, etc.)\n * - Phase (extraction, validation, sampling, etc.)\n * - Model (primary vs fallback)\n *\n * @example\n * ```typescript\n * const aggregator = new LLMTokenUsageAggregator();\n *\n * // Track usage from each LLM call\n * aggregator.track({\n * component: 'TocExtractor',\n * phase: 'extraction',\n * model: 'primary',\n * modelName: 'gpt-5',\n * inputTokens: 1500,\n * outputTokens: 300,\n * totalTokens: 1800,\n * });\n *\n * aggregator.track({\n * component: 'PageRangeParser',\n * phase: 
'sampling',\n * model: 'fallback',\n * modelName: 'claude-opus-4-5',\n * inputTokens: 2000,\n * outputTokens: 100,\n * totalTokens: 2100,\n * });\n *\n * // Log comprehensive summary\n * aggregator.logSummary(logger);\n * // Outputs:\n * // [DocumentProcessor] Token usage summary:\n * // TocExtractor:\n * // - extraction (primary: gpt-5): 1500 input, 300 output, 1800 total\n * // TocExtractor total: 1500 input, 300 output, 1800 total\n * // PageRangeParser:\n * // - sampling (fallback: claude-opus-4-5): 2000 input, 100 output, 2100 total\n * // PageRangeParser total: 2000 input, 100 output, 2100 total\n * // Grand total: 3500 input, 400 output, 3900 total\n * ```\n */\nexport class LLMTokenUsageAggregator {\n private usage: Record<string, ComponentAggregate> = {};\n\n /**\n * Track token usage from an LLM call\n *\n * @param usage - Extended token usage with component/phase/model information\n */\n track(usage: ExtendedTokenUsage): void {\n // Initialize component if not seen before\n if (!this.usage[usage.component]) {\n this.usage[usage.component] = {\n component: usage.component,\n phases: {},\n total: {\n inputTokens: 0,\n outputTokens: 0,\n totalTokens: 0,\n },\n };\n }\n\n const component = this.usage[usage.component];\n\n // Initialize phase if not seen before\n if (!component.phases[usage.phase]) {\n component.phases[usage.phase] = {\n total: {\n inputTokens: 0,\n outputTokens: 0,\n totalTokens: 0,\n },\n };\n }\n\n const phase = component.phases[usage.phase];\n\n // Track by model type\n if (usage.model === 'primary') {\n if (!phase.primary) {\n phase.primary = {\n modelName: usage.modelName,\n inputTokens: 0,\n outputTokens: 0,\n totalTokens: 0,\n };\n }\n\n phase.primary.inputTokens += usage.inputTokens;\n phase.primary.outputTokens += usage.outputTokens;\n phase.primary.totalTokens += usage.totalTokens;\n } else if (usage.model === 'fallback') {\n if (!phase.fallback) {\n phase.fallback = {\n modelName: usage.modelName,\n inputTokens: 0,\n outputTokens: 0,\n totalTokens: 0,\n };\n }\n\n phase.fallback.inputTokens += usage.inputTokens;\n phase.fallback.outputTokens += usage.outputTokens;\n phase.fallback.totalTokens += usage.totalTokens;\n }\n\n // Update phase total\n phase.total.inputTokens += usage.inputTokens;\n phase.total.outputTokens += usage.outputTokens;\n phase.total.totalTokens += usage.totalTokens;\n\n // Update component total\n component.total.inputTokens += usage.inputTokens;\n component.total.outputTokens += usage.outputTokens;\n component.total.totalTokens += usage.totalTokens;\n }\n\n /**\n * Get aggregated usage grouped by component\n *\n * @returns Array of component aggregates with phase breakdown\n */\n getByComponent(): ComponentAggregate[] {\n return Object.values(this.usage);\n }\n\n /**\n * Get token usage report in structured JSON format\n *\n * Converts internal usage data to external TokenUsageReport format suitable\n * for serialization and reporting. 
The report includes component breakdown,\n * phase-level details, and both primary and fallback model usage.\n *\n * @returns Structured token usage report with components and total\n */\n getReport(): {\n components: Array<{\n component: string;\n phases: Array<{\n phase: string;\n primary?: {\n modelName: string;\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n fallback?: {\n modelName: string;\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n total: {\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n }>;\n total: {\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n }>;\n total: TokenUsage;\n } {\n const components: Array<{\n component: string;\n phases: Array<{\n phase: string;\n primary?: {\n modelName: string;\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n fallback?: {\n modelName: string;\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n total: {\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n }>;\n total: {\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n }> = [];\n\n for (const component of Object.values(this.usage)) {\n const phases: Array<{\n phase: string;\n primary?: {\n modelName: string;\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n fallback?: {\n modelName: string;\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n total: {\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n }> = [];\n\n for (const [phaseName, phaseData] of Object.entries(component.phases)) {\n const phaseReport: {\n phase: string;\n primary?: {\n modelName: string;\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n fallback?: {\n modelName: string;\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n total: {\n inputTokens: number;\n outputTokens: number;\n totalTokens: number;\n };\n } = {\n phase: phaseName,\n total: {\n inputTokens: phaseData.total.inputTokens,\n outputTokens: phaseData.total.outputTokens,\n totalTokens: phaseData.total.totalTokens,\n },\n };\n\n if (phaseData.primary) {\n phaseReport.primary = {\n modelName: phaseData.primary.modelName,\n inputTokens: phaseData.primary.inputTokens,\n outputTokens: phaseData.primary.outputTokens,\n totalTokens: phaseData.primary.totalTokens,\n };\n }\n\n if (phaseData.fallback) {\n phaseReport.fallback = {\n modelName: phaseData.fallback.modelName,\n inputTokens: phaseData.fallback.inputTokens,\n outputTokens: phaseData.fallback.outputTokens,\n totalTokens: phaseData.fallback.totalTokens,\n };\n }\n\n phases.push(phaseReport);\n }\n\n components.push({\n component: component.component,\n phases,\n total: {\n inputTokens: component.total.inputTokens,\n outputTokens: component.total.outputTokens,\n totalTokens: component.total.totalTokens,\n },\n });\n }\n\n const totalUsage = this.getTotalUsage();\n\n return {\n components,\n total: {\n inputTokens: totalUsage.inputTokens,\n outputTokens: totalUsage.outputTokens,\n totalTokens: totalUsage.totalTokens,\n },\n };\n }\n\n /**\n * Get total usage across all components and phases\n *\n * @returns Aggregated token usage totals\n */\n getTotalUsage(): TokenUsage {\n let totalInput = 0;\n let totalOutput = 0;\n let totalTokens = 0;\n\n for (const component of Object.values(this.usage)) {\n totalInput += component.total.inputTokens;\n totalOutput += 
component.total.outputTokens;\n totalTokens += component.total.totalTokens;\n }\n\n return {\n inputTokens: totalInput,\n outputTokens: totalOutput,\n totalTokens: totalTokens,\n };\n }\n\n /**\n * Log comprehensive token usage summary\n *\n * Outputs usage grouped by component, with phase and model breakdown.\n * Shows primary and fallback token usage separately for each phase.\n * Call this once at the end of document processing.\n *\n * @param logger - Logger instance for output\n */\n logSummary(logger: LoggerMethods): void {\n const components = this.getByComponent();\n\n if (components.length === 0) {\n logger.info('[DocumentProcessor] No token usage to report');\n return;\n }\n\n logger.info('[DocumentProcessor] Token usage summary:');\n logger.info('');\n\n let grandInputTokens = 0;\n let grandOutputTokens = 0;\n let grandTotalTokens = 0;\n let grandPrimaryInputTokens = 0;\n let grandPrimaryOutputTokens = 0;\n let grandPrimaryTotalTokens = 0;\n let grandFallbackInputTokens = 0;\n let grandFallbackOutputTokens = 0;\n let grandFallbackTotalTokens = 0;\n\n for (const component of components) {\n logger.info(`${component.component}:`);\n\n for (const [phase, phaseData] of Object.entries(component.phases)) {\n logger.info(` - ${phase}:`);\n\n // Show primary model usage\n if (phaseData.primary) {\n logger.info(\n ` primary (${phaseData.primary.modelName}): ${formatTokens(phaseData.primary)}`,\n );\n grandPrimaryInputTokens += phaseData.primary.inputTokens;\n grandPrimaryOutputTokens += phaseData.primary.outputTokens;\n grandPrimaryTotalTokens += phaseData.primary.totalTokens;\n }\n\n // Show fallback model usage\n if (phaseData.fallback) {\n logger.info(\n ` fallback (${phaseData.fallback.modelName}): ${formatTokens(phaseData.fallback)}`,\n );\n grandFallbackInputTokens += phaseData.fallback.inputTokens;\n grandFallbackOutputTokens += phaseData.fallback.outputTokens;\n grandFallbackTotalTokens += phaseData.fallback.totalTokens;\n }\n\n // Show phase subtotal\n logger.info(` subtotal: ${formatTokens(phaseData.total)}`);\n }\n\n logger.info(\n ` ${component.component} total: ${formatTokens(component.total)}`,\n );\n logger.info('');\n\n grandInputTokens += component.total.inputTokens;\n grandOutputTokens += component.total.outputTokens;\n grandTotalTokens += component.total.totalTokens;\n }\n\n // Show grand total with primary/fallback breakdown\n logger.info('--- Summary ---');\n if (grandPrimaryTotalTokens > 0) {\n logger.info(\n `Primary total: ${formatTokens({\n inputTokens: grandPrimaryInputTokens,\n outputTokens: grandPrimaryOutputTokens,\n totalTokens: grandPrimaryTotalTokens,\n })}`,\n );\n }\n if (grandFallbackTotalTokens > 0) {\n logger.info(\n `Fallback total: ${formatTokens({\n inputTokens: grandFallbackInputTokens,\n outputTokens: grandFallbackOutputTokens,\n totalTokens: grandFallbackTotalTokens,\n })}`,\n );\n }\n logger.info(\n `Grand total: ${formatTokens({\n inputTokens: grandInputTokens,\n outputTokens: grandOutputTokens,\n totalTokens: grandTotalTokens,\n })}`,\n );\n }\n\n /**\n * Reset all tracked usage\n *\n * Call this at the start of a new document processing run.\n */\n reset(): void {\n this.usage = {};\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type {\n DoclingDocument,\n DoclingGroupItem,\n DoclingPictureItem,\n DoclingTableItem,\n DoclingTextItem,\n} from '@heripo/model';\n\n/**\n * Resolves $ref references in DoclingDocument to actual objects.\n *\n * DoclingDocument uses JSON references (e.g., \"#/texts/0\") to link nodes.\n * 
This class builds an index for quick lookups of texts, pictures, tables, and groups.\n */\nexport class RefResolver {\n private readonly logger: LoggerMethods;\n private readonly textMap: Map<string, DoclingTextItem>;\n private readonly pictureMap: Map<string, DoclingPictureItem>;\n private readonly tableMap: Map<string, DoclingTableItem>;\n private readonly groupMap: Map<string, DoclingGroupItem>;\n\n constructor(logger: LoggerMethods, doc: DoclingDocument) {\n this.logger = logger;\n this.logger.info('[RefResolver] Initializing reference resolver...');\n\n this.textMap = this.buildIndex(doc.texts, 'texts');\n this.pictureMap = this.buildIndex(doc.pictures, 'pictures');\n this.tableMap = this.buildIndex(doc.tables, 'tables');\n this.groupMap = this.buildIndex(doc.groups, 'groups');\n\n this.logger.info(\n `[RefResolver] Indexed ${this.textMap.size} texts, ${this.pictureMap.size} pictures, ${this.tableMap.size} tables, ${this.groupMap.size} groups`,\n );\n }\n\n /**\n * Build an index mapping self_ref to the actual item\n */\n private buildIndex<T extends { self_ref: string }>(\n items: T[],\n _prefix: string,\n ): Map<string, T> {\n const map = new Map<string, T>();\n for (const item of items) {\n map.set(item.self_ref, item);\n }\n return map;\n }\n\n /**\n * Resolve a $ref string to the actual item\n * @param ref - Reference string (e.g., \"#/texts/0\")\n * @returns The resolved item, or null if not found\n */\n resolve(\n ref: string,\n ):\n | DoclingTextItem\n | DoclingPictureItem\n | DoclingTableItem\n | DoclingGroupItem\n | null {\n // Extract the collection type from the reference\n // Format: \"#/texts/0\" or \"#/pictures/5\" etc.\n const match = ref.match(/^#\\/(\\w+)\\//);\n if (!match) {\n this.logger.warn(`[RefResolver] Invalid reference format: ${ref}`);\n return null;\n }\n\n const collection = match[1];\n\n if (collection === 'texts') {\n const result = this.textMap.get(ref) ?? null;\n if (!result) {\n this.logger.warn(`[RefResolver] Text reference not found: ${ref}`);\n }\n return result;\n }\n if (collection === 'pictures') {\n const result = this.pictureMap.get(ref) ?? null;\n if (!result) {\n this.logger.warn(`[RefResolver] Picture reference not found: ${ref}`);\n }\n return result;\n }\n if (collection === 'tables') {\n const result = this.tableMap.get(ref) ?? null;\n if (!result) {\n this.logger.warn(`[RefResolver] Table reference not found: ${ref}`);\n }\n return result;\n }\n if (collection === 'groups') {\n const result = this.groupMap.get(ref) ?? null;\n if (!result) {\n this.logger.warn(`[RefResolver] Group reference not found: ${ref}`);\n }\n return result;\n }\n\n this.logger.warn(`[RefResolver] Unknown collection type: ${collection}`);\n return null;\n }\n\n /**\n * Resolve a text reference\n * @param ref - Reference string (e.g., \"#/texts/0\")\n * @returns The resolved text item, or null if not found\n */\n resolveText(ref: string): DoclingTextItem | null {\n return this.textMap.get(ref) ?? null;\n }\n\n /**\n * Resolve a picture reference\n * @param ref - Reference string (e.g., \"#/pictures/0\")\n * @returns The resolved picture item, or null if not found\n */\n resolvePicture(ref: string): DoclingPictureItem | null {\n return this.pictureMap.get(ref) ?? null;\n }\n\n /**\n * Resolve a table reference\n * @param ref - Reference string (e.g., \"#/tables/0\")\n * @returns The resolved table item, or null if not found\n */\n resolveTable(ref: string): DoclingTableItem | null {\n return this.tableMap.get(ref) ?? 
null;\n }\n\n /**\n * Resolve a group reference\n * @param ref - Reference string (e.g., \"#/groups/0\")\n * @returns The resolved group item, or null if not found\n */\n resolveGroup(ref: string): DoclingGroupItem | null {\n return this.groupMap.get(ref) ?? null;\n }\n\n /**\n * Resolve multiple references at once\n * @param refs - Array of reference objects with $ref property\n * @returns Array of resolved items (null for unresolved references)\n */\n resolveMany(\n refs: Array<{ $ref: string }>,\n ): Array<\n | DoclingTextItem\n | DoclingPictureItem\n | DoclingTableItem\n | DoclingGroupItem\n | null\n > {\n return refs.map((ref) => this.resolve(ref.$ref));\n }\n}\n","/**\n * Generates sequential IDs for different types of items.\n *\n * IDs are formatted as: `{prefix}-{number}` where number is zero-padded to 3 digits.\n * - Chapters: ch-001, ch-002, ...\n * - Images: img-001, img-002, ...\n * - Tables: tbl-001, tbl-002, ...\n *\n * Each type maintains its own independent counter.\n */\nexport class IdGenerator {\n private chapterCounter = 0;\n private imageCounter = 0;\n private tableCounter = 0;\n private footnoteCounter = 0;\n\n /**\n * Generate a chapter ID\n * @returns A chapter ID in the format \"ch-001\"\n */\n generateChapterId(): string {\n this.chapterCounter++;\n return `ch-${this.padNumber(this.chapterCounter)}`;\n }\n\n /**\n * Generate an image ID\n * @returns An image ID in the format \"img-001\"\n */\n generateImageId(): string {\n this.imageCounter++;\n return `img-${this.padNumber(this.imageCounter)}`;\n }\n\n /**\n * Generate a table ID\n * @returns A table ID in the format \"tbl-001\"\n */\n generateTableId(): string {\n this.tableCounter++;\n return `tbl-${this.padNumber(this.tableCounter)}`;\n }\n\n /**\n * Generate a footnote ID\n * @returns A footnote ID in the format \"ftn-001\"\n */\n generateFootnoteId(): string {\n this.footnoteCounter++;\n return `ftn-${this.padNumber(this.footnoteCounter)}`;\n }\n\n /**\n * Reset all counters to zero\n */\n reset(): void {\n this.chapterCounter = 0;\n this.imageCounter = 0;\n this.tableCounter = 0;\n this.footnoteCounter = 0;\n }\n\n /**\n * Get current counter values (for testing/debugging)\n */\n getCounters(): {\n chapter: number;\n image: number;\n table: number;\n footnote: number;\n } {\n return {\n chapter: this.chapterCounter,\n image: this.imageCounter,\n table: this.tableCounter,\n footnote: this.footnoteCounter,\n };\n }\n\n /**\n * Pad a number to 3 digits with leading zeros\n */\n private padNumber(num: number): string {\n return num.toString().padStart(3, '0');\n }\n}\n","import { BatchProcessor } from '@heripo/shared';\n\n/**\n * TextCleaner - Text normalization and cleaning\n *\n * Utility for normalizing text from DoclingDocument.\n * - Whitespace normalization (remove consecutive spaces, clean line breaks)\n * - Special character removal/normalization\n * - Unicode normalization\n * - Batch normalization + filtering\n */\nexport class TextCleaner {\n /**\n * Normalizes text\n * - Converts consecutive spaces/line breaks to single space\n * - Trims leading and trailing spaces\n * - Normalizes special whitespace characters (tabs, non-breaking spaces, etc.)\n */\n static normalize(text: string): string {\n if (!text) return '';\n\n // Unicode normalization (NFC)\n let normalized = text.normalize('NFC');\n\n // Convert special whitespace characters to regular space\n normalized = normalized.replace(/[\\t\\u00A0\\u2000-\\u200B]/g, ' ');\n\n // Convert line breaks to space\n normalized = 
normalized.replace(/[\\r\\n]+/g, ' ');\n\n // Convert consecutive spaces to single space\n normalized = normalized.replace(/\\s+/g, ' ');\n\n // Trim leading and trailing spaces\n normalized = normalized.trim();\n\n return normalized;\n }\n\n /**\n * Clean text starting/ending with punctuation marks\n * - Remove commas/periods at sentence start\n * - Clean spaces and punctuation at sentence end\n */\n static cleanPunctuation(text: string): string {\n if (!text) return '';\n\n // Remove commas/periods at start\n let cleaned = text.replace(/^[,.:;!?]+\\s*/, '');\n\n // Clean spaces at end\n cleaned = cleaned.replace(/\\s+[,.:;!?]*$/, '');\n\n return cleaned;\n }\n\n /**\n * Filter text consisting only of numbers and spaces\n */\n static isValidText(text: string): boolean {\n if (!text) return false;\n const cleaned = this.normalize(text);\n // Invalid if only numbers and spaces\n return !/^\\s*[\\d\\s]*$/.test(cleaned);\n }\n\n /**\n * Batch normalization (for bulk processing)\n */\n static normalizeBatch(texts: string[]): string[] {\n return texts.map((text) => this.normalize(text));\n }\n\n /**\n * Batch filtering (returns only valid text)\n */\n static filterValidTexts(texts: string[]): string[] {\n return texts.filter((text) => this.isValidText(text));\n }\n\n /**\n * Batch normalization + filtering (stage 1 + stage 2 combined)\n *\n * Performs TextCleaner's basic normalization and filtering in batch processing at once.\n * Splits large amounts of text into batches for efficient processing.\n *\n * If batchSize is 0, processes items sequentially without batch processing.\n *\n * @param texts - Original text array\n * @param batchSize - Batch size (default: 10). Set to 0 for sequential processing without batching.\n * @returns Normalized and filtered text array\n *\n * @example\n * ```typescript\n * const rawTexts = [' text 1 ', '123', 'text 2\\n'];\n * const cleaned = TextCleaner.normalizeAndFilterBatch(rawTexts, 10);\n * // ['text 1', 'text 2']\n *\n * // Sequential processing (no batching)\n * const cleanedSequential = TextCleaner.normalizeAndFilterBatch(rawTexts, 0);\n * // ['text 1', 'text 2']\n * ```\n */\n static normalizeAndFilterBatch(\n texts: string[],\n batchSize: number = 10,\n ): string[] {\n if (batchSize === 0) {\n // Sequential processing without BatchProcessor\n const results: string[] = [];\n for (const text of texts) {\n const normalized = this.normalize(text);\n if (this.isValidText(normalized)) {\n results.push(normalized);\n }\n }\n return results;\n }\n\n // Batch processing: normalize then filter for each batch\n return BatchProcessor.processBatchSync(texts, batchSize, (batch) => {\n // Stage 1: Normalize\n const normalized = this.normalizeBatch(batch);\n // Stage 2: Filter\n return this.filterValidTexts(normalized);\n });\n }\n}\n","import type {\n DoclingGroupItem,\n DoclingTableItem,\n DoclingTextItem,\n} from '@heripo/model';\n\nimport type { RefResolver } from './ref-resolver';\n\n/**\n * MarkdownConverter\n *\n * Converts TOC-related groups and tables to Markdown format for LLM processing.\n * Provides static utility methods for conversion.\n */\nexport class MarkdownConverter {\n /**\n * Convert TOC items (groups/tables) to Markdown string\n *\n * @param refs - Array of item references from TocAreaResult\n * @param refResolver - RefResolver for resolving references\n * @returns Markdown string representation of TOC\n */\n static convert(refs: string[], refResolver: RefResolver): string {\n if (refs.length === 0) {\n return '';\n }\n\n const lines: string[] 
= [];\n\n for (const ref of refs) {\n const item = refResolver.resolve(ref);\n if (!item) {\n continue;\n }\n\n // Check if it's a group item\n if ('name' in item && (item.name === 'list' || item.name === 'group')) {\n const groupMarkdown = MarkdownConverter.groupToMarkdown(\n item as DoclingGroupItem,\n refResolver,\n 0,\n );\n if (groupMarkdown) {\n lines.push(groupMarkdown);\n }\n }\n // Check if it's a table item\n else if ('data' in item && 'grid' in (item as DoclingTableItem).data) {\n const tableMarkdown = MarkdownConverter.tableToMarkdown(\n item as DoclingTableItem,\n );\n if (tableMarkdown) {\n lines.push(tableMarkdown);\n }\n }\n // Check if it's a text item\n else if ('text' in item && 'orig' in item) {\n const textMarkdown = MarkdownConverter.textToMarkdown(\n item as DoclingTextItem,\n 0,\n );\n if (textMarkdown) {\n lines.push(textMarkdown);\n }\n }\n }\n\n return lines.join('\\n\\n');\n }\n\n /**\n * Convert a group item to Markdown list format\n *\n * Handles nested lists and preserves hierarchy.\n *\n * @example\n * Output:\n * - Chapter 1 Introduction ..... 1\n * - 1.1 Background ..... 3\n * - 1.2 Objectives ..... 5\n * - Chapter 2 Methodology ..... 10\n */\n static groupToMarkdown(\n group: DoclingGroupItem,\n refResolver: RefResolver,\n indentLevel = 0,\n ): string {\n const lines: string[] = [];\n\n for (const childRef of group.children) {\n const child = refResolver.resolve(childRef.$ref);\n if (!child) {\n continue;\n }\n\n // Handle nested group\n if (\n 'name' in child &&\n (child.name === 'list' || child.name === 'group')\n ) {\n const nestedMarkdown = MarkdownConverter.groupToMarkdown(\n child as DoclingGroupItem,\n refResolver,\n indentLevel + 1,\n );\n if (nestedMarkdown) {\n lines.push(nestedMarkdown);\n }\n }\n // Handle text item\n else if ('text' in child && 'orig' in child) {\n const textMarkdown = MarkdownConverter.textToMarkdown(\n child as DoclingTextItem,\n indentLevel,\n );\n if (textMarkdown) {\n lines.push(textMarkdown);\n }\n }\n }\n\n return lines.join('\\n');\n }\n\n /**\n * Convert a table item to Markdown table format\n *\n * @example\n * Output:\n * | Chapter | Page |\n * |---------|------|\n * | Chapter 1 Introduction | 1 |\n * | Chapter 2 Methodology | 10 |\n */\n static tableToMarkdown(table: DoclingTableItem): string {\n const { grid } = table.data;\n if (!grid || grid.length === 0) {\n return '';\n }\n\n const lines: string[] = [];\n\n // Build rows from grid\n for (let rowIdx = 0; rowIdx < grid.length; rowIdx++) {\n const row = grid[rowIdx];\n if (!row || row.length === 0) {\n continue;\n }\n\n const cells = row.map((cell) =>\n MarkdownConverter.escapeTableCell(cell.text),\n );\n lines.push(`| ${cells.join(' | ')} |`);\n\n // Add separator after header row (first row)\n if (rowIdx === 0) {\n const separator = row.map(() => '---').join(' | ');\n lines.push(`| ${separator} |`);\n }\n }\n\n return lines.join('\\n');\n }\n\n /**\n * Convert a text item to Markdown line\n */\n static textToMarkdown(text: DoclingTextItem, indentLevel = 0): string {\n const content = text.text.trim();\n if (!content) {\n return '';\n }\n\n const indent = MarkdownConverter.getIndent(indentLevel);\n const marker = MarkdownConverter.getListMarker(\n text.enumerated,\n text.marker,\n );\n\n return `${indent}${marker}${content}`;\n }\n\n /**\n * Generate list marker based on enumeration and marker\n */\n private static getListMarker(enumerated?: boolean, marker?: string): string {\n if (marker) {\n return `${marker} `;\n }\n if (enumerated === true) {\n return '1. 
';\n }\n if (enumerated === false) {\n return '- ';\n }\n return '- ';\n }\n\n /**\n * Generate indent string (2 spaces per level)\n */\n private static getIndent(level: number): string {\n return ' '.repeat(level);\n }\n\n /**\n * Escape special characters in table cell content\n */\n private static escapeTableCell(text: string): string {\n return text.replace(/\\|/g, '\\\\|').replace(/\\n/g, ' ').trim();\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type {\n Chapter,\n DoclingTextItem,\n PageRange,\n ProcessedFootnote,\n ProcessedImage,\n ProcessedTable,\n TextBlock,\n} from '@heripo/model';\n\nimport type { TocEntry } from '../types';\nimport type { IdGenerator } from '../utils';\n\nimport { TextCleaner } from '../utils';\n\n/**\n * Flattened chapter with TOC page number for range calculation\n */\ninterface FlatChapter {\n chapter: Chapter;\n tocPageNo: number;\n}\n\n/**\n * Chapter page range for content assignment\n */\ninterface ChapterRange {\n startPage: number;\n endPage: number;\n}\n\n/**\n * ChapterConverter\n *\n * Converts TocEntry[] to Chapter[] with text blocks, images, and tables.\n *\n * ## Conversion Process\n *\n * 1. Create Front Matter chapter (ch-000) for pre-TOC content\n * 2. Build chapter tree from TocEntry[] (recursive)\n * 3. Calculate page ranges for each chapter\n * 4. Assign text blocks to chapters based on page ranges\n * 5. Link images/tables to chapters based on page ranges\n *\n * ## Page Assignment Strategy\n *\n * Uses \"start page first\" strategy: resources are assigned to the chapter\n * whose startPage is the largest value that is still <= the resource's page.\n *\n * ## Front Matter\n *\n * A special chapter (ch-000) is always created to hold content that appears\n * before the first TOC entry (e.g., cover, preface, table of contents itself).\n */\nexport class ChapterConverter {\n private static readonly FRONT_MATTER_ID = 'ch-000';\n private static readonly FRONT_MATTER_TITLE = 'Front Matter';\n\n private readonly logger: LoggerMethods;\n private readonly idGenerator: IdGenerator;\n\n constructor(logger: LoggerMethods, idGenerator: IdGenerator) {\n this.logger = logger;\n this.idGenerator = idGenerator;\n }\n\n /**\n * Convert TocEntry[] to Chapter[]\n *\n * @param tocEntries - Table of contents entries\n * @param textItems - DoclingDocument.texts (with prov for page numbers)\n * @param pageRangeMap - PDF page to actual page mapping\n * @param images - Converted images\n * @param tables - Converted tables\n * @param footnotes - Converted footnotes\n * @returns Converted chapters with text blocks and resource references\n */\n convert(\n tocEntries: TocEntry[],\n textItems: DoclingTextItem[],\n pageRangeMap: Record<number, PageRange>,\n images: ProcessedImage[],\n tables: ProcessedTable[],\n footnotes: ProcessedFootnote[],\n ): Chapter[] {\n this.logger.info('[ChapterConverter] Starting chapter conversion...');\n\n // Step 1: Create Front Matter chapter\n const frontMatter = this.createFrontMatterChapter();\n\n // Step 2: Build chapter tree from TOC\n const tocChapters = this.buildChapterTree(tocEntries);\n this.logger.info(\n `[ChapterConverter] Built ${tocChapters.length} TOC chapters + Front Matter`,\n );\n\n // Step 3: Combine all chapters (Front Matter first)\n const allChapters = [frontMatter, ...tocChapters];\n\n // Step 4: Calculate page ranges\n const flatChapters = this.flattenChapters(allChapters);\n const chapterRanges = this.calculatePageRanges(flatChapters, tocEntries);\n this.logger.info(\n 
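// chapterRanges includes the synthetic Front Matter range (ch-000)\n      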
`[ChapterConverter] Calculated ranges for ${chapterRanges.size} chapters`,\n );\n\n // Step 5: Convert and assign text blocks\n const textBlocks = this.convertTextBlocks(textItems, pageRangeMap);\n this.assignTextBlocks(allChapters, textBlocks, chapterRanges, pageRangeMap);\n this.logger.info(\n `[ChapterConverter] Assigned ${textBlocks.length} text blocks`,\n );\n\n // Step 6: Link resources\n this.linkResources(\n allChapters,\n images,\n tables,\n footnotes,\n chapterRanges,\n pageRangeMap,\n );\n this.logger.info(\n `[ChapterConverter] Linked ${images.length} images, ${tables.length} tables, and ${footnotes.length} footnotes`,\n );\n\n return allChapters;\n }\n\n /**\n * Create Front Matter chapter for pre-TOC content\n */\n private createFrontMatterChapter(): Chapter {\n return {\n id: ChapterConverter.FRONT_MATTER_ID,\n originTitle: ChapterConverter.FRONT_MATTER_TITLE,\n title: ChapterConverter.FRONT_MATTER_TITLE,\n pageNo: 1,\n level: 1,\n textBlocks: [],\n imageIds: [],\n tableIds: [],\n footnoteIds: [],\n };\n }\n\n /**\n * Build chapter tree from TocEntry[]\n * Recursively processes children\n */\n private buildChapterTree(entries: TocEntry[]): Chapter[] {\n return entries.map((entry) => {\n const chapterId = this.idGenerator.generateChapterId();\n\n const chapter: Chapter = {\n id: chapterId,\n originTitle: entry.title,\n title: TextCleaner.normalize(entry.title),\n pageNo: entry.pageNo,\n level: entry.level,\n textBlocks: [],\n imageIds: [],\n tableIds: [],\n footnoteIds: [],\n };\n\n if (entry.children && entry.children.length > 0) {\n chapter.children = this.buildChapterTree(entry.children);\n }\n\n return chapter;\n });\n }\n\n /**\n * Flatten chapter tree for page range calculation\n * Preserves original TOC page numbers\n */\n private flattenChapters(chapters: Chapter[]): FlatChapter[] {\n const result: FlatChapter[] = [];\n\n const flatten = (chapterList: Chapter[]): void => {\n for (const chapter of chapterList) {\n result.push({\n chapter,\n tocPageNo: chapter.pageNo,\n });\n\n if (chapter.children && chapter.children.length > 0) {\n flatten(chapter.children);\n }\n }\n };\n\n flatten(chapters);\n return result;\n }\n\n /**\n * Calculate page range for each chapter\n * Uses next chapter's start page as end boundary\n *\n * Front Matter (ch-000) gets special handling:\n * - startPage: 1\n * - endPage: first TOC entry's page - 1 (or 0 if TOC starts at page 1)\n */\n private calculatePageRanges(\n flatChapters: FlatChapter[],\n tocEntries: TocEntry[],\n ): Map<string, ChapterRange> {\n const ranges = new Map<string, ChapterRange>();\n\n if (flatChapters.length === 0) {\n return ranges;\n }\n\n // Find first TOC page (minimum page number from TOC entries)\n const firstTocPage =\n tocEntries.length > 0\n ? Math.min(...tocEntries.map((e) => e.pageNo))\n : Number.MAX_SAFE_INTEGER;\n\n // Filter out Front Matter for sorting (it's handled separately)\n const tocChapters = flatChapters.filter(\n (fc) => fc.chapter.id !== ChapterConverter.FRONT_MATTER_ID,\n );\n\n // Sort by TOC page number\n const sorted = [...tocChapters].sort((a, b) => a.tocPageNo - b.tocPageNo);\n\n // Set Front Matter range (always page 1 to firstTocPage - 1)\n ranges.set(ChapterConverter.FRONT_MATTER_ID, {\n startPage: 1,\n endPage: firstTocPage - 1,\n });\n\n // Set ranges for TOC chapters\n for (let i = 0; i < sorted.length; i++) {\n const current = sorted[i];\n const next = sorted[i + 1];\n\n ranges.set(current.chapter.id, {\n startPage: current.tocPageNo,\n endPage: next ? 
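/* the next chapter's start page caps this one; the last chapter stays open-ended */ 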
next.tocPageNo - 1 : Number.MAX_SAFE_INTEGER,\n });\n }\n\n return ranges;\n }\n\n /**\n * Valid labels for text blocks\n * Only these labels are included in chapter text blocks\n */\n private static readonly VALID_TEXT_LABELS = new Set([\n 'text',\n 'section_header',\n 'list_item',\n ]);\n\n /**\n * Check if text item has a picture parent\n * Items with parent.$ref starting with \"#/pictures/\" are excluded\n */\n private static hasPictureParent(item: DoclingTextItem): boolean {\n const parentRef = item.parent?.$ref;\n return typeof parentRef === 'string' && parentRef.startsWith('#/pictures/');\n }\n\n /**\n * Convert text items to text blocks\n * Filters by label (text, section_header, list_item), excludes picture children,\n * and extracts page numbers from prov\n */\n private convertTextBlocks(\n textItems: DoclingTextItem[],\n _pageRangeMap: Record<number, PageRange>,\n ): TextBlock[] {\n return textItems\n .filter(\n (item) =>\n ChapterConverter.VALID_TEXT_LABELS.has(item.label) &&\n !ChapterConverter.hasPictureParent(item) &&\n TextCleaner.isValidText(item.text),\n )\n .map((item) => {\n const pdfPageNo = item.prov?.[0]?.page_no ?? 1;\n return {\n text: TextCleaner.normalize(item.text),\n pdfPageNo,\n };\n });\n }\n\n /**\n * Convert PDF page number to actual document page number\n * Falls back to pdfPageNo if mapping is missing\n */\n private pdfPageToActualPage(\n pdfPageNo: number,\n pageRangeMap: Record<number, PageRange>,\n ): number {\n const range = pageRangeMap[pdfPageNo];\n if (!range) {\n // Fallback: assume 1:1 mapping\n return pdfPageNo;\n }\n // Return start page for the actual document page\n return range.startPageNo;\n }\n\n /**\n * Find chapter ID for a given actual page number\n * Uses \"start page first\" strategy\n */\n private findChapterForPage(\n actualPageNo: number,\n chapterRanges: Map<string, ChapterRange>,\n ): string | null {\n let bestMatch: string | null = null;\n let bestStartPage = -1;\n\n for (const [chapterId, range] of chapterRanges) {\n // Check if page is within range\n if (actualPageNo >= range.startPage && actualPageNo <= range.endPage) {\n // Use \"start page first\" strategy: prefer chapter with largest startPage <= actualPageNo\n if (range.startPage > bestStartPage) {\n bestStartPage = range.startPage;\n bestMatch = chapterId;\n }\n }\n }\n\n return bestMatch;\n }\n\n /**\n * Assign text blocks to chapters based on page ranges\n */\n private assignTextBlocks(\n chapters: Chapter[],\n textBlocks: TextBlock[],\n chapterRanges: Map<string, ChapterRange>,\n pageRangeMap: Record<number, PageRange>,\n ): void {\n // Build chapter map for O(1) lookup\n const chapterMap = this.buildChapterMap(chapters);\n\n for (const textBlock of textBlocks) {\n const actualPageNo = this.pdfPageToActualPage(\n textBlock.pdfPageNo,\n pageRangeMap,\n );\n const chapterId = this.findChapterForPage(actualPageNo, chapterRanges);\n\n if (chapterId && chapterMap.has(chapterId)) {\n chapterMap.get(chapterId)!.textBlocks.push(textBlock);\n }\n }\n }\n\n /**\n * Link images, tables, and footnotes to chapters based on page ranges\n */\n private linkResources(\n chapters: Chapter[],\n images: ProcessedImage[],\n tables: ProcessedTable[],\n footnotes: ProcessedFootnote[],\n chapterRanges: Map<string, ChapterRange>,\n pageRangeMap: Record<number, PageRange>,\n ): void {\n // Build chapter map for O(1) lookup\n const chapterMap = this.buildChapterMap(chapters);\n\n // Link images\n for (const image of images) {\n const actualPageNo = this.pdfPageToActualPage(\n image.pdfPageNo,\n 
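// pages missing from pageRangeMap fall back to a 1:1 mapping\n        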
pageRangeMap,\n );\n const chapterId = this.findChapterForPage(actualPageNo, chapterRanges);\n\n if (chapterId && chapterMap.has(chapterId)) {\n chapterMap.get(chapterId)!.imageIds.push(image.id);\n }\n }\n\n // Link tables\n for (const table of tables) {\n const actualPageNo = this.pdfPageToActualPage(\n table.pdfPageNo,\n pageRangeMap,\n );\n const chapterId = this.findChapterForPage(actualPageNo, chapterRanges);\n\n if (chapterId && chapterMap.has(chapterId)) {\n chapterMap.get(chapterId)!.tableIds.push(table.id);\n }\n }\n\n // Link footnotes\n for (const footnote of footnotes) {\n const actualPageNo = this.pdfPageToActualPage(\n footnote.pdfPageNo,\n pageRangeMap,\n );\n const chapterId = this.findChapterForPage(actualPageNo, chapterRanges);\n\n if (chapterId && chapterMap.has(chapterId)) {\n chapterMap.get(chapterId)!.footnoteIds.push(footnote.id);\n }\n }\n }\n\n /**\n * Build flat chapter map for O(1) lookup\n */\n private buildChapterMap(chapters: Chapter[]): Map<string, Chapter> {\n const map = new Map<string, Chapter>();\n\n const addToMap = (chapterList: Chapter[]): void => {\n for (const chapter of chapterList) {\n map.set(chapter.id, chapter);\n\n if (chapter.children && chapter.children.length > 0) {\n addToMap(chapter.children);\n }\n }\n };\n\n addToMap(chapters);\n return map;\n }\n}\n","import type { TocEntry } from '../types';\n\n/**\n * Single validation issue detected during TOC validation\n */\nexport interface TocValidationIssue {\n /**\n * Issue code (V001, V002, etc.)\n */\n code: string;\n\n /**\n * Human-readable error message\n */\n message: string;\n\n /**\n * Path to the problematic entry (e.g., \"[0].children[2]\")\n */\n path: string;\n\n /**\n * The problematic entry\n */\n entry: TocEntry;\n}\n\n/**\n * Result of TOC validation\n */\nexport interface TocValidationResult {\n /**\n * Whether validation passed (no errors)\n */\n valid: boolean;\n\n /**\n * List of validation issues\n */\n issues: TocValidationIssue[];\n\n /**\n * Error count\n */\n errorCount: number;\n}\n\n/**\n * TocExtractError\n *\n * Base error class for TOC extraction failures.\n */\nexport class TocExtractError extends Error {\n constructor(message: string, options?: ErrorOptions) {\n super(message, options);\n this.name = 'TocExtractError';\n }\n\n /**\n * Extract error message from unknown error type\n */\n static getErrorMessage(error: unknown): string {\n return error instanceof Error ? 
error.message : String(error);\n }\n\n /**\n * Create TocExtractError from unknown error with context\n */\n static fromError(context: string, error: unknown): TocExtractError {\n return new TocExtractError(\n `${context}: ${TocExtractError.getErrorMessage(error)}`,\n { cause: error },\n );\n }\n}\n\n/**\n * TocNotFoundError\n *\n * Error thrown when TOC area cannot be found in the document.\n */\nexport class TocNotFoundError extends TocExtractError {\n constructor(message = 'Table of contents not found in the document') {\n super(message);\n this.name = 'TocNotFoundError';\n }\n}\n\n/**\n * TocParseError\n *\n * Error thrown when LLM fails to parse TOC structure.\n */\nexport class TocParseError extends TocExtractError {\n constructor(message: string, options?: ErrorOptions) {\n super(message, options);\n this.name = 'TocParseError';\n }\n}\n\n/**\n * TocValidationError\n *\n * Error thrown when TOC validation fails.\n * Contains detailed information about validation issues.\n */\nexport class TocValidationError extends TocExtractError {\n /**\n * Validation result with detailed issues\n */\n readonly validationResult: TocValidationResult;\n\n constructor(message: string, validationResult: TocValidationResult) {\n super(message);\n this.name = 'TocValidationError';\n this.validationResult = validationResult;\n }\n\n /**\n * Get formatted error summary\n */\n getSummary(): string {\n const { errorCount, issues } = this.validationResult;\n const lines = [\n `TOC validation failed: ${errorCount} error(s)`,\n '',\n 'Issues:',\n ];\n\n for (const issue of issues) {\n lines.push(` [${issue.code}] ${issue.message}`);\n lines.push(` Path: ${issue.path}`);\n lines.push(\n ` Entry: \"${issue.entry.title}\" (page ${issue.entry.pageNo})`,\n );\n }\n\n return lines.join('\\n');\n }\n}\n","import type { TocEntry } from '../types';\nimport type {\n TocValidationIssue,\n TocValidationResult,\n} from './toc-extract-error';\n\nimport { TocValidationError } from './toc-extract-error';\n\n/**\n * Validation options for TocValidator\n */\nexport interface TocValidationOptions {\n /**\n * Total page count of the document (for range validation)\n * If not provided, page range upper bound validation is skipped\n */\n totalPages?: number;\n\n /**\n * Maximum allowed title length (default: 200)\n */\n maxTitleLength?: number;\n\n /**\n * Maximum ratio of the first entry's page to total pages (default: 0.3)\n * Used for V007 completeness check - if the first level-1 entry\n * starts beyond max(50, totalPages * ratio), the TOC may be incomplete.\n */\n maxFirstEntryPageRatio?: number;\n}\n\n/**\n * Default validation options\n */\nconst DEFAULT_OPTIONS: Required<TocValidationOptions> = {\n totalPages: Infinity,\n maxTitleLength: 200,\n maxFirstEntryPageRatio: 0.3,\n};\n\n/**\n * TocValidator\n *\n * Validates TocEntry[] structure for consistency and correctness.\n * Performs hierarchical validation including parent-child relationships.\n */\nexport class TocValidator {\n private readonly options: Required<TocValidationOptions>;\n private issues: TocValidationIssue[];\n\n constructor(options?: TocValidationOptions) {\n this.options = {\n ...DEFAULT_OPTIONS,\n ...options,\n };\n this.issues = [];\n }\n\n /**\n * Validate TocEntry array\n *\n * @param entries - TOC entries to validate\n * @returns Validation result\n */\n validate(entries: TocEntry[]): TocValidationResult {\n this.issues = [];\n\n // Validate all entries recursively\n this.validateEntries(entries, '', null, new Set<string>());\n\n // V007: First entry 
page position (completeness check)\n this.validateFirstEntryPagePosition(entries);\n\n const errorCount = this.issues.length;\n\n return {\n valid: errorCount === 0,\n issues: [...this.issues],\n errorCount,\n };\n }\n\n /**\n * Validate and throw if invalid\n *\n * @param entries - TOC entries to validate\n * @throws {TocValidationError} When validation fails\n */\n validateOrThrow(entries: TocEntry[]): void {\n const result = this.validate(entries);\n\n if (!result.valid) {\n const details = result.issues\n .map(\n (issue) =>\n ` [${issue.code}] ${issue.message} (path: ${issue.path}, entry: \"${issue.entry.title}\" page ${issue.entry.pageNo})`,\n )\n .join('\\n');\n throw new TocValidationError(\n `TOC validation failed with ${result.errorCount} error(s):\\n${details}`,\n result,\n );\n }\n }\n\n /**\n * Recursively validate entries\n */\n private validateEntries(\n entries: TocEntry[],\n parentPath: string,\n parentEntry: TocEntry | null,\n seenKeys: Set<string>,\n ): void {\n let prevPageNo = parentEntry?.pageNo ?? 0;\n\n for (let i = 0; i < entries.length; i++) {\n const entry = entries[i];\n const path = parentPath ? `${parentPath}.children[${i}]` : `[${i}]`;\n\n // V003: Empty title\n this.validateTitle(entry, path);\n\n // V004: Title length\n this.validateTitleLength(entry, path);\n\n // V002: Page range\n this.validatePageRange(entry, path);\n\n // V001: Page order (within same level)\n this.validatePageOrder(entry, path, prevPageNo);\n prevPageNo = entry.pageNo;\n\n // V005: Parent-child page relationship\n if (parentEntry) {\n this.validateParentChildPage(entry, path, parentEntry);\n }\n\n // V006: Duplicate detection\n const key = `${entry.title}:${entry.pageNo}`;\n this.validateDuplicate(entry, path, key, seenKeys);\n seenKeys.add(key);\n\n // Recursive validation for children\n if (entry.children && entry.children.length > 0) {\n this.validateEntries(entry.children, path, entry, seenKeys);\n }\n }\n }\n\n /**\n * V003: Validate title is not empty\n */\n private validateTitle(entry: TocEntry, path: string): void {\n if (!entry.title || entry.title.trim() === '') {\n this.addIssue({\n code: 'V003',\n message: 'Title is empty or contains only whitespace',\n path,\n entry,\n });\n }\n }\n\n /**\n * V004: Validate title length\n */\n private validateTitleLength(entry: TocEntry, path: string): void {\n if (entry.title.length > this.options.maxTitleLength) {\n this.addIssue({\n code: 'V004',\n message: `Title exceeds ${this.options.maxTitleLength} characters (${entry.title.length})`,\n path,\n entry,\n });\n }\n }\n\n /**\n * V002: Validate page number range\n */\n private validatePageRange(entry: TocEntry, path: string): void {\n if (entry.pageNo < 1) {\n this.addIssue({\n code: 'V002',\n message: `Page number must be >= 1, got ${entry.pageNo}`,\n path,\n entry,\n });\n }\n\n if (entry.pageNo > this.options.totalPages) {\n this.addIssue({\n code: 'V002',\n message: `Page number ${entry.pageNo} exceeds document total pages (${this.options.totalPages})`,\n path,\n entry,\n });\n }\n }\n\n /**\n * V001: Validate page order within same level\n */\n private validatePageOrder(\n entry: TocEntry,\n path: string,\n prevPageNo: number,\n ): void {\n if (entry.pageNo < prevPageNo) {\n this.addIssue({\n code: 'V001',\n message: `Page number decreased from ${prevPageNo} to ${entry.pageNo}`,\n path,\n entry,\n });\n }\n }\n\n /**\n * V005: Validate parent-child page relationship\n */\n private validateParentChildPage(\n entry: TocEntry,\n path: string,\n parent: TocEntry,\n ): void {\n if 
(entry.pageNo < parent.pageNo) {\n this.addIssue({\n code: 'V005',\n message: `Child page (${entry.pageNo}) is before parent page (${parent.pageNo})`,\n path,\n entry,\n });\n }\n }\n\n /**\n * V006: Validate no duplicates\n */\n private validateDuplicate(\n entry: TocEntry,\n path: string,\n key: string,\n seenKeys: Set<string>,\n ): void {\n if (seenKeys.has(key)) {\n this.addIssue({\n code: 'V006',\n message: `Duplicate entry: \"${entry.title}\" at page ${entry.pageNo}`,\n path,\n entry,\n });\n }\n }\n\n /**\n * V007: Validate first entry page position (completeness check)\n *\n * If the first level-1 entry starts too late in the document,\n * earlier entries might be missing from the TOC.\n */\n private validateFirstEntryPagePosition(entries: TocEntry[]): void {\n if (entries.length === 0) {\n return;\n }\n\n // Skip when totalPages is not provided (Infinity)\n if (!isFinite(this.options.totalPages)) {\n return;\n }\n\n const firstEntry = entries[0];\n const threshold = Math.max(\n 50,\n Math.floor(this.options.totalPages * this.options.maxFirstEntryPageRatio),\n );\n\n if (firstEntry.pageNo > threshold) {\n this.addIssue({\n code: 'V007',\n message: `TOC may be incomplete - first entry starts at page ${firstEntry.pageNo}, expected within first ${threshold} pages. Earlier entries might be missing.`,\n path: '[0]',\n entry: firstEntry,\n });\n }\n }\n\n /**\n * Add issue to the list\n */\n private addIssue(issue: TocValidationIssue): void {\n this.issues.push(issue);\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type {\n DoclingDocument,\n DoclingGroupItem,\n DoclingTableItem,\n DoclingTextItem,\n} from '@heripo/model';\n\nimport type { TocAreaResult } from '../types';\nimport type { RefResolver } from '../utils';\n\nimport { TocNotFoundError } from './toc-extract-error';\n\n/**\n * TOC keyword patterns for different languages\n * Korean: 목차, 차례, 목 차\n * Chinese: 目录, 目 录, 内容, 內容\n * Japanese: 目次, 目 次\n * English: Contents, Table of Contents, etc.\n */\nexport const TOC_KEYWORDS = [\n '목차',\n '차례',\n '목 차',\n '目录',\n '目 录',\n '内容',\n '內容',\n '目次',\n '目 次',\n 'Contents',\n 'Table of Contents',\n 'TABLE OF CONTENTS',\n 'CONTENTS',\n] as const;\n\n/**\n * Continuation marker patterns for multi-page TOC\n * Korean: 목차(계속), 목차 (계속), (계속)\n * Chinese: 目录(续), 目录 (续), (续), 续表\n * Japanese: 目次(続), 目次 (続), (続)\n * English: (continued), (Continued), etc.\n */\nexport const CONTINUATION_MARKERS = [\n '목차(계속)',\n '목차 (계속)',\n '(계속)',\n '目录(续)',\n '目录 (续)',\n '(续)',\n '续表',\n '目次(続)',\n '目次 (続)',\n '(続)',\n '(continued)',\n '(Continued)',\n '(CONTINUED)',\n 'continued',\n] as const;\n\n/**\n * Page number pattern regex for detecting TOC-like structures\n * Matches patterns like \"... 123\", \".... 45\", ending with numbers\n */\nexport const PAGE_NUMBER_PATTERN = /\\.{2,}\\s*\\d+\\s*$|…+\\s*\\d+\\s*$|\\s+\\d+\\s*$/;\n\n/**\n * TocFinder options\n */\nexport interface TocFinderOptions {\n /**\n * Maximum pages to search for TOC (default: 10)\n */\n maxSearchPages?: number;\n\n /**\n * Custom TOC keywords to add (optional)\n */\n additionalKeywords?: string[];\n}\n\n/**\n * TocFinder\n *\n * Finds TOC area in DoclingDocument using multi-stage search strategy:\n * 1. Keyword search in texts (section_header, list_item labels)\n * 2. Structure analysis for lists/tables with page number patterns\n * 3. 
Position heuristic (prioritize early pages)\n */\nexport class TocFinder {\n private readonly maxSearchPages: number;\n private readonly keywords: string[];\n\n constructor(\n private readonly logger: LoggerMethods,\n private readonly refResolver: RefResolver,\n options?: TocFinderOptions,\n ) {\n this.maxSearchPages = options?.maxSearchPages ?? 10;\n this.keywords = [...TOC_KEYWORDS, ...(options?.additionalKeywords ?? [])];\n }\n\n /**\n * Find TOC area in the document\n *\n * @throws {TocNotFoundError} When no TOC area is found\n */\n find(doc: DoclingDocument): TocAreaResult {\n this.logger.info('[TocFinder] Starting TOC search...');\n\n // Stage 1: Search by keywords\n const keywordResult = this.findByKeywords(doc);\n if (keywordResult) {\n this.logger.info(\n `[TocFinder] Found TOC by keyword search: pages ${keywordResult.startPage}-${keywordResult.endPage}`,\n );\n return keywordResult;\n }\n\n // Stage 2: Search by structure\n const structureResult = this.findByStructure(doc);\n if (structureResult) {\n this.logger.info(\n `[TocFinder] Found TOC by structure analysis: pages ${structureResult.startPage}-${structureResult.endPage}`,\n );\n return structureResult;\n }\n\n this.logger.warn('[TocFinder] No TOC found in document');\n throw new TocNotFoundError();\n }\n\n /**\n * Stage 1: Search by keywords in text items\n */\n private findByKeywords(doc: DoclingDocument): TocAreaResult | null {\n // Find text items containing TOC keywords\n for (const text of doc.texts) {\n if (!this.containsTocKeyword(text.text)) {\n continue;\n }\n\n const pageNo = text.prov[0]?.page_no;\n if (pageNo === undefined || pageNo > this.maxSearchPages) {\n continue;\n }\n\n this.logger.info(\n `[TocFinder] Found TOC keyword \"${text.text}\" on page ${pageNo}`,\n );\n\n // Find the parent group or table containing this text\n const parentRef = text.parent?.$ref;\n if (!parentRef) {\n // Single text item, return it directly\n return {\n itemRefs: [text.self_ref],\n startPage: pageNo,\n endPage: pageNo,\n };\n }\n\n // Try to find group containing TOC structure\n const result = this.findTocContainer(doc, parentRef, pageNo);\n if (result) {\n return this.expandToConsecutivePages(result, doc);\n }\n }\n\n return null;\n }\n\n /**\n * Stage 2: Search by structure (lists/tables with page numbers)\n */\n private findByStructure(doc: DoclingDocument): TocAreaResult | null {\n const candidates: Array<{\n result: TocAreaResult;\n score: number;\n }> = [];\n\n // Check groups for TOC-like structure\n for (const group of doc.groups) {\n const pageNo = this.getGroupFirstPage(group);\n if (pageNo === undefined || pageNo > this.maxSearchPages) {\n continue;\n }\n\n if (this.isGroupTocLike(group, doc)) {\n const score = this.calculateScore(group, pageNo);\n candidates.push({\n result: {\n itemRefs: [group.self_ref],\n startPage: pageNo,\n endPage: pageNo,\n },\n score,\n });\n }\n }\n\n // Check tables for TOC-like structure\n for (const table of doc.tables) {\n const pageNo = table.prov[0]?.page_no;\n if (pageNo === undefined || pageNo > this.maxSearchPages) {\n continue;\n }\n\n if (this.isTableTocLike(table)) {\n const score = this.calculateTableScore(table, pageNo);\n candidates.push({\n result: {\n itemRefs: [table.self_ref],\n startPage: pageNo,\n endPage: pageNo,\n },\n score,\n });\n }\n }\n\n if (candidates.length === 0) {\n return null;\n }\n\n // Sort by score (higher is better) and return best match\n candidates.sort((a, b) => b.score - a.score);\n const best = candidates[0];\n\n return 
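/* widen the best candidate across adjacent TOC pages */ 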
this.expandToConsecutivePages(best.result, doc);\n }\n\n /**\n * Find the TOC container (group or table) from a parent reference\n */\n private findTocContainer(\n doc: DoclingDocument,\n parentRef: string,\n pageNo: number,\n ): TocAreaResult | null {\n // Check if parent is a group\n const group = this.refResolver.resolveGroup(parentRef);\n if (group) {\n return {\n itemRefs: [group.self_ref],\n startPage: pageNo,\n endPage: pageNo,\n };\n }\n\n // Check if parent is a table\n const table = this.refResolver.resolveTable(parentRef);\n if (table) {\n return {\n itemRefs: [table.self_ref],\n startPage: pageNo,\n endPage: pageNo,\n };\n }\n\n // Try parent's parent (navigate up hierarchy)\n const item = this.refResolver.resolve(parentRef);\n if (item && item.parent?.$ref) {\n return this.findTocContainer(doc, item.parent.$ref, pageNo);\n }\n\n return null;\n }\n\n /**\n * Check if a group contains TOC-like structure\n */\n private isGroupTocLike(\n group: DoclingGroupItem,\n _doc: DoclingDocument,\n ): boolean {\n if (group.name !== 'list' && group.name !== 'group') {\n return false;\n }\n\n // Count children with page number patterns\n let pageNumberCount = 0;\n const children = this.refResolver.resolveMany(group.children);\n\n for (const child of children) {\n if (!child) continue;\n\n // Check if it's a text item with page number pattern\n if ('text' in child && 'orig' in child) {\n const textItem = child as DoclingTextItem;\n if (PAGE_NUMBER_PATTERN.test(textItem.text)) {\n pageNumberCount++;\n }\n }\n }\n\n // Consider TOC-like if at least 3 items have page numbers\n // or if more than 50% of items have page numbers\n const total = children.filter((c) => c !== null).length;\n return pageNumberCount >= 3 || (total > 0 && pageNumberCount / total > 0.5);\n }\n\n /**\n * Check if a table contains TOC-like structure\n */\n private isTableTocLike(table: DoclingTableItem): boolean {\n // Check for document_index label (Docling specific)\n if (table.label === 'document_index') {\n return true;\n }\n\n const { grid, num_rows, num_cols } = table.data;\n\n // Need at least 3 rows and 2 columns typically\n if (num_rows < 3 || num_cols < 2) {\n return false;\n }\n\n // Check if last column contains mostly numbers (page numbers)\n let numberCount = 0;\n for (let row = 1; row < grid.length; row++) {\n const lastCell = grid[row]?.[num_cols - 1];\n if (lastCell && /^\\d+$/.test(lastCell.text.trim())) {\n numberCount++;\n }\n }\n\n // More than 50% of data rows have numeric last column\n return numberCount > 0 && numberCount / (num_rows - 1) > 0.5;\n }\n\n /**\n * Expand TOC area to consecutive pages (both backward and forward)\n */\n private expandToConsecutivePages(\n initial: TocAreaResult,\n doc: DoclingDocument,\n ): TocAreaResult {\n const itemRefs = [...initial.itemRefs];\n const seenRefs = new Set<string>(itemRefs);\n let startPage = initial.startPage;\n let endPage = initial.endPage;\n\n // Backward expansion (preceding pages)\n for (let pageNo = initial.startPage - 1; pageNo >= 1; pageNo--) {\n const continuationItems = this.findContinuationOnPage(doc, pageNo);\n if (continuationItems.length === 0) {\n break;\n }\n\n const newItems = continuationItems.filter((ref) => !seenRefs.has(ref));\n for (const ref of newItems) {\n seenRefs.add(ref);\n }\n itemRefs.unshift(...newItems);\n startPage = pageNo;\n this.logger.info(`[TocFinder] Expanded TOC backward to page ${pageNo}`);\n }\n\n // Forward expansion (subsequent pages)\n for (\n let pageNo = initial.endPage + 1;\n pageNo <= 
this.maxSearchPages;\n pageNo++\n ) {\n const continuationItems = this.findContinuationOnPage(doc, pageNo);\n if (continuationItems.length === 0) {\n break;\n }\n\n const newItems = continuationItems.filter((ref) => !seenRefs.has(ref));\n for (const ref of newItems) {\n seenRefs.add(ref);\n }\n itemRefs.push(...newItems);\n endPage = pageNo;\n this.logger.info(`[TocFinder] Expanded TOC forward to page ${pageNo}`);\n }\n\n return {\n itemRefs,\n startPage,\n endPage,\n };\n }\n\n /**\n * Find TOC continuation items on a specific page\n */\n private findContinuationOnPage(\n doc: DoclingDocument,\n pageNo: number,\n ): string[] {\n const refs: string[] = [];\n\n // Check for continuation markers in texts\n for (const text of doc.texts) {\n if (text.prov[0]?.page_no !== pageNo) {\n continue;\n }\n\n if (this.hasContinuationMarker(text.text)) {\n const parentRef = text.parent?.$ref;\n if (parentRef) {\n const group = this.refResolver.resolveGroup(parentRef);\n if (group) {\n refs.push(group.self_ref);\n }\n }\n }\n }\n\n // Check for TOC-like groups on this page\n for (const group of doc.groups) {\n const groupPage = this.getGroupFirstPage(group);\n if (groupPage !== pageNo) {\n continue;\n }\n\n if (this.isGroupTocLike(group, doc) && !refs.includes(group.self_ref)) {\n refs.push(group.self_ref);\n }\n }\n\n // Check for TOC-like tables on this page\n for (const table of doc.tables) {\n if (table.prov[0]?.page_no !== pageNo) {\n continue;\n }\n\n if (this.isTableTocLike(table) && !refs.includes(table.self_ref)) {\n refs.push(table.self_ref);\n }\n }\n\n return refs;\n }\n\n /**\n * Check if text contains TOC keyword\n */\n private containsTocKeyword(text: string): boolean {\n const normalizedText = text.trim().toLowerCase();\n return this.keywords.some((keyword) =>\n normalizedText.includes(keyword.toLowerCase()),\n );\n }\n\n /**\n * Check for continuation markers\n */\n private hasContinuationMarker(text: string): boolean {\n const normalizedText = text.trim().toLowerCase();\n return CONTINUATION_MARKERS.some((marker) =>\n normalizedText.includes(marker.toLowerCase()),\n );\n }\n\n /**\n * Get first page number of a group by checking its children\n */\n private getGroupFirstPage(group: DoclingGroupItem): number | undefined {\n for (const childRef of group.children) {\n const child = this.refResolver.resolve(childRef.$ref);\n if (child && 'prov' in child) {\n const prov = (child as DoclingTextItem).prov;\n if (prov && prov[0]?.page_no !== undefined) {\n return prov[0].page_no;\n }\n }\n }\n return undefined;\n }\n\n /**\n * Calculate score for a group candidate\n * Higher score = better match\n */\n private calculateScore(group: DoclingGroupItem, pageNo: number): number {\n let score = 0;\n\n // Earlier pages get higher score\n score += (this.maxSearchPages - pageNo + 1) * 10;\n\n // More children (TOC entries) = higher score\n score += group.children.length * 2;\n\n // Count items with page numbers\n const children = this.refResolver.resolveMany(group.children);\n for (const child of children) {\n if (child && 'text' in child) {\n const textItem = child as DoclingTextItem;\n if (PAGE_NUMBER_PATTERN.test(textItem.text)) {\n score += 5;\n }\n }\n }\n\n return score;\n }\n\n /**\n * Calculate score for a table candidate\n */\n private calculateTableScore(table: DoclingTableItem, pageNo: number): number {\n let score = 0;\n\n // Earlier pages get higher score\n score += (this.maxSearchPages - pageNo + 1) * 10;\n\n // More rows = higher score\n score += table.data.num_rows * 2;\n\n // 
document_index label is a strong indicator\n if (table.label === 'document_index') {\n score += 50;\n }\n\n return score;\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type { ExtendedTokenUsage } from '@heripo/shared';\nimport type { LanguageModel } from 'ai';\n\nimport type { TocEntry } from '../types';\nimport type { TocValidationIssue } from './toc-extract-error';\nimport type { TocValidationOptions } from './toc-validator';\n\nimport { z } from 'zod';\n\nimport {\n type BaseLLMComponentOptions,\n TextLLMComponent,\n} from '../core/text-llm-component';\nimport { TocParseError, TocValidationError } from './toc-extract-error';\nimport { TocValidator } from './toc-validator';\n\n// TODO: Make configurable via TocExtractorOptions when exposing to DocumentProcessorOptions\nconst MAX_VALIDATION_RETRIES = 3;\n\n/**\n * Validation error code descriptions for correction prompts\n */\nconst VALIDATION_CODE_DESCRIPTIONS: Record<string, string> = {\n V001: 'Page numbers must be in non-decreasing order within the same level. A decrease usually means a hierarchy or page number error.',\n V002: 'Page number is out of valid range (must be >= 1 and <= total pages).',\n V003: 'Title is empty or contains only whitespace.',\n V004: 'Title exceeds the maximum allowed length.',\n V005: 'Child page number is before parent page number. Children must start on or after the parent page.',\n V006: 'Duplicate entry detected (same title and page number).',\n V007: 'First TOC entry starts too late in the document. Earlier entries may be missing.',\n};\n\n/**\n * Zod schema for recursive TocEntry structure\n */\nexport const TocEntrySchema: z.ZodType<TocEntry> = z.lazy(() =>\n z.object({\n title: z.string().describe('Chapter or section title'),\n level: z.number().int().min(1).describe('Hierarchy depth (1 = top level)'),\n pageNo: z.number().int().min(1).describe('Starting page number'),\n children: z\n .array(TocEntrySchema)\n .describe('Child sections (use empty array [] if none)'),\n }),\n);\n\n/**\n * Schema for LLM response\n */\nexport const TocResponseSchema = z.object({\n entries: z.array(TocEntrySchema).describe('Extracted TOC entries'),\n});\n\nexport type TocResponse = z.infer<typeof TocResponseSchema>;\n\n/**\n * TocExtractor options\n */\nexport interface TocExtractorOptions extends BaseLLMComponentOptions {\n /**\n * Validation options (optional)\n * If not provided, validation is performed with default settings\n */\n validation?: TocValidationOptions;\n\n /**\n * Whether to skip validation entirely (default: false)\n */\n skipValidation?: boolean;\n}\n\n/**\n * TocExtractor\n *\n * Uses high-performance LLM to extract structured TOC from Markdown representation.\n * Extends TextLLMComponent for standardized LLM call handling.\n *\n * When validation fails, automatically retries with correction feedback\n * up to MAX_VALIDATION_RETRIES times before throwing.\n */\nexport class TocExtractor extends TextLLMComponent {\n private readonly validationOptions?: TocValidationOptions;\n private readonly skipValidation: boolean;\n\n constructor(\n logger: LoggerMethods,\n model: LanguageModel,\n options?: TocExtractorOptions,\n fallbackModel?: LanguageModel,\n abortSignal?: AbortSignal,\n ) {\n super(\n logger,\n model,\n 'TocExtractor',\n { ...options, abortSignal },\n fallbackModel,\n );\n this.validationOptions = options?.validation;\n this.skipValidation = options?.skipValidation ?? 
false;\n }\n\n /**\n * Extract TOC structure from Markdown\n *\n * When validation fails, retries with correction feedback up to MAX_VALIDATION_RETRIES times.\n *\n * @param markdown - Markdown representation of TOC area\n * @param validationOverrides - Optional overrides for validation options (merged with constructor options)\n * @returns Object with entries array and token usage array (initial extraction + any corrections)\n * @throws {TocParseError} When LLM fails to parse structure\n * @throws {TocValidationError} When validation fails after all retries\n */\n async extract(\n markdown: string,\n validationOverrides?: Partial<TocValidationOptions>,\n ): Promise<{ entries: TocEntry[]; usages: ExtendedTokenUsage[] }> {\n this.log('info', `Starting TOC extraction (${markdown.length} chars)`);\n\n if (!markdown.trim()) {\n this.log('error', 'Cannot extract TOC from empty markdown content');\n throw new TocParseError(\n 'TOC extraction failed: provided markdown content is empty',\n );\n }\n\n try {\n // Initial extraction\n const result = await this.callTextLLM(\n TocResponseSchema,\n this.buildSystemPrompt(),\n this.buildUserPrompt(markdown),\n 'extraction',\n );\n\n const usages: ExtendedTokenUsage[] = [result.usage];\n let entries = this.normalizeEntries(result.output.entries);\n\n // Validate and retry if needed\n if (!this.skipValidation) {\n let validationError = this.tryValidateEntries(\n entries,\n validationOverrides,\n );\n\n // Retry loop with correction feedback\n for (\n let attempt = 1;\n attempt <= MAX_VALIDATION_RETRIES && validationError !== null;\n attempt++\n ) {\n this.log(\n 'warn',\n `Validation failed (attempt ${attempt}/${MAX_VALIDATION_RETRIES}), retrying with correction feedback`,\n );\n\n const correctionPrompt = this.buildCorrectionPrompt(\n markdown,\n entries,\n validationError.validationResult.issues,\n );\n\n const correctionResult = await this.callTextLLM(\n TocResponseSchema,\n this.buildSystemPrompt(),\n correctionPrompt,\n `correction-${attempt}`,\n );\n\n usages.push(correctionResult.usage);\n entries = this.normalizeEntries(correctionResult.output.entries);\n validationError = this.tryValidateEntries(\n entries,\n validationOverrides,\n );\n }\n\n // If still failing after all retries, throw the last error\n if (validationError !== null) {\n this.log(\n 'error',\n `Validation failed after ${MAX_VALIDATION_RETRIES} retries:\\n${validationError.getSummary()}`,\n );\n throw validationError;\n }\n }\n\n this.log(\n 'info',\n `Extraction completed: ${entries.length} top-level entries (${usages.length} LLM call(s))`,\n );\n\n return { entries, usages };\n } catch (error) {\n // Re-throw TocValidationError as-is\n if (error instanceof TocValidationError) {\n throw error;\n }\n\n const message = error instanceof Error ? 
error.message : String(error);\n this.log('error', `Extraction failed: ${message}`);\n throw new TocParseError(`Failed to extract TOC structure: ${message}`, {\n cause: error,\n });\n }\n }\n\n /**\n * Validate extracted entries and return error or null\n *\n * Unlike validateOrThrow, this returns the error instead of throwing,\n * allowing the retry loop to handle it.\n *\n * @returns TocValidationError if validation fails, null if valid\n */\n private tryValidateEntries(\n entries: TocEntry[],\n overrides?: Partial<TocValidationOptions>,\n ): TocValidationError | null {\n if (entries.length === 0) {\n return null;\n }\n\n const options = { ...this.validationOptions, ...overrides };\n const validator = new TocValidator(options);\n const result = validator.validate(entries);\n\n if (!result.valid) {\n const details = result.issues\n .map(\n (issue) =>\n ` [${issue.code}] ${issue.message} (path: ${issue.path}, entry: \"${issue.entry.title}\" page ${issue.entry.pageNo})`,\n )\n .join('\\n');\n return new TocValidationError(\n `TOC validation failed with ${result.errorCount} error(s):\\n${details}`,\n result,\n );\n }\n\n return null;\n }\n\n /**\n * Build correction prompt with validation error feedback\n *\n * Includes the original markdown, previous extraction result,\n * validation errors, and guidance for fixing common mistakes.\n */\n protected buildCorrectionPrompt(\n markdown: string,\n previousEntries: TocEntry[],\n issues: TocValidationIssue[],\n ): string {\n const errorLines = issues.map((issue) => {\n const desc =\n VALIDATION_CODE_DESCRIPTIONS[issue.code] ?? 'Unknown validation error.';\n return `- [${issue.code}] ${issue.message}\\n Path: ${issue.path}\\n Entry: \"${issue.entry.title}\" (page ${issue.entry.pageNo})\\n Rule: ${desc}`;\n });\n\n return `Your previous TOC extraction had validation errors. Please fix them and re-extract.\n\n## Validation Errors\n\n${errorLines.join('\\n\\n')}\n\n## Common Mistakes to Avoid\n\n1. **Hierarchy confusion**: Entries with the same numbering prefix (e.g., \"4)\") can belong to different hierarchy levels depending on context. Use indentation and surrounding entries to determine the correct parent-child relationship.\n2. **Page number misread**: Carefully distinguish Roman numerals (VI=6) from Arabic numerals. \"VI. 고찰\" at page 277 is NOT \"V. 고찰\" at page 27.\n3. **Page order**: Within the same parent, sibling entries must have non-decreasing page numbers. If a page number decreases, the entry likely belongs to a different hierarchy level.\n\n## Original Markdown\n\n${markdown}\n\n## Your Previous Extraction (with errors)\n\n${JSON.stringify(previousEntries, null, 2)}\n\n## Instructions\n\nRe-extract the TOC structure from the original markdown above. Fix all validation errors listed above. Return the corrected entries.`;\n }\n\n /**\n * Build system prompt for TOC extraction\n */\n protected buildSystemPrompt(): string {\n return `You are a document structure extraction assistant. Your task is to parse a table of contents (TOC) from markdown format and extract structured entries.\n\n## Instructions\n\n1. **Title**: Extract the exact chapter/section title from each line. Remove page number indicators like \"..... 10\" or \"... 5\" at the end.\n\n2. 
**Level**: Determine the hierarchy depth:\n - Level 1: Top-level chapters (e.g., \"제1장\", \"Chapter 1\", \"I.\", \"Part 1\")\n - Level 2: Main sections within chapters (e.g., \"1.\", \"1.1\", \"A.\")\n - Level 3: Subsections (e.g., \"1.1.1\", \"a.\", \"(1)\")\n - Use indentation and numbering patterns to infer level\n\n3. **Page Number**: Extract the page number from each entry. Use only Arabic numerals for page numbers.\n\n4. **Children**: Nest child entries under parent entries based on their hierarchy level.\n\n5. **IMPORTANT - Extract Main TOC Only**: Only extract the main document table of contents. EXCLUDE the following:\n - **Front matter with Roman numeral pages**: Entries whose page numbers are Roman numerals (i, ii, xxi, etc.) such as 일러두기, 발간사, 서문, 범례, Preface, Foreword, Editorial Notes. These use a separate page numbering system and are not part of the main content.\n - Photo/image indices (사진 목차, 사진목차, 화보 목차, Photo Index, List of Photos, List of Figures)\n - Drawing/diagram indices (도면 목차, 도면목차, 삽도 목차, Drawing Index, List of Drawings)\n - Table indices (표 목차, 표목차, Table Index, List of Tables)\n - Appendix indices (부록 목차, Appendix Index)\n - Any other supplementary material indices\n\n## Output Format\n\nReturn a flat array of top-level entries. Each entry at level 1 should contain its children (level 2+) nested properly.\n\n## Example\n\nInput:\n- 제1장 서론 ..... 1\n - 1. 연구 배경 ..... 3\n - 2. 연구 목적 ..... 5\n- 제2장 방법론 ..... 10\n\nOutput:\n{\n \"entries\": [\n {\n \"title\": \"제1장 서론\",\n \"level\": 1,\n \"pageNo\": 1,\n \"children\": [\n { \"title\": \"1. 연구 배경\", \"level\": 2, \"pageNo\": 3, \"children\": [] },\n { \"title\": \"2. 연구 목적\", \"level\": 2, \"pageNo\": 5, \"children\": [] }\n ]\n },\n { \"title\": \"제2장 방법론\", \"level\": 1, \"pageNo\": 10, \"children\": [] }\n ]\n}`;\n }\n\n /**\n * Build user prompt with Markdown content\n */\n protected buildUserPrompt(markdown: string): string {\n return `Extract the table of contents structure from the following markdown:\n\n${markdown}`;\n }\n\n /**\n * Normalize and validate extracted entries\n */\n private normalizeEntries(entries: TocEntry[]): TocEntry[] {\n if (entries.length === 0) {\n return [];\n }\n\n // Normalize level consistency starting from level 1\n return this.normalizeLevel(entries, 1);\n }\n\n /**\n * Recursively ensure level consistency\n *\n * Children must have level = parent.level + 1\n */\n private normalizeLevel(\n entries: TocEntry[],\n expectedLevel: number,\n ): TocEntry[] {\n return entries.map((entry) => {\n const normalizedEntry: TocEntry = {\n title: entry.title.trim(),\n level: expectedLevel,\n pageNo: entry.pageNo,\n };\n\n if (entry.children && entry.children.length > 0) {\n normalizedEntry.children = this.normalizeLevel(\n entry.children,\n expectedLevel + 1,\n );\n }\n\n return normalizedEntry;\n });\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type {\n ExtendedTokenUsage,\n LLMTokenUsageAggregator,\n} from '@heripo/shared';\nimport type { LanguageModel } from 'ai';\n\n/**\n * Base options for all LLM-based components\n */\nexport interface BaseLLMComponentOptions {\n /**\n * Maximum retry count for LLM API (default: 3)\n */\n maxRetries?: number;\n\n /**\n * Temperature for LLM generation (default: 0)\n */\n temperature?: number;\n\n /**\n * Abort signal for cancellation support\n */\n abortSignal?: AbortSignal;\n}\n\n/**\n * Abstract base class for all LLM-based components\n *\n * Provides common functionality:\n * - Consistent logging with component name prefix\n * 
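`normalizeLevel()` discards whatever level the LLM reported and derives depth purely from nesting: every child is forced to `parent.level + 1` and titles are trimmed. A standalone worked example (the `TocEntry` shape is redeclared locally for illustration):

```typescript
// Depth comes from the tree structure, not from the LLM's reported level.
interface TocEntry {
  title: string;
  level: number;
  pageNo: number;
  children?: TocEntry[];
}

function normalizeLevel(entries: TocEntry[], expectedLevel: number): TocEntry[] {
  return entries.map((entry) => ({
    title: entry.title.trim(),
    level: expectedLevel,
    pageNo: entry.pageNo,
    ...(entry.children?.length
      ? { children: normalizeLevel(entry.children, expectedLevel + 1) }
      : {}),
  }));
}

// A child mislabeled as level 3 under a level-1 parent becomes level 2:
const fixed = normalizeLevel(
  [
    {
      title: ' 제1장 서론 ',
      level: 1,
      pageNo: 1,
      children: [{ title: '1. 배경', level: 3, pageNo: 3 }],
    },
  ],
  1,
);
// fixed[0].children![0].level === 2, fixed[0].title === '제1장 서론'
```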
- Token usage tracking via optional aggregator\n * - Standard configuration (model, fallback, retries, temperature)\n *\n * Subclasses must implement buildSystemPrompt() and buildUserPrompt().\n */\nexport abstract class BaseLLMComponent {\n protected readonly logger: LoggerMethods;\n protected readonly model: LanguageModel;\n protected readonly fallbackModel?: LanguageModel;\n protected readonly maxRetries: number;\n protected readonly temperature: number;\n protected readonly componentName: string;\n protected readonly aggregator?: LLMTokenUsageAggregator;\n protected readonly abortSignal?: AbortSignal;\n\n /**\n * Constructor for BaseLLMComponent\n *\n * @param logger - Logger instance for logging\n * @param model - Primary language model for LLM calls\n * @param componentName - Name of the component for logging (e.g., \"TocExtractor\")\n * @param options - Optional configuration (maxRetries, temperature)\n * @param fallbackModel - Optional fallback model for retry on failure\n * @param aggregator - Optional token usage aggregator for tracking LLM calls\n */\n constructor(\n logger: LoggerMethods,\n model: LanguageModel,\n componentName: string,\n options?: BaseLLMComponentOptions,\n fallbackModel?: LanguageModel,\n aggregator?: LLMTokenUsageAggregator,\n ) {\n this.logger = logger;\n this.model = model;\n this.componentName = componentName;\n this.maxRetries = options?.maxRetries ?? 3;\n this.temperature = options?.temperature ?? 0;\n this.fallbackModel = fallbackModel;\n this.aggregator = aggregator;\n this.abortSignal = options?.abortSignal;\n }\n\n /**\n * Log a message with consistent component name prefix\n *\n * @param level - Log level ('info', 'warn', 'error')\n * @param message - Message to log (without prefix)\n * @param args - Additional arguments to pass to logger\n */\n protected log(\n level: 'info' | 'warn' | 'error',\n message: string,\n ...args: unknown[]\n ): void {\n const formattedMessage = `[${this.componentName}] ${message}`;\n this.logger[level](formattedMessage, ...args);\n }\n\n /**\n * Track token usage to aggregator if available\n *\n * @param usage - Token usage information to track\n */\n protected trackUsage(usage: ExtendedTokenUsage): void {\n if (this.aggregator) {\n this.aggregator.track(usage);\n }\n }\n\n /**\n * Create an empty usage record for edge cases (e.g., empty input)\n *\n * @param phase - Phase name for the usage record\n * @returns Empty ExtendedTokenUsage object\n */\n protected createEmptyUsage(phase: string): ExtendedTokenUsage {\n return {\n component: this.componentName,\n phase,\n model: 'primary',\n modelName: 'none',\n inputTokens: 0,\n outputTokens: 0,\n totalTokens: 0,\n };\n }\n\n /**\n * Build system prompt for LLM call\n *\n * Subclasses must implement this to provide component-specific system prompts.\n */\n protected abstract buildSystemPrompt(...args: unknown[]): string;\n\n /**\n * Build user prompt for LLM call\n *\n * Subclasses must implement this to construct prompts from input data.\n */\n protected abstract buildUserPrompt(...args: unknown[]): string;\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type {\n ExtendedTokenUsage,\n LLMTokenUsageAggregator,\n} from '@heripo/shared';\nimport type { LanguageModel } from 'ai';\nimport type { z } from 'zod';\n\nimport { LLMCaller } from '@heripo/shared';\n\nimport {\n BaseLLMComponent,\n type BaseLLMComponentOptions,\n} from './base-llm-component';\n\nexport type { BaseLLMComponentOptions } from './base-llm-component';\n\n/**\n * Abstract base class for 
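The logging contract is simply that every message is prefixed with the component name, so interleaved logs from parallel components stay attributable. The class below is a standalone illustration of that convention only, not something the package exports:

```typescript
// Miniature of BaseLLMComponent.log(): a fixed "[ComponentName]" prefix.
type Level = 'info' | 'warn' | 'error';

class PrefixedLogger {
  constructor(private readonly componentName: string) {}

  log(level: Level, message: string, ...args: unknown[]): void {
    console[level](`[${this.componentName}] ${message}`, ...args);
  }
}

new PrefixedLogger('TocExtractor').log('info', 'Starting TOC extraction');
// -> [TocExtractor] Starting TOC extraction
```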
text-based LLM components\n *\n * Extends BaseLLMComponent with helper method for text-based LLM calls\n * using LLMCaller.call() (non-vision).\n *\n * Subclasses: TocExtractor, CaptionParser, BaseValidator\n */\nexport abstract class TextLLMComponent extends BaseLLMComponent {\n constructor(\n logger: LoggerMethods,\n model: LanguageModel,\n componentName: string,\n options?: BaseLLMComponentOptions,\n fallbackModel?: LanguageModel,\n aggregator?: LLMTokenUsageAggregator,\n ) {\n super(logger, model, componentName, options, fallbackModel, aggregator);\n }\n\n /**\n * Call LLM with text-based prompts using LLMCaller.call()\n *\n * @template TSchema - Zod schema type for response validation\n * @param schema - Zod schema for response validation\n * @param systemPrompt - System prompt for LLM\n * @param userPrompt - User prompt for LLM\n * @param phase - Phase name for tracking (e.g., 'extraction', 'validation')\n * @returns Promise with parsed object and usage information\n */\n protected async callTextLLM<TSchema extends z.ZodType>(\n schema: TSchema,\n systemPrompt: string,\n userPrompt: string,\n phase: string,\n ): Promise<{ output: z.infer<TSchema>; usage: ExtendedTokenUsage }> {\n const result = await LLMCaller.call({\n schema,\n systemPrompt,\n userPrompt,\n primaryModel: this.model,\n fallbackModel: this.fallbackModel,\n maxRetries: this.maxRetries,\n temperature: this.temperature,\n abortSignal: this.abortSignal,\n component: this.componentName,\n phase,\n });\n\n this.trackUsage(result.usage);\n\n return {\n output: result.output as z.infer<TSchema>,\n usage: result.usage,\n };\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type { LLMTokenUsageAggregator } from '@heripo/shared';\nimport type { LanguageModel } from 'ai';\n\nimport {\n LLMCaller,\n LLMTokenUsageAggregator as LLMTokenUsageAggregatorClass,\n} from '@heripo/shared';\nimport * as fs from 'node:fs';\nimport * as path from 'node:path';\nimport { z } from 'zod';\n\nimport {\n VisionLLMComponent,\n type VisionLLMComponentOptions,\n} from '../core/vision-llm-component';\n\n/**\n * Schema for vision-based TOC extraction response\n */\nexport const VisionTocExtractionSchema = z.object({\n hasToc: z.boolean().describe('Whether a TOC is visible on these pages'),\n tocMarkdown: z\n .string()\n .nullable()\n .describe('Extracted TOC in markdown format, null if not found'),\n continuesOnNextPage: z\n .boolean()\n .describe('Whether TOC continues beyond these pages'),\n});\n\nexport type VisionTocExtractionResult = z.infer<\n typeof VisionTocExtractionSchema\n>;\n\n/**\n * Options for VisionTocExtractor\n */\nexport interface VisionTocExtractorOptions extends VisionLLMComponentOptions {\n /**\n * Number of pages for first batch (default: 10)\n */\n firstBatchSize?: number;\n\n /**\n * Number of pages for second batch (default: 10)\n */\n secondBatchSize?: number;\n}\n\n/**\n * VisionTocExtractor\n *\n * Uses vision LLM to find and extract TOC directly from page images.\n * Fallback strategy when rule-based extraction fails or produces invalid content.\n * Extends VisionLLMComponent for standardized vision LLM call handling.\n *\n * Output format matches MarkdownConverter.convert() for consistency.\n */\nexport class VisionTocExtractor extends VisionLLMComponent {\n private readonly firstBatchSize: number;\n private readonly secondBatchSize: number;\n\n constructor(\n logger: LoggerMethods,\n model: LanguageModel,\n outputPath: string,\n options?: VisionTocExtractorOptions,\n fallbackModel?: LanguageModel,\n 
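The Zod schema handed to `callTextLLM()` does double duty: it validates the LLM response at runtime and types the parsed output at compile time via `z.infer`. A small sketch with a hypothetical schema (not the package's exported `TocResponseSchema`):

```typescript
import { z } from 'zod';

// One schema, two jobs: runtime validation and the static output type.
const TocResponseLikeSchema = z.object({
  entries: z.array(
    z.object({
      title: z.string(),
      level: z.number().int(),
      pageNo: z.number().int(),
    }),
  ),
});

type TocResponseLike = z.infer<typeof TocResponseLikeSchema>;

// Validating a raw payload (hypothetical data) throws on shape mismatch:
const parsed: TocResponseLike = TocResponseLikeSchema.parse({
  entries: [{ title: '제1장 서론', level: 1, pageNo: 1 }],
});
```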
aggregator?: LLMTokenUsageAggregator,\n ) {\n super(\n logger,\n model,\n 'VisionTocExtractor',\n outputPath,\n options,\n fallbackModel,\n aggregator ?? new LLMTokenUsageAggregatorClass(),\n );\n this.firstBatchSize = options?.firstBatchSize ?? 10;\n this.secondBatchSize = options?.secondBatchSize ?? 10;\n }\n\n /**\n * Extract TOC from page images\n *\n * Searches pages 1-10 first, then 11-20 if not found.\n *\n * @param totalPages - Total number of pages in the document\n * @returns Extracted TOC markdown or null if not found\n */\n async extract(totalPages: number): Promise<string | null> {\n this.log('info', `Starting TOC extraction from ${totalPages} pages`);\n\n if (totalPages === 0) {\n this.log('info', 'No pages to search');\n return null;\n }\n\n // First batch: pages 1-10 (or fewer if document is smaller)\n const firstBatchEnd = Math.min(this.firstBatchSize, totalPages);\n this.log('info', `Searching first batch: pages 1-${firstBatchEnd}`);\n\n const firstResult = await this.extractFromBatch(1, firstBatchEnd);\n\n if (firstResult.hasToc && firstResult.tocMarkdown) {\n // Check if TOC continues\n if (firstResult.continuesOnNextPage && firstBatchEnd < totalPages) {\n this.log('info', 'TOC continues on next pages, extracting more');\n const continuationEnd = Math.min(\n firstBatchEnd + this.secondBatchSize,\n totalPages,\n );\n const continuationResult = await this.extractFromBatch(\n firstBatchEnd + 1,\n continuationEnd,\n );\n\n if (continuationResult.hasToc && continuationResult.tocMarkdown) {\n const merged = this.mergeMarkdown(\n firstResult.tocMarkdown,\n continuationResult.tocMarkdown,\n );\n this.aggregator!.logSummary(this.logger);\n this.log(\n 'info',\n `TOC extracted with continuation (${merged.length} chars)`,\n );\n return merged;\n }\n }\n\n this.aggregator!.logSummary(this.logger);\n this.log(\n 'info',\n `TOC found in first batch (${firstResult.tocMarkdown.length} chars)`,\n );\n return firstResult.tocMarkdown;\n }\n\n // Second batch: pages 11-20 (only if first batch didn't find TOC)\n if (firstBatchEnd < totalPages) {\n const secondBatchStart = firstBatchEnd + 1;\n const secondBatchEnd = Math.min(\n firstBatchEnd + this.secondBatchSize,\n totalPages,\n );\n\n this.log(\n 'info',\n `Searching second batch: pages ${secondBatchStart}-${secondBatchEnd}`,\n );\n\n const secondResult = await this.extractFromBatch(\n secondBatchStart,\n secondBatchEnd,\n );\n\n if (secondResult.hasToc && secondResult.tocMarkdown) {\n this.aggregator!.logSummary(this.logger);\n this.log(\n 'info',\n `TOC found in second batch (${secondResult.tocMarkdown.length} chars)`,\n );\n return secondResult.tocMarkdown;\n }\n }\n\n this.aggregator!.logSummary(this.logger);\n this.log('info', 'TOC not found in any batch');\n return null;\n }\n\n /**\n * Extract TOC from a specific batch of pages\n */\n private async extractFromBatch(\n startPage: number,\n endPage: number,\n ): Promise<VisionTocExtractionResult> {\n this.log('info', `Extracting from pages ${startPage}-${endPage}`);\n\n this.log(\n 'info',\n `Preparing images for vision analysis. 
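The two-batch search in `extract()` reduces to window arithmetic: the first window is pages `1..min(firstBatchSize, total)`, and the follow-up window (used either for a TOC continuation or as the second search batch) starts right after and is clamped to the page count. A pure-function sketch mirroring that logic:

```typescript
// Derive the search windows used by the vision TOC search.
function batchWindows(
  totalPages: number,
  firstBatchSize = 10,
  secondBatchSize = 10,
): { first: [number, number]; next: [number, number] | null } {
  const firstEnd = Math.min(firstBatchSize, totalPages);
  const next: [number, number] | null =
    firstEnd < totalPages
      ? [firstEnd + 1, Math.min(firstEnd + secondBatchSize, totalPages)]
      : null;
  return { first: [1, firstEnd], next };
}

// 15-page document: first window is pages 1-10, follow-up is 11-15.
// batchWindows(15) -> { first: [1, 10], next: [11, 15] }
// 8-page document: everything fits in the first window.
// batchWindows(8)  -> { first: [1, 8], next: null }
```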
This can be very slow (10+ minutes, sometimes 30+ minutes) depending on batch size and image resolution.`,\n );\n const imageContents = this.loadPageImages(startPage, endPage);\n\n this.log(\n 'info',\n `Calling vision LLM for TOC extraction (pages ${startPage}-${endPage})`,\n );\n const result = await LLMCaller.callVision({\n schema: VisionTocExtractionSchema,\n messages: [\n {\n role: 'user',\n content: [\n {\n type: 'text',\n text: this.buildUserPrompt(startPage, endPage),\n },\n ...imageContents,\n ],\n },\n ],\n primaryModel: this.model,\n fallbackModel: this.fallbackModel,\n maxRetries: this.maxRetries,\n temperature: this.temperature,\n abortSignal: this.abortSignal,\n component: 'VisionTocExtractor',\n phase: 'extraction',\n });\n this.log(\n 'info',\n `Vision LLM call completed (pages ${startPage}-${endPage})`,\n );\n\n this.trackUsage(result.usage);\n\n return result.output;\n }\n\n /**\n * Load page images and build message content\n */\n private loadPageImages(\n startPage: number,\n endPage: number,\n ): Array<{ type: 'image'; image: string }> {\n const imageContents: Array<{ type: 'image'; image: string }> = [];\n\n for (let pageNo = startPage; pageNo <= endPage; pageNo++) {\n // Page files are 0-indexed: page_0.png, page_1.png, etc.\n const imagePath = path.resolve(\n this.outputPath,\n `pages/page_${pageNo - 1}.png`,\n );\n const imageBuffer = fs.readFileSync(imagePath);\n const base64Image = imageBuffer.toString('base64');\n\n imageContents.push({\n type: 'image',\n image: `data:image/png;base64,${base64Image}`,\n });\n }\n\n return imageContents;\n }\n\n /**\n * Merge markdown from multiple batches\n */\n private mergeMarkdown(first: string, continuation: string): string {\n return `${first.trim()}\\n${continuation.trim()}`;\n }\n\n /**\n * Build system prompt for vision LLM (not used, but required by abstract class)\n */\n protected buildSystemPrompt(): string {\n return '';\n }\n\n /**\n * Build user prompt with page range information\n */\n protected buildUserPrompt(startPage: number, endPage: number): string {\n const pageCount = endPage - startPage + 1;\n return `You are a document analysis specialist. Your task is to find and extract the Table of Contents (TOC) from document page images.\n\nI am providing ${pageCount} document page images (pages ${startPage}-${endPage}).\n\n## Where to Look for TOC:\n- TOC typically appears in the first 10-20 pages of a document\n- Look for pages with headings like \"목차\", \"차례\", \"Contents\", \"Table of Contents\"\n- Look for structured lists with chapter titles and page numbers\n\n## What to Extract:\nExtract the TOC content as markdown format that matches this exact structure:\n- Use \"- \" prefix for each list item\n- Use 2-space indentation for hierarchy levels\n- Include \"..... \" followed by page number at the end of each entry\n- Preserve original chapter/section numbering from the document\n\n## Output Format Example:\n\\`\\`\\`\n- 제1장 서론 ..... 1\n - 1. 연구 배경 ..... 3\n - 2. 연구 목적 ..... 5\n- 제2장 연구 방법 ..... 10\n - 1. 조사 지역 ..... 10\n - 2. 조사 방법 ..... 15\n- 제3장 연구 결과 ..... 25\n\\`\\`\\`\n\n## Important Rules:\n1. Extract ONLY the main document TOC\n2. DO NOT include supplementary indices:\n - Photo indices (사진 목차, 사진목차)\n - Table indices (표 목차, 표목차)\n - Figure indices (도면 목차, 도면목차)\n3. If no TOC is found, set hasToc to false and tocMarkdown to null\n4. Set continuesOnNextPage to true if the TOC appears to continue beyond the visible pages\n\nPlease examine these pages and:\n1. 
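Two details in `loadPageImages()` are worth pinning down: page files on disk are 0-indexed (`page_0.png` is document page 1), and each image is sent inline as a base64 data URI. A minimal sketch of that conversion, assuming the same `pages/page_N.png` layout; the paths are hypothetical:

```typescript
import * as fs from 'node:fs';
import * as path from 'node:path';

// Document page N lives at pages/page_(N-1).png; embed it as a data URI.
function pageImageDataUri(outputPath: string, pageNo: number): string {
  const imagePath = path.resolve(outputPath, `pages/page_${pageNo - 1}.png`);
  const base64 = fs.readFileSync(imagePath).toString('base64');
  return `data:image/png;base64,${base64}`;
}

// pageImageDataUri('/out', 1) reads /out/pages/page_0.png
```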
Determine if any page contains a Table of Contents (TOC)\n2. If found, extract the complete TOC in markdown format\n3. Indicate if the TOC continues beyond these pages\n\nRemember: Extract the main document TOC only. Ignore photo/table/figure indices.`;\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type {\n ExtendedTokenUsage,\n LLMTokenUsageAggregator,\n} from '@heripo/shared';\nimport type { LanguageModel } from 'ai';\nimport type { z } from 'zod';\n\nimport { LLMCaller } from '@heripo/shared';\nimport * as fs from 'node:fs';\nimport * as path from 'node:path';\n\nimport {\n BaseLLMComponent,\n type BaseLLMComponentOptions,\n} from './base-llm-component';\n\n/**\n * Options for VisionLLMComponent\n */\nexport interface VisionLLMComponentOptions extends BaseLLMComponentOptions {\n // Vision components may have additional options in future\n}\n\n/**\n * Image content structure for vision LLM messages\n */\nexport interface ImageContent {\n type: 'image';\n image: string;\n}\n\n/**\n * Abstract base class for vision-based LLM components\n *\n * Extends BaseLLMComponent with helper methods for vision-based LLM calls\n * using LLMCaller.callVision().\n *\n * Subclasses: PageRangeParser, VisionTocExtractor\n */\nexport abstract class VisionLLMComponent extends BaseLLMComponent {\n protected readonly outputPath: string;\n\n constructor(\n logger: LoggerMethods,\n model: LanguageModel,\n componentName: string,\n outputPath: string,\n options?: VisionLLMComponentOptions,\n fallbackModel?: LanguageModel,\n aggregator?: LLMTokenUsageAggregator,\n ) {\n super(logger, model, componentName, options, fallbackModel, aggregator);\n this.outputPath = outputPath;\n }\n\n /**\n * Call LLM with vision capabilities using LLMCaller.callVision()\n *\n * @template TSchema - Zod schema type for response validation\n * @param schema - Zod schema for response validation\n * @param messages - Messages array including image content\n * @param phase - Phase name for tracking (e.g., 'extraction', 'sampling')\n * @returns Promise with parsed object and usage information\n */\n protected async callVisionLLM<TSchema extends z.ZodType>(\n schema: TSchema,\n messages: Array<{\n role: 'user' | 'assistant';\n content: unknown[] | string;\n }>,\n phase: string,\n ): Promise<{ output: z.infer<TSchema>; usage: ExtendedTokenUsage }> {\n const result = await LLMCaller.callVision({\n schema,\n messages,\n primaryModel: this.model,\n fallbackModel: this.fallbackModel,\n maxRetries: this.maxRetries,\n temperature: this.temperature,\n abortSignal: this.abortSignal,\n component: this.componentName,\n phase,\n });\n\n this.trackUsage(result.usage);\n\n return {\n output: result.output as z.infer<TSchema>,\n usage: result.usage,\n };\n }\n\n /**\n * Load an image file and encode it as base64\n *\n * @param imagePath - Absolute path to the image file\n * @returns Base64 encoded image string\n */\n protected loadImageAsBase64(imagePath: string): string {\n const imageBuffer = fs.readFileSync(imagePath);\n return imageBuffer.toString('base64');\n }\n\n /**\n * Build image content object for vision LLM messages\n *\n * @param imagePath - Path to the image file (relative to outputPath or absolute)\n * @param mimeType - MIME type of the image (default: 'image/png')\n * @returns ImageContent object for LLM message\n */\n protected buildImageContent(\n imagePath: string,\n mimeType: string = 'image/png',\n ): ImageContent {\n const absolutePath = path.isAbsolute(imagePath)\n ? 
imagePath\n : path.resolve(this.outputPath, imagePath);\n const base64Image = this.loadImageAsBase64(absolutePath);\n return {\n type: 'image',\n image: `data:${mimeType};base64,${base64Image}`,\n };\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type { Caption } from '@heripo/model';\nimport type { LLMTokenUsageAggregator } from '@heripo/shared';\nimport type { LanguageModel } from 'ai';\n\nimport {\n BatchProcessor,\n LLMCaller,\n LLMTokenUsageAggregator as LLMTokenUsageAggregatorClass,\n} from '@heripo/shared';\nimport { z } from 'zod';\n\nimport {\n type BaseLLMComponentOptions,\n TextLLMComponent,\n} from '../core/text-llm-component';\n\n/**\n * CaptionParser options\n */\nexport interface CaptionParserOptions extends BaseLLMComponentOptions {\n /**\n * Custom component name for token usage tracking.\n * Defaults to 'CaptionParser'.\n */\n componentName?: string;\n}\n\n/**\n * Schema for a single caption extraction result (used for sequential processing)\n */\nconst CaptionSingleSchema = z.object({\n num: z\n .string()\n .nullable()\n .describe('Extracted caption prefix + number (e.g., \"도판 1\", \"Figure 2\")'),\n});\n\n/**\n * Schema for a single caption extraction result with index (used for batch processing)\n */\nconst CaptionExtractionSchema = z.object({\n index: z.number().int().describe('Index of the caption in the input array'),\n num: z\n .string()\n .nullable()\n .describe('Extracted caption prefix + number (e.g., \"도판 1\", \"Figure 2\")'),\n});\n\n/**\n * Schema for batch caption response\n */\nconst CaptionBatchSchema = z.object({\n results: z.array(CaptionExtractionSchema),\n});\n\n/**\n * CaptionParser\n *\n * Extracts caption prefix and number from image/table captions using LLM.\n * Preserves original spacing from input text.\n * Extends TextLLMComponent for standardized LLM call handling.\n *\n * ## Algorithm\n *\n * 1. Collect caption texts\n * 2. Split into batches based on batchSize\n * 3. For each batch: call LLM to extract caption prefix + number\n * 4. Flatten results and return\n */\nexport class CaptionParser extends TextLLMComponent {\n constructor(\n logger: LoggerMethods,\n model: LanguageModel,\n options?: CaptionParserOptions,\n fallbackModel?: LanguageModel,\n aggregator?: LLMTokenUsageAggregator,\n ) {\n super(\n logger,\n model,\n options?.componentName ?? 'CaptionParser',\n options,\n fallbackModel,\n aggregator ?? new LLMTokenUsageAggregatorClass(),\n );\n }\n\n /**\n * Parse batch of captions\n *\n * @param captions - Array of caption full texts\n * @param batchSize - Batch size for processing. Set to 0 for sequential processing without batching.\n * @param overrideModel - Optional model to use instead of the default model\n * @returns Array of Caption objects with num extracted (maintains original order)\n */\n async parseBatch(\n captions: string[],\n batchSize: number,\n overrideModel?: LanguageModel,\n ): Promise<Caption[]> {\n const effectiveModel = overrideModel ?? this.model;\n const isOverride = overrideModel !== undefined;\n const modelName =\n (effectiveModel as { modelId?: string }).modelId ??\n (effectiveModel as { id?: string }).id ??\n 'unknown';\n this.log(\n 'info',\n `Starting caption parsing for ${captions.length} captions with ${isOverride ? 
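`buildImageContent()` accepts either an absolute path or a path relative to the component's `outputPath`; the resolution step is a one-liner, sketched here in isolation:

```typescript
import * as path from 'node:path';

// Absolute paths pass through; relative paths are anchored at outputPath.
function resolveImagePath(outputPath: string, imagePath: string): string {
  return path.isAbsolute(imagePath)
    ? imagePath
    : path.resolve(outputPath, imagePath);
}

// resolveImagePath('/out', 'pages/page_0.png') -> '/out/pages/page_0.png'
// resolveImagePath('/out', '/tmp/x.png')       -> '/tmp/x.png'
```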
'override ' : ''}model: ${modelName}`,\n );\n\n if (captions.length === 0) {\n this.log('info', 'No captions to parse');\n return [];\n }\n\n try {\n if (batchSize === 0) {\n // Sequential processing (one-by-one) without batch processing\n this.log('info', 'Using sequential processing (batchSize=0)');\n const results: Caption[] = [];\n\n for (let i = 0; i < captions.length; i++) {\n const fullText = captions[i];\n\n // Log progress\n this.log('info', `Processing ${i + 1} / ${captions.length}...`);\n\n const result = await LLMCaller.call({\n schema: CaptionSingleSchema,\n systemPrompt: this.buildSystemPrompt('single'),\n userPrompt: this.buildUserPromptSingle(fullText),\n primaryModel: effectiveModel,\n fallbackModel: this.fallbackModel,\n maxRetries: this.maxRetries,\n temperature: this.temperature,\n abortSignal: this.abortSignal,\n component: this.componentName,\n phase: 'caption-extraction',\n });\n\n this.trackUsage(result.usage);\n\n const finalNum = this.extractNumFromFullText(\n fullText,\n result.output.num,\n );\n results.push({ fullText, num: finalNum });\n }\n\n // Log token usage summary\n this.aggregator!.logSummary(this.logger);\n\n this.log(\n 'info',\n `Completed: ${results.length} captions parsed, ${results.filter((r) => r.num).length} with extracted numbers`,\n );\n\n return results;\n }\n\n // Batch processing: Convert to indexed format for batch processing\n const indexedCaptions = captions.map((text, index) => ({ index, text }));\n\n // Use BatchProcessor to process captions in parallel batches\n const batchResults = await BatchProcessor.processBatch(\n indexedCaptions,\n batchSize,\n async (batch) => this.parseBatchInternal(batch, effectiveModel),\n );\n\n // Sort results by original index to maintain order\n batchResults.sort((a, b) => a.index - b.index);\n const results = batchResults.map((r) => r.caption);\n\n // Log token usage summary\n this.aggregator!.logSummary(this.logger);\n\n this.log(\n 'info',\n `Completed: ${results.length} captions parsed, ${results.filter((r) => r.num).length} with extracted numbers`,\n );\n\n return results;\n } catch (error) {\n const message = error instanceof Error ? error.message : String(error);\n this.log('error', `Parsing failed: ${message}`);\n throw new CaptionParseError(`Failed to parse captions: ${message}`, {\n cause: error,\n });\n }\n }\n\n /**\n * Internal: Parse batch of captions using LLM\n *\n * @param captions - Batch of caption texts with original indices\n * @param model - Effective model to use\n * @returns Array of Caption objects indexed correctly\n */\n private async parseBatchInternal(\n captions: Array<{ index: number; text: string }>,\n model: LanguageModel,\n ): Promise<Array<{ index: number; caption: Caption }>> {\n const result = await LLMCaller.call({\n schema: CaptionBatchSchema,\n systemPrompt: this.buildSystemPrompt(),\n userPrompt: this.buildUserPrompt(captions),\n primaryModel: model,\n fallbackModel: this.fallbackModel,\n maxRetries: this.maxRetries,\n temperature: this.temperature,\n abortSignal: this.abortSignal,\n component: this.componentName,\n phase: 'caption-extraction',\n });\n\n // Track token usage\n this.trackUsage(result.usage);\n\n // Warn if LLM returned incomplete results (fewer results than inputs)\n if (result.output.results.length !== captions.length) {\n this.log(\n 'warn',\n `LLM returned ${result.output.results.length} results for ${captions.length} captions. 
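The batch path of `parseBatch()` tags each caption with its original index before splitting into parallel batches, then sorts the flattened results back into input order. A self-contained sketch of that index-preserving pipeline; `processItems` stands in for the per-batch LLM call, and `batchSize > 0` is assumed:

```typescript
// Tag with original index, process batches in parallel, restore order.
async function parseInOrder<T>(
  items: string[],
  batchSize: number,
  processItems: (
    batch: Array<{ index: number; text: string }>,
  ) => Promise<Array<{ index: number; result: T }>>,
): Promise<T[]> {
  const indexed = items.map((text, index) => ({ index, text }));

  const batches: Array<typeof indexed> = [];
  for (let i = 0; i < indexed.length; i += batchSize) {
    batches.push(indexed.slice(i, i + batchSize));
  }

  const settled = (await Promise.all(batches.map(processItems))).flat();
  settled.sort((a, b) => a.index - b.index); // back to input order
  return settled.map((r) => r.result);
}
```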
` +\n `This may cause index mismatch.`,\n );\n }\n\n // Map LLM results back to original indices\n const captionMap = new Map(captions.map((c) => [c.index, c.text]));\n\n return result.output.results.map((resultItem) => {\n // resultItem.index is the position within this batch (0, 1, 2...)\n // We need to use the original caption at that position to get the actual index\n const originalCaption = captions[resultItem.index];\n const originalIndex = originalCaption?.index ?? resultItem.index;\n const fullText = captionMap.get(originalIndex) || '';\n const finalNum = this.extractNumFromFullText(fullText, resultItem.num);\n\n return {\n index: originalIndex,\n caption: {\n fullText,\n num: finalNum,\n },\n };\n });\n }\n\n /**\n * Extract and normalize caption number from full text\n *\n * Finds the extracted num pattern in the full text and extracts it\n * with original casing. Handles case-insensitive matching.\n *\n * @param fullText - The full caption text\n * @param extractedNum - The num extracted by LLM (may have different casing)\n * @returns Normalized num or undefined if no match\n */\n private extractNumFromFullText(\n fullText: string,\n extractedNum: string | null,\n ): string | undefined {\n if (!extractedNum) return undefined;\n\n let matchIndex = fullText.indexOf(extractedNum);\n\n if (matchIndex === -1) {\n // Pattern not found directly - try case-insensitive search\n const lowerFullText = fullText.toLowerCase();\n const lowerNum = extractedNum.toLowerCase();\n matchIndex = lowerFullText.indexOf(lowerNum);\n\n if (matchIndex !== -1) {\n // Found case-insensitive match - extract from match position using original casing\n return fullText.substring(matchIndex, matchIndex + extractedNum.length);\n }\n // If still not found, keep the original extracted num\n return extractedNum;\n }\n\n // Found the pattern - extract from match position to end of the matched pattern\n return fullText.substring(matchIndex, matchIndex + extractedNum.length);\n }\n\n /**\n * Build system prompt for caption parsing\n *\n * @param mode - 'batch' for multiple captions, 'single' for single caption\n */\n protected buildSystemPrompt(mode: 'batch' | 'single' = 'batch'): string {\n const intro =\n mode === 'batch'\n ? 'Extract the caption prefix and number (e.g., \"도판 1\", \"Figure 2\") from image/table captions.\\nReturn the prefix + number part as a string, or null if no number exists.'\n : 'Extract the caption prefix and number (e.g., \"도판 1\", \"Figure 2\") from an image/table caption.\\nReturn the prefix + number part as a string, or null if no number exists.';\n\n return `You are a caption prefix extractor for archaeological excavation reports.\n\n${intro}\n\nRules:\n1. Extract if the text follows a caption pattern: <prefix word(s)> <number>\n - The prefix can be ANY Korean/English word(s) that label images/tables/figures\n - Common examples: 도판, 사진, 그림, 도면, 표, 원색사진, 흑백사진, Figure, Photo, Plate, etc.\n - The key is the PATTERN (text followed by number), not a specific word list\n - \"원색사진 1. 조사지역\" → \"원색사진 1\" (valid: prefix + number pattern)\n - \"흑백사진 2 출토유물\" → \"흑백사진 2\" (valid: prefix + number pattern)\n2. IGNORE leading punctuation/brackets when extracting:\n - \"(사진 16> 느티나무\" → \"사진 16\" (ignore leading '(' and extract the pattern inside)\n - \"<도판 1> 유적\" → \"도판 1\" (ignore angle brackets)\n - \"[그림 2] 전경\" → \"그림 2\" (ignore square brackets)\n3. Do NOT extract (return null) if:\n - It's a numbered list item starting with just a number: \"1. 
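`extractNumFromFullText()` exists because the LLM may return the right substring with the wrong casing; the fix is to re-locate the substring in the original caption and return it with the source casing. A standalone sketch of the same recovery:

```typescript
// If "figure 3" comes back for a caption reading "Figure 3: Site plan",
// re-extract from the original text so the source casing is preserved.
function extractNum(
  fullText: string,
  extractedNum: string | null,
): string | undefined {
  if (!extractedNum) return undefined;

  let at = fullText.indexOf(extractedNum);
  if (at === -1) {
    at = fullText.toLowerCase().indexOf(extractedNum.toLowerCase());
    if (at === -1) return extractedNum; // not found at all: keep the LLM value
  }
  return fullText.substring(at, at + extractedNum.length);
}

// extractNum('Figure 3: Site plan', 'figure 3') -> 'Figure 3'
```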
유적 전경\" → null\n - It's a date/time reference: \"39 3월 28일...\" → null\n - It's a year reference: \"2024년 조사 현황\" → null\n - It starts with a number without a prefix: \"123 설명\" → null\n4. PRESERVE original spacing from the input text exactly (after ignoring leading punctuation)\n5. Include the full number (e.g., \"1-2\", \"3a\") not just the first digit\n6. Include period/dot after number if it directly follows (e.g., \"3.6\" → \"도판 3.6\")\n - \"그림 3.6. 한반도 중부\" → \"그림 3.6\" (period after decimal number included)\n - \"도판 2. 유적\" → \"도판 2\" (period after space NOT included)\n7. Stop at the first punctuation (except decimal point), whitespace, or underscore after the number\n - \"사진 1_ㅇㅇㅇ\" → \"사진 1\" (stop at underscore)\n - \"사진 1 ㅇㅇㅇ\" → \"사진 1\" (stop at space)\n - \"그림 3.6. 한반도\" → \"그림 3.6\" (period included as decimal separator)\n\nExamples:\n- \"도판 1 유적 전경\" → \"도판 1\"\n- \"원색사진 1. 조사지역 원경\" → \"원색사진 1\"\n- \"흑백사진 2 출토유물\" → \"흑백사진 2\"\n- \"(사진 16> 느티나무의 접선단면\" → \"사진 16\" (ignore leading punctuation)\n- \"<도판 3> 유물 사진\" → \"도판 3\" (ignore angle brackets)\n- \"도판1 어쩌구\" → \"도판1\" (no space preserved)\n- \"사진 2. 출토 유물\" → \"사진 2\" (period after space, not included)\n- \"그림 3.6. 한반도 중부 및 남부의 ㅇㅇㅇ\" → \"그림 3.6\" (period as decimal included)\n- \"Figure 3: Site plan\" → \"Figure 3\"\n- \"Table 4a. Artifact list\" → \"Table 4a\"\n- \"도판 5-2 층위 단면\" → \"도판 5-2\"\n- \"설명 없는 이미지\" → null\n- \"39 3월 28일(백제 도로유구 내부 조사)\" → null (starts with number, no prefix)\n- \"1. 유구 현황\" → null (numbered list, not caption)\n- \"2024-05-01 촬영\" → null (date, not caption)`;\n }\n\n /**\n * Build user prompt for caption parsing\n */\n protected buildUserPrompt(\n captions: Array<{ index: number; text: string }>,\n ): string {\n const captionList = captions\n .map((c) => `[${c.index}] ${c.text}`)\n .join('\\n');\n\n return `Extract caption prefix and number from the following captions:\n\n${captionList}\n\nReturn the results as JSON array with \"index\" (original position) and \"num\" (extracted prefix + number or null).\n\nExample format:\n[\n { \"index\": 0, \"num\": \"도판 1\" },\n { \"index\": 1, \"num\": \"Figure 2\" },\n { \"index\": 2, \"num\": null }\n]`;\n }\n\n /**\n * Build user prompt for single caption parsing\n */\n private buildUserPromptSingle(caption: string): string {\n return `Extract caption prefix and number from the following caption:\n\n\"${caption}\"\n\nCRITICAL: Return ONLY the JSON object directly with a \"num\" field.\n- DO NOT wrap the JSON in quotes or additional formatting\n- DO NOT output \"final:\", \"result:\", or any prefix labels\n- DO NOT wrap in backticks or code blocks\n- Return ONLY valid JSON: { \"num\": value }\n\nThe value must be:\n- A string with the extracted caption prefix + number (e.g., \"도판 1\", \"Figure 2\")\n- null if no number exists\n\nValid outputs:\n{ \"num\": \"도판 1\" }\n{ \"num\": null }\n\nInvalid outputs (NEVER do these):\n- { \"final\": \"...\" } ❌\n- \\`\\`\\`json { \"num\": \"...\" } \\`\\`\\` ❌\n- \"{ \"num\": \"...\" }\" ❌\n- { \"num\": { \"value\": \"...\" } } ❌`;\n }\n}\n\n/**\n * Error thrown when caption parsing fails\n */\nexport class CaptionParseError extends Error {\n constructor(message: string, options?: ErrorOptions) {\n super(message, options);\n this.name = 'CaptionParseError';\n }\n}\n","/**\n * PageRangeParseError\n *\n * Custom error thrown when page range parsing fails.\n */\nexport class PageRangeParseError extends Error {\n constructor(message: string, options?: ErrorOptions) {\n super(message, options);\n this.name = 
'PageRangeParseError';\n }\n\n /**\n * Extract error message from unknown error type\n */\n static getErrorMessage(error: unknown): string {\n return error instanceof Error ? error.message : String(error);\n }\n\n /**\n * Create PageRangeParseError from unknown error with context\n */\n static fromError(context: string, error: unknown): PageRangeParseError {\n return new PageRangeParseError(\n `${context}: ${PageRangeParseError.getErrorMessage(error)}`,\n { cause: error },\n );\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type { DoclingDocument, DoclingPage, PageRange } from '@heripo/model';\nimport type {\n ExtendedTokenUsage,\n LLMTokenUsageAggregator,\n} from '@heripo/shared';\nimport type { LanguageModel } from 'ai';\n\nimport type { PageSizeGroup } from '../types';\n\nimport {\n LLMCaller,\n LLMTokenUsageAggregator as LLMTokenUsageAggregatorClass,\n} from '@heripo/shared';\nimport * as fs from 'node:fs';\nimport * as path from 'node:path';\nimport { z } from 'zod';\n\nimport { VisionLLMComponent } from '../core/vision-llm-component';\nimport { PageRangeParseError } from './page-range-parse-error';\n\n/**\n * Pattern types for page number sequences\n */\nexport enum PagePattern {\n /** Simple increment: [1, 2, 3, 4, ...] */\n SIMPLE_INCREMENT = 'simple_increment',\n /** Double-sided scan: [1-2, 3-4, 5-6, ...] */\n DOUBLE_SIDED = 'double_sided',\n /** Offset pattern: PDF page != actual page (consistent offset) */\n OFFSET = 'offset',\n /** No clear pattern detected */\n UNKNOWN = 'unknown',\n}\n\n/**\n * Pattern analysis result\n */\ninterface PatternAnalysis {\n pattern: PagePattern;\n offset: number;\n increment: number;\n}\n\n/**\n * Sample extraction result from Vision LLM\n */\ninterface SampleResult {\n pdfPageNo: number;\n startPageNo: number | null;\n endPageNo: number | null;\n}\n\n/**\n * PageRangeParser\n *\n * Extracts actual document page numbers from PDF page images using Vision LLM.\n * Uses random sampling + pattern detection to minimize LLM calls.\n * Extends VisionLLMComponent for standardized vision LLM call handling.\n *\n * ## Algorithm\n *\n * 1. Group pages by size (consecutive pages with same dimensions)\n * 2. For each group:\n * - If ≤3 pages: send all to LLM at once\n * - If >3 pages: random sample 3 pages, detect pattern, apply to all\n * 3. Post-process: handle drops, normalize negatives, backfill failed pages\n */\nexport class PageRangeParser extends VisionLLMComponent {\n // Configuration constants\n private readonly SAMPLE_SIZE = 3;\n private readonly MAX_PATTERN_RETRIES = 19;\n private readonly SIZE_TOLERANCE = 5.0;\n\n constructor(\n logger: LoggerMethods,\n model: LanguageModel,\n outputPath: string,\n maxRetries: number = 3,\n fallbackModel?: LanguageModel,\n aggregator?: LLMTokenUsageAggregator,\n abortSignal?: AbortSignal,\n ) {\n super(\n logger,\n model,\n 'PageRangeParser',\n outputPath,\n { maxRetries, abortSignal },\n fallbackModel,\n aggregator ?? 
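The cheapest pattern to verify is `SIMPLE_INCREMENT`: between any two samples, the printed page number must grow by exactly the PDF-page distance, and the shared offset then maps every remaining page in the group without further LLM calls. A worked sketch of that check in isolation:

```typescript
// Samples are (pdfPageNo, printed startPageNo) pairs from the vision LLM.
interface Sample {
  pdfPageNo: number;
  startPageNo: number;
}

function detectSimpleIncrement(samples: Sample[]): { offset: number } | null {
  const sorted = [...samples].sort((a, b) => a.pdfPageNo - b.pdfPageNo);
  for (let i = 1; i < sorted.length; i++) {
    const pdfDiff = sorted[i].pdfPageNo - sorted[i - 1].pdfPageNo;
    if (sorted[i].startPageNo !== sorted[i - 1].startPageNo + pdfDiff) {
      return null; // not a simple increment
    }
  }
  return { offset: sorted[0].startPageNo - sorted[0].pdfPageNo };
}

// PDF pages 5, 9, 14 showing printed pages 3, 7, 12 -> offset -2,
// so every PDF page N in the group maps to printed page N - 2.
detectSimpleIncrement([
  { pdfPageNo: 5, startPageNo: 3 },
  { pdfPageNo: 9, startPageNo: 7 },
  { pdfPageNo: 14, startPageNo: 12 },
]); // -> { offset: -2 }
```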
new LLMTokenUsageAggregatorClass(),\n );\n }\n\n /**\n * Main parse method\n *\n * Extracts page range mapping from DoclingDocument using Vision LLM.\n * Automatically tracks token usage in the aggregator if one was provided.\n *\n * @param doclingDoc - DoclingDocument to extract page ranges from\n * @returns Object with page range mapping and token usage information\n */\n async parse(doclingDoc: DoclingDocument): Promise<{\n pageRangeMap: Record<number, PageRange>;\n usage: ExtendedTokenUsage[];\n }> {\n this.log('info', 'Starting page range parsing...');\n\n // Step 1: Extract and group pages by size\n const pages = this.extractPages(doclingDoc);\n if (pages.length === 0) {\n this.log('warn', 'No pages found');\n const emptyUsage = this.createEmptyUsage('sampling');\n this.trackUsage(emptyUsage);\n return {\n pageRangeMap: {},\n usage: [emptyUsage],\n };\n }\n\n const sizeGroups = this.analyzeSizes(pages);\n this.log(\n 'info',\n `Found ${sizeGroups.length} size group(s), total ${pages.length} pages`,\n );\n\n // Step 2: Process each size group\n const pageRangeMap: Record<number, PageRange> = {};\n const usageList: ExtendedTokenUsage[] = [];\n\n for (let i = 0; i < sizeGroups.length; i++) {\n const group = sizeGroups[i];\n this.log(\n 'info',\n `Processing group ${i + 1}/${sizeGroups.length}: ${group.pageNos.length} pages`,\n );\n\n const groupResult = await this.processGroup(pages, group, this.model);\n Object.assign(pageRangeMap, groupResult.pageRangeMap);\n usageList.push(...groupResult.usage);\n }\n\n // Step 3: Track all usage in aggregator\n for (const usage of usageList) {\n this.trackUsage(usage);\n }\n\n // Step 4: Post-processing\n this.postProcess(pageRangeMap);\n\n this.log(\n 'info',\n `Completed: ${Object.keys(pageRangeMap).length} pages mapped`,\n );\n\n return { pageRangeMap, usage: usageList };\n }\n\n /**\n * Extract pages array from DoclingDocument\n */\n private extractPages(doclingDoc: DoclingDocument): DoclingPage[] {\n const pageKeys = Object.keys(doclingDoc.pages)\n .map(Number)\n .filter((n) => !Number.isNaN(n))\n .sort((a, b) => a - b);\n\n return pageKeys.map((key) => doclingDoc.pages[String(key)]);\n }\n\n /**\n * Analyze page sizes and group consecutive pages with same dimensions\n */\n private analyzeSizes(pages: DoclingPage[]): PageSizeGroup[] {\n const groups: PageSizeGroup[] = [];\n let currentGroup: PageSizeGroup | null = null;\n\n for (const page of pages) {\n const sizeKey = this.createSizeKey(page.size.width, page.size.height);\n\n if (!currentGroup || currentGroup.sizeKey !== sizeKey) {\n // Start new group\n currentGroup = { sizeKey, pageNos: [page.page_no] };\n groups.push(currentGroup);\n } else {\n // Add to current group\n currentGroup.pageNos.push(page.page_no);\n }\n }\n\n return groups;\n }\n\n /**\n * Create size key with tolerance for floating point comparison\n */\n private createSizeKey(width: number, height: number): string {\n const roundedWidth = Math.round(width / this.SIZE_TOLERANCE);\n const roundedHeight = Math.round(height / this.SIZE_TOLERANCE);\n return `${roundedWidth}x${roundedHeight}`;\n }\n\n /**\n * Process a single size group\n */\n private async processGroup(\n pages: DoclingPage[],\n group: PageSizeGroup,\n model: LanguageModel,\n ): Promise<{\n pageRangeMap: Record<number, PageRange>;\n usage: ExtendedTokenUsage[];\n }> {\n const { pageNos } = group;\n const usageList: ExtendedTokenUsage[] = [];\n\n // Special case: 3 or fewer pages - send all at once\n if (pageNos.length <= this.SAMPLE_SIZE) {\n this.log(\n 'info',\n 
`Small group (${pageNos.length} pages), extracting all at once`,\n );\n const result = await this.extractMultiplePages(pages, pageNos, model);\n usageList.push(result.usage);\n return {\n pageRangeMap: this.samplesToMap(result.samples),\n usage: usageList,\n };\n }\n\n // Larger groups: random sampling + pattern detection\n const sampledPages = new Set<number>();\n\n for (let attempt = 0; attempt <= this.MAX_PATTERN_RETRIES; attempt++) {\n // Select 3 random pages (excluding previously sampled if possible)\n const samplePageNos = this.selectRandomSamples(\n pageNos,\n this.SAMPLE_SIZE,\n sampledPages,\n );\n\n // Track which pages we've sampled\n for (const p of samplePageNos) {\n sampledPages.add(p);\n }\n\n this.log(\n 'info',\n `Attempt ${attempt + 1}/${this.MAX_PATTERN_RETRIES + 1}: sampling pages ${samplePageNos.join(', ')}`,\n );\n\n // Send all 3 images at once to Vision LLM\n const result = await this.extractMultiplePages(\n pages,\n samplePageNos,\n model,\n );\n usageList.push(result.usage);\n const samples = result.samples;\n\n // Try to detect pattern\n const pattern = this.detectPattern(samples);\n\n if (pattern.pattern !== PagePattern.UNKNOWN) {\n // Pattern found! Apply to all pages\n this.log(\n 'info',\n `Pattern detected: ${pattern.pattern} (offset=${pattern.offset}, increment=${pattern.increment})`,\n );\n return {\n pageRangeMap: this.applyPattern(pageNos, pattern),\n usage: usageList,\n };\n }\n\n // Pattern not found - log and retry\n this.log(\n 'warn',\n `Pattern detection failed, attempt ${attempt + 1}/${this.MAX_PATTERN_RETRIES + 1}`,\n );\n }\n\n // All retries exhausted - throw error\n throw new PageRangeParseError(\n `Failed to detect page pattern after ${this.MAX_PATTERN_RETRIES + 1} attempts for size group with ${pageNos.length} pages`,\n );\n }\n\n /**\n * Select random samples from page numbers\n */\n private selectRandomSamples(\n pageNos: number[],\n count: number,\n exclude: Set<number> = new Set(),\n ): number[] {\n // Get available pages (not previously sampled)\n const available = pageNos.filter((p) => !exclude.has(p));\n\n // If not enough unsampled pages, allow reuse\n const pool = available.length >= count ? 
available : pageNos;\n\n // Fisher-Yates shuffle for random selection\n const shuffled = [...pool];\n for (let i = shuffled.length - 1; i > 0; i--) {\n const j = Math.floor(Math.random() * (i + 1));\n [shuffled[i], shuffled[j]] = [shuffled[j], shuffled[i]];\n }\n\n // Return first 'count' elements, sorted by page number for consistency\n return shuffled.slice(0, count).sort((a, b) => a - b);\n }\n\n /**\n * Extract page numbers from multiple pages in a single LLM call\n */\n private async extractMultiplePages(\n pages: DoclingPage[],\n pageNos: number[],\n model: LanguageModel,\n ): Promise<{ samples: SampleResult[]; usage: ExtendedTokenUsage }> {\n this.log('info', `Extracting ${pageNos.length} pages in single LLM call`);\n\n // Build image content array\n const imageContents: Array<{ type: 'image'; image: string }> = [];\n\n for (const pageNo of pageNos) {\n const page = pages[pageNo - 1];\n const imagePath = path.resolve(this.outputPath, page.image.uri);\n const imageBuffer = fs.readFileSync(imagePath);\n const base64Image = imageBuffer.toString('base64');\n const mimeType = page.image.mimetype || 'image/png';\n\n imageContents.push({\n type: 'image',\n image: `data:${mimeType};base64,${base64Image}`,\n });\n }\n\n // Build schema for multi-page response\n const schema = z.object({\n pages: z\n .array(\n z.object({\n imageIndex: z\n .number()\n .describe('0-based index of the image in the request'),\n startPageNo: z\n .number()\n .nullable()\n .describe('Start page number (null if not found)'),\n endPageNo: z\n .number()\n .nullable()\n .describe(\n 'End page number for double-sided scans (null for single page)',\n ),\n }),\n )\n .describe('Extracted page numbers for each image'),\n });\n\n try {\n const result = await LLMCaller.callVision({\n schema,\n messages: [\n {\n role: 'user',\n content: [\n { type: 'text', text: this.buildUserPrompt(pageNos) },\n ...imageContents,\n ],\n },\n ],\n primaryModel: model,\n fallbackModel: this.fallbackModel,\n maxRetries: this.maxRetries,\n temperature: 0,\n abortSignal: this.abortSignal,\n component: 'PageRangeParser',\n phase: 'sampling',\n });\n\n // Convert response to SampleResult array\n const samples = result.output.pages.map((p) => ({\n pdfPageNo: pageNos[p.imageIndex],\n startPageNo: p.startPageNo,\n endPageNo: p.endPageNo,\n }));\n\n return { samples, usage: result.usage };\n } catch (error) {\n this.log('error', 'Multi-image extraction failed:', error);\n throw PageRangeParseError.fromError(\n 'Multi-image extraction failed',\n error,\n );\n }\n }\n\n /**\n * Detect pattern from sample results\n */\n private detectPattern(samples: SampleResult[]): PatternAnalysis {\n // Filter out null results\n const validSamples = samples.filter((s) => s.startPageNo !== null);\n\n if (validSamples.length < 2) {\n return { pattern: PagePattern.UNKNOWN, offset: 0, increment: 1 };\n }\n\n // Sort by PDF page number\n validSamples.sort((a, b) => a.pdfPageNo - b.pdfPageNo);\n\n // Check for SIMPLE_INCREMENT pattern\n const isSimple = validSamples.every((s, i) => {\n // startPageNo should equal endPageNo (or endPageNo is null)\n if (s.endPageNo !== null && s.startPageNo !== s.endPageNo) return false;\n if (i === 0) return true;\n const prev = validSamples[i - 1];\n const expectedIncrease = s.pdfPageNo - prev.pdfPageNo;\n return s.startPageNo === prev.startPageNo! + expectedIncrease;\n });\n\n if (isSimple) {\n const firstSample = validSamples[0];\n const offset = firstSample.startPageNo! 
- firstSample.pdfPageNo;\n return { pattern: PagePattern.SIMPLE_INCREMENT, offset, increment: 1 };\n }\n\n // Check for DOUBLE_SIDED pattern\n // Each PDF page contains 2 actual pages: [startPageNo, startPageNo+1]\n // Formula: startPageNo = pdfPageNo * 2 + offset (where offset is usually -1 for 1-based)\n const isDoubleSided = validSamples.every((s, i) => {\n // Each page must have endPageNo = startPageNo + 1\n if (s.endPageNo === null) return false;\n if (s.endPageNo !== s.startPageNo! + 1) return false;\n if (i === 0) return true;\n\n // For non-consecutive samples, check the formula consistency\n // startPageNo should follow: pdfPageNo * 2 + offset\n const prev = validSamples[i - 1];\n const pdfDiff = s.pdfPageNo - prev.pdfPageNo;\n const expectedStartDiff = pdfDiff * 2; // Each PDF page = 2 actual pages\n const actualStartDiff = s.startPageNo! - prev.startPageNo!;\n return actualStartDiff === expectedStartDiff;\n });\n\n if (isDoubleSided) {\n const firstSample = validSamples[0];\n const offset = firstSample.startPageNo! - firstSample.pdfPageNo * 2;\n return { pattern: PagePattern.DOUBLE_SIDED, offset, increment: 2 };\n }\n\n // Check for OFFSET pattern (consistent offset with ±1 tolerance)\n const offsets = validSamples.map((s) => s.startPageNo! - s.pdfPageNo);\n const avgOffset = Math.round(\n offsets.reduce((a, b) => a + b, 0) / offsets.length,\n );\n const isConsistentOffset = offsets.every(\n (o) => Math.abs(o - avgOffset) <= 1,\n );\n\n if (isConsistentOffset) {\n return { pattern: PagePattern.OFFSET, offset: avgOffset, increment: 1 };\n }\n\n return { pattern: PagePattern.UNKNOWN, offset: 0, increment: 1 };\n }\n\n /**\n * Apply detected pattern to generate page range map\n */\n private applyPattern(\n pageNos: number[],\n pattern: PatternAnalysis,\n ): Record<number, PageRange> {\n const result: Record<number, PageRange> = {};\n\n for (const pdfPageNo of pageNos) {\n switch (pattern.pattern) {\n case PagePattern.SIMPLE_INCREMENT:\n case PagePattern.OFFSET: {\n const pageNo = pdfPageNo + pattern.offset;\n result[pdfPageNo] = {\n startPageNo: pageNo,\n endPageNo: pageNo,\n };\n break;\n }\n\n case PagePattern.DOUBLE_SIDED: {\n const start = pdfPageNo * 2 + pattern.offset;\n result[pdfPageNo] = {\n startPageNo: start,\n endPageNo: start + 1,\n };\n break;\n }\n\n default:\n result[pdfPageNo] = { startPageNo: 0, endPageNo: 0 };\n }\n }\n\n return result;\n }\n\n /**\n * Convert sample results to page range map (for small groups)\n */\n private samplesToMap(samples: SampleResult[]): Record<number, PageRange> {\n const result: Record<number, PageRange> = {};\n\n for (const sample of samples) {\n if (sample.startPageNo !== null) {\n result[sample.pdfPageNo] = {\n startPageNo: sample.startPageNo,\n endPageNo: sample.endPageNo ?? sample.startPageNo,\n };\n } else {\n result[sample.pdfPageNo] = { startPageNo: 0, endPageNo: 0 };\n }\n }\n\n return result;\n }\n\n /**\n * Post-process the page range map\n */\n private postProcess(pageRangeMap: Record<number, PageRange>): void {\n // Order matters:\n // 1. Detect outliers (abnormally high values at beginning)\n // 2. Handle drops\n // 3. Normalize negatives\n // 4. 
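The `DOUBLE_SIDED` formula is worth seeing with numbers: each PDF image holds two printed pages, so `startPageNo = pdfPageNo * 2 + offset`. A scan whose first image shows printed pages 1-2 gives `offset = 1 - 1*2 = -1`, and `applyPattern()` then projects that onto the whole group:

```typescript
// Worked DOUBLE_SIDED example with offset -1 (first image = pages 1-2).
const offset = -1;

function doubleSidedRange(pdfPageNo: number): {
  startPageNo: number;
  endPageNo: number;
} {
  const start = pdfPageNo * 2 + offset;
  return { startPageNo: start, endPageNo: start + 1 };
}

// doubleSidedRange(1) -> { startPageNo: 1, endPageNo: 2 }
// doubleSidedRange(3) -> { startPageNo: 5, endPageNo: 6 }
```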
Backfill failed pages\n this.detectAndHandleOutliers(pageRangeMap);\n this.detectAndHandleDrops(pageRangeMap);\n this.normalizeNegatives(pageRangeMap);\n this.backfillFailedPages(pageRangeMap);\n }\n\n /**\n * Detect and handle outlier page numbers at the beginning of document\n *\n * When early PDF pages have abnormally high page numbers compared to\n * subsequent pages (e.g., PDF 1-9 = 75-83, but PDF 10+ = 2,3,4...),\n * the LLM likely misread figure/photo numbers as page numbers.\n *\n * Detection: If page numbers at the beginning are significantly higher\n * than subsequent pages (which follow a normal pattern), mark them as failed.\n */\n private detectAndHandleOutliers(\n pageRangeMap: Record<number, PageRange>,\n ): void {\n const pdfPages = Object.keys(pageRangeMap)\n .map(Number)\n .sort((a, b) => a - b);\n\n if (pdfPages.length < 3) return;\n\n // Find the first \"normal\" sequence (at least 3 consecutive pages following a pattern)\n const normalSequenceStart = this.findNormalSequenceStart(\n pageRangeMap,\n pdfPages,\n );\n\n if (normalSequenceStart === null || normalSequenceStart <= 0) return;\n\n const normalStartPdfPage = pdfPages[normalSequenceStart];\n const normalStartPageNo = pageRangeMap[normalStartPdfPage].startPageNo;\n\n // Check if pages before the normal sequence are outliers\n // (their page numbers are much higher than what they should be)\n let hasOutliers = false;\n for (let i = 0; i < normalSequenceStart; i++) {\n const pdfPage = pdfPages[i];\n const pageNo = pageRangeMap[pdfPage].startPageNo;\n\n if (pageNo === 0) continue;\n\n // Calculate expected page number based on the normal sequence\n const pdfDiff = normalStartPdfPage - pdfPage;\n\n // For double-sided: each PDF page = 2 actual pages\n const isDoubleSided = this.isDoubleSidedRange(\n pageRangeMap[normalStartPdfPage],\n );\n const expectedPageNo = isDoubleSided\n ? 
normalStartPageNo - pdfDiff * 2\n : normalStartPageNo - pdfDiff;\n\n // If actual page number is significantly higher than expected, it's an outlier\n // Use threshold: actual > expected + 10 (to avoid false positives)\n if (pageNo > expectedPageNo + 10) {\n this.log(\n 'info',\n `Outlier detected: PDF ${pdfPage}=${pageNo} (expected ~${expectedPageNo})`,\n );\n pageRangeMap[pdfPage] = { startPageNo: 0, endPageNo: 0 };\n hasOutliers = true;\n }\n }\n\n if (hasOutliers) {\n this.log('info', `Outliers marked as failed, will be backfilled later`);\n }\n }\n\n /**\n * Find the start index of a \"normal\" sequence in the page range map\n *\n * A normal sequence is defined as at least 3 consecutive PDF pages where:\n * - Page numbers are increasing (for single-page) or increasing by 2 (for double-sided)\n * - The pattern is consistent\n *\n * Returns the index in pdfPages array, or null if not found.\n */\n private findNormalSequenceStart(\n pageRangeMap: Record<number, PageRange>,\n pdfPages: number[],\n ): number | null {\n const MIN_SEQUENCE_LENGTH = 3;\n\n for (\n let startIdx = 0;\n startIdx <= pdfPages.length - MIN_SEQUENCE_LENGTH;\n startIdx++\n ) {\n let isValidSequence = true;\n let expectedIncrement: number | null = null;\n\n for (let i = 0; i < MIN_SEQUENCE_LENGTH - 1; i++) {\n const currPdfPage = pdfPages[startIdx + i];\n const nextPdfPage = pdfPages[startIdx + i + 1];\n const currRange = pageRangeMap[currPdfPage];\n const nextRange = pageRangeMap[nextPdfPage];\n\n // Skip if either has failed extraction\n if (currRange.startPageNo === 0 || nextRange.startPageNo === 0) {\n isValidSequence = false;\n break;\n }\n\n // Calculate increment\n const pageIncrement = nextRange.startPageNo - currRange.startPageNo;\n const pdfIncrement = nextPdfPage - currPdfPage;\n\n // Determine expected increment (1 for single-page, 2 for double-sided per PDF page)\n const isDoubleSided = this.isDoubleSidedRange(currRange);\n const expectedIncrementPerPdf = isDoubleSided ? 
2 : 1;\n const expected = pdfIncrement * expectedIncrementPerPdf;\n\n if (expectedIncrement === null) {\n expectedIncrement = pageIncrement;\n }\n\n // Check if increment is reasonable (should match expected pattern)\n if (pageIncrement !== expected) {\n isValidSequence = false;\n break;\n }\n }\n\n if (isValidSequence) {\n return startIdx;\n }\n }\n\n return null;\n }\n\n /**\n * Check if a page range represents a double-sided scan\n */\n private isDoubleSidedRange(range: PageRange): boolean {\n return (\n range.endPageNo !== null &&\n range.endPageNo !== range.startPageNo &&\n range.endPageNo === range.startPageNo + 1\n );\n }\n\n /**\n * Detect and handle page number drops\n *\n * When page numbers suddenly decrease (e.g., 8,9 -> 3,4),\n * recalculate previous pages based on the drop point.\n */\n private detectAndHandleDrops(pageRangeMap: Record<number, PageRange>): void {\n const pdfPages = Object.keys(pageRangeMap)\n .map(Number)\n .sort((a, b) => a - b);\n\n if (pdfPages.length < 2) return;\n\n for (let i = 1; i < pdfPages.length; i++) {\n const prevPdfPage = pdfPages[i - 1];\n const currPdfPage = pdfPages[i];\n const prevPageNo = pageRangeMap[prevPdfPage].startPageNo;\n const currPageNo = pageRangeMap[currPdfPage].startPageNo;\n\n // Skip if either is 0 (extraction failed)\n if (prevPageNo === 0 || currPageNo === 0) continue;\n\n // Detect significant drop (more than 1)\n if (\n currPageNo > 0 &&\n prevPageNo > currPageNo &&\n prevPageNo - currPageNo > 1\n ) {\n this.log(\n 'info',\n `Page drop detected: PDF ${prevPdfPage}=${prevPageNo} -> PDF ${currPdfPage}=${currPageNo}`,\n );\n\n // Determine if the reference page is double-sided\n const isDoubleSided = this.isDoubleSidedRange(\n pageRangeMap[currPdfPage],\n );\n\n // Recalculate all previous pages based on drop point\n for (let j = i - 1; j >= 0; j--) {\n const pdfPage = pdfPages[j];\n const distance = currPdfPage - pdfPage;\n\n if (isDoubleSided) {\n // Double-sided: each PDF page = 2 actual pages\n const expectedStartPageNo = currPageNo - distance * 2;\n\n if (expectedStartPageNo < 1) {\n pageRangeMap[pdfPage] = { startPageNo: 0, endPageNo: 0 };\n } else {\n pageRangeMap[pdfPage] = {\n startPageNo: expectedStartPageNo,\n endPageNo: expectedStartPageNo + 1,\n };\n }\n } else {\n // Single-page pattern\n const expectedPageNo = currPageNo - distance;\n\n if (expectedPageNo < 1) {\n pageRangeMap[pdfPage] = { startPageNo: 0, endPageNo: 0 };\n } else {\n pageRangeMap[pdfPage] = {\n startPageNo: expectedPageNo,\n endPageNo: expectedPageNo,\n };\n }\n }\n this.log(\n 'info',\n `Recalculated PDF ${pdfPage} -> ${pageRangeMap[pdfPage].startPageNo}`,\n );\n }\n }\n }\n }\n\n /**\n * Normalize negative page numbers to 0\n */\n private normalizeNegatives(pageRangeMap: Record<number, PageRange>): void {\n for (const [pdfPageStr, range] of Object.entries(pageRangeMap)) {\n if (range.startPageNo < 0 || range.endPageNo < 0) {\n this.log('info', `Normalizing negative: PDF ${pdfPageStr} -> 0`);\n pageRangeMap[Number(pdfPageStr)] = { startPageNo: 0, endPageNo: 0 };\n }\n }\n }\n\n /**\n * Backfill pages marked with 0 using detected pattern\n */\n private backfillFailedPages(pageRangeMap: Record<number, PageRange>): void {\n const pdfPages = Object.keys(pageRangeMap)\n .map(Number)\n .sort((a, b) => a - b);\n\n // Find pages with startPageNo === 0 (extraction failed)\n const failedPages = pdfPages.filter(\n (p) => pageRangeMap[p].startPageNo === 0,\n );\n if (failedPages.length === 0) return;\n\n // Find successful pages to detect pattern\n const 
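Drop handling is easiest to follow with numbers. In the single-page case: if PDF 7 reads printed page 9 but PDF 8 reads printed page 3, the run before the drop is rewritten backwards from the drop point, and any page that would land below 1 is zeroed (marked failed) instead. A sketch of that arithmetic:

```typescript
// Rewrite a pre-drop page from the drop point; below-1 results are zeroed.
function recalcBeforeDrop(
  dropPdfPage: number, // PDF page where the lower number was seen
  dropPageNo: number, // printed page number at the drop point
  pdfPage: number, // earlier PDF page being recalculated
): number {
  const expected = dropPageNo - (dropPdfPage - pdfPage);
  return expected < 1 ? 0 : expected;
}

// recalcBeforeDrop(8, 3, 7) -> 2
// recalcBeforeDrop(8, 3, 6) -> 1
// recalcBeforeDrop(8, 3, 5) -> 0  (would be page 0, marked as failed)
```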
successfulPages = pdfPages\n .filter((p) => pageRangeMap[p].startPageNo > 0)\n .map((p) => ({\n pdfPage: p,\n pageNo: pageRangeMap[p].startPageNo,\n isDoubleSided: this.isDoubleSidedRange(pageRangeMap[p]),\n }));\n\n if (successfulPages.length < 2) {\n this.log('warn', 'Not enough successful pages for backfill');\n return;\n }\n\n // Detect if this is a double-sided pattern\n const doubleSidedCount = successfulPages.filter(\n (s) => s.isDoubleSided,\n ).length;\n const isDoubleSided = doubleSidedCount > successfulPages.length / 2;\n\n if (isDoubleSided) {\n // For double-sided: calculate offset using formula startPageNo = pdfPage * 2 + offset\n const offsets = successfulPages.map((s) => s.pageNo - s.pdfPage * 2);\n const avgOffset = Math.round(\n offsets.reduce((a, b) => a + b, 0) / offsets.length,\n );\n\n this.log(\n 'info',\n `Backfilling ${failedPages.length} pages with double-sided pattern (offset=${avgOffset})`,\n );\n\n for (const pdfPage of failedPages) {\n const expectedStartPageNo = pdfPage * 2 + avgOffset;\n\n if (expectedStartPageNo < 1) {\n this.log(\n 'info',\n `Backfill skipped for PDF ${pdfPage} (would be ${expectedStartPageNo})`,\n );\n // Mark as cover/intro page with 0\n continue;\n }\n\n this.log(\n 'info',\n `Backfill PDF ${pdfPage}: 0 -> ${expectedStartPageNo}-${expectedStartPageNo + 1}`,\n );\n pageRangeMap[pdfPage] = {\n startPageNo: expectedStartPageNo,\n endPageNo: expectedStartPageNo + 1,\n };\n }\n } else {\n // For single-page: calculate simple offset\n const offsets = successfulPages.map((s) => s.pageNo - s.pdfPage);\n const avgOffset = Math.round(\n offsets.reduce((a, b) => a + b, 0) / offsets.length,\n );\n\n this.log(\n 'info',\n `Backfilling ${failedPages.length} pages with offset ${avgOffset}`,\n );\n\n for (const pdfPage of failedPages) {\n const expectedPageNo = pdfPage + avgOffset;\n\n if (expectedPageNo < 1) {\n this.log(\n 'info',\n `Backfill skipped for PDF ${pdfPage} (would be ${expectedPageNo})`,\n );\n continue;\n }\n\n this.log('info', `Backfill PDF ${pdfPage}: 0 -> ${expectedPageNo}`);\n pageRangeMap[pdfPage] = {\n startPageNo: expectedPageNo,\n endPageNo: expectedPageNo,\n };\n }\n }\n }\n\n /**\n * Build system prompt for Vision LLM\n */\n protected buildSystemPrompt(): string {\n return `You are a page number extraction specialist for document images.\nYou will receive multiple document page images. For EACH image, extract the visible page number(s).\n\n**SCAN TYPES:**\n1. SINGLE PAGE: One document page per image. Return startPageNo only, endPageNo should be null.\n2. DOUBLE-SIDED: Two document pages per image (spread). Return startPageNo (left) and endPageNo (right).\n\n**WHERE TO LOOK:**\n- Bottom center, bottom corners (most common)\n- Top corners (less common)\n- Page numbers are SMALL numbers in MARGINS, NOT in content area\n\n**WHAT TO IGNORE - These are NOT page numbers:**\n- Roman numerals (i, ii, iii, iv, v...) - return null\n- Figure numbers: \"Figure 5\", \"Fig. 
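The backfill arithmetic amounts to fitting a constant offset between PDF page indices and printed page numbers, then extrapolating to the failed pages. A minimal sketch of the double-sided branch, assuming a simplified stand-in for the `PageRange` model type:

```typescript
// PageRange here is a simplified stand-in for the @heripo/model type.
interface PageRange {
  startPageNo: number;
  endPageNo: number | null;
}

// Double-sided scans put two printed pages on each PDF page, so
// startPageNo ~= pdfPage * 2 + offset. Averaging the offset over the
// successfully extracted pages smooths out isolated extraction errors.
function backfillDoubleSided(map: Record<number, PageRange>): void {
  const entries = Object.entries(map).map(([k, v]) => ({
    pdfPage: Number(k),
    ...v,
  }));
  const ok = entries.filter((e) => e.startPageNo > 0);
  if (ok.length < 2) return; // not enough signal to trust a pattern

  const avgOffset = Math.round(
    ok.reduce((sum, e) => sum + (e.startPageNo - e.pdfPage * 2), 0) /
      ok.length,
  );

  for (const failed of entries.filter((e) => e.startPageNo === 0)) {
    const start = failed.pdfPage * 2 + avgOffset;
    if (start < 1) continue; // keep covers / front matter at 0
    map[failed.pdfPage] = { startPageNo: start, endPageNo: start + 1 };
  }
}

// PDF pages 3 and 5 read as 8-9 and 12-13 (offset 2); the failed
// page 4 is backfilled to 10-11.
const sample: Record<number, PageRange> = {
  3: { startPageNo: 8, endPageNo: 9 },
  4: { startPageNo: 0, endPageNo: 0 },
  5: { startPageNo: 12, endPageNo: 13 },
};
backfillDoubleSided(sample);
console.log(sample[4]); // { startPageNo: 10, endPageNo: 11 }
```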
5\", \"도 5\", \"그림 5\"\n- Table numbers: \"Table 3\", \"표 3\"\n- Photo numbers: \"Photo 8\", \"사진 8\", \"Plate 4\", \"도판 4\"\n- Years in content: \"2015\", \"(1998)\"\n- Any numbers with text prefix or inside content area\n\n**RESPONSE FORMAT:**\nFor each image (in order), provide:\n- imageIndex: 0-based index of the image\n- startPageNo: The page number found (null if not visible/readable)\n- endPageNo: Right page number for double-sided scans (null for single pages)`;\n }\n\n /**\n * Build user prompt for Vision LLM\n */\n protected buildUserPrompt(pageNos: number[]): string {\n return `I am providing ${pageNos.length} document page images.\nThese are PDF pages: ${pageNos.join(', ')}.\n\nFor each image (in order), extract the visible page number(s).\nReturn null for pages where no page number is visible or readable.\n\nRemember: Look for SMALL numbers in MARGINS only. Ignore figure/table/photo numbers.`;\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type {\n ExtendedTokenUsage,\n LLMTokenUsageAggregator,\n} from '@heripo/shared';\nimport type { LanguageModel } from 'ai';\nimport type { z } from 'zod';\n\nimport { LLMCaller } from '@heripo/shared';\n\nimport {\n type BaseLLMComponentOptions,\n TextLLMComponent,\n} from '../core/text-llm-component';\n\n/**\n * Base options for all validators\n *\n * Re-exported from BaseLLMComponentOptions for backwards compatibility.\n */\nexport type BaseValidatorOptions = BaseLLMComponentOptions;\n\n/**\n * Abstract base class for LLM-based validators\n *\n * Extends TextLLMComponent to provide common functionality for validators\n * that use LLM to validate/analyze content:\n * - LLM API call wrapper with LLMCaller (via callLLM method)\n * - Standard logging patterns (via log method from base class)\n * - Retry and fallback configuration\n *\n * Token usage is tracked by LLMCaller and should be aggregated by DocumentProcessor.\n *\n * @template TSchema - Zod schema type for validation\n * @template TResult - Result type after parsing with schema\n */\nexport abstract class BaseValidator<\n TSchema extends z.ZodType,\n TResult = z.infer<TSchema>,\n> extends TextLLMComponent {\n /**\n * Validator name for logging (kept for backwards compatibility)\n */\n protected readonly validatorName: string;\n\n /**\n * Constructor for BaseValidator\n *\n * @param logger - Logger instance\n * @param model - Language model to use for validation\n * @param validatorName - Name of the validator for logging (e.g., \"TocContentValidator\")\n * @param options - Optional configuration (maxRetries, temperature)\n * @param fallbackModel - Optional fallback model for retry on failure\n * @param aggregator - Optional token usage aggregator for tracking LLM calls\n */\n constructor(\n logger: LoggerMethods,\n model: LanguageModel,\n validatorName: string,\n options?: BaseValidatorOptions,\n fallbackModel?: LanguageModel,\n aggregator?: LLMTokenUsageAggregator,\n ) {\n super(logger, model, validatorName, options, fallbackModel, aggregator);\n this.validatorName = validatorName;\n }\n\n /**\n * Call LLM with LLMCaller\n *\n * This method provides backwards compatibility with existing validators.\n * It wraps the parent callTextLLM method but allows passing a custom aggregator.\n *\n * @param schema - Zod schema for response validation\n * @param systemPrompt - System prompt\n * @param userPrompt - User prompt\n * @param phase - Phase name for tracking (e.g., 'validation', 'batch-validation')\n * @param aggregator - Optional token usage aggregator for 
tracking this call\n * @returns Parsed and validated LLM response with usage information\n */\n protected async callLLM(\n schema: TSchema,\n systemPrompt: string,\n userPrompt: string,\n phase: string,\n aggregator?: LLMTokenUsageAggregator,\n ): Promise<{ output: TResult; usage: ExtendedTokenUsage }> {\n const result = await LLMCaller.call({\n schema,\n systemPrompt,\n userPrompt,\n primaryModel: this.model,\n fallbackModel: this.fallbackModel,\n maxRetries: this.maxRetries,\n temperature: this.temperature,\n abortSignal: this.abortSignal,\n component: this.validatorName,\n phase,\n });\n\n // Track to custom aggregator if provided, otherwise use base class aggregator\n if (aggregator) {\n aggregator.track(result.usage);\n } else {\n this.trackUsage(result.usage);\n }\n\n return {\n output: result.output as TResult,\n usage: result.usage,\n };\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type { LLMTokenUsageAggregator } from '@heripo/shared';\nimport type { LanguageModel } from 'ai';\n\nimport type { BaseValidatorOptions } from './base-validator';\n\nimport { z } from 'zod';\n\nimport { BaseValidator } from './base-validator';\n\n/**\n * Content type for TOC validation\n */\nexport type TocContentType = 'pure_toc' | 'mixed' | 'resource_only' | 'invalid';\n\n/**\n * Schema for TOC content validation response\n */\nexport const TocContentValidationSchema = z.object({\n isValid: z.boolean().describe('Whether valid main document TOC was found'),\n confidence: z\n .number()\n .min(0)\n .max(1)\n .describe('Confidence score between 0 and 1'),\n contentType: z\n .enum(['pure_toc', 'mixed', 'resource_only', 'invalid'])\n .describe('Type of content detected'),\n extractedTocMarkdown: z\n .string()\n .nullable()\n .describe('Extracted main TOC markdown when mixed; null otherwise'),\n reason: z.string().describe('Brief explanation in English'),\n});\n\nexport type TocContentValidationResult = z.infer<\n typeof TocContentValidationSchema\n>;\n\n/**\n * Output type for TOC validation with resolved markdown\n */\nexport interface TocValidationOutput {\n isValid: boolean;\n confidence: number;\n contentType: TocContentType;\n validTocMarkdown: string | null;\n reason: string;\n}\n\n/**\n * Options for TocContentValidator\n */\nexport interface TocContentValidatorOptions extends BaseValidatorOptions {\n /**\n * Minimum confidence to consider valid (default: 0.7)\n */\n confidenceThreshold?: number;\n}\n\n/**\n * TocContentValidator\n *\n * Uses LLM to validate whether extracted markdown content is actually a TOC.\n * This is a semantic validation, not structural validation.\n * Supports mixed content extraction where main TOC is combined with resource indices.\n */\nexport class TocContentValidator extends BaseValidator<\n typeof TocContentValidationSchema,\n TocContentValidationResult\n> {\n private readonly confidenceThreshold: number;\n\n constructor(\n logger: LoggerMethods,\n model: LanguageModel,\n options?: TocContentValidatorOptions,\n fallbackModel?: LanguageModel,\n aggregator?: LLMTokenUsageAggregator,\n ) {\n super(\n logger,\n model,\n 'TocContentValidator',\n options,\n fallbackModel,\n aggregator,\n );\n this.confidenceThreshold = options?.confidenceThreshold ?? 
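Since `TocContentValidationSchema` is exported from the package, the response contract can be exercised directly with zod's `parse`; the field values below are made up for illustration:

```typescript
import { TocContentValidationSchema } from '@heripo/document-processor';

// A well-formed validation response parses cleanly; a malformed one
// (e.g. confidence outside [0, 1]) throws a ZodError instead.
const sample = TocContentValidationSchema.parse({
  isValid: true,
  confidence: 0.92,
  contentType: 'mixed',
  extractedTocMarkdown: '제1장 서론 ..... 1\n제2장 조사개요 ..... 5',
  reason: 'Main TOC mixed with a photo index',
});

console.log(sample.contentType); // "mixed"
```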
0.7;\n }\n\n /**\n * Validate if the markdown content is a table of contents\n *\n * @param markdown - Markdown content to validate\n * @returns Validation output with resolved markdown for valid TOC\n */\n async validate(markdown: string): Promise<TocValidationOutput> {\n this.logger.info(\n `[TocContentValidator] Validating content (${markdown.length} chars)`,\n );\n\n if (!markdown.trim()) {\n this.logger.info(\n '[TocContentValidator] Empty markdown, returning invalid',\n );\n return {\n isValid: false,\n confidence: 1.0,\n contentType: 'invalid',\n validTocMarkdown: null,\n reason: 'Empty content',\n };\n }\n\n const { output: result } = await this.callLLM(\n TocContentValidationSchema,\n this.buildSystemPrompt(),\n this.buildUserPrompt(markdown),\n 'validation',\n this.aggregator,\n );\n\n this.logger.info(\n `[TocContentValidator] Result: isValid=${result.isValid}, contentType=${result.contentType}, confidence=${result.confidence}`,\n );\n\n // Resolve valid markdown based on content type\n let validTocMarkdown: string | null = null;\n if (result.isValid && result.confidence >= this.confidenceThreshold) {\n if (result.contentType === 'pure_toc') {\n validTocMarkdown = markdown;\n } else if (\n result.contentType === 'mixed' &&\n result.extractedTocMarkdown\n ) {\n validTocMarkdown = result.extractedTocMarkdown;\n }\n }\n\n return {\n isValid: result.isValid,\n confidence: result.confidence,\n contentType: result.contentType,\n validTocMarkdown,\n reason: result.reason,\n };\n }\n\n /**\n * Check if validation result passes threshold\n *\n * @param result - Validation output from validate()\n * @returns true if content is valid TOC with sufficient confidence\n */\n isValid(result: TocValidationOutput): boolean {\n return result.isValid && result.confidence >= this.confidenceThreshold;\n }\n\n /**\n * Get the valid TOC markdown from validation result\n *\n * @param result - Validation output from validate()\n * @returns Valid TOC markdown or null if invalid\n */\n getValidMarkdown(result: TocValidationOutput): string | null {\n return result.validTocMarkdown;\n }\n\n /**\n * Build system prompt for TOC content validation\n */\n protected buildSystemPrompt(): string {\n return `You are a document structure analyst. Your task is to analyze the provided content and classify it into one of four categories.\n\n## Content Type Classification:\n\n### 1. pure_toc\nThe content is ONLY a main document Table of Contents with:\n- Structured list of chapters/sections with page numbers\n- Hierarchical section titles (e.g., \"Chapter 1\", \"제1장\", \"1.1 Introduction\")\n- Multiple entries (3 or more) organized by document structure\n- NO resource indices mixed in\n\n### 2. mixed\nThe content contains BOTH:\n- A valid main document TOC (chapters/sections with page numbers)\n- AND resource indices (photo/table/drawing indices)\n\nWhen classifying as \"mixed\", you MUST extract ONLY the main TOC portion and return it in extractedTocMarkdown.\n\n### 3. resource_only\nThe content contains ONLY resource indices such as:\n- Photo/image indices (사진 목차, 사진목차, Photo Index, List of Figures, List of Photos)\n- Table indices (표 목차, 표목차, Table Index, List of Tables)\n- Drawing/diagram indices (도면 목차, 도면목차, Drawing Index, List of Drawings)\n- Appendix indices (부록 목차, Appendix Index)\n\n### 4. 
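End to end, the validator is driven as below; a sketch assuming the logger and model wiring shown in the `DocumentProcessor` example later in this file, with a stricter threshold than the 0.7 default:

```typescript
import { anthropic } from '@ai-sdk/anthropic';
import { TocContentValidator } from '@heripo/document-processor';
import { getLogger } from '@heripo/logger';

const validator = new TocContentValidator(
  getLogger(),
  anthropic('claude-opus-4-5-20251101'),
  { confidenceThreshold: 0.8 }, // stricter than the 0.7 default
);

const markdown = '제1장 서론 ..... 1\n제2장 조사개요 ..... 5';
const output = await validator.validate(markdown);

if (validator.isValid(output)) {
  // For 'mixed' content this is the extracted main TOC, not the input.
  console.log(validator.getValidMarkdown(output));
}
```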
invalid\nThe content is none of the above:\n- Random body text\n- Single entries or incomplete lists (fewer than 3 items)\n- Reference lists or bibliographies\n- Index pages (alphabetical keyword lists)\n- Unstructured content\n\n## Response Guidelines:\n- Set isValid to true for \"pure_toc\" and \"mixed\" types\n- Set isValid to false for \"resource_only\" and \"invalid\" types\n- Set confidence between 0.0 and 1.0 based on your certainty\n- For \"mixed\" type: extractedTocMarkdown MUST contain only the main TOC entries (preserve original formatting)\n- For other types: extractedTocMarkdown should be null\n- IMPORTANT: reason MUST be written in English\n\n## Example Scenarios:\n\n### Scenario 1: pure_toc\nInput: \"제1장 서론 ..... 1\\\\n제2장 조사개요 ..... 5\\\\n제3장 조사결과 ..... 15\"\nOutput: { isValid: true, contentType: \"pure_toc\", extractedTocMarkdown: null }\n\n### Scenario 2: mixed\nInput: \"제1장 서론 ..... 1\\\\n제2장 조사개요 ..... 5\\\\n\\\\n사진목차\\\\n사진 1 전경 ..... 50\\\\n사진 2 유물 ..... 51\"\nOutput: { isValid: true, contentType: \"mixed\", extractedTocMarkdown: \"제1장 서론 ..... 1\\\\n제2장 조사개요 ..... 5\" }\n\n### Scenario 3: resource_only\nInput: \"사진목차\\\\n사진 1 전경 ..... 50\\\\n사진 2 유물 ..... 51\"\nOutput: { isValid: false, contentType: \"resource_only\", extractedTocMarkdown: null }`;\n }\n\n /**\n * Build user prompt with markdown content\n */\n protected buildUserPrompt(markdown: string): string {\n return `Analyze the following content and classify it:\n\n${markdown}`;\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type { Caption } from '@heripo/model';\nimport type { LLMTokenUsageAggregator } from '@heripo/shared';\nimport type { LanguageModel } from 'ai';\n\nimport { BatchProcessor, LLMCaller } from '@heripo/shared';\nimport { z } from 'zod';\n\nimport { BaseValidator, type BaseValidatorOptions } from './base-validator';\n\n/**\n * Schema for a single caption validation result\n */\nconst CaptionValidationItemSchema = z.object({\n index: z.number().int().describe('Index of the caption in the input array'),\n isValid: z.boolean().describe('Whether the parsed caption is correct'),\n reason: z\n .string()\n .nullable()\n .describe('Brief explanation if invalid, null if valid'),\n});\n\n/**\n * Schema for batch caption validation response\n */\nconst CaptionValidationBatchSchema = z.object({\n results: z.array(CaptionValidationItemSchema),\n});\n\ntype CaptionValidationBatch = z.infer<typeof CaptionValidationBatchSchema>;\n\n/**\n * Options for CaptionValidator\n */\nexport interface CaptionValidatorOptions extends BaseValidatorOptions {\n // No additional options for now\n}\n\n/**\n * CaptionValidator\n *\n * Validates parsed captions against original text using LLM.\n * Processes captions in batches to optimize LLM API calls.\n *\n * ## Validation Rules\n *\n * Checks if the parsed \"num\" field correctly extracts the prefix + number from original text:\n * 1. **Correctness**: The \"num\" must contain the actual prefix+number from the original text\n * - Example: \"도판 1 유적 전경\" → num=\"도판 1\" ✓\n * - Example: \"도판 1 유적 전경\" → num=\"도판\" ✗ (incomplete)\n *\n * 2. **Spacing**: The spacing in \"num\" must match the original text exactly\n * - Example: \"도판 1\" → num=\"도판 1\" ✓\n * - Example: \"도판1\" → num=\"도판1\" ✓\n * - Example: \"도판 1\" → num=\"도판1\" ✗ (spacing mismatch)\n *\n * 3. **Completeness**: The number part must be fully extracted\n * - Example: \"Figure 2-3\" → num=\"Figure 2-3\" ✓\n * - Example: \"Figure 2-3\" → num=\"Figure 2\" ✗ (incomplete number)\n *\n * 4. 
**Null handling**: If \"num\" is null, verify that the original text has no number prefix\n * - Example: \"유적 전경 사진\" → num=null ✓\n * - Example: \"도판 1 전경\" → num=null ✗ (should extract \"도판 1\")\n */\nexport class CaptionValidator extends BaseValidator<\n typeof CaptionValidationBatchSchema,\n CaptionValidationBatch\n> {\n constructor(\n logger: LoggerMethods,\n model: LanguageModel,\n options?: CaptionValidatorOptions,\n fallbackModel?: LanguageModel,\n aggregator?: LLMTokenUsageAggregator,\n ) {\n super(\n logger,\n model,\n 'CaptionValidator',\n options,\n fallbackModel,\n aggregator,\n );\n }\n\n /**\n * Validate batch of parsed captions against original texts\n *\n * @param captions - Array of parsed Caption objects\n * @param originalTexts - Array of original caption texts (same order as captions)\n * @param batchSize - Batch size for processing. Set to 0 to skip validation (assume all valid).\n * @returns Array of validation results (boolean) maintaining original order\n */\n async validateBatch(\n captions: Caption[],\n originalTexts: string[],\n batchSize: number,\n ): Promise<boolean[]> {\n this.logger.info(\n `[CaptionValidator] Validating ${captions.length} captions with batch size ${batchSize}...`,\n );\n\n if (captions.length !== originalTexts.length) {\n throw new Error(\n `[CaptionValidator] Captions and originalTexts length mismatch: ${captions.length} vs ${originalTexts.length}`,\n );\n }\n\n if (captions.length === 0) {\n this.logger.info('[CaptionValidator] No captions to validate');\n return [];\n }\n\n if (batchSize === 0) {\n // Skip validation, assume all captions are valid\n this.logger.info(\n '[CaptionValidator] Skipping validation (batchSize=0), assuming all captions are valid',\n );\n return new Array(captions.length).fill(true);\n }\n\n try {\n // Convert to indexed format for batch processing\n const indexedItems = captions.map((caption, index) => ({\n index,\n caption,\n originalText: originalTexts[index],\n }));\n\n // Use BatchProcessor to process in parallel batches\n const batchResults = await BatchProcessor.processBatch(\n indexedItems,\n batchSize,\n async (batch) => this.validateBatchInternal(batch, this.model),\n );\n\n // Sort results by original index to maintain order\n batchResults.sort((a, b) => a.index - b.index);\n const results = batchResults.map((r) => r.isValid);\n\n const validCount = results.filter((r) => r).length;\n this.logger.info(\n `[CaptionValidator] Completed: ${validCount}/${results.length} captions validated as correct`,\n );\n\n // Log token usage summary if aggregator is available\n if (this.aggregator) {\n this.aggregator.logSummary(this.logger);\n }\n\n return results;\n } catch (error) {\n const message = error instanceof Error ? 
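A usage sketch for the batch entry point; the `Caption` literal is an assumption (the surrounding code only shows that `num` and `fullText` exist on the model type), hence the cast:

```typescript
import type { Caption } from '@heripo/model';

import { anthropic } from '@ai-sdk/anthropic';
import { CaptionValidator } from '@heripo/document-processor';
import { getLogger } from '@heripo/logger';

// Assumed Caption shape: at least num and fullText.
const captions = [
  { num: '도판 1', fullText: '도판 1 유적 전경' },
] as unknown as Caption[];
const originals = ['도판 1 유적 전경'];

const validator = new CaptionValidator(
  getLogger(),
  anthropic('claude-opus-4-5-20251101'),
);

// batchSize 10: validate in parallel batches of up to 10 captions.
// batchSize 0 would skip the LLM entirely and report everything valid.
const flags = await validator.validateBatch(captions, originals, 10);
console.log(flags); // e.g. [true]
```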
error.message : String(error);\n this.logger.error(`[CaptionValidator] Validation failed: ${message}`);\n throw new CaptionValidationError(\n `Failed to validate captions: ${message}`,\n { cause: error },\n );\n }\n }\n\n /**\n * Internal: Validate batch of captions using LLM\n *\n * @param items - Batch of caption items with original indices\n * @param model - Effective model to use\n * @returns Array of validation results indexed correctly\n */\n private async validateBatchInternal(\n items: Array<{ index: number; caption: Caption; originalText: string }>,\n model: LanguageModel,\n ): Promise<Array<{ index: number; isValid: boolean }>> {\n const result = await LLMCaller.call({\n schema: CaptionValidationBatchSchema,\n systemPrompt: this.buildSystemPrompt(),\n userPrompt: this.buildUserPrompt(items),\n primaryModel: model,\n fallbackModel: this.fallbackModel,\n maxRetries: this.maxRetries,\n temperature: this.temperature,\n abortSignal: this.abortSignal,\n component: 'CaptionValidator',\n phase: 'validation',\n });\n\n // Track token usage if aggregator is available\n if (this.aggregator) {\n this.aggregator.track(result.usage);\n }\n\n // Map LLM results back to original indices\n return result.output.results.map((item) => ({\n index: item.index,\n isValid: item.isValid,\n }));\n }\n\n protected buildSystemPrompt(): string {\n return `You are a caption validation expert for archaeological excavation reports.\n\nYour task is to validate whether parsed caption prefixes (num field) are correctly extracted from original caption texts.\n\n## Caption Pattern Recognition\n\nA valid caption follows the pattern: <prefix word(s)> <number>\n- The prefix can be ANY Korean/English word(s) that label images/tables/figures\n- Common examples: 도판, 사진, 그림, 원색사진, 흑백사진, Figure, Photo, Plate, etc.\n- The key is the PATTERN (text followed by number), not a specific word list\n- Leading punctuation/brackets should be IGNORED when extracting\n\nValid caption patterns:\n- \"원색사진 1. 조사지역\" → num=\"원색사진 1\" ✓\n- \"흑백사진 2 출토유물\" → num=\"흑백사진 2\" ✓\n- \"도판 1 유적 전경\" → num=\"도판 1\" ✓\n- \"(사진 16> 느티나무\" → num=\"사진 16\" ✓ (ignore leading punctuation)\n- \"<도판 3> 유물 사진\" → num=\"도판 3\" ✓ (ignore angle brackets)\n\nInvalid patterns (num MUST be null):\n- \"39 3월 28일(백제 도로유구)\" → null ✓ (starts with number, no prefix)\n- \"1. 유적 전경\" → null ✓ (numbered list item, not a caption)\n- \"2024년 조사 현황\" → null ✓ (year reference, not a caption)\n\n## Extraction Algorithm:\n\n1. Extract prefix + number from the caption\n - The prefix is the text portion before the number\n - Full extraction: \"원색사진 1\", \"도판 2-3\", \"그림 3.6\", \"Figure 4a\"\n\n2. **Decimal point handling**: Include period/dot after number if directly following\n - \"그림 3.6. 한반도\" → \"그림 3.6\" (period as decimal separator included)\n - \"도판 2. 유적\" → \"도판 2\" (period after space, NOT included)\n\n3. **Stop rules** (extraction must stop at first occurrence of):\n - Punctuation (except decimal point): , : ; ! ? ~ ( ) [ ] { }\n - Whitespace: space, tab, newline\n - Underscore: _\n - Exception: Periods directly after digits are included as decimal separators\n - Exception: Hyphens within numbers are included (e.g., \"2-3\")\n\n## Validation Rules:\n\n1. **Pattern requirement**: The original text MUST follow <prefix> <number> pattern\n - \"원색사진 1. 조사지역\" → num=\"원색사진 1\" ✓ (valid pattern)\n - \"39 3월 28일(백제)\" → num=\"39\" ✗ (starts with number, should be null)\n - \"1. 조사 개요\" → num=\"1\" ✗ (numbered list, should be null)\n\n2. 
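The extraction and stop rules in this prompt can be approximated mechanically. The regex below is an illustrative reconstruction, not the package's actual parser, and ignores some edge cases (multi-clause prefixes, uppercase letter suffixes):

```typescript
// Optional leading punctuation, a non-numeric prefix, then a number
// that may carry hyphen or decimal continuations and a letter suffix.
const CAPTION_NUM =
  /^[\s(<\[{'"]*([^\d\s(<\[{'"][^\d]*?\s?\d+(?:[-.]\d+)*[a-z]?)/u;

function extractNum(caption: string): string | null {
  const m = CAPTION_NUM.exec(caption);
  return m ? m[1].trim() : null;
}

console.log(extractNum('(사진 16> 느티나무')); // "사진 16"
console.log(extractNum('그림 3.6. 한반도')); // "그림 3.6"
console.log(extractNum('Figure 2-3 plan')); // "Figure 2-3"
console.log(extractNum('39 3월 28일')); // null (starts with a number)
```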
**Correctness**: The parsed \"num\" must contain the actual prefix+number\n - \"도판 1 유적 전경\" → num=\"도판 1\" ✓\n - \"도판 1 유적 전경\" → num=\"도판\" ✗ (incomplete)\n\n3. **Spacing**: The spacing in \"num\" must match the original text exactly\n - \"도판 1\" → num=\"도판 1\" ✓\n - \"도판1\" → num=\"도판1\" ✓\n - \"도판 1\" → num=\"도판1\" ✗ (spacing mismatch)\n\n4. **Completeness**: The number part must be fully extracted\n - \"Figure 2-3\" → num=\"Figure 2-3\" ✓\n - \"Figure 2-3\" → num=\"Figure 2\" ✗ (incomplete number)\n\n5. **Null handling**: If \"num\" is null, verify:\n - Either the original text has no number\n - OR the text starts with a number (no prefix)\n - \"유적 전경 사진\" → num=null ✓ (no number in caption position)\n - \"원색사진 1 조사\" → num=null ✗ (should extract \"원색사진 1\")\n\n## Response:\nFor each caption, return:\n- index: original position\n- isValid: true if parsing is correct, false otherwise\n- reason: null if valid, brief explanation if invalid`;\n }\n\n protected buildUserPrompt(\n items: Array<{ index: number; caption: Caption; originalText: string }>,\n ): string {\n const captionList = items\n .map(\n (item) =>\n `[${item.index}] Original: \"${item.originalText}\" | Parsed num: ${item.caption.num !== undefined ? `\"${item.caption.num}\"` : 'null'}`,\n )\n .join('\\n');\n\n return `Validate the following caption parsing results:\n\n${captionList}\n\nReturn the results as JSON array with \"index\", \"isValid\", and \"reason\" (null if valid, explanation if invalid).\n\nExample format:\n{\n \"results\": [\n { \"index\": 0, \"isValid\": true, \"reason\": null },\n { \"index\": 1, \"isValid\": false, \"reason\": \"Number incomplete: expected '1-2' but got '1'\" },\n { \"index\": 2, \"isValid\": true, \"reason\": null }\n ]\n}`;\n }\n}\n\n/**\n * Error thrown when caption validation fails\n */\nexport class CaptionValidationError extends Error {\n constructor(message: string, options?: ErrorOptions) {\n super(message, options);\n this.name = 'CaptionValidationError';\n }\n}\n","import type { LoggerMethods } from '@heripo/logger';\nimport type {\n Caption,\n Chapter,\n DoclingDocument,\n DocumentProcessResult,\n PageRange,\n ProcessedDocument,\n ProcessedFootnote,\n ProcessedImage,\n ProcessedTable,\n ProcessedTableCell,\n} from '@heripo/model';\nimport type { LanguageModel } from 'ai';\n\nimport type { TocEntry } from './types';\n\nimport { LLMTokenUsageAggregator } from '@heripo/shared';\n\nimport { ChapterConverter } from './converters';\nimport {\n TocExtractor,\n TocFinder,\n TocNotFoundError,\n VisionTocExtractor,\n} from './extractors';\nimport { CaptionParser, PageRangeParser } from './parsers';\nimport {\n IdGenerator,\n MarkdownConverter,\n RefResolver,\n TextCleaner,\n} from './utils';\nimport { CaptionValidator, TocContentValidator } from './validators';\n\n/**\n * DocumentProcessor Options\n */\nexport interface DocumentProcessorOptions {\n /**\n * Logger instance\n */\n logger: LoggerMethods;\n\n /**\n * Fallback model - used as fallback when component-specific models are not provided or fail.\n * This is the only required model. Should be set to a frontier model (e.g., Claude Opus 4.5, GPT-5.2)\n * to ensure reliable fallback performance across all components.\n */\n fallbackModel: LanguageModel;\n\n /**\n * Model for PageRangeParser - extracts page numbers from page images.\n * Requires vision capabilities. 
Falls back to 'fallbackModel' if not provided.\n */\n pageRangeParserModel?: LanguageModel;\n\n /**\n * Model for TocExtractor - extracts structured TOC from Markdown representation.\n * Falls back to 'fallbackModel' if not provided.\n */\n tocExtractorModel?: LanguageModel;\n\n /**\n * Model for validators (TOC content validation, caption validation).\n * Falls back to 'fallbackModel' if not provided.\n */\n validatorModel?: LanguageModel;\n\n /**\n * Model for VisionTocExtractor - extracts TOC directly from page images.\n * Requires vision capabilities. Falls back to 'fallbackModel' if not provided.\n */\n visionTocExtractorModel?: LanguageModel;\n\n /**\n * Model for CaptionParser - extracts caption prefix and number from image/table captions.\n * Falls back to 'fallbackModel' if not provided.\n */\n captionParserModel?: LanguageModel;\n\n /**\n * Batch size for TextCleaner text normalization (synchronous processing)\n */\n textCleanerBatchSize: number;\n\n /**\n * Batch size for CaptionParser LLM parsing (async parallel processing)\n */\n captionParserBatchSize: number;\n\n /**\n * Batch size for CaptionValidator LLM validation (async parallel processing)\n */\n captionValidatorBatchSize: number;\n\n /**\n * Maximum retry count (default: 3)\n */\n maxRetries?: number;\n\n /**\n * Enable fallback retry mechanism - automatically retries with fallback model on failure (default: true)\n * Set to false to disable automatic fallback retry and fail immediately on component-specific model errors\n */\n enableFallbackRetry?: boolean;\n\n /**\n * Abort signal for cancellation support.\n * When aborted, processing stops at the next checkpoint between stages.\n */\n abortSignal?: AbortSignal;\n}\n\n/**\n * DocumentProcessor\n *\n * Main class that converts DoclingDocument to ProcessedDocument.\n *\n * ## Conversion Process\n *\n * 1. Initialize RefResolver - indexing for $ref resolution\n * 2. Initialize IdGenerator - unique ID generator\n * 3. Text filtering and PageRangeMap generation (visionModel)\n * 4. TOC extraction (model) - core step\n * 5. Parallel processing block:\n * - Images conversion (caption extraction)\n * - Tables conversion (excluding TOC tables)\n * 6. Chapters conversion (based on TOC)\n * 7. 
Assemble ProcessedDocument\n *\n * @example\n * ```typescript\n * import { openai } from '@ai-sdk/openai';\n * import { anthropic } from '@ai-sdk/anthropic';\n * import { DocumentProcessor } from '@heripo/document-processor';\n * import { getLogger } from '@heripo/logger';\n *\n * const logger = getLogger();\n *\n * // Basic usage - all components use the fallback model\n * const processor = new DocumentProcessor({\n * logger,\n * fallbackModel: anthropic('claude-opus-4-5-20251101'), // Frontier model for reliable fallback\n * });\n *\n * // Advanced usage - component-specific models with frontier fallback\n * const advancedProcessor = new DocumentProcessor({\n * logger,\n * fallbackModel: anthropic('claude-opus-4-5-20251101'), // Frontier model for fallback\n * pageRangeParserModel: openai('gpt-5.2'), // Vision-capable\n * tocExtractorModel: openai('gpt-5-mini'), // Structured output\n * validatorModel: openai('gpt-5.2'), // Validation (TOC + caption)\n * visionTocExtractorModel: openai('gpt-5.1'), // Vision-capable\n * captionParserModel: openai('gpt-5-mini'),\n * textCleanerBatchSize: 20, // Sync text processing\n * captionParserBatchSize: 10, // LLM caption parsing\n * captionValidatorBatchSize: 10, // LLM caption validation\n * maxRetries: 3,\n * });\n *\n * const result = await processor.process(\n * doclingDoc,\n * 'report-001',\n * outputPath\n * );\n * ```\n */\nexport class DocumentProcessor {\n private readonly logger: LoggerMethods;\n private readonly fallbackModel: LanguageModel;\n private readonly pageRangeParserModel: LanguageModel;\n private readonly tocExtractorModel: LanguageModel;\n private readonly validatorModel: LanguageModel;\n private readonly visionTocExtractorModel: LanguageModel;\n private readonly captionParserModel: LanguageModel;\n private readonly textCleanerBatchSize: number;\n private readonly captionParserBatchSize: number;\n private readonly captionValidatorBatchSize: number;\n private readonly maxRetries: number;\n private readonly enableFallbackRetry: boolean;\n private readonly abortSignal?: AbortSignal;\n private idGenerator = new IdGenerator();\n private refResolver?: RefResolver;\n private pageRangeParser?: PageRangeParser;\n private tocFinder?: TocFinder;\n private tocExtractor?: TocExtractor;\n private tocContentValidator?: TocContentValidator;\n private captionValidator?: CaptionValidator;\n private visionTocExtractor?: VisionTocExtractor;\n private captionParser?: CaptionParser;\n private chapterConverter?: ChapterConverter;\n private textCleaner = TextCleaner;\n private readonly usageAggregator = new LLMTokenUsageAggregator();\n\n constructor(options: DocumentProcessorOptions) {\n this.logger = options.logger;\n this.fallbackModel = options.fallbackModel;\n this.pageRangeParserModel =\n options.pageRangeParserModel ?? options.fallbackModel;\n this.tocExtractorModel = options.tocExtractorModel ?? options.fallbackModel;\n this.validatorModel = options.validatorModel ?? options.fallbackModel;\n this.visionTocExtractorModel =\n options.visionTocExtractorModel ?? options.fallbackModel;\n this.captionParserModel =\n options.captionParserModel ?? options.fallbackModel;\n this.textCleanerBatchSize = options.textCleanerBatchSize;\n this.captionParserBatchSize = options.captionParserBatchSize;\n this.captionValidatorBatchSize = options.captionValidatorBatchSize;\n this.maxRetries = options.maxRetries ?? 3;\n this.enableFallbackRetry = options.enableFallbackRetry ?? 
false;\n this.abortSignal = options.abortSignal;\n }\n\n /**\n * Check if abort has been requested and throw error if so\n *\n * @throws {Error} with name 'AbortError' if aborted\n */\n private checkAborted(): void {\n if (this.abortSignal?.aborted) {\n const error = new Error('Document processing was aborted');\n error.name = 'AbortError';\n throw error;\n }\n }\n\n /**\n * Converts DoclingDocument to ProcessedDocument with token usage tracking.\n *\n * Conversion process:\n * 1. Initialize processors and resolvers\n * 2. Normalize and filter texts\n * 3. Clean texts and parse page ranges (parallel)\n * 4. Extract table of contents\n * 5. Convert images and tables (parallel)\n * 6. Convert chapters and link resources\n * 7. Assemble final ProcessedDocument\n * 8. Collect and report token usage\n *\n * @param doclingDoc - Original document extracted from Docling SDK\n * @param reportId - Report unique identifier\n * @param outputPath - Path containing images and pages subdirectories (images/image_0.png, pages/page_0.png, etc.)\n * @returns Document processing result with ProcessedDocument and token usage report\n *\n * @throws {TocExtractError} When TOC extraction fails\n * @throws {PageRangeParseError} When page range parsing fails\n * @throws {ConversionError} When error occurs during conversion\n */\n async process(\n doclingDoc: DoclingDocument,\n reportId: string,\n outputPath: string,\n ): Promise<DocumentProcessResult> {\n this.logger.info('[DocumentProcessor] Starting document processing...');\n this.logger.info('[DocumentProcessor] Report ID:', reportId);\n\n // Reset token usage aggregator for new processing run\n this.usageAggregator.reset();\n\n // Check abort before starting\n this.checkAborted();\n\n this.initializeProcessors(doclingDoc, outputPath);\n\n const startTimeFilter = Date.now();\n const filtered = this.normalizeAndFilterTexts(doclingDoc);\n const filteringTime = Date.now() - startTimeFilter;\n this.logger.info(\n `[DocumentProcessor] Text filtering took ${filteringTime}ms`,\n );\n\n // Check abort after text filtering\n this.checkAborted();\n\n const startTimePageRange = Date.now();\n const pageRangeMap = await this.parsePageRanges(doclingDoc);\n const pageRangeTime = Date.now() - startTimePageRange;\n this.logger.info(\n `[DocumentProcessor] Page range parsing took ${pageRangeTime}ms`,\n );\n\n // Check abort after page range parsing\n this.checkAborted();\n\n const startTimeToc = Date.now();\n const tocEntries = await this.extractTableOfContents(doclingDoc, filtered);\n const tocTime = Date.now() - startTimeToc;\n this.logger.info(`[DocumentProcessor] TOC extraction took ${tocTime}ms`);\n\n // Check abort after TOC extraction\n this.checkAborted();\n\n const startTimeResources = Date.now();\n const { images, tables, footnotes } = await this.convertResources(\n doclingDoc,\n outputPath,\n );\n const resourcesTime = Date.now() - startTimeResources;\n this.logger.info(\n `[DocumentProcessor] Resource conversion took ${resourcesTime}ms`,\n );\n\n // Check abort after resource conversion\n this.checkAborted();\n\n const startTimeChapters = Date.now();\n const chapters = await this.convertChapters(\n doclingDoc,\n tocEntries,\n pageRangeMap,\n images,\n tables,\n footnotes,\n );\n const chaptersTime = Date.now() - startTimeChapters;\n this.logger.info(\n `[DocumentProcessor] Chapter conversion took ${chaptersTime}ms`,\n );\n\n const startTimeAssemble = Date.now();\n const processedDoc = this.assembleProcessedDocument(\n reportId,\n pageRangeMap,\n chapters,\n images,\n 
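Cancellation is cooperative: each stage ends with a `checkAborted()` checkpoint, so an abort takes effect between stages rather than mid-call. A sketch of wiring an `AbortSignal` through the options, assuming a parsed `DoclingDocument` is already at hand:

```typescript
import type { DoclingDocument } from '@heripo/model';

import { anthropic } from '@ai-sdk/anthropic';
import { DocumentProcessor } from '@heripo/document-processor';
import { getLogger } from '@heripo/logger';

declare const doclingDoc: DoclingDocument; // assumed: parsed Docling output

const controller = new AbortController();
const processor = new DocumentProcessor({
  logger: getLogger(),
  fallbackModel: anthropic('claude-opus-4-5-20251101'),
  textCleanerBatchSize: 20,
  captionParserBatchSize: 10,
  captionValidatorBatchSize: 10,
  abortSignal: controller.signal,
});

// Abort after 10 minutes; the run stops at the next stage checkpoint
// (after filtering, page ranges, TOC, resources, ...), not instantly.
const timer = setTimeout(() => controller.abort(), 10 * 60 * 1000);
try {
  const { document, usage } = await processor.process(
    doclingDoc,
    'report-001',
    './output',
  );
  console.log(document.chapters.length);
  console.log(usage);
} catch (err) {
  if (err instanceof Error && err.name === 'AbortError') {
    console.warn('Processing was aborted');
  }
  throw err;
} finally {
  clearTimeout(timer);
}
```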
tables,\n footnotes,\n );\n const assembleTime = Date.now() - startTimeAssemble;\n this.logger.info(\n `[DocumentProcessor] Document assembly took ${assembleTime}ms`,\n );\n\n this.logger.info('[DocumentProcessor] Document processing completed');\n\n return {\n document: processedDoc,\n usage: this.usageAggregator.getReport(),\n };\n }\n\n /**\n * Initialize all processors and resolvers\n *\n * Sets up RefResolver, PageRangeParser, TocFinder, and TocExtractor\n */\n private initializeProcessors(\n doclingDoc: DoclingDocument,\n outputPath: string,\n ): void {\n this.logger.info('[DocumentProcessor] Initializing processors...');\n\n this.logger.info('[DocumentProcessor] - RefResolver');\n this.refResolver = new RefResolver(this.logger, doclingDoc);\n\n this.logger.info('[DocumentProcessor] - PageRangeParser');\n this.pageRangeParser = new PageRangeParser(\n this.logger,\n this.pageRangeParserModel,\n outputPath,\n this.maxRetries,\n this.enableFallbackRetry ? this.fallbackModel : undefined,\n this.usageAggregator,\n this.abortSignal,\n );\n\n this.logger.info('[DocumentProcessor] - TocFinder');\n this.tocFinder = new TocFinder(this.logger, this.refResolver);\n\n this.logger.info('[DocumentProcessor] - TocExtractor');\n this.tocExtractor = new TocExtractor(\n this.logger,\n this.tocExtractorModel,\n {\n maxRetries: this.maxRetries,\n },\n this.enableFallbackRetry ? this.fallbackModel : undefined,\n this.abortSignal,\n );\n\n this.logger.info('[DocumentProcessor] - TocContentValidator');\n this.tocContentValidator = new TocContentValidator(\n this.logger,\n this.validatorModel,\n { maxRetries: this.maxRetries, abortSignal: this.abortSignal },\n this.enableFallbackRetry ? this.fallbackModel : undefined,\n this.usageAggregator,\n );\n\n this.logger.info('[DocumentProcessor] - CaptionValidator');\n this.captionValidator = new CaptionValidator(\n this.logger,\n this.validatorModel,\n { maxRetries: this.maxRetries, abortSignal: this.abortSignal },\n this.enableFallbackRetry ? this.fallbackModel : undefined,\n this.usageAggregator,\n );\n\n this.logger.info('[DocumentProcessor] - VisionTocExtractor');\n this.visionTocExtractor = new VisionTocExtractor(\n this.logger,\n this.visionTocExtractorModel,\n outputPath,\n { maxRetries: this.maxRetries, abortSignal: this.abortSignal },\n this.enableFallbackRetry ? this.fallbackModel : undefined,\n this.usageAggregator,\n );\n\n this.logger.info('[DocumentProcessor] - CaptionParser');\n this.captionParser = new CaptionParser(\n this.logger,\n this.captionParserModel,\n { maxRetries: this.maxRetries, abortSignal: this.abortSignal },\n this.enableFallbackRetry ? 
this.fallbackModel : undefined,\n this.usageAggregator,\n );\n\n this.logger.info('[DocumentProcessor] - ChapterConverter');\n this.chapterConverter = new ChapterConverter(this.logger, this.idGenerator);\n\n this.logger.info('[DocumentProcessor] All processors initialized');\n }\n\n /**\n * Normalize and filter texts using TextCleaner\n *\n * Performs basic text normalization (unicode, whitespace, punctuation)\n * and filters out invalid texts (empty, numbers-only, etc.)\n */\n private normalizeAndFilterTexts(doclingDoc: DoclingDocument): string[] {\n this.logger.info('[DocumentProcessor] Normalizing and filtering texts...');\n\n const texts = doclingDoc.texts.map((text) => text.text);\n const filtered = this.textCleaner.normalizeAndFilterBatch(\n texts,\n this.textCleanerBatchSize,\n );\n\n this.logger.info(\n `[DocumentProcessor] Filtered ${filtered.length} texts from ${texts.length} original texts`,\n );\n\n return filtered;\n }\n\n /**\n * Parse page ranges using Vision LLM\n *\n * Extracts actual page numbers from page images and creates mapping.\n * Token usage is automatically tracked by PageRangeParser into the shared aggregator.\n */\n private async parsePageRanges(\n doclingDoc: DoclingDocument,\n ): Promise<Record<number, PageRange>> {\n this.logger.info('[DocumentProcessor] Starting page range parsing...');\n\n const result = await this.pageRangeParser!.parse(doclingDoc);\n\n const pageRangeMap = result.pageRangeMap;\n\n this.logger.info(\n `[DocumentProcessor] Page range map entries: ${Object.keys(pageRangeMap).length}`,\n );\n\n return pageRangeMap;\n }\n\n /**\n * Convert images, tables, and footnotes\n *\n * Runs conversions:\n * - Images conversion (with caption extraction)\n * - Tables conversion (with caption extraction, excluding TOC tables)\n * - Footnotes conversion (synchronous, from text items with label='footnote')\n */\n private async convertResources(\n doclingDoc: DoclingDocument,\n outputPath: string,\n ): Promise<{\n images: ProcessedImage[];\n tables: ProcessedTable[];\n footnotes: ProcessedFootnote[];\n }> {\n this.logger.info(\n '[DocumentProcessor] Converting images, tables, and footnotes...',\n );\n\n const [images, tables] = await Promise.all([\n this.convertImages(doclingDoc, outputPath),\n this.convertTables(doclingDoc),\n ]);\n\n const footnotes = this.convertFootnotes(doclingDoc);\n\n this.logger.info(\n `[DocumentProcessor] Converted ${images.length} images, ${tables.length} tables, and ${footnotes.length} footnotes`,\n );\n\n return { images, tables, footnotes };\n }\n\n /**\n * Convert footnotes\n *\n * Extracts footnotes from DoclingDocument text items with label='footnote'\n */\n private convertFootnotes(doclingDoc: DoclingDocument): ProcessedFootnote[] {\n const footnoteItems = doclingDoc.texts.filter(\n (item) => item.label === 'footnote',\n );\n this.logger.info(\n `[DocumentProcessor] Converting ${footnoteItems.length} footnotes...`,\n );\n\n const footnotes: ProcessedFootnote[] = [];\n\n for (const item of footnoteItems) {\n if (!this.textCleaner.isValidText(item.text)) {\n continue;\n }\n\n const pdfPageNo = item.prov?.[0]?.page_no ?? 
1;\n const footnoteId = this.idGenerator.generateFootnoteId();\n\n footnotes.push({\n id: footnoteId,\n text: this.textCleaner.normalize(item.text),\n pdfPageNo,\n });\n }\n\n this.logger.info(\n `[DocumentProcessor] Converted ${footnotes.length} valid footnotes`,\n );\n\n return footnotes;\n }\n\n /**\n * Assemble the final ProcessedDocument\n *\n * Creates the ProcessedDocument structure with all converted components\n */\n private assembleProcessedDocument(\n reportId: string,\n pageRangeMap: Record<number, PageRange>,\n chapters: Chapter[],\n images: ProcessedImage[],\n tables: ProcessedTable[],\n footnotes: ProcessedFootnote[],\n ): ProcessedDocument {\n this.logger.info('[DocumentProcessor] Assembling ProcessedDocument...');\n\n const processedDoc: ProcessedDocument = {\n reportId,\n pageRangeMap,\n chapters,\n images,\n tables,\n footnotes,\n };\n\n this.logger.info(\n `[DocumentProcessor] Assembled document with ${chapters.length} chapters, ${images.length} images, ${tables.length} tables, ${footnotes.length} footnotes`,\n );\n\n return processedDoc;\n }\n\n /**\n * Extract table of contents (TOC)\n *\n * Uses rule-based extraction with LLM validation and vision fallback:\n * 1. TocFinder - find TOC area in document (rule-based)\n * 2. MarkdownConverter - convert TOC items to Markdown\n * 3. TocContentValidator - validate if content is actually a TOC (LLM)\n * 4. If invalid: VisionTocExtractor - extract from page images (vision LLM fallback)\n * 5. TocExtractor - LLM-based structured extraction\n */\n private async extractTableOfContents(\n doclingDoc: DoclingDocument,\n _filteredTexts: string[],\n ): Promise<TocEntry[]> {\n this.logger.info('[DocumentProcessor] Extracting TOC...');\n\n let markdown: string | null = null;\n\n // Stage 1: Try rule-based extraction\n try {\n const tocArea = this.tocFinder!.find(doclingDoc);\n this.logger.info(\n `[DocumentProcessor] Found TOC area: pages ${tocArea.startPage}-${tocArea.endPage}`,\n );\n\n // Stage 2: Convert to Markdown\n markdown = MarkdownConverter.convert(tocArea.itemRefs, this.refResolver!);\n this.logger.info(\n `[DocumentProcessor] Converted TOC to Markdown (${markdown.length} chars)`,\n );\n\n // Stage 3: Validate with LLM\n const validation = await this.tocContentValidator!.validate(markdown);\n if (!this.tocContentValidator!.isValid(validation)) {\n this.logger.warn(\n `[DocumentProcessor] TOC validation failed: ${validation.reason}`,\n );\n markdown = null;\n } else {\n const validMarkdown =\n this.tocContentValidator!.getValidMarkdown(validation);\n if (validMarkdown) {\n if (validation.contentType === 'mixed') {\n this.logger.info(\n `[DocumentProcessor] Mixed TOC detected, using extracted main TOC (${validMarkdown.length} chars)`,\n );\n }\n markdown = validMarkdown;\n this.logger.info(\n `[DocumentProcessor] TOC validation passed (confidence: ${validation.confidence})`,\n );\n } else {\n markdown = null;\n }\n }\n } catch (error) {\n if (error instanceof TocNotFoundError) {\n this.logger.info(\n '[DocumentProcessor] Rule-based TOC not found, will try vision fallback',\n );\n } else {\n throw error;\n }\n }\n\n // Stage 4: Vision fallback if needed\n if (!markdown) {\n this.logger.info('[DocumentProcessor] Using vision fallback for TOC');\n const totalPages = Object.keys(doclingDoc.pages).length;\n markdown = await this.visionTocExtractor!.extract(totalPages);\n\n if (!markdown) {\n const reason =\n 'Both rule-based search and vision fallback failed to locate TOC';\n this.logger.error(\n `[DocumentProcessor] TOC 
extraction failed: ${reason}`,\n );\n throw new TocNotFoundError(\n `Table of contents not found in the document. ${reason}.`,\n );\n }\n\n this.logger.info(\n `[DocumentProcessor] Vision extracted TOC markdown (${markdown.length} chars)`,\n );\n }\n\n // Stage 5: Extract structure with LLM (with fallback retry)\n const totalPages = Object.keys(doclingDoc.pages).length;\n const tocResult = await this.tocExtractor!.extract(markdown, {\n totalPages,\n });\n\n // Track token usage (initial extraction + any correction retries)\n for (const usage of tocResult.usages) {\n this.usageAggregator.track(usage);\n }\n\n if (tocResult.entries.length === 0) {\n const reason =\n 'TOC area was detected but LLM could not extract any structured entries';\n this.logger.error(`[DocumentProcessor] TOC extraction failed: ${reason}`);\n throw new TocNotFoundError(`${reason}.`);\n }\n\n this.logger.info(\n `[DocumentProcessor] Extracted ${tocResult.entries.length} top-level TOC entries`,\n );\n\n return tocResult.entries;\n }\n\n /**\n * Process resource captions (for images and tables)\n *\n * Common caption processing pipeline:\n * 1. Parse captions in batch\n * 2. Validate parsed captions\n * 3. Reparse failed captions with fallback model\n *\n * @param captionTexts - Array of caption texts to process\n * @param resourceType - Type of resource for logging (e.g., 'image', 'table')\n * @returns Parsed captions with index mapping\n */\n private async processResourceCaptions(\n captionTexts: Array<string | undefined>,\n resourceType: string,\n ): Promise<Map<number, Caption>> {\n const captionsByIndex: Map<number, Caption> = new Map();\n\n // Build map of valid captions with indices\n const validCaptionData: Array<{\n resourceIndex: number;\n filteredIndex: number;\n text: string;\n }> = [];\n\n for (let i = 0; i < captionTexts.length; i++) {\n const text = captionTexts[i];\n if (text !== undefined) {\n validCaptionData.push({\n resourceIndex: i,\n filteredIndex: validCaptionData.length,\n text,\n });\n }\n }\n\n const validCaptionTexts = validCaptionData.map((item) => item.text);\n\n // Step 1: Parse captions in batch\n const parsedCaptions =\n validCaptionTexts.length > 0\n ? await this.captionParser!.parseBatch(\n validCaptionTexts,\n this.captionParserBatchSize,\n )\n : [];\n\n // Handle length mismatch between parsed results and valid captions\n let finalValidCaptionData = validCaptionData;\n let finalParsedCaptions = parsedCaptions;\n\n if (parsedCaptions.length !== validCaptionData.length) {\n this.logger.warn(\n `[DocumentProcessor] Caption parsing length mismatch for ${resourceType}: ` +\n `expected ${validCaptionData.length}, got ${parsedCaptions.length}. 
` +\n `Attempting recovery by matching fullText...`,\n );\n\n // Create a map of fullText -> parsed caption for O(1) lookup\n const parsedMap = new Map<string, Caption>();\n for (const parsed of parsedCaptions) {\n parsedMap.set(parsed.fullText, parsed);\n }\n\n // Filter validCaptionData to only include items that were successfully parsed\n const recoveredData: typeof validCaptionData = [];\n for (const item of validCaptionData) {\n if (parsedMap.has(item.text)) {\n recoveredData.push(item);\n } else {\n this.logger.warn(\n `[DocumentProcessor] Skipping ${resourceType} caption at index ${item.resourceIndex}: \"${item.text}\" (not found in parsed results)`,\n );\n }\n }\n\n // Re-map parsedCaptions to match the filtered data\n /* c8 ignore start - defensive guard: recoveredData only contains items where parsedMap.has() returned true */\n const recoveredCaptions: Caption[] = [];\n for (const item of recoveredData) {\n const caption = parsedMap.get(item.text);\n if (caption) {\n recoveredCaptions.push(caption);\n }\n }\n /* c8 ignore stop */\n\n /* c8 ignore start - defensive guard: recoveredData only contains items where parsedMap.has() returned true */\n if (recoveredCaptions.length !== recoveredData.length) {\n throw new Error(\n `[DocumentProcessor] Failed to recover from length mismatch: ` +\n `recovered ${recoveredCaptions.length} captions for ${recoveredData.length} valid items`,\n );\n }\n /* c8 ignore stop */\n\n finalValidCaptionData = recoveredData;\n finalParsedCaptions = recoveredCaptions;\n\n this.logger.info(\n `[DocumentProcessor] Successfully recovered ${finalParsedCaptions.length} ${resourceType} captions after length mismatch`,\n );\n }\n\n // Store parsed captions by resource index\n for (let i = 0; i < finalParsedCaptions.length; i++) {\n const resourceIndex = finalValidCaptionData[i].resourceIndex;\n captionsByIndex.set(resourceIndex, finalParsedCaptions[i]);\n }\n\n // Step 2: Validate parsed captions\n if (finalParsedCaptions.length > 0) {\n const finalValidCaptionTexts = finalValidCaptionData.map(\n (item) => item.text,\n );\n const validationResults = await this.captionValidator!.validateBatch(\n finalParsedCaptions,\n finalValidCaptionTexts,\n this.captionValidatorBatchSize,\n );\n\n // Step 3: Reparse failed captions with fallback model\n const failedIndices = validationResults\n .map((isValid, index) => (isValid ? 
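The recovery trick generalizes: when a batch returns fewer results than inputs, re-pair by a stable key instead of by position. A generic sketch:

```typescript
// Re-pair inputs with results via a stable key (the code above uses the
// caption's fullText); inputs with no matching result are dropped, and
// duplicate keys collapse (last one wins), mirroring the Map above.
function recoverByKey<I, R>(
  inputs: I[],
  results: R[],
  inputKey: (input: I) => string,
  resultKey: (result: R) => string,
): Array<{ input: I; result: R }> {
  const byKey = new Map(
    results.map((r) => [resultKey(r), r] as [string, R]),
  );
  const recovered: Array<{ input: I; result: R }> = [];
  for (const input of inputs) {
    const result = byKey.get(inputKey(input));
    if (result) recovered.push({ input, result });
  }
  return recovered;
}

// Three inputs, two results: the unmatched input is skipped.
const pairs = recoverByKey(
  ['a', 'b', 'c'],
  [{ key: 'a' }, { key: 'c' }],
  (s) => s,
  (r) => r.key,
);
console.log(pairs.length); // 2
```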
-1 : index))\n .filter((index) => index !== -1);\n\n if (failedIndices.length > 0) {\n for (const filteredIndex of failedIndices) {\n const captionData = finalValidCaptionData[filteredIndex];\n const originalText = captionData.text;\n const parsedNum = finalParsedCaptions[filteredIndex].num;\n const resourceIndex = captionData.resourceIndex;\n this.logger.warn(\n `[DocumentProcessor] Invalid ${resourceType} caption [${resourceIndex}]: \"${originalText}\" | parsed num=\"${parsedNum}\"`,\n );\n }\n\n // Reparse failed captions with fallback model if enabled\n if (this.enableFallbackRetry) {\n this.logger.info(\n `[DocumentProcessor] Reparsing ${failedIndices.length} failed ${resourceType} captions with fallback model...`,\n );\n\n // Collect failed caption texts\n const failedCaptionTexts = failedIndices.map(\n (filteredIndex) => finalValidCaptionData[filteredIndex].text,\n );\n\n // Create a new CaptionParser instance with fallback model for separate token tracking\n const fallbackCaptionParser = new CaptionParser(\n this.logger,\n this.fallbackModel,\n {\n maxRetries: this.maxRetries,\n componentName: 'CaptionParser-fallback',\n abortSignal: this.abortSignal,\n },\n undefined, // no fallback for the fallback\n this.usageAggregator,\n );\n\n // Reparse with fallback model (sequential processing for better accuracy)\n const reparsedCaptions = await fallbackCaptionParser.parseBatch(\n failedCaptionTexts,\n 0, // sequential processing\n );\n\n // Update captionsByIndex with reparsed results\n for (let i = 0; i < failedIndices.length; i++) {\n const filteredIndex = failedIndices[i];\n const resourceIndex =\n finalValidCaptionData[filteredIndex].resourceIndex;\n captionsByIndex.set(resourceIndex, reparsedCaptions[i]);\n }\n\n this.logger.info(\n `[DocumentProcessor] Reparsed ${reparsedCaptions.length} ${resourceType} captions`,\n );\n } else {\n this.logger.warn(\n `[DocumentProcessor] ${failedIndices.length} ${resourceType} captions failed validation (kept as-is, fallback retry disabled)`,\n );\n }\n }\n }\n\n return captionsByIndex;\n }\n\n /**\n * Extract caption text from resource\n *\n * Handles both string references and $ref resolution\n */\n private extractCaptionText(\n captions: Array<string | { $ref: string }> | undefined,\n ): string | undefined {\n if (!captions?.[0]) {\n return undefined;\n }\n\n const captionRef = captions[0];\n if (typeof captionRef === 'string') {\n return captionRef;\n }\n\n if (this.refResolver && '$ref' in captionRef) {\n const resolved = this.refResolver.resolveText(captionRef.$ref);\n return resolved?.text;\n }\n\n return undefined;\n }\n\n /**\n * Convert images\n *\n * Converts pictures from DoclingDocument to ProcessedImage\n */\n private async convertImages(\n doclingDoc: DoclingDocument,\n outputPath: string,\n ): Promise<ProcessedImage[]> {\n this.logger.info(\n `[DocumentProcessor] Converting ${doclingDoc.pictures.length} images...`,\n );\n\n const images: ProcessedImage[] = [];\n const captionTexts: Array<string | undefined> = [];\n\n // Step 1: Collect image data and caption texts\n for (const picture of doclingDoc.pictures) {\n const pdfPageNo = picture.prov?.[0]?.page_no ?? 0;\n const imageId =\n this.idGenerator?.generateImageId() ?? 
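The fallback reparse can also be driven standalone; a sketch assuming the frontier model from the main example, with sequential processing (batch size 0) and a distinct componentName so its token usage is tracked separately from the primary parser's:

```typescript
import { anthropic } from '@ai-sdk/anthropic';
import { CaptionParser } from '@heripo/document-processor';
import { getLogger } from '@heripo/logger';

// No fallback model is passed: there is no fallback for the fallback.
const fallbackParser = new CaptionParser(
  getLogger(),
  anthropic('claude-opus-4-5-20251101'),
  { maxRetries: 3, componentName: 'CaptionParser-fallback' },
);

const failedTexts = ['도판 2-3 유물 실측도'];
const reparsed = await fallbackParser.parseBatch(failedTexts, 0);
console.log(reparsed.length); // 1
```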
`img-${images.length + 1}`;\n\n const captionText = this.extractCaptionText(picture.captions);\n captionTexts.push(captionText);\n\n images.push({\n id: imageId,\n path: `${outputPath}/images/image_${images.length}.png`,\n pdfPageNo,\n // caption will be assigned later\n });\n }\n\n // Step 2: Process captions\n const captionsByIndex = await this.processResourceCaptions(\n captionTexts,\n 'image',\n );\n\n // Step 3: Assign parsed captions to images\n for (let i = 0; i < images.length; i++) {\n if (captionsByIndex.has(i)) {\n images[i].caption = captionsByIndex.get(i);\n }\n }\n\n return images;\n }\n\n /**\n * Convert tables\n *\n * Converts tables from DoclingDocument to ProcessedTable\n */\n private async convertTables(\n doclingDoc: DoclingDocument,\n ): Promise<ProcessedTable[]> {\n this.logger.info(\n `[DocumentProcessor] Converting ${doclingDoc.tables.length} tables...`,\n );\n\n const tables: ProcessedTable[] = [];\n const captionTexts: Array<string | undefined> = [];\n\n // Step 1: Collect table data and caption texts\n for (const table of doclingDoc.tables) {\n const pdfPageNo = table.prov?.[0]?.page_no ?? 0;\n const tableId =\n this.idGenerator?.generateTableId() ?? `tbl-${tables.length + 1}`;\n\n // Convert table cells\n const grid: ProcessedTableCell[][] = table.data.grid.map((row) =>\n row.map((cell) => ({\n text: cell.text,\n rowSpan: cell.row_span ?? 1,\n colSpan: cell.col_span ?? 1,\n isHeader: cell.column_header || cell.row_header || false,\n })),\n );\n\n const captionText = this.extractCaptionText(table.captions);\n captionTexts.push(captionText);\n\n tables.push({\n id: tableId,\n pdfPageNo,\n numRows: grid.length,\n numCols: grid[0]?.length ?? 0,\n grid,\n // caption will be assigned later\n });\n }\n\n // Step 2: Process captions\n const captionsByIndex = await this.processResourceCaptions(\n captionTexts,\n 'table',\n );\n\n // Step 3: Assign parsed captions to tables\n for (let i = 0; i < tables.length; i++) {\n if (captionsByIndex.has(i)) {\n tables[i].caption = captionsByIndex.get(i);\n }\n }\n\n return tables;\n }\n\n /**\n * Convert chapters and link resources\n *\n * Generates chapters based on TOC and links images/tables/footnotes using ChapterConverter.\n * Throws TocNotFoundError if TOC entries are empty (defensive assertion).\n */\n private async convertChapters(\n doclingDoc: DoclingDocument,\n tocEntries: TocEntry[],\n pageRangeMap: Record<number, PageRange>,\n images: ProcessedImage[],\n tables: ProcessedTable[],\n footnotes: ProcessedFootnote[],\n ): Promise<Chapter[]> {\n this.logger.info('[DocumentProcessor] Converting chapters...');\n\n // Defensive assertion - TOC entries should always be present at this point\n if (tocEntries.length === 0) {\n const reason = 'Cannot convert chapters without TOC entries';\n this.logger.error(`[DocumentProcessor] ${reason}`);\n throw new TocNotFoundError(reason);\n }\n\n // Use ChapterConverter for TOC-based conversion\n const chapters = this.chapterConverter!.convert(\n tocEntries,\n doclingDoc.texts,\n pageRangeMap,\n images,\n tables,\n footnotes,\n );\n\n this.logger.info(\n `[DocumentProcessor] Converted ${chapters.length} top-level chapters`,\n );\n\n return chapters;\n 
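The cell conversion is a plain snake_case-to-camelCase mapping with defaults. Condensed, with `DoclingCell` as a simplified assumption about the Docling grid cell shape and `ProcessedTableCell` re-declared locally for self-containment:

```typescript
// Simplified assumption about the Docling grid cell shape.
interface DoclingCell {
  text: string;
  row_span?: number;
  col_span?: number;
  column_header?: boolean;
  row_header?: boolean;
}

interface ProcessedTableCell {
  text: string;
  rowSpan: number;
  colSpan: number;
  isHeader: boolean;
}

function toProcessedCell(cell: DoclingCell): ProcessedTableCell {
  return {
    text: cell.text,
    rowSpan: cell.row_span ?? 1,
    colSpan: cell.col_span ?? 1,
    // A cell counts as a header if it heads either a column or a row.
    isHeader: cell.column_header || cell.row_header || false,
  };
}

console.log(toProcessedCell({ text: '유구', column_header: true }));
// { text: '유구', rowSpan: 1, colSpan: 1, isHeader: true }
```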
}\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;AGEA,gBAAyD;AFGlD,IAAM,iBAAN,MAAqB;;;;;;;;;;;;;;;EAe1B,OAAO,cAAiB,OAAY,WAA0B;AAC5D,UAAM,UAAiB,CAAC;AACxB,aAAS,IAAI,GAAG,IAAI,MAAM,QAAQ,KAAK,WAAW;AAChD,cAAQ,KAAK,MAAM,MAAM,GAAG,IAAI,SAAS,CAAC;IAC5C;AACA,WAAO;EACT;;;;;;;;;;;;;;;;;;;;;;EAuBA,aAAa,aACX,OACA,WACA,WACc;AACd,UAAM,UAAU,KAAK,cAAc,OAAO,SAAS;AACnD,UAAM,UAAU,MAAM,QAAQ,IAAI,QAAQ,IAAI,CAAC,UAAU,UAAU,KAAK,CAAC,CAAC;AAC1E,WAAO,QAAQ,KAAK;EACtB;;;;;;;;;;;;;;;;;;;;EAqBA,OAAO,iBACL,OACA,WACA,WACK;AACL,UAAM,UAAU,KAAK,cAAc,OAAO,SAAS;AACnD,UAAM,UAAU,QAAQ,IAAI,CAAC,UAAU,UAAU,KAAK,CAAC;AACvD,WAAO,QAAQ,KAAK;EACtB;AACF;AEiFO,IAAM,YAAN,MAAgB;;;;;;EAMrB,OAAe,iBAAiB,OAA8B;AAC5D,UAAM,WAAW;AAGjB,QAAI,OAAO,SAAS,YAAY,SAAU,QAAO,SAAS;AAC1D,QAAI,OAAO,SAAS,OAAO,SAAU,QAAO,SAAS;AACrD,QAAI,OAAO,SAAS,UAAU,SAAU,QAAO,SAAS;AACxD,QAAI,OAAO,SAAS,SAAS,SAAU,QAAO,SAAS;AAGvD,WAAO,OAAO,KAAK;EACrB;;;;EAKA,OAAe,WACb,QACA,WACA,UAOA,cACoB;AACpB,WAAO;MACL,WAAW,OAAO;MAClB,OAAO,OAAO;MACd,OAAO,eAAe,aAAa;MACnC;MACA,aAAa,SAAS,OAAO,eAAe;MAC5C,cAAc,SAAS,OAAO,gBAAgB;MAC9C,aAAa,SAAS,OAAO,eAAe;IAC9C;EACF;;;;;;EAOA,aAAqB,oBACnB,QACA,YAQiC;AACjC,UAAM,mBAAmB,KAAK,iBAAiB,OAAO,YAAY;AAGlE,QAAI;AACF,YAAM,WAAW,MAAM,WAAW,OAAO,YAAY;AAErD,aAAO;QACL,QAAQ,SAAS;QACjB,OAAO,KAAK,WAAW,QAAQ,kBAAkB,UAAU,KAAK;QAChE,cAAc;MAChB;IACF,SAAS,cAAc;AAErB,UAAI,OAAO,aAAa,SAAS;AAC/B,cAAM;MACR;AAGA,UAAI,CAAC,OAAO,eAAe;AACzB,cAAM;MACR;AAGA,YAAM,oBAAoB,KAAK,iBAAiB,OAAO,aAAa;AACpE,YAAM,WAAW,MAAM,WAAW,OAAO,aAAa;AAEtD,aAAO;QACL,QAAQ,SAAS;QACjB,OAAO,KAAK,WAAW,QAAQ,mBAAmB,UAAU,IAAI;QAChE,cAAc;MAChB;IACF;EACF;;;;;;;;;;;;;;EAeA,aAAa,KACX,QACiC;AACjC,WAAO,KAAK;MAAoB;MAAQ,CAAC,cACvC,wBAAa;QACX;QACA,QAAQ,iBAAO,OAAO;UACpB,QAAQ,OAAO;QACjB,CAAC;QACD,QAAQ,OAAO;QACf,QAAQ,OAAO;QACf,aAAa,OAAO;QACpB,YAAY,OAAO;QACnB,aAAa,OAAO;MACtB,CAAC;IACH;EACF;;;;;;;;;;;EAYA,aAAa,WACX,QACiC;AACjC,WAAO,KAAK;MAAoB;MAAQ,CAAC,cACvC,wBAAa;QACX;QACA,QAAQ,iBAAO,OAAO;UACpB,QAAQ,OAAO;QACjB,CAAC;QACD,UAAU,OAAO;QACjB,aAAa,OAAO;QACpB,YAAY,OAAO;QACnB,aAAa,OAAO;MACtB,CAAC;IACH;EACF;AACF;AC7SA,SAAS,aAAa,OAA2B;AAC/C,SAAO,GAAG,MAAM,WAAW,WAAW,MAAM,YAAY,YAAY,MAAM,WAAW;AACvF;AAsFO,IAAM,0BAAN,MAA8B;EAC3B,QAA4C,CAAC;;;;;;EAOrD,MAAM,OAAiC;AAErC,QAAI,CAAC,KAAK,MAAM,MAAM,SAAS,GAAG;AAChC,WAAK,MAAM,MAAM,SAAS,IAAI;QAC5B,WAAW,MAAM;QACjB,QAAQ,CAAC;QACT,OAAO;UACL,aAAa;UACb,cAAc;UACd,aAAa;QACf;MACF;IACF;AAEA,UAAM,YAAY,KAAK,MAAM,MAAM,SAAS;AAG5C,QAAI,CAAC,UAAU,OAAO,MAAM,KAAK,GAAG;AAClC,gBAAU,OAAO,MAAM,KAAK,IAAI;QAC9B,OAAO;UACL,aAAa;UACb,cAAc;UACd,aAAa;QACf;MACF;IACF;AAEA,UAAM,QAAQ,UAAU,OAAO,MAAM,KAAK;AAG1C,QAAI,MAAM,UAAU,WAAW;AAC7B,UAAI,CAAC,MAAM,SAAS;AAClB,cAAM,UAAU;UACd,WAAW,MAAM;UACjB,aAAa;UACb,cAAc;UACd,aAAa;QACf;MACF;AAEA,YAAM,QAAQ,eAAe,MAAM;AACnC,YAAM,QAAQ,gBAAgB,MAAM;AACpC,YAAM,QAAQ,eAAe,MAAM;IACrC,WAAW,MAAM,UAAU,YAAY;AACrC,UAAI,CAAC,MAAM,UAAU;AACnB,cAAM,WAAW;UACf,WAAW,MAAM;UACjB,aAAa;UACb,cAAc;UACd,aAAa;QACf;MACF;AAEA,YAAM,SAAS,eAAe,MAAM;AACpC,YAAM,SAAS,gBAAgB,MAAM;AACrC,YAAM,SAAS,eAAe,MAAM;IACtC;AAGA,UAAM,MAAM,eAAe,MAAM;AACjC,UAAM,MAAM,gBAAgB,MAAM;AAClC,UAAM,MAAM,eAAe,MAAM;AAGjC,cAAU,MAAM,eAAe,MAAM;AACrC,cAAU,MAAM,gBAAgB,MAAM;AACtC,cAAU,MAAM,eAAe,MAAM;EACvC;;;;;;EAOA,iBAAuC;AACrC,WAAO,OAAO,OAAO,KAAK,KAAK;EACjC;;;;;;;;;;EAWA,YA8BE;AACA,UAAM,aA2BD,CAAC;AAEN,eAAW,aAAa,OAAO,OAAO,KAAK,KAAK,GAAG;AACjD,YAAM,SAmBD,CAAC;AAEN,iBAAW,CAAC,WAAW,SAAS,KAAK,OAAO,QAAQ,UAAU,MAAM,GAAG;AACrE,cAAM,cAmBF;UACF,OAAO;UACP,OAAO;YACL,aAAa,UAAU,MAAM;YAC7B,cAAc,UAAU,MAAM;YAC9B,aAAa,UAAU,MAAM;UAC/B;QACF;A
AEA,YAAI,UAAU,SAAS;AACrB,sBAAY,UAAU;YACpB,WAAW,UAAU,QAAQ;YAC7B,aAAa,UAAU,QAAQ;YAC/B,cAAc,UAAU,QAAQ;YAChC,aAAa,UAAU,QAAQ;UACjC;QACF;AAEA,YAAI,UAAU,UAAU;AACtB,sBAAY,WAAW;YACrB,WAAW,UAAU,SAAS;YAC9B,aAAa,UAAU,SAAS;YAChC,cAAc,UAAU,SAAS;YACjC,aAAa,UAAU,SAAS;UAClC;QACF;AAEA,eAAO,KAAK,WAAW;MACzB;AAEA,iBAAW,KAAK;QACd,WAAW,UAAU;QACrB;QACA,OAAO;UACL,aAAa,UAAU,MAAM;UAC7B,cAAc,UAAU,MAAM;UAC9B,aAAa,UAAU,MAAM;QAC/B;MACF,CAAC;IACH;AAEA,UAAM,aAAa,KAAK,cAAc;AAEtC,WAAO;MACL;MACA,OAAO;QACL,aAAa,WAAW;QACxB,cAAc,WAAW;QACzB,aAAa,WAAW;MAC1B;IACF;EACF;;;;;;EAOA,gBAA4B;AAC1B,QAAI,aAAa;AACjB,QAAI,cAAc;AAClB,QAAI,cAAc;AAElB,eAAW,aAAa,OAAO,OAAO,KAAK,KAAK,GAAG;AACjD,oBAAc,UAAU,MAAM;AAC9B,qBAAe,UAAU,MAAM;AAC/B,qBAAe,UAAU,MAAM;IACjC;AAEA,WAAO;MACL,aAAa;MACb,cAAc;MACd;IACF;EACF;;;;;;;;;;EAWA,WAAW,QAA6B;AACtC,UAAM,aAAa,KAAK,eAAe;AAEvC,QAAI,WAAW,WAAW,GAAG;AAC3B,aAAO,KAAK,8CAA8C;AAC1D;IACF;AAEA,WAAO,KAAK,0CAA0C;AACtD,WAAO,KAAK,EAAE;AAEd,QAAI,mBAAmB;AACvB,QAAI,oBAAoB;AACxB,QAAI,mBAAmB;AACvB,QAAI,0BAA0B;AAC9B,QAAI,2BAA2B;AAC/B,QAAI,0BAA0B;AAC9B,QAAI,2BAA2B;AAC/B,QAAI,4BAA4B;AAChC,QAAI,2BAA2B;AAE/B,eAAW,aAAa,YAAY;AAClC,aAAO,KAAK,GAAG,UAAU,SAAS,GAAG;AAErC,iBAAW,CAAC,OAAO,SAAS,KAAK,OAAO,QAAQ,UAAU,MAAM,GAAG;AACjE,eAAO,KAAK,OAAO,KAAK,GAAG;AAG3B,YAAI,UAAU,SAAS;AACrB,iBAAO;YACL,kBAAkB,UAAU,QAAQ,SAAS,MAAM,aAAa,UAAU,OAAO,CAAC;UACpF;AACA,qCAA2B,UAAU,QAAQ;AAC7C,sCAA4B,UAAU,QAAQ;AAC9C,qCAA2B,UAAU,QAAQ;QAC/C;AAGA,YAAI,UAAU,UAAU;AACtB,iBAAO;YACL,mBAAmB,UAAU,SAAS,SAAS,MAAM,aAAa,UAAU,QAAQ,CAAC;UACvF;AACA,sCAA4B,UAAU,SAAS;AAC/C,uCAA6B,UAAU,SAAS;AAChD,sCAA4B,UAAU,SAAS;QACjD;AAGA,eAAO,KAAK,mBAAmB,aAAa,UAAU,KAAK,CAAC,EAAE;MAChE;AAEA,aAAO;QACL,KAAK,UAAU,SAAS,WAAW,aAAa,UAAU,KAAK,CAAC;MAClE;AACA,aAAO,KAAK,EAAE;AAEd,0BAAoB,UAAU,MAAM;AACpC,2BAAqB,UAAU,MAAM;AACrC,0BAAoB,UAAU,MAAM;IACtC;AAGA,WAAO,KAAK,iBAAiB;AAC7B,QAAI,0BAA0B,GAAG;AAC/B,aAAO;QACL,kBAAkB,aAAa;UAC7B,aAAa;UACb,cAAc;UACd,aAAa;QACf,CAAC,CAAC;MACJ;IACF;AACA,QAAI,2BAA2B,GAAG;AAChC,aAAO;QACL,mBAAmB,aAAa;UAC9B,aAAa;UACb,cAAc;UACd,aAAa;QACf,CAAC,CAAC;MACJ;IACF;AACA,WAAO;MACL,gBAAgB,aAAa;QAC3B,aAAa;QACb,cAAc;QACd,aAAa;MACf,CAAC,CAAC;IACJ;EACF;;;;;;EAOA,QAAc;AACZ,SAAK,QAAQ,CAAC;EAChB;AACF;;;ACxdO,IAAM,cAAN,MAAkB;AAAA,EACN;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EAEjB,YAAY,QAAuB,KAAsB;AACvD,SAAK,SAAS;AACd,SAAK,OAAO,KAAK,kDAAkD;AAEnE,SAAK,UAAU,KAAK,WAAW,IAAI,OAAO,OAAO;AACjD,SAAK,aAAa,KAAK,WAAW,IAAI,UAAU,UAAU;AAC1D,SAAK,WAAW,KAAK,WAAW,IAAI,QAAQ,QAAQ;AACpD,SAAK,WAAW,KAAK,WAAW,IAAI,QAAQ,QAAQ;AAEpD,SAAK,OAAO;AAAA,MACV,yBAAyB,KAAK,QAAQ,IAAI,WAAW,KAAK,WAAW,IAAI,cAAc,KAAK,SAAS,IAAI,YAAY,KAAK,SAAS,IAAI;AAAA,IACzI;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,WACN,OACA,SACgB;AAChB,UAAM,MAAM,oBAAI,IAAe;AAC/B,eAAW,QAAQ,OAAO;AACxB,UAAI,IAAI,KAAK,UAAU,IAAI;AAAA,IAC7B;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,QACE,KAMO;AAGP,UAAM,QAAQ,IAAI,MAAM,aAAa;AACrC,QAAI,CAAC,OAAO;AACV,WAAK,OAAO,KAAK,2CAA2C,GAAG,EAAE;AACjE,aAAO;AAAA,IACT;AAEA,UAAM,aAAa,MAAM,CAAC;AAE1B,QAAI,eAAe,SAAS;AAC1B,YAAM,SAAS,KAAK,QAAQ,IAAI,GAAG,KAAK;AACxC,UAAI,CAAC,QAAQ;AACX,aAAK,OAAO,KAAK,2CAA2C,GAAG,EAAE;AAAA,MACnE;AACA,aAAO;AAAA,IACT;AACA,QAAI,eAAe,YAAY;AAC7B,YAAM,SAAS,KAAK,WAAW,IAAI,GAAG,KAAK;AAC3C,UAAI,CAAC,QAAQ;AACX,aAAK,OAAO,KAAK,8CAA8C,GAAG,EAAE;AAAA,MACtE;AACA,aAAO;AAAA,IACT;AACA,QAAI,eAAe,UAAU;AAC3B,YAAM,SAAS,KAAK,SAAS,IAAI,GAAG,KAAK;AACzC,UAAI,CAAC,QAAQ;AACX,aAAK,OAAO,KAAK,4CAA4C,GAAG,EAAE;AAAA,MACpE;AACA,aAAO;AAAA,IACT;AACA,QAAI,eAAe,UAAU;AAC3B,YAAM,SAAS,KAAK,SAAS,IAAI,GAAG,KAAK;AACzC,UAAI,CAAC,QAAQ;AACX,aAAK,OAAO,KAAK,4CAA4C,GAAG,EAAE;AAAA,MACpE;AACA,aAAO;AAAA,IACT;AAEA,SAAK,OAAO,KAAK,0CAA0C,UAAU,EAAE;AACvE,WAAO;AAAA,EACT;AAAA;AAAA;A
AAA;AAAA;AAAA;AAAA,EAOA,YAAY,KAAqC;AAC/C,WAAO,KAAK,QAAQ,IAAI,GAAG,KAAK;AAAA,EAClC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,eAAe,KAAwC;AACrD,WAAO,KAAK,WAAW,IAAI,GAAG,KAAK;AAAA,EACrC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,aAAa,KAAsC;AACjD,WAAO,KAAK,SAAS,IAAI,GAAG,KAAK;AAAA,EACnC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,aAAa,KAAsC;AACjD,WAAO,KAAK,SAAS,IAAI,GAAG,KAAK;AAAA,EACnC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,YACE,MAOA;AACA,WAAO,KAAK,IAAI,CAAC,QAAQ,KAAK,QAAQ,IAAI,IAAI,CAAC;AAAA,EACjD;AACF;;;ACpJO,IAAM,cAAN,MAAkB;AAAA,EACf,iBAAiB;AAAA,EACjB,eAAe;AAAA,EACf,eAAe;AAAA,EACf,kBAAkB;AAAA;AAAA;AAAA;AAAA;AAAA,EAM1B,oBAA4B;AAC1B,SAAK;AACL,WAAO,MAAM,KAAK,UAAU,KAAK,cAAc,CAAC;AAAA,EAClD;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,kBAA0B;AACxB,SAAK;AACL,WAAO,OAAO,KAAK,UAAU,KAAK,YAAY,CAAC;AAAA,EACjD;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,kBAA0B;AACxB,SAAK;AACL,WAAO,OAAO,KAAK,UAAU,KAAK,YAAY,CAAC;AAAA,EACjD;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,qBAA6B;AAC3B,SAAK;AACL,WAAO,OAAO,KAAK,UAAU,KAAK,eAAe,CAAC;AAAA,EACpD;AAAA;AAAA;AAAA;AAAA,EAKA,QAAc;AACZ,SAAK,iBAAiB;AACtB,SAAK,eAAe;AACpB,SAAK,eAAe;AACpB,SAAK,kBAAkB;AAAA,EACzB;AAAA;AAAA;AAAA;AAAA,EAKA,cAKE;AACA,WAAO;AAAA,MACL,SAAS,KAAK;AAAA,MACd,OAAO,KAAK;AAAA,MACZ,OAAO,KAAK;AAAA,MACZ,UAAU,KAAK;AAAA,IACjB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,UAAU,KAAqB;AACrC,WAAO,IAAI,SAAS,EAAE,SAAS,GAAG,GAAG;AAAA,EACvC;AACF;;;AC1EO,IAAM,cAAN,MAAkB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOvB,OAAO,UAAU,MAAsB;AACrC,QAAI,CAAC,KAAM,QAAO;AAGlB,QAAI,aAAa,KAAK,UAAU,KAAK;AAGrC,iBAAa,WAAW,QAAQ,4BAA4B,GAAG;AAG/D,iBAAa,WAAW,QAAQ,YAAY,GAAG;AAG/C,iBAAa,WAAW,QAAQ,QAAQ,GAAG;AAG3C,iBAAa,WAAW,KAAK;AAE7B,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,OAAO,iBAAiB,MAAsB;AAC5C,QAAI,CAAC,KAAM,QAAO;AAGlB,QAAI,UAAU,KAAK,QAAQ,iBAAiB,EAAE;AAG9C,cAAU,QAAQ,QAAQ,iBAAiB,EAAE;AAE7C,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKA,OAAO,YAAY,MAAuB;AACxC,QAAI,CAAC,KAAM,QAAO;AAClB,UAAM,UAAU,KAAK,UAAU,IAAI;AAEnC,WAAO,CAAC,eAAe,KAAK,OAAO;AAAA,EACrC;AAAA;AAAA;AAAA;AAAA,EAKA,OAAO,eAAe,OAA2B;AAC/C,WAAO,MAAM,IAAI,CAAC,SAAS,KAAK,UAAU,IAAI,CAAC;AAAA,EACjD;AAAA;AAAA;AAAA;AAAA,EAKA,OAAO,iBAAiB,OAA2B;AACjD,WAAO,MAAM,OAAO,CAAC,SAAS,KAAK,YAAY,IAAI,CAAC;AAAA,EACtD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAyBA,OAAO,wBACL,OACA,YAAoB,IACV;AACV,QAAI,cAAc,GAAG;AAEnB,YAAM,UAAoB,CAAC;AAC3B,iBAAW,QAAQ,OAAO;AACxB,cAAM,aAAa,KAAK,UAAU,IAAI;AACtC,YAAI,KAAK,YAAY,UAAU,GAAG;AAChC,kBAAQ,KAAK,UAAU;AAAA,QACzB;AAAA,MACF;AACA,aAAO;AAAA,IACT;AAGA,WAAO,eAAe,iBAAiB,OAAO,WAAW,CAAC,UAAU;AAElE,YAAM,aAAa,KAAK,eAAe,KAAK;AAE5C,aAAO,KAAK,iBAAiB,UAAU;AAAA,IACzC,CAAC;AAAA,EACH;AACF;;;ACjHO,IAAM,oBAAN,MAAM,mBAAkB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQ7B,OAAO,QAAQ,MAAgB,aAAkC;AAC/D,QAAI,KAAK,WAAW,GAAG;AACrB,aAAO;AAAA,IACT;AAEA,UAAM,QAAkB,CAAC;AAEzB,eAAW,OAAO,MAAM;AACtB,YAAM,OAAO,YAAY,QAAQ,GAAG;AACpC,UAAI,CAAC,MAAM;AACT;AAAA,MACF;AAGA,UAAI,UAAU,SAAS,KAAK,SAAS,UAAU,KAAK,SAAS,UAAU;AACrE,cAAM,gBAAgB,mBAAkB;AAAA,UACtC;AAAA,UACA;AAAA,UACA;AAAA,QACF;AACA,YAAI,eAAe;AACjB,gBAAM,KAAK,aAAa;AAAA,QAC1B;AAAA,MACF,WAES,UAAU,QAAQ,UAAW,KAA0B,MAAM;AACpE,cAAM,gBAAgB,mBAAkB;AAAA,UACtC;AAAA,QACF;AACA,YAAI,eAAe;AACjB,gBAAM,KAAK,aAAa;AAAA,QAC1B;AAAA,MACF,WAES,UAAU,QAAQ,UAAU,MAAM;AACzC,cAAM,eAAe,mBAAkB;AAAA,UACrC;AAAA,UACA;AAAA,QACF;AACA,YAAI,cAAc;AAChB,gBAAM,KAAK,YAAY;AAAA,QACzB;AAAA,MACF;AAAA,IACF;AAEA,WAAO,MAAM,KAAK,MAAM;AAAA,EAC1B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,OAAO,gBACL,OACA,aACA,cAAc,GACN;AACR,UAAM,QAAkB,CAAC;AAEzB,eAAW,YAAY,MAAM,UAAU;AACrC,YAAM,QAAQ,YAAY,QAAQ,SAAS,IAAI;AAC/C,UAAI,CAAC,OAAO;AACV;AAAA,MACF;AAGA,UACE,U
AAU,UACT,MAAM,SAAS,UAAU,MAAM,SAAS,UACzC;AACA,cAAM,iBAAiB,mBAAkB;AAAA,UACvC;AAAA,UACA;AAAA,UACA,cAAc;AAAA,QAChB;AACA,YAAI,gBAAgB;AAClB,gBAAM,KAAK,cAAc;AAAA,QAC3B;AAAA,MACF,WAES,UAAU,SAAS,UAAU,OAAO;AAC3C,cAAM,eAAe,mBAAkB;AAAA,UACrC;AAAA,UACA;AAAA,QACF;AACA,YAAI,cAAc;AAChB,gBAAM,KAAK,YAAY;AAAA,QACzB;AAAA,MACF;AAAA,IACF;AAEA,WAAO,MAAM,KAAK,IAAI;AAAA,EACxB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYA,OAAO,gBAAgB,OAAiC;AACtD,UAAM,EAAE,KAAK,IAAI,MAAM;AACvB,QAAI,CAAC,QAAQ,KAAK,WAAW,GAAG;AAC9B,aAAO;AAAA,IACT;AAEA,UAAM,QAAkB,CAAC;AAGzB,aAAS,SAAS,GAAG,SAAS,KAAK,QAAQ,UAAU;AACnD,YAAM,MAAM,KAAK,MAAM;AACvB,UAAI,CAAC,OAAO,IAAI,WAAW,GAAG;AAC5B;AAAA,MACF;AAEA,YAAM,QAAQ,IAAI;AAAA,QAAI,CAAC,SACrB,mBAAkB,gBAAgB,KAAK,IAAI;AAAA,MAC7C;AACA,YAAM,KAAK,KAAK,MAAM,KAAK,KAAK,CAAC,IAAI;AAGrC,UAAI,WAAW,GAAG;AAChB,cAAM,YAAY,IAAI,IAAI,MAAM,KAAK,EAAE,KAAK,KAAK;AACjD,cAAM,KAAK,KAAK,SAAS,IAAI;AAAA,MAC/B;AAAA,IACF;AAEA,WAAO,MAAM,KAAK,IAAI;AAAA,EACxB;AAAA;AAAA;AAAA;AAAA,EAKA,OAAO,eAAe,MAAuB,cAAc,GAAW;AACpE,UAAM,UAAU,KAAK,KAAK,KAAK;AAC/B,QAAI,CAAC,SAAS;AACZ,aAAO;AAAA,IACT;AAEA,UAAM,SAAS,mBAAkB,UAAU,WAAW;AACtD,UAAM,SAAS,mBAAkB;AAAA,MAC/B,KAAK;AAAA,MACL,KAAK;AAAA,IACP;AAEA,WAAO,GAAG,MAAM,GAAG,MAAM,GAAG,OAAO;AAAA,EACrC;AAAA;AAAA;AAAA;AAAA,EAKA,OAAe,cAAc,YAAsB,QAAyB;AAC1E,QAAI,QAAQ;AACV,aAAO,GAAG,MAAM;AAAA,IAClB;AACA,QAAI,eAAe,MAAM;AACvB,aAAO;AAAA,IACT;AACA,QAAI,eAAe,OAAO;AACxB,aAAO;AAAA,IACT;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKA,OAAe,UAAU,OAAuB;AAC9C,WAAO,KAAK,OAAO,KAAK;AAAA,EAC1B;AAAA;AAAA;AAAA;AAAA,EAKA,OAAe,gBAAgB,MAAsB;AACnD,WAAO,KAAK,QAAQ,OAAO,KAAK,EAAE,QAAQ,OAAO,GAAG,EAAE,KAAK;AAAA,EAC7D;AACF;;;AC5JO,IAAM,mBAAN,MAAM,kBAAiB;AAAA,EAC5B,OAAwB,kBAAkB;AAAA,EAC1C,OAAwB,qBAAqB;AAAA,EAE5B;AAAA,EACA;AAAA,EAEjB,YAAY,QAAuB,aAA0B;AAC3D,SAAK,SAAS;AACd,SAAK,cAAc;AAAA,EACrB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAaA,QACE,YACA,WACA,cACA,QACA,QACA,WACW;AACX,SAAK,OAAO,KAAK,mDAAmD;AAGpE,UAAM,cAAc,KAAK,yBAAyB;AAGlD,UAAM,cAAc,KAAK,iBAAiB,UAAU;AACpD,SAAK,OAAO;AAAA,MACV,4BAA4B,YAAY,MAAM;AAAA,IAChD;AAGA,UAAM,cAAc,CAAC,aAAa,GAAG,WAAW;AAGhD,UAAM,eAAe,KAAK,gBAAgB,WAAW;AACrD,UAAM,gBAAgB,KAAK,oBAAoB,cAAc,UAAU;AACvE,SAAK,OAAO;AAAA,MACV,4CAA4C,cAAc,IAAI;AAAA,IAChE;AAGA,UAAM,aAAa,KAAK,kBAAkB,WAAW,YAAY;AACjE,SAAK,iBAAiB,aAAa,YAAY,eAAe,YAAY;AAC1E,SAAK,OAAO;AAAA,MACV,+BAA+B,WAAW,MAAM;AAAA,IAClD;AAGA,SAAK;AAAA,MACH;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AACA,SAAK,OAAO;AAAA,MACV,6BAA6B,OAAO,MAAM,YAAY,OAAO,MAAM,gBAAgB,UAAU,MAAM;AAAA,IACrG;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKQ,2BAAoC;AAC1C,WAAO;AAAA,MACL,IAAI,kBAAiB;AAAA,MACrB,aAAa,kBAAiB;AAAA,MAC9B,OAAO,kBAAiB;AAAA,MACxB,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,YAAY,CAAC;AAAA,MACb,UAAU,CAAC;AAAA,MACX,UAAU,CAAC;AAAA,MACX,aAAa,CAAC;AAAA,IAChB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA,EAMQ,iBAAiB,SAAgC;AACvD,WAAO,QAAQ,IAAI,CAAC,UAAU;AAC5B,YAAM,YAAY,KAAK,YAAY,kBAAkB;AAErD,YAAM,UAAmB;AAAA,QACvB,IAAI;AAAA,QACJ,aAAa,MAAM;AAAA,QACnB,OAAO,YAAY,UAAU,MAAM,KAAK;AAAA,QACxC,QAAQ,MAAM;AAAA,QACd,OAAO,MAAM;AAAA,QACb,YAAY,CAAC;AAAA,QACb,UAAU,CAAC;AAAA,QACX,UAAU,CAAC;AAAA,QACX,aAAa,CAAC;AAAA,MAChB;AAEA,UAAI,MAAM,YAAY,MAAM,SAAS,SAAS,GAAG;AAC/C,gBAAQ,WAAW,KAAK,iBAAiB,MAAM,QAAQ;AAAA,MACzD;AAEA,aAAO;AAAA,IACT,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA,EAMQ,gBAAgB,UAAoC;AAC1D,UAAM,SAAwB,CAAC;AAE/B,UAAM,UAAU,CAAC,gBAAiC;AAChD,iBAAW,WAAW,aAAa;AACjC,eAAO,KAAK;AAAA,UACV;AAAA,UACA,WAAW,QAAQ;AAAA,QACrB,CAAC;AAED,YAAI,QAAQ,YAAY,QAAQ,SAAS,SAAS,GAAG;AACnD,kBAAQ,QAAQ,QAAQ;AAAA,QAC1B;AAAA,MACF;AAAA,IACF;AAEA,YAAQ,QAAQ;AAChB,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUQ,oBACN,cACA
,YAC2B;AAC3B,UAAM,SAAS,oBAAI,IAA0B;AAE7C,QAAI,aAAa,WAAW,GAAG;AAC7B,aAAO;AAAA,IACT;AAGA,UAAM,eACJ,WAAW,SAAS,IAChB,KAAK,IAAI,GAAG,WAAW,IAAI,CAAC,MAAM,EAAE,MAAM,CAAC,IAC3C,OAAO;AAGb,UAAM,cAAc,aAAa;AAAA,MAC/B,CAAC,OAAO,GAAG,QAAQ,OAAO,kBAAiB;AAAA,IAC7C;AAGA,UAAM,SAAS,CAAC,GAAG,WAAW,EAAE,KAAK,CAAC,GAAG,MAAM,EAAE,YAAY,EAAE,SAAS;AAGxE,WAAO,IAAI,kBAAiB,iBAAiB;AAAA,MAC3C,WAAW;AAAA,MACX,SAAS,eAAe;AAAA,IAC1B,CAAC;AAGD,aAAS,IAAI,GAAG,IAAI,OAAO,QAAQ,KAAK;AACtC,YAAM,UAAU,OAAO,CAAC;AACxB,YAAM,OAAO,OAAO,IAAI,CAAC;AAEzB,aAAO,IAAI,QAAQ,QAAQ,IAAI;AAAA,QAC7B,WAAW,QAAQ;AAAA,QACnB,SAAS,OAAO,KAAK,YAAY,IAAI,OAAO;AAAA,MAC9C,CAAC;AAAA,IACH;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA,EAMA,OAAwB,oBAAoB,oBAAI,IAAI;AAAA,IAClD;AAAA,IACA;AAAA,IACA;AAAA,EACF,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA,EAMD,OAAe,iBAAiB,MAAgC;AAC9D,UAAM,YAAY,KAAK,QAAQ;AAC/B,WAAO,OAAO,cAAc,YAAY,UAAU,WAAW,aAAa;AAAA,EAC5E;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOQ,kBACN,WACA,eACa;AACb,WAAO,UACJ;AAAA,MACC,CAAC,SACC,kBAAiB,kBAAkB,IAAI,KAAK,KAAK,KACjD,CAAC,kBAAiB,iBAAiB,IAAI,KACvC,YAAY,YAAY,KAAK,IAAI;AAAA,IACrC,EACC,IAAI,CAAC,SAAS;AACb,YAAM,YAAY,KAAK,OAAO,CAAC,GAAG,WAAW;AAC7C,aAAO;AAAA,QACL,MAAM,YAAY,UAAU,KAAK,IAAI;AAAA,QACrC;AAAA,MACF;AAAA,IACF,CAAC;AAAA,EACL;AAAA;AAAA;AAAA;AAAA;AAAA,EAMQ,oBACN,WACA,cACQ;AACR,UAAM,QAAQ,aAAa,SAAS;AACpC,QAAI,CAAC,OAAO;AAEV,aAAO;AAAA,IACT;AAEA,WAAO,MAAM;AAAA,EACf;AAAA;AAAA;AAAA;AAAA;AAAA,EAMQ,mBACN,cACA,eACe;AACf,QAAI,YAA2B;AAC/B,QAAI,gBAAgB;AAEpB,eAAW,CAAC,WAAW,KAAK,KAAK,eAAe;AAE9C,UAAI,gBAAgB,MAAM,aAAa,gBAAgB,MAAM,SAAS;AAEpE,YAAI,MAAM,YAAY,eAAe;AACnC,0BAAgB,MAAM;AACtB,sBAAY;AAAA,QACd;AAAA,MACF;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKQ,iBACN,UACA,YACA,eACA,cACM;AAEN,UAAM,aAAa,KAAK,gBAAgB,QAAQ;AAEhD,eAAW,aAAa,YAAY;AAClC,YAAM,eAAe,KAAK;AAAA,QACxB,UAAU;AAAA,QACV;AAAA,MACF;AACA,YAAM,YAAY,KAAK,mBAAmB,cAAc,aAAa;AAErE,UAAI,aAAa,WAAW,IAAI,SAAS,GAAG;AAC1C,mBAAW,IAAI,SAAS,EAAG,WAAW,KAAK,SAAS;AAAA,MACtD;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,cACN,UACA,QACA,QACA,WACA,eACA,cACM;AAEN,UAAM,aAAa,KAAK,gBAAgB,QAAQ;AAGhD,eAAW,SAAS,QAAQ;AAC1B,YAAM,eAAe,KAAK;AAAA,QACxB,MAAM;AAAA,QACN;AAAA,MACF;AACA,YAAM,YAAY,KAAK,mBAAmB,cAAc,aAAa;AAErE,UAAI,aAAa,WAAW,IAAI,SAAS,GAAG;AAC1C,mBAAW,IAAI,SAAS,EAAG,SAAS,KAAK,MAAM,EAAE;AAAA,MACnD;AAAA,IACF;AAGA,eAAW,SAAS,QAAQ;AAC1B,YAAM,eAAe,KAAK;AAAA,QACxB,MAAM;AAAA,QACN;AAAA,MACF;AACA,YAAM,YAAY,KAAK,mBAAmB,cAAc,aAAa;AAErE,UAAI,aAAa,WAAW,IAAI,SAAS,GAAG;AAC1C,mBAAW,IAAI,SAAS,EAAG,SAAS,KAAK,MAAM,EAAE;AAAA,MACnD;AAAA,IACF;AAGA,eAAW,YAAY,WAAW;AAChC,YAAM,eAAe,KAAK;AAAA,QACxB,SAAS;AAAA,QACT;AAAA,MACF;AACA,YAAM,YAAY,KAAK,mBAAmB,cAAc,aAAa;AAErE,UAAI,aAAa,WAAW,IAAI,SAAS,GAAG;AAC1C,mBAAW,IAAI,SAAS,EAAG,YAAY,KAAK,SAAS,EAAE;AAAA,MACzD;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,gBAAgB,UAA2C;AACjE,UAAM,MAAM,oBAAI,IAAqB;AAErC,UAAM,WAAW,CAAC,gBAAiC;AACjD,iBAAW,WAAW,aAAa;AACjC,YAAI,IAAI,QAAQ,IAAI,OAAO;AAE3B,YAAI,QAAQ,YAAY,QAAQ,SAAS,SAAS,GAAG;AACnD,mBAAS,QAAQ,QAAQ;AAAA,QAC3B;AAAA,MACF;AAAA,IACF;AAEA,aAAS,QAAQ;AACjB,WAAO;AAAA,EACT;AACF;;;AC/XO,IAAM,kBAAN,MAAM,yBAAwB,MAAM;AAAA,EACzC,YAAY,SAAiB,SAAwB;AACnD,UAAM,SAAS,OAAO;AACtB,SAAK,OAAO;AAAA,EACd;AAAA;AAAA;AAAA;AAAA,EAKA,OAAO,gBAAgB,OAAwB;AAC7C,WAAO,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AAAA,EAC9D;AAAA;AAAA;AAAA;AAAA,EAKA,OAAO,UAAU,SAAiB,OAAiC;AACjE,WAAO,IAAI;AAAA,MACT,GAAG,OAAO,KAAK,iBAAgB,gBAAgB,KAAK,CAAC;AAAA,MACrD,EAAE,OAAO,MAAM;AAAA,IACjB;AAAA,EACF;AACF;AAOO,IAAM,mBAAN,cAA+B,gBAAgB;AAAA,EACpD,YAAY,UAAU,+CAA+C;AACnE,UAAM,OAAO;AACb,SAAK,OAAO;AAAA,EACd;AACF;AAOO,IAAM,gBAAN,cAA4B,gBAAgB;AAAA,EACjD,YAAY,SAAiB,SAAwB;AACnD,UAAM,SAAS,OAAO;AACtB,SAAK,OAAO;AAAA,EACd;AACF;AAQO,IAAM,qBAAN,cAAi
C,gBAAgB;AAAA;AAAA;AAAA;AAAA,EAI7C;AAAA,EAET,YAAY,SAAiB,kBAAuC;AAClE,UAAM,OAAO;AACb,SAAK,OAAO;AACZ,SAAK,mBAAmB;AAAA,EAC1B;AAAA;AAAA;AAAA;AAAA,EAKA,aAAqB;AACnB,UAAM,EAAE,YAAY,OAAO,IAAI,KAAK;AACpC,UAAM,QAAQ;AAAA,MACZ,0BAA0B,UAAU;AAAA,MACpC;AAAA,MACA;AAAA,IACF;AAEA,eAAW,SAAS,QAAQ;AAC1B,YAAM,KAAK,MAAM,MAAM,IAAI,KAAK,MAAM,OAAO,EAAE;AAC/C,YAAM,KAAK,aAAa,MAAM,IAAI,EAAE;AACpC,YAAM;AAAA,QACJ,eAAe,MAAM,MAAM,KAAK,WAAW,MAAM,MAAM,MAAM;AAAA,MAC/D;AAAA,IACF;AAEA,WAAO,MAAM,KAAK,IAAI;AAAA,EACxB;AACF;;;ACzGA,IAAM,kBAAkD;AAAA,EACtD,YAAY;AAAA,EACZ,gBAAgB;AAAA,EAChB,wBAAwB;AAC1B;AAQO,IAAM,eAAN,MAAmB;AAAA,EACP;AAAA,EACT;AAAA,EAER,YAAY,SAAgC;AAC1C,SAAK,UAAU;AAAA,MACb,GAAG;AAAA,MACH,GAAG;AAAA,IACL;AACA,SAAK,SAAS,CAAC;AAAA,EACjB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,SAAS,SAA0C;AACjD,SAAK,SAAS,CAAC;AAGf,SAAK,gBAAgB,SAAS,IAAI,MAAM,oBAAI,IAAY,CAAC;AAGzD,SAAK,+BAA+B,OAAO;AAE3C,UAAM,aAAa,KAAK,OAAO;AAE/B,WAAO;AAAA,MACL,OAAO,eAAe;AAAA,MACtB,QAAQ,CAAC,GAAG,KAAK,MAAM;AAAA,MACvB;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,gBAAgB,SAA2B;AACzC,UAAM,SAAS,KAAK,SAAS,OAAO;AAEpC,QAAI,CAAC,OAAO,OAAO;AACjB,YAAM,UAAU,OAAO,OACpB;AAAA,QACC,CAAC,UACC,MAAM,MAAM,IAAI,KAAK,MAAM,OAAO,WAAW,MAAM,IAAI,aAAa,MAAM,MAAM,KAAK,UAAU,MAAM,MAAM,MAAM;AAAA,MACrH,EACC,KAAK,IAAI;AACZ,YAAM,IAAI;AAAA,QACR,8BAA8B,OAAO,UAAU;AAAA,EAAe,OAAO;AAAA,QACrE;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,gBACN,SACA,YACA,aACA,UACM;AACN,QAAI,aAAa,aAAa,UAAU;AAExC,aAAS,IAAI,GAAG,IAAI,QAAQ,QAAQ,KAAK;AACvC,YAAM,QAAQ,QAAQ,CAAC;AACvB,YAAMA,QAAO,aAAa,GAAG,UAAU,aAAa,CAAC,MAAM,IAAI,CAAC;AAGhE,WAAK,cAAc,OAAOA,KAAI;AAG9B,WAAK,oBAAoB,OAAOA,KAAI;AAGpC,WAAK,kBAAkB,OAAOA,KAAI;AAGlC,WAAK,kBAAkB,OAAOA,OAAM,UAAU;AAC9C,mBAAa,MAAM;AAGnB,UAAI,aAAa;AACf,aAAK,wBAAwB,OAAOA,OAAM,WAAW;AAAA,MACvD;AAGA,YAAM,MAAM,GAAG,MAAM,KAAK,IAAI,MAAM,MAAM;AAC1C,WAAK,kBAAkB,OAAOA,OAAM,KAAK,QAAQ;AACjD,eAAS,IAAI,GAAG;AAGhB,UAAI,MAAM,YAAY,MAAM,SAAS,SAAS,GAAG;AAC/C,aAAK,gBAAgB,MAAM,UAAUA,OAAM,OAAO,QAAQ;AAAA,MAC5D;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,cAAc,OAAiBA,OAAoB;AACzD,QAAI,CAAC,MAAM,SAAS,MAAM,MAAM,KAAK,MAAM,IAAI;AAC7C,WAAK,SAAS;AAAA,QACZ,MAAM;AAAA,QACN,SAAS;AAAA,QACT,MAAAA;AAAA,QACA;AAAA,MACF,CAAC;AAAA,IACH;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,oBAAoB,OAAiBA,OAAoB;AAC/D,QAAI,MAAM,MAAM,SAAS,KAAK,QAAQ,gBAAgB;AACpD,WAAK,SAAS;AAAA,QACZ,MAAM;AAAA,QACN,SAAS,iBAAiB,KAAK,QAAQ,cAAc,gBAAgB,MAAM,MAAM,MAAM;AAAA,QACvF,MAAAA;AAAA,QACA;AAAA,MACF,CAAC;AAAA,IACH;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,kBAAkB,OAAiBA,OAAoB;AAC7D,QAAI,MAAM,SAAS,GAAG;AACpB,WAAK,SAAS;AAAA,QACZ,MAAM;AAAA,QACN,SAAS,iCAAiC,MAAM,MAAM;AAAA,QACtD,MAAAA;AAAA,QACA;AAAA,MACF,CAAC;AAAA,IACH;AAEA,QAAI,MAAM,SAAS,KAAK,QAAQ,YAAY;AAC1C,WAAK,SAAS;AAAA,QACZ,MAAM;AAAA,QACN,SAAS,eAAe,MAAM,MAAM,kCAAkC,KAAK,QAAQ,UAAU;AAAA,QAC7F,MAAAA;AAAA,QACA;AAAA,MACF,CAAC;AAAA,IACH;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,kBACN,OACAA,OACA,YACM;AACN,QAAI,MAAM,SAAS,YAAY;AAC7B,WAAK,SAAS;AAAA,QACZ,MAAM;AAAA,QACN,SAAS,8BAA8B,UAAU,OAAO,MAAM,MAAM;AAAA,QACpE,MAAAA;AAAA,QACA;AAAA,MACF,CAAC;AAAA,IACH;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,wBACN,OACAA,OACA,QACM;AACN,QAAI,MAAM,SAAS,OAAO,QAAQ;AAChC,WAAK,SAAS;AAAA,QACZ,MAAM;AAAA,QACN,SAAS,eAAe,MAAM,MAAM,4BAA4B,OAAO,MAAM;AAAA,QAC7E,MAAAA;AAAA,QACA;AAAA,MACF,CAAC;AAAA,IACH;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,kBACN,OACAA,OACA,KACA,UACM;AACN,QAAI,SAAS,IAAI,GAAG,GAAG;AACrB,WAAK,SAAS;AAAA,QACZ,MAAM;AAAA,QACN,SAAS,qBAAqB,MAAM,KAAK,aAAa,MAAM,MAAM;AAAA,QAClE,MAAAA;AAAA,QACA;AAAA,MACF,CAAC;AAAA,IACH;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQQ,+BAA+B,SAA2B;AAChE,QAAI,QAAQ,WAAW,GAAG;AACxB;AAAA,IACF;AAGA,QAAI,CAAC,SAAS,KAAK,QAAQ,UAAU,GAAG;AAC
tC;AAAA,IACF;AAEA,UAAM,aAAa,QAAQ,CAAC;AAC5B,UAAM,YAAY,KAAK;AAAA,MACrB;AAAA,MACA,KAAK,MAAM,KAAK,QAAQ,aAAa,KAAK,QAAQ,sBAAsB;AAAA,IAC1E;AAEA,QAAI,WAAW,SAAS,WAAW;AACjC,WAAK,SAAS;AAAA,QACZ,MAAM;AAAA,QACN,SAAS,sDAAsD,WAAW,MAAM,2BAA2B,SAAS;AAAA,QACpH,MAAM;AAAA,QACN,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,SAAS,OAAiC;AAChD,SAAK,OAAO,KAAK,KAAK;AAAA,EACxB;AACF;;;AClRO,IAAM,eAAe;AAAA,EAC1B;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF;AASO,IAAM,uBAAuB;AAAA,EAClC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF;AAMO,IAAM,sBAAsB;AAyB5B,IAAM,YAAN,MAAgB;AAAA,EAIrB,YACmB,QACA,aACjB,SACA;AAHiB;AACA;AAGjB,SAAK,iBAAiB,SAAS,kBAAkB;AACjD,SAAK,WAAW,CAAC,GAAG,cAAc,GAAI,SAAS,sBAAsB,CAAC,CAAE;AAAA,EAC1E;AAAA,EAViB;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAgBjB,KAAK,KAAqC;AACxC,SAAK,OAAO,KAAK,oCAAoC;AAGrD,UAAM,gBAAgB,KAAK,eAAe,GAAG;AAC7C,QAAI,eAAe;AACjB,WAAK,OAAO;AAAA,QACV,kDAAkD,cAAc,SAAS,IAAI,cAAc,OAAO;AAAA,MACpG;AACA,aAAO;AAAA,IACT;AAGA,UAAM,kBAAkB,KAAK,gBAAgB,GAAG;AAChD,QAAI,iBAAiB;AACnB,WAAK,OAAO;AAAA,QACV,sDAAsD,gBAAgB,SAAS,IAAI,gBAAgB,OAAO;AAAA,MAC5G;AACA,aAAO;AAAA,IACT;AAEA,SAAK,OAAO,KAAK,sCAAsC;AACvD,UAAM,IAAI,iBAAiB;AAAA,EAC7B;AAAA;AAAA;AAAA;AAAA,EAKQ,eAAe,KAA4C;AAEjE,eAAW,QAAQ,IAAI,OAAO;AAC5B,UAAI,CAAC,KAAK,mBAAmB,KAAK,IAAI,GAAG;AACvC;AAAA,MACF;AAEA,YAAM,SAAS,KAAK,KAAK,CAAC,GAAG;AAC7B,UAAI,WAAW,UAAa,SAAS,KAAK,gBAAgB;AACxD;AAAA,MACF;AAEA,WAAK,OAAO;AAAA,QACV,kCAAkC,KAAK,IAAI,aAAa,MAAM;AAAA,MAChE;AAGA,YAAM,YAAY,KAAK,QAAQ;AAC/B,UAAI,CAAC,WAAW;AAEd,eAAO;AAAA,UACL,UAAU,CAAC,KAAK,QAAQ;AAAA,UACxB,WAAW;AAAA,UACX,SAAS;AAAA,QACX;AAAA,MACF;AAGA,YAAM,SAAS,KAAK,iBAAiB,KAAK,WAAW,MAAM;AAC3D,UAAI,QAAQ;AACV,eAAO,KAAK,yBAAyB,QAAQ,GAAG;AAAA,MAClD;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKQ,gBAAgB,KAA4C;AAClE,UAAM,aAGD,CAAC;AAGN,eAAW,SAAS,IAAI,QAAQ;AAC9B,YAAM,SAAS,KAAK,kBAAkB,KAAK;AAC3C,UAAI,WAAW,UAAa,SAAS,KAAK,gBAAgB;AACxD;AAAA,MACF;AAEA,UAAI,KAAK,eAAe,OAAO,GAAG,GAAG;AACnC,cAAM,QAAQ,KAAK,eAAe,OAAO,MAAM;AAC/C,mBAAW,KAAK;AAAA,UACd,QAAQ;AAAA,YACN,UAAU,CAAC,MAAM,QAAQ;AAAA,YACzB,WAAW;AAAA,YACX,SAAS;AAAA,UACX;AAAA,UACA;AAAA,QACF,CAAC;AAAA,MACH;AAAA,IACF;AAGA,eAAW,SAAS,IAAI,QAAQ;AAC9B,YAAM,SAAS,MAAM,KAAK,CAAC,GAAG;AAC9B,UAAI,WAAW,UAAa,SAAS,KAAK,gBAAgB;AACxD;AAAA,MACF;AAEA,UAAI,KAAK,eAAe,KAAK,GAAG;AAC9B,cAAM,QAAQ,KAAK,oBAAoB,OAAO,MAAM;AACpD,mBAAW,KAAK;AAAA,UACd,QAAQ;AAAA,YACN,UAAU,CAAC,MAAM,QAAQ;AAAA,YACzB,WAAW;AAAA,YACX,SAAS;AAAA,UACX;AAAA,UACA;AAAA,QACF,CAAC;AAAA,MACH;AAAA,IACF;AAEA,QAAI,WAAW,WAAW,GAAG;AAC3B,aAAO;AAAA,IACT;AAGA,eAAW,KAAK,CAAC,GAAG,MAAM,EAAE,QAAQ,EAAE,KAAK;AAC3C,UAAM,OAAO,WAAW,CAAC;AAEzB,WAAO,KAAK,yBAAyB,KAAK,QAAQ,GAAG;AAAA,EACvD;AAAA;AAAA;AAAA;AAAA,EAKQ,iBACN,KACA,WACA,QACsB;AAEtB,UAAM,QAAQ,KAAK,YAAY,aAAa,SAAS;AACrD,QAAI,OAAO;AACT,aAAO;AAAA,QACL,UAAU,CAAC,MAAM,QAAQ;AAAA,QACzB,WAAW;AAAA,QACX,SAAS;AAAA,MACX;AAAA,IACF;AAGA,UAAM,QAAQ,KAAK,YAAY,aAAa,SAAS;AACrD,QAAI,OAAO;AACT,aAAO;AAAA,QACL,UAAU,CAAC,MAAM,QAAQ;AAAA,QACzB,WAAW;AAAA,QACX,SAAS;AAAA,MACX;AAAA,IACF;AAGA,UAAM,OAAO,KAAK,YAAY,QAAQ,SAAS;AAC/C,QAAI,QAAQ,KAAK,QAAQ,MAAM;AAC7B,aAAO,KAAK,iBAAiB,KAAK,KAAK,OAAO,MAAM,MAAM;AAAA,IAC5D;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKQ,eACN,OACA,MACS;AACT,QAAI,MAAM,SAAS,UAAU,MAAM,SAAS,SAAS;AACnD,aAAO;AAAA,IACT;AAGA,QAAI,kBAAkB;AACtB,UAAM,WAAW,KAAK,YAAY,YAAY,MAAM,QAAQ;AAE5D,eAAW,SAAS,UAAU;AAC5B,UAAI,CAAC,MAAO;AAGZ,UAAI,UAAU,SAAS,UAAU,OAAO;AACtC,cAAM,WAAW;AACjB,YAAI,oBAAoB,KAAK,SAAS,IAAI,GAAG;AAC3C;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAIA,U
AAM,QAAQ,SAAS,OAAO,CAAC,MAAM,MAAM,IAAI,EAAE;AACjD,WAAO,mBAAmB,KAAM,QAAQ,KAAK,kBAAkB,QAAQ;AAAA,EACzE;AAAA;AAAA;AAAA;AAAA,EAKQ,eAAe,OAAkC;AAEvD,QAAI,MAAM,UAAU,kBAAkB;AACpC,aAAO;AAAA,IACT;AAEA,UAAM,EAAE,MAAM,UAAU,SAAS,IAAI,MAAM;AAG3C,QAAI,WAAW,KAAK,WAAW,GAAG;AAChC,aAAO;AAAA,IACT;AAGA,QAAI,cAAc;AAClB,aAAS,MAAM,GAAG,MAAM,KAAK,QAAQ,OAAO;AAC1C,YAAM,WAAW,KAAK,GAAG,IAAI,WAAW,CAAC;AACzC,UAAI,YAAY,QAAQ,KAAK,SAAS,KAAK,KAAK,CAAC,GAAG;AAClD;AAAA,MACF;AAAA,IACF;AAGA,WAAO,cAAc,KAAK,eAAe,WAAW,KAAK;AAAA,EAC3D;AAAA;AAAA;AAAA;AAAA,EAKQ,yBACN,SACA,KACe;AACf,UAAM,WAAW,CAAC,GAAG,QAAQ,QAAQ;AACrC,UAAM,WAAW,IAAI,IAAY,QAAQ;AACzC,QAAI,YAAY,QAAQ;AACxB,QAAI,UAAU,QAAQ;AAGtB,aAAS,SAAS,QAAQ,YAAY,GAAG,UAAU,GAAG,UAAU;AAC9D,YAAM,oBAAoB,KAAK,uBAAuB,KAAK,MAAM;AACjE,UAAI,kBAAkB,WAAW,GAAG;AAClC;AAAA,MACF;AAEA,YAAM,WAAW,kBAAkB,OAAO,CAAC,QAAQ,CAAC,SAAS,IAAI,GAAG,CAAC;AACrE,iBAAW,OAAO,UAAU;AAC1B,iBAAS,IAAI,GAAG;AAAA,MAClB;AACA,eAAS,QAAQ,GAAG,QAAQ;AAC5B,kBAAY;AACZ,WAAK,OAAO,KAAK,6CAA6C,MAAM,EAAE;AAAA,IACxE;AAGA,aACM,SAAS,QAAQ,UAAU,GAC/B,UAAU,KAAK,gBACf,UACA;AACA,YAAM,oBAAoB,KAAK,uBAAuB,KAAK,MAAM;AACjE,UAAI,kBAAkB,WAAW,GAAG;AAClC;AAAA,MACF;AAEA,YAAM,WAAW,kBAAkB,OAAO,CAAC,QAAQ,CAAC,SAAS,IAAI,GAAG,CAAC;AACrE,iBAAW,OAAO,UAAU;AAC1B,iBAAS,IAAI,GAAG;AAAA,MAClB;AACA,eAAS,KAAK,GAAG,QAAQ;AACzB,gBAAU;AACV,WAAK,OAAO,KAAK,4CAA4C,MAAM,EAAE;AAAA,IACvE;AAEA,WAAO;AAAA,MACL;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,uBACN,KACA,QACU;AACV,UAAM,OAAiB,CAAC;AAGxB,eAAW,QAAQ,IAAI,OAAO;AAC5B,UAAI,KAAK,KAAK,CAAC,GAAG,YAAY,QAAQ;AACpC;AAAA,MACF;AAEA,UAAI,KAAK,sBAAsB,KAAK,IAAI,GAAG;AACzC,cAAM,YAAY,KAAK,QAAQ;AAC/B,YAAI,WAAW;AACb,gBAAM,QAAQ,KAAK,YAAY,aAAa,SAAS;AACrD,cAAI,OAAO;AACT,iBAAK,KAAK,MAAM,QAAQ;AAAA,UAC1B;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAGA,eAAW,SAAS,IAAI,QAAQ;AAC9B,YAAM,YAAY,KAAK,kBAAkB,KAAK;AAC9C,UAAI,cAAc,QAAQ;AACxB;AAAA,MACF;AAEA,UAAI,KAAK,eAAe,OAAO,GAAG,KAAK,CAAC,KAAK,SAAS,MAAM,QAAQ,GAAG;AACrE,aAAK,KAAK,MAAM,QAAQ;AAAA,MAC1B;AAAA,IACF;AAGA,eAAW,SAAS,IAAI,QAAQ;AAC9B,UAAI,MAAM,KAAK,CAAC,GAAG,YAAY,QAAQ;AACrC;AAAA,MACF;AAEA,UAAI,KAAK,eAAe,KAAK,KAAK,CAAC,KAAK,SAAS,MAAM,QAAQ,GAAG;AAChE,aAAK,KAAK,MAAM,QAAQ;AAAA,MAC1B;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKQ,mBAAmB,MAAuB;AAChD,UAAM,iBAAiB,KAAK,KAAK,EAAE,YAAY;AAC/C,WAAO,KAAK,SAAS;AAAA,MAAK,CAAC,YACzB,eAAe,SAAS,QAAQ,YAAY,CAAC;AAAA,IAC/C;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,sBAAsB,MAAuB;AACnD,UAAM,iBAAiB,KAAK,KAAK,EAAE,YAAY;AAC/C,WAAO,qBAAqB;AAAA,MAAK,CAAC,WAChC,eAAe,SAAS,OAAO,YAAY,CAAC;AAAA,IAC9C;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,kBAAkB,OAA6C;AACrE,eAAW,YAAY,MAAM,UAAU;AACrC,YAAM,QAAQ,KAAK,YAAY,QAAQ,SAAS,IAAI;AACpD,UAAI,SAAS,UAAU,OAAO;AAC5B,cAAM,OAAQ,MAA0B;AACxC,YAAI,QAAQ,KAAK,CAAC,GAAG,YAAY,QAAW;AAC1C,iBAAO,KAAK,CAAC,EAAE;AAAA,QACjB;AAAA,MACF;AAAA,IACF;AACA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA,EAMQ,eAAe,OAAyB,QAAwB;AACtE,QAAI,QAAQ;AAGZ,cAAU,KAAK,iBAAiB,SAAS,KAAK;AAG9C,aAAS,MAAM,SAAS,SAAS;AAGjC,UAAM,WAAW,KAAK,YAAY,YAAY,MAAM,QAAQ;AAC5D,eAAW,SAAS,UAAU;AAC5B,UAAI,SAAS,UAAU,OAAO;AAC5B,cAAM,WAAW;AACjB,YAAI,oBAAoB,KAAK,SAAS,IAAI,GAAG;AAC3C,mBAAS;AAAA,QACX;AAAA,MACF;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKQ,oBAAoB,OAAyB,QAAwB;AAC3E,QAAI,QAAQ;AAGZ,cAAU,KAAK,iBAAiB,SAAS,KAAK;AAG9C,aAAS,MAAM,KAAK,WAAW;AAG/B,QAAI,MAAM,UAAU,kBAAkB;AACpC,eAAS;AAAA,IACX;AAEA,WAAO;AAAA,EACT;AACF;;;AChgBA,iBAAkB;;;AC6BX,IAAe,mBAAf,MAAgC;AAAA,EAClB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYnB,YACE,QACA,OACA,eACA,SACA,eACA,YACA;AACA,SAAK,SAAS;AACd,SAAK,QAAQ;AACb,SAAK,gBAAgB;AACrB,SAAK,aAAa,SAAS,cAAc;AACzC,SAAK,cAAc,
SAAS,eAAe;AAC3C,SAAK,gBAAgB;AACrB,SAAK,aAAa;AAClB,SAAK,cAAc,SAAS;AAAA,EAC9B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASU,IACR,OACA,YACG,MACG;AACN,UAAM,mBAAmB,IAAI,KAAK,aAAa,KAAK,OAAO;AAC3D,SAAK,OAAO,KAAK,EAAE,kBAAkB,GAAG,IAAI;AAAA,EAC9C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOU,WAAW,OAAiC;AACpD,QAAI,KAAK,YAAY;AACnB,WAAK,WAAW,MAAM,KAAK;AAAA,IAC7B;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQU,iBAAiB,OAAmC;AAC5D,WAAO;AAAA,MACL,WAAW,KAAK;AAAA,MAChB;AAAA,MACA,OAAO;AAAA,MACP,WAAW;AAAA,MACX,aAAa;AAAA,MACb,cAAc;AAAA,MACd,aAAa;AAAA,IACf;AAAA,EACF;AAeF;;;AC5GO,IAAe,mBAAf,cAAwC,iBAAiB;AAAA,EAC9D,YACE,QACA,OACA,eACA,SACA,eACA,YACA;AACA,UAAM,QAAQ,OAAO,eAAe,SAAS,eAAe,UAAU;AAAA,EACxE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYA,MAAgB,YACd,QACA,cACA,YACA,OACkE;AAClE,UAAM,SAAS,MAAM,UAAU,KAAK;AAAA,MAClC;AAAA,MACA;AAAA,MACA;AAAA,MACA,cAAc,KAAK;AAAA,MACnB,eAAe,KAAK;AAAA,MACpB,YAAY,KAAK;AAAA,MACjB,aAAa,KAAK;AAAA,MAClB,aAAa,KAAK;AAAA,MAClB,WAAW,KAAK;AAAA,MAChB;AAAA,IACF,CAAC;AAED,SAAK,WAAW,OAAO,KAAK;AAE5B,WAAO;AAAA,MACL,QAAQ,OAAO;AAAA,MACf,OAAO,OAAO;AAAA,IAChB;AAAA,EACF;AACF;;;AFvDA,IAAM,yBAAyB;AAK/B,IAAM,+BAAuD;AAAA,EAC3D,MAAM;AAAA,EACN,MAAM;AAAA,EACN,MAAM;AAAA,EACN,MAAM;AAAA,EACN,MAAM;AAAA,EACN,MAAM;AAAA,EACN,MAAM;AACR;AAKO,IAAM,iBAAsC,aAAE;AAAA,EAAK,MACxD,aAAE,OAAO;AAAA,IACP,OAAO,aAAE,OAAO,EAAE,SAAS,0BAA0B;AAAA,IACrD,OAAO,aAAE,OAAO,EAAE,IAAI,EAAE,IAAI,CAAC,EAAE,SAAS,iCAAiC;AAAA,IACzE,QAAQ,aAAE,OAAO,EAAE,IAAI,EAAE,IAAI,CAAC,EAAE,SAAS,sBAAsB;AAAA,IAC/D,UAAU,aACP,MAAM,cAAc,EACpB,SAAS,6CAA6C;AAAA,EAC3D,CAAC;AACH;AAKO,IAAM,oBAAoB,aAAE,OAAO;AAAA,EACxC,SAAS,aAAE,MAAM,cAAc,EAAE,SAAS,uBAAuB;AACnE,CAAC;AA6BM,IAAM,eAAN,cAA2B,iBAAiB;AAAA,EAChC;AAAA,EACA;AAAA,EAEjB,YACE,QACA,OACA,SACA,eACA,aACA;AACA;AAAA,MACE;AAAA,MACA;AAAA,MACA;AAAA,MACA,EAAE,GAAG,SAAS,YAAY;AAAA,MAC1B;AAAA,IACF;AACA,SAAK,oBAAoB,SAAS;AAClC,SAAK,iBAAiB,SAAS,kBAAkB;AAAA,EACnD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAaA,MAAM,QACJ,UACA,qBACgE;AAChE,SAAK,IAAI,QAAQ,4BAA4B,SAAS,MAAM,SAAS;AAErE,QAAI,CAAC,SAAS,KAAK,GAAG;AACpB,WAAK,IAAI,SAAS,gDAAgD;AAClE,YAAM,IAAI;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAEA,QAAI;AAEF,YAAM,SAAS,MAAM,KAAK;AAAA,QACxB;AAAA,QACA,KAAK,kBAAkB;AAAA,QACvB,KAAK,gBAAgB,QAAQ;AAAA,QAC7B;AAAA,MACF;AAEA,YAAM,SAA+B,CAAC,OAAO,KAAK;AAClD,UAAI,UAAU,KAAK,iBAAiB,OAAO,OAAO,OAAO;AAGzD,UAAI,CAAC,KAAK,gBAAgB;AACxB,YAAI,kBAAkB,KAAK;AAAA,UACzB;AAAA,UACA;AAAA,QACF;AAGA,iBACM,UAAU,GACd,WAAW,0BAA0B,oBAAoB,MACzD,WACA;AACA,eAAK;AAAA,YACH;AAAA,YACA,8BAA8B,OAAO,IAAI,sBAAsB;AAAA,UACjE;AAEA,gBAAM,mBAAmB,KAAK;AAAA,YAC5B;AAAA,YACA;AAAA,YACA,gBAAgB,iBAAiB;AAAA,UACnC;AAEA,gBAAM,mBAAmB,MAAM,KAAK;AAAA,YAClC;AAAA,YACA,KAAK,kBAAkB;AAAA,YACvB;AAAA,YACA,cAAc,OAAO;AAAA,UACvB;AAEA,iBAAO,KAAK,iBAAiB,KAAK;AAClC,oBAAU,KAAK,iBAAiB,iBAAiB,OAAO,OAAO;AAC/D,4BAAkB,KAAK;AAAA,YACrB;AAAA,YACA;AAAA,UACF;AAAA,QACF;AAGA,YAAI,oBAAoB,MAAM;AAC5B,eAAK;AAAA,YACH;AAAA,YACA,2BAA2B,sBAAsB;AAAA,EAAc,gBAAgB,WAAW,CAAC;AAAA,UAC7F;AACA,gBAAM;AAAA,QACR;AAAA,MACF;AAEA,WAAK;AAAA,QACH;AAAA,QACA,yBAAyB,QAAQ,MAAM,uBAAuB,OAAO,MAAM;AAAA,MAC7E;AAEA,aAAO,EAAE,SAAS,OAAO;AAAA,IAC3B,SAAS,OAAO;AAEd,UAAI,iBAAiB,oBAAoB;AACvC,cAAM;AAAA,MACR;AAEA,YAAM,UAAU,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AACrE,WAAK,IAAI,SAAS,sBAAsB,OAAO,EAAE;AACjD,YAAM,IAAI,cAAc,oCAAoC,OAAO,IAAI;AAAA,QACrE,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUQ,mBACN,SACA,WAC2B;AAC3B,QAAI,QAAQ,WAAW,GAAG;AACxB,aAAO;AAAA,IACT;AAEA,UAAM,UAAU,EAAE,GAAG,KAAK,mBAAmB,GAAG,UAAU;AAC1D,UAAM,YAAY,IAAI,aAAa,OAAO;AAC1C,UAAM,SAAS,UAAU,SAAS,OAAO;AAEzC,QAAI,CAAC,OAAO,OAAO;AACjB,YAAM,UAAU,OAAO,OACpB;AAAA,QACC,CAAC,U
ACC,MAAM,MAAM,IAAI,KAAK,MAAM,OAAO,WAAW,MAAM,IAAI,aAAa,MAAM,MAAM,KAAK,UAAU,MAAM,MAAM,MAAM;AAAA,MACrH,EACC,KAAK,IAAI;AACZ,aAAO,IAAI;AAAA,QACT,8BAA8B,OAAO,UAAU;AAAA,EAAe,OAAO;AAAA,QACrE;AAAA,MACF;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQU,sBACR,UACA,iBACA,QACQ;AACR,UAAM,aAAa,OAAO,IAAI,CAAC,UAAU;AACvC,YAAM,OACJ,6BAA6B,MAAM,IAAI,KAAK;AAC9C,aAAO,MAAM,MAAM,IAAI,KAAK,MAAM,OAAO;AAAA,UAAa,MAAM,IAAI;AAAA,YAAe,MAAM,MAAM,KAAK,WAAW,MAAM,MAAM,MAAM;AAAA,UAAc,IAAI;AAAA,IACjJ,CAAC;AAED,WAAO;AAAA;AAAA;AAAA;AAAA,EAIT,WAAW,KAAK,MAAM,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUvB,QAAQ;AAAA;AAAA;AAAA;AAAA,EAIR,KAAK,UAAU,iBAAiB,MAAM,CAAC,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA,EAKxC;AAAA;AAAA;AAAA;AAAA,EAKU,oBAA4B;AACpC,WAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAmDT;AAAA;AAAA;AAAA;AAAA,EAKU,gBAAgB,UAA0B;AAClD,WAAO;AAAA;AAAA,EAET,QAAQ;AAAA,EACR;AAAA;AAAA;AAAA;AAAA,EAKQ,iBAAiB,SAAiC;AACxD,QAAI,QAAQ,WAAW,GAAG;AACxB,aAAO,CAAC;AAAA,IACV;AAGA,WAAO,KAAK,eAAe,SAAS,CAAC;AAAA,EACvC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOQ,eACN,SACA,eACY;AACZ,WAAO,QAAQ,IAAI,CAAC,UAAU;AAC5B,YAAM,kBAA4B;AAAA,QAChC,OAAO,MAAM,MAAM,KAAK;AAAA,QACxB,OAAO;AAAA,QACP,QAAQ,MAAM;AAAA,MAChB;AAEA,UAAI,MAAM,YAAY,MAAM,SAAS,SAAS,GAAG;AAC/C,wBAAgB,WAAW,KAAK;AAAA,UAC9B,MAAM;AAAA,UACN,gBAAgB;AAAA,QAClB;AAAA,MACF;AAEA,aAAO;AAAA,IACT,CAAC;AAAA,EACH;AACF;;;AG9XA,IAAAC,MAAoB;AACpB,IAAAC,QAAsB;AACtB,IAAAC,cAAkB;;;ACDlB,SAAoB;AACpB,WAAsB;AA8Bf,IAAe,qBAAf,cAA0C,iBAAiB;AAAA,EAC7C;AAAA,EAEnB,YACE,QACA,OACA,eACA,YACA,SACA,eACA,YACA;AACA,UAAM,QAAQ,OAAO,eAAe,SAAS,eAAe,UAAU;AACtE,SAAK,aAAa;AAAA,EACpB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAgB,cACd,QACA,UAIA,OACkE;AAClE,UAAM,SAAS,MAAM,UAAU,WAAW;AAAA,MACxC;AAAA,MACA;AAAA,MACA,cAAc,KAAK;AAAA,MACnB,eAAe,KAAK;AAAA,MACpB,YAAY,KAAK;AAAA,MACjB,aAAa,KAAK;AAAA,MAClB,aAAa,KAAK;AAAA,MAClB,WAAW,KAAK;AAAA,MAChB;AAAA,IACF,CAAC;AAED,SAAK,WAAW,OAAO,KAAK;AAE5B,WAAO;AAAA,MACL,QAAQ,OAAO;AAAA,MACf,OAAO,OAAO;AAAA,IAChB;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQU,kBAAkB,WAA2B;AACrD,UAAM,cAAiB,gBAAa,SAAS;AAC7C,WAAO,YAAY,SAAS,QAAQ;AAAA,EACtC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASU,kBACR,WACA,WAAmB,aACL;AACd,UAAM,eAAoB,gBAAW,SAAS,IAC1C,YACK,aAAQ,KAAK,YAAY,SAAS;AAC3C,UAAM,cAAc,KAAK,kBAAkB,YAAY;AACvD,WAAO;AAAA,MACL,MAAM;AAAA,MACN,OAAO,QAAQ,QAAQ,WAAW,WAAW;AAAA,IAC/C;AAAA,EACF;AACF;;;ADxGO,IAAM,4BAA4B,cAAE,OAAO;AAAA,EAChD,QAAQ,cAAE,QAAQ,EAAE,SAAS,yCAAyC;AAAA,EACtE,aAAa,cACV,OAAO,EACP,SAAS,EACT,SAAS,qDAAqD;AAAA,EACjE,qBAAqB,cAClB,QAAQ,EACR,SAAS,0CAA0C;AACxD,CAAC;AA8BM,IAAM,qBAAN,cAAiC,mBAAmB;AAAA,EACxC;AAAA,EACA;AAAA,EAEjB,YACE,QACA,OACA,YACA,SACA,eACA,YACA;AACA;AAAA,MACE;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,cAAc,IAAI,wBAA6B;AAAA,IACjD;AACA,SAAK,iBAAiB,SAAS,kBAAkB;AACjD,SAAK,kBAAkB,SAAS,mBAAmB;AAAA,EACrD;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,MAAM,QAAQ,YAA4C;AACxD,SAAK,IAAI,QAAQ,gCAAgC,UAAU,QAAQ;AAEnE,QAAI,eAAe,GAAG;AACpB,WAAK,IAAI,QAAQ,oBAAoB;AACrC,aAAO;AAAA,IACT;AAGA,UAAM,gBAAgB,KAAK,IAAI,KAAK,gBAAgB,UAAU;AAC9D,SAAK,IAAI,QAAQ,kCAAkC,aAAa,EAAE;AAElE,UAAM,cAAc,MAAM,KAAK,iBAAiB,GAAG,aAAa;AAEhE,QAAI,YAAY,UAAU,YAAY,aAAa;AAEjD,UAAI,YAAY,uBAAuB,gBAAgB,YAAY;AACjE,aAAK,IAAI,QAAQ,8CAA8C;AAC/D,cAAM,kBAAkB,KAAK;AAAA,UAC3B,gBAAgB,KAAK;AAAA,UACrB;AAAA,QACF;AACA,cAAM,qBAAqB,MAAM,KAAK;AAAA,UACpC,gBAAgB;AAAA,UAChB;AAAA,QACF;AAEA,YAAI,mBAAmB,UAAU,mBAAmB,aAAa;AAC/D,gBAA
M,SAAS,KAAK;AAAA,YAClB,YAAY;AAAA,YACZ,mBAAmB;AAAA,UACrB;AACA,eAAK,WAAY,WAAW,KAAK,MAAM;AACvC,eAAK;AAAA,YACH;AAAA,YACA,oCAAoC,OAAO,MAAM;AAAA,UACnD;AACA,iBAAO;AAAA,QACT;AAAA,MACF;AAEA,WAAK,WAAY,WAAW,KAAK,MAAM;AACvC,WAAK;AAAA,QACH;AAAA,QACA,6BAA6B,YAAY,YAAY,MAAM;AAAA,MAC7D;AACA,aAAO,YAAY;AAAA,IACrB;AAGA,QAAI,gBAAgB,YAAY;AAC9B,YAAM,mBAAmB,gBAAgB;AACzC,YAAM,iBAAiB,KAAK;AAAA,QAC1B,gBAAgB,KAAK;AAAA,QACrB;AAAA,MACF;AAEA,WAAK;AAAA,QACH;AAAA,QACA,iCAAiC,gBAAgB,IAAI,cAAc;AAAA,MACrE;AAEA,YAAM,eAAe,MAAM,KAAK;AAAA,QAC9B;AAAA,QACA;AAAA,MACF;AAEA,UAAI,aAAa,UAAU,aAAa,aAAa;AACnD,aAAK,WAAY,WAAW,KAAK,MAAM;AACvC,aAAK;AAAA,UACH;AAAA,UACA,8BAA8B,aAAa,YAAY,MAAM;AAAA,QAC/D;AACA,eAAO,aAAa;AAAA,MACtB;AAAA,IACF;AAEA,SAAK,WAAY,WAAW,KAAK,MAAM;AACvC,SAAK,IAAI,QAAQ,4BAA4B;AAC7C,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKA,MAAc,iBACZ,WACA,SACoC;AACpC,SAAK,IAAI,QAAQ,yBAAyB,SAAS,IAAI,OAAO,EAAE;AAEhE,SAAK;AAAA,MACH;AAAA,MACA;AAAA,IACF;AACA,UAAM,gBAAgB,KAAK,eAAe,WAAW,OAAO;AAE5D,SAAK;AAAA,MACH;AAAA,MACA,gDAAgD,SAAS,IAAI,OAAO;AAAA,IACtE;AACA,UAAM,SAAS,MAAM,UAAU,WAAW;AAAA,MACxC,QAAQ;AAAA,MACR,UAAU;AAAA,QACR;AAAA,UACE,MAAM;AAAA,UACN,SAAS;AAAA,YACP;AAAA,cACE,MAAM;AAAA,cACN,MAAM,KAAK,gBAAgB,WAAW,OAAO;AAAA,YAC/C;AAAA,YACA,GAAG;AAAA,UACL;AAAA,QACF;AAAA,MACF;AAAA,MACA,cAAc,KAAK;AAAA,MACnB,eAAe,KAAK;AAAA,MACpB,YAAY,KAAK;AAAA,MACjB,aAAa,KAAK;AAAA,MAClB,aAAa,KAAK;AAAA,MAClB,WAAW;AAAA,MACX,OAAO;AAAA,IACT,CAAC;AACD,SAAK;AAAA,MACH;AAAA,MACA,oCAAoC,SAAS,IAAI,OAAO;AAAA,IAC1D;AAEA,SAAK,WAAW,OAAO,KAAK;AAE5B,WAAO,OAAO;AAAA,EAChB;AAAA;AAAA;AAAA;AAAA,EAKQ,eACN,WACA,SACyC;AACzC,UAAM,gBAAyD,CAAC;AAEhE,aAAS,SAAS,WAAW,UAAU,SAAS,UAAU;AAExD,YAAM,YAAiB;AAAA,QACrB,KAAK;AAAA,QACL,cAAc,SAAS,CAAC;AAAA,MAC1B;AACA,YAAM,cAAiB,iBAAa,SAAS;AAC7C,YAAM,cAAc,YAAY,SAAS,QAAQ;AAEjD,oBAAc,KAAK;AAAA,QACjB,MAAM;AAAA,QACN,OAAO,yBAAyB,WAAW;AAAA,MAC7C,CAAC;AAAA,IACH;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKQ,cAAc,OAAe,cAA8B;AACjE,WAAO,GAAG,MAAM,KAAK,CAAC;AAAA,EAAK,aAAa,KAAK,CAAC;AAAA,EAChD;AAAA;AAAA;AAAA;AAAA,EAKU,oBAA4B;AACpC,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKU,gBAAgB,WAAmB,SAAyB;AACpE,UAAM,YAAY,UAAU,YAAY;AACxC,WAAO;AAAA;AAAA,iBAEM,SAAS,gCAAgC,SAAS,IAAI,OAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAwC5E;AACF;;;AEhTA,IAAAC,cAAkB;AAqBlB,IAAM,sBAAsB,cAAE,OAAO;AAAA,EACnC,KAAK,cACF,OAAO,EACP,SAAS,EACT,SAAS,wEAA8D;AAC5E,CAAC;AAKD,IAAM,0BAA0B,cAAE,OAAO;AAAA,EACvC,OAAO,cAAE,OAAO,EAAE,IAAI,EAAE,SAAS,yCAAyC;AAAA,EAC1E,KAAK,cACF,OAAO,EACP,SAAS,EACT,SAAS,wEAA8D;AAC5E,CAAC;AAKD,IAAM,qBAAqB,cAAE,OAAO;AAAA,EAClC,SAAS,cAAE,MAAM,uBAAuB;AAC1C,CAAC;AAgBM,IAAM,gBAAN,cAA4B,iBAAiB;AAAA,EAClD,YACE,QACA,OACA,SACA,eACA,YACA;AACA;AAAA,MACE;AAAA,MACA;AAAA,MACA,SAAS,iBAAiB;AAAA,MAC1B;AAAA,MACA;AAAA,MACA,cAAc,IAAI,wBAA6B;AAAA,IACjD;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,MAAM,WACJ,UACA,WACA,eACoB;AACpB,UAAM,iBAAiB,iBAAiB,KAAK;AAC7C,UAAM,aAAa,kBAAkB;AACrC,UAAM,YACH,eAAwC,WACxC,eAAmC,MACpC;AACF,SAAK;AAAA,MACH;AAAA,MACA,gCAAgC,SAAS,MAAM,kBAAkB,aAAa,cAAc,EAAE,UAAU,SAAS;AAAA,IACnH;AAEA,QAAI,SAAS,WAAW,GAAG;AACzB,WAAK,IAAI,QAAQ,sBAAsB;AACvC,aAAO,CAAC;AAAA,IACV;AAEA,QAAI;AACF,UAAI,cAAc,GAAG;AAEnB,aAAK,IAAI,QAAQ,2CAA2C;AAC5D,cAAMC,WAAqB,CAAC;AAE5B,iBAAS,IAAI,GAAG,IAAI,SAAS,QAAQ,KAAK;AACxC,gBAAM,WAAW,SAAS,CAAC;AAG3B,eAAK,IAAI,QAAQ,cAAc,IAAI,CAAC,MAAM,SAAS,MAAM,KAAK;AAE9D,gBAAM,SAAS,MAAM,UAAU,KAAK;AAAA,YAClC,QAAQ;AAAA,YACR,cAAc,KAAK,kBAAkB,QAAQ;AAAA,YAC7C,YAAY,KAAK,sBAAsB,QAAQ;AAAA,YAC/C,cAAc;AAAA,YACd,eAAe,KAAK;AAAA,YACpB,YAAY,KAAK;AAAA
,YACjB,aAAa,KAAK;AAAA,YAClB,aAAa,KAAK;AAAA,YAClB,WAAW,KAAK;AAAA,YAChB,OAAO;AAAA,UACT,CAAC;AAED,eAAK,WAAW,OAAO,KAAK;AAE5B,gBAAM,WAAW,KAAK;AAAA,YACpB;AAAA,YACA,OAAO,OAAO;AAAA,UAChB;AACA,UAAAA,SAAQ,KAAK,EAAE,UAAU,KAAK,SAAS,CAAC;AAAA,QAC1C;AAGA,aAAK,WAAY,WAAW,KAAK,MAAM;AAEvC,aAAK;AAAA,UACH;AAAA,UACA,cAAcA,SAAQ,MAAM,qBAAqBA,SAAQ,OAAO,CAAC,MAAM,EAAE,GAAG,EAAE,MAAM;AAAA,QACtF;AAEA,eAAOA;AAAA,MACT;AAGA,YAAM,kBAAkB,SAAS,IAAI,CAAC,MAAM,WAAW,EAAE,OAAO,KAAK,EAAE;AAGvE,YAAM,eAAe,MAAM,eAAe;AAAA,QACxC;AAAA,QACA;AAAA,QACA,OAAO,UAAU,KAAK,mBAAmB,OAAO,cAAc;AAAA,MAChE;AAGA,mBAAa,KAAK,CAAC,GAAG,MAAM,EAAE,QAAQ,EAAE,KAAK;AAC7C,YAAM,UAAU,aAAa,IAAI,CAAC,MAAM,EAAE,OAAO;AAGjD,WAAK,WAAY,WAAW,KAAK,MAAM;AAEvC,WAAK;AAAA,QACH;AAAA,QACA,cAAc,QAAQ,MAAM,qBAAqB,QAAQ,OAAO,CAAC,MAAM,EAAE,GAAG,EAAE,MAAM;AAAA,MACtF;AAEA,aAAO;AAAA,IACT,SAAS,OAAO;AACd,YAAM,UAAU,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AACrE,WAAK,IAAI,SAAS,mBAAmB,OAAO,EAAE;AAC9C,YAAM,IAAI,kBAAkB,6BAA6B,OAAO,IAAI;AAAA,QAClE,OAAO;AAAA,MACT,CAAC;AAAA,IACH;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,MAAc,mBACZ,UACA,OACqD;AACrD,UAAM,SAAS,MAAM,UAAU,KAAK;AAAA,MAClC,QAAQ;AAAA,MACR,cAAc,KAAK,kBAAkB;AAAA,MACrC,YAAY,KAAK,gBAAgB,QAAQ;AAAA,MACzC,cAAc;AAAA,MACd,eAAe,KAAK;AAAA,MACpB,YAAY,KAAK;AAAA,MACjB,aAAa,KAAK;AAAA,MAClB,aAAa,KAAK;AAAA,MAClB,WAAW,KAAK;AAAA,MAChB,OAAO;AAAA,IACT,CAAC;AAGD,SAAK,WAAW,OAAO,KAAK;AAG5B,QAAI,OAAO,OAAO,QAAQ,WAAW,SAAS,QAAQ;AACpD,WAAK;AAAA,QACH;AAAA,QACA,gBAAgB,OAAO,OAAO,QAAQ,MAAM,gBAAgB,SAAS,MAAM;AAAA,MAE7E;AAAA,IACF;AAGA,UAAM,aAAa,IAAI,IAAI,SAAS,IAAI,CAAC,MAAM,CAAC,EAAE,OAAO,EAAE,IAAI,CAAC,CAAC;AAEjE,WAAO,OAAO,OAAO,QAAQ,IAAI,CAAC,eAAe;AAG/C,YAAM,kBAAkB,SAAS,WAAW,KAAK;AACjD,YAAM,gBAAgB,iBAAiB,SAAS,WAAW;AAC3D,YAAM,WAAW,WAAW,IAAI,aAAa,KAAK;AAClD,YAAM,WAAW,KAAK,uBAAuB,UAAU,WAAW,GAAG;AAErE,aAAO;AAAA,QACL,OAAO;AAAA,QACP,SAAS;AAAA,UACP;AAAA,UACA,KAAK;AAAA,QACP;AAAA,MACF;AAAA,IACF,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYQ,uBACN,UACA,cACoB;AACpB,QAAI,CAAC,aAAc,QAAO;AAE1B,QAAI,aAAa,SAAS,QAAQ,YAAY;AAE9C,QAAI,eAAe,IAAI;AAErB,YAAM,gBAAgB,SAAS,YAAY;AAC3C,YAAM,WAAW,aAAa,YAAY;AAC1C,mBAAa,cAAc,QAAQ,QAAQ;AAE3C,UAAI,eAAe,IAAI;AAErB,eAAO,SAAS,UAAU,YAAY,aAAa,aAAa,MAAM;AAAA,MACxE;AAEA,aAAO;AAAA,IACT;AAGA,WAAO,SAAS,UAAU,YAAY,aAAa,aAAa,MAAM;AAAA,EACxE;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOU,kBAAkB,OAA2B,SAAiB;AACtE,UAAM,QACJ,SAAS,UACL,qLACA;AAEN,WAAO;AAAA;AAAA,EAET,KAAK;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA4CL;AAAA;AAAA;AAAA;AAAA,EAKU,gBACR,UACQ;AACR,UAAM,cAAc,SACjB,IAAI,CAAC,MAAM,IAAI,EAAE,KAAK,KAAK,EAAE,IAAI,EAAE,EACnC,KAAK,IAAI;AAEZ,WAAO;AAAA;AAAA,EAET,WAAW;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUX;AAAA;AAAA;AAAA;AAAA,EAKQ,sBAAsB,SAAyB;AACrD,WAAO;AAAA;AAAA,GAER,OAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAqBR;AACF;AAKO,IAAM,oBAAN,cAAgC,MAAM;AAAA,EAC3C,YAAY,SAAiB,SAAwB;AACnD,UAAM,SAAS,OAAO;AACtB,SAAK,OAAO;AAAA,EACd;AACF;;;ACnZO,IAAM,sBAAN,MAAM,6BAA4B,MAAM;AAAA,EAC7C,YAAY,SAAiB,SAAwB;AACnD,UAAM,SAAS,OAAO;AACtB,SAAK,OAAO;AAAA,EACd;AAAA;AAAA;AAAA;AAAA,EAKA,OAAO,gBAAgB,OAAwB;AAC7C,WAAO,iBAAiB,QAAQ,MAAM,UAAU,OAAO,KAAK;AAAA,EAC9D;AAAA;AAAA;AAAA;AAAA,EAKA,OAAO,UAAU,SAAiB,OAAqC;AACrE,WAAO,IAAI;AAAA,MACT,GAAG,OAAO,KAAK,qBAAoB,gBAAgB,KAAK,CAAC;AAAA,MACzD,EAAE,OAAO,MAAM;AAAA,IACjB;AAAA,EACF;AACF;;;ACbA,IAAAC,MAAoB;AACpB,IAAAC,QAAsB;AACtB,IAAAC,cAAkB;AAQX,IAAK,cAAL,kBAAKC,iBAAL;AAEL,EAAAA,aAAA,sBA
AmB;AAEnB,EAAAA,aAAA,kBAAe;AAEf,EAAAA,aAAA,YAAS;AAET,EAAAA,aAAA,aAAU;AARA,SAAAA;AAAA,GAAA;AA4CL,IAAM,kBAAN,cAA8B,mBAAmB;AAAA;AAAA,EAErC,cAAc;AAAA,EACd,sBAAsB;AAAA,EACtB,iBAAiB;AAAA,EAElC,YACE,QACA,OACA,YACA,aAAqB,GACrB,eACA,YACA,aACA;AACA;AAAA,MACE;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA,EAAE,YAAY,YAAY;AAAA,MAC1B;AAAA,MACA,cAAc,IAAI,wBAA6B;AAAA,IACjD;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,MAAM,MAAM,YAGT;AACD,SAAK,IAAI,QAAQ,gCAAgC;AAGjD,UAAM,QAAQ,KAAK,aAAa,UAAU;AAC1C,QAAI,MAAM,WAAW,GAAG;AACtB,WAAK,IAAI,QAAQ,gBAAgB;AACjC,YAAM,aAAa,KAAK,iBAAiB,UAAU;AACnD,WAAK,WAAW,UAAU;AAC1B,aAAO;AAAA,QACL,cAAc,CAAC;AAAA,QACf,OAAO,CAAC,UAAU;AAAA,MACpB;AAAA,IACF;AAEA,UAAM,aAAa,KAAK,aAAa,KAAK;AAC1C,SAAK;AAAA,MACH;AAAA,MACA,SAAS,WAAW,MAAM,yBAAyB,MAAM,MAAM;AAAA,IACjE;AAGA,UAAM,eAA0C,CAAC;AACjD,UAAM,YAAkC,CAAC;AAEzC,aAAS,IAAI,GAAG,IAAI,WAAW,QAAQ,KAAK;AAC1C,YAAM,QAAQ,WAAW,CAAC;AAC1B,WAAK;AAAA,QACH;AAAA,QACA,oBAAoB,IAAI,CAAC,IAAI,WAAW,MAAM,KAAK,MAAM,QAAQ,MAAM;AAAA,MACzE;AAEA,YAAM,cAAc,MAAM,KAAK,aAAa,OAAO,OAAO,KAAK,KAAK;AACpE,aAAO,OAAO,cAAc,YAAY,YAAY;AACpD,gBAAU,KAAK,GAAG,YAAY,KAAK;AAAA,IACrC;AAGA,eAAW,SAAS,WAAW;AAC7B,WAAK,WAAW,KAAK;AAAA,IACvB;AAGA,SAAK,YAAY,YAAY;AAE7B,SAAK;AAAA,MACH;AAAA,MACA,cAAc,OAAO,KAAK,YAAY,EAAE,MAAM;AAAA,IAChD;AAEA,WAAO,EAAE,cAAc,OAAO,UAAU;AAAA,EAC1C;AAAA;AAAA;AAAA;AAAA,EAKQ,aAAa,YAA4C;AAC/D,UAAM,WAAW,OAAO,KAAK,WAAW,KAAK,EAC1C,IAAI,MAAM,EACV,OAAO,CAAC,MAAM,CAAC,OAAO,MAAM,CAAC,CAAC,EAC9B,KAAK,CAAC,GAAG,MAAM,IAAI,CAAC;AAEvB,WAAO,SAAS,IAAI,CAAC,QAAQ,WAAW,MAAM,OAAO,GAAG,CAAC,CAAC;AAAA,EAC5D;AAAA;AAAA;AAAA;AAAA,EAKQ,aAAa,OAAuC;AAC1D,UAAM,SAA0B,CAAC;AACjC,QAAI,eAAqC;AAEzC,eAAW,QAAQ,OAAO;AACxB,YAAM,UAAU,KAAK,cAAc,KAAK,KAAK,OAAO,KAAK,KAAK,MAAM;AAEpE,UAAI,CAAC,gBAAgB,aAAa,YAAY,SAAS;AAErD,uBAAe,EAAE,SAAS,SAAS,CAAC,KAAK,OAAO,EAAE;AAClD,eAAO,KAAK,YAAY;AAAA,MAC1B,OAAO;AAEL,qBAAa,QAAQ,KAAK,KAAK,OAAO;AAAA,MACxC;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKQ,cAAc,OAAe,QAAwB;AAC3D,UAAM,eAAe,KAAK,MAAM,QAAQ,KAAK,cAAc;AAC3D,UAAM,gBAAgB,KAAK,MAAM,SAAS,KAAK,cAAc;AAC7D,WAAO,GAAG,YAAY,IAAI,aAAa;AAAA,EACzC;AAAA;AAAA;AAAA;AAAA,EAKA,MAAc,aACZ,OACA,OACA,OAIC;AACD,UAAM,EAAE,QAAQ,IAAI;AACpB,UAAM,YAAkC,CAAC;AAGzC,QAAI,QAAQ,UAAU,KAAK,aAAa;AACtC,WAAK;AAAA,QACH;AAAA,QACA,gBAAgB,QAAQ,MAAM;AAAA,MAChC;AACA,YAAM,SAAS,MAAM,KAAK,qBAAqB,OAAO,SAAS,KAAK;AACpE,gBAAU,KAAK,OAAO,KAAK;AAC3B,aAAO;AAAA,QACL,cAAc,KAAK,aAAa,OAAO,OAAO;AAAA,QAC9C,OAAO;AAAA,MACT;AAAA,IACF;AAGA,UAAM,eAAe,oBAAI,IAAY;AAErC,aAAS,UAAU,GAAG,WAAW,KAAK,qBAAqB,WAAW;AAEpE,YAAM,gBAAgB,KAAK;AAAA,QACzB;AAAA,QACA,KAAK;AAAA,QACL;AAAA,MACF;AAGA,iBAAW,KAAK,eAAe;AAC7B,qBAAa,IAAI,CAAC;AAAA,MACpB;AAEA,WAAK;AAAA,QACH;AAAA,QACA,WAAW,UAAU,CAAC,IAAI,KAAK,sBAAsB,CAAC,oBAAoB,cAAc,KAAK,IAAI,CAAC;AAAA,MACpG;AAGA,YAAM,SAAS,MAAM,KAAK;AAAA,QACxB;AAAA,QACA;AAAA,QACA;AAAA,MACF;AACA,gBAAU,KAAK,OAAO,KAAK;AAC3B,YAAM,UAAU,OAAO;AAGvB,YAAM,UAAU,KAAK,cAAc,OAAO;AAE1C,UAAI,QAAQ,YAAY,yBAAqB;AAE3C,aAAK;AAAA,UACH;AAAA,UACA,qBAAqB,QAAQ,OAAO,YAAY,QAAQ,MAAM,eAAe,QAAQ,SAAS;AAAA,QAChG;AACA,eAAO;AAAA,UACL,cAAc,KAAK,aAAa,SAAS,OAAO;AAAA,UAChD,OAAO;AAAA,QACT;AAAA,MACF;AAGA,WAAK;AAAA,QACH;AAAA,QACA,qCAAqC,UAAU,CAAC,IAAI,KAAK,sBAAsB,CAAC;AAAA,MAClF;AAAA,IACF;AAGA,UAAM,IAAI;AAAA,MACR,uCAAuC,KAAK,sBAAsB,CAAC,iCAAiC,QAAQ,MAAM;AAAA,IACpH;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,oBACN,SACA,OACA,UAAuB,oBAAI,IAAI,GACrB;AAEV,UAAM,YAAY,QAAQ,OAAO,CAAC,MAAM,CAAC,QAAQ,IAAI,CAAC,CAAC;AAGvD,UAAM,OAAO,UAAU,UAAU,QAAQ,YAAY;AAGrD,UAAM,WAAW,CAAC,GAAG,IAAI;AACzB,aAAS,IAAI,SAAS,SAAS,GAAG,IAAI,GAAG,KAAK;AAC5C,YAAM,IAAI,KAAK,MAAM,KAAK,OAAO,KAAK,IAAI,EAAE;AAC5C,OAAC,SAAS,CAAC,GAAG,SAAS,CAAC,CAAC,IAAI,CAAC,SAAS,CAAC,
GAAG,SAAS,CAAC,CAAC;AAAA,IACxD;AAGA,WAAO,SAAS,MAAM,GAAG,KAAK,EAAE,KAAK,CAAC,GAAG,MAAM,IAAI,CAAC;AAAA,EACtD;AAAA;AAAA;AAAA;AAAA,EAKA,MAAc,qBACZ,OACA,SACA,OACiE;AACjE,SAAK,IAAI,QAAQ,cAAc,QAAQ,MAAM,2BAA2B;AAGxE,UAAM,gBAAyD,CAAC;AAEhE,eAAW,UAAU,SAAS;AAC5B,YAAM,OAAO,MAAM,SAAS,CAAC;AAC7B,YAAM,YAAiB,cAAQ,KAAK,YAAY,KAAK,MAAM,GAAG;AAC9D,YAAM,cAAiB,iBAAa,SAAS;AAC7C,YAAM,cAAc,YAAY,SAAS,QAAQ;AACjD,YAAM,WAAW,KAAK,MAAM,YAAY;AAExC,oBAAc,KAAK;AAAA,QACjB,MAAM;AAAA,QACN,OAAO,QAAQ,QAAQ,WAAW,WAAW;AAAA,MAC/C,CAAC;AAAA,IACH;AAGA,UAAM,SAAS,cAAE,OAAO;AAAA,MACtB,OAAO,cACJ;AAAA,QACC,cAAE,OAAO;AAAA,UACP,YAAY,cACT,OAAO,EACP,SAAS,2CAA2C;AAAA,UACvD,aAAa,cACV,OAAO,EACP,SAAS,EACT,SAAS,uCAAuC;AAAA,UACnD,WAAW,cACR,OAAO,EACP,SAAS,EACT;AAAA,YACC;AAAA,UACF;AAAA,QACJ,CAAC;AAAA,MACH,EACC,SAAS,uCAAuC;AAAA,IACrD,CAAC;AAED,QAAI;AACF,YAAM,SAAS,MAAM,UAAU,WAAW;AAAA,QACxC;AAAA,QACA,UAAU;AAAA,UACR;AAAA,YACE,MAAM;AAAA,YACN,SAAS;AAAA,cACP,EAAE,MAAM,QAAQ,MAAM,KAAK,gBAAgB,OAAO,EAAE;AAAA,cACpD,GAAG;AAAA,YACL;AAAA,UACF;AAAA,QACF;AAAA,QACA,cAAc;AAAA,QACd,eAAe,KAAK;AAAA,QACpB,YAAY,KAAK;AAAA,QACjB,aAAa;AAAA,QACb,aAAa,KAAK;AAAA,QAClB,WAAW;AAAA,QACX,OAAO;AAAA,MACT,CAAC;AAGD,YAAM,UAAU,OAAO,OAAO,MAAM,IAAI,CAAC,OAAO;AAAA,QAC9C,WAAW,QAAQ,EAAE,UAAU;AAAA,QAC/B,aAAa,EAAE;AAAA,QACf,WAAW,EAAE;AAAA,MACf,EAAE;AAEF,aAAO,EAAE,SAAS,OAAO,OAAO,MAAM;AAAA,IACxC,SAAS,OAAO;AACd,WAAK,IAAI,SAAS,kCAAkC,KAAK;AACzD,YAAM,oBAAoB;AAAA,QACxB;AAAA,QACA;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,cAAc,SAA0C;AAE9D,UAAM,eAAe,QAAQ,OAAO,CAAC,MAAM,EAAE,gBAAgB,IAAI;AAEjE,QAAI,aAAa,SAAS,GAAG;AAC3B,aAAO,EAAE,SAAS,yBAAqB,QAAQ,GAAG,WAAW,EAAE;AAAA,IACjE;AAGA,iBAAa,KAAK,CAAC,GAAG,MAAM,EAAE,YAAY,EAAE,SAAS;AAGrD,UAAM,WAAW,aAAa,MAAM,CAAC,GAAG,MAAM;AAE5C,UAAI,EAAE,cAAc,QAAQ,EAAE,gBAAgB,EAAE,UAAW,QAAO;AAClE,UAAI,MAAM,EAAG,QAAO;AACpB,YAAM,OAAO,aAAa,IAAI,CAAC;AAC/B,YAAM,mBAAmB,EAAE,YAAY,KAAK;AAC5C,aAAO,EAAE,gBAAgB,KAAK,cAAe;AAAA,IAC/C,CAAC;AAED,QAAI,UAAU;AACZ,YAAM,cAAc,aAAa,CAAC;AAClC,YAAM,SAAS,YAAY,cAAe,YAAY;AACtD,aAAO,EAAE,SAAS,2CAA8B,QAAQ,WAAW,EAAE;AAAA,IACvE;AAKA,UAAM,gBAAgB,aAAa,MAAM,CAAC,GAAG,MAAM;AAEjD,UAAI,EAAE,cAAc,KAAM,QAAO;AACjC,UAAI,EAAE,cAAc,EAAE,cAAe,EAAG,QAAO;AAC/C,UAAI,MAAM,EAAG,QAAO;AAIpB,YAAM,OAAO,aAAa,IAAI,CAAC;AAC/B,YAAM,UAAU,EAAE,YAAY,KAAK;AACnC,YAAM,oBAAoB,UAAU;AACpC,YAAM,kBAAkB,EAAE,cAAe,KAAK;AAC9C,aAAO,oBAAoB;AAAA,IAC7B,CAAC;AAED,QAAI,eAAe;AACjB,YAAM,cAAc,aAAa,CAAC;AAClC,YAAM,SAAS,YAAY,cAAe,YAAY,YAAY;AAClE,aAAO,EAAE,SAAS,mCAA0B,QAAQ,WAAW,EAAE;AAAA,IACnE;AAGA,UAAM,UAAU,aAAa,IAAI,CAAC,MAAM,EAAE,cAAe,EAAE,SAAS;AACpE,UAAM,YAAY,KAAK;AAAA,MACrB,QAAQ,OAAO,CAAC,GAAG,MAAM,IAAI,GAAG,CAAC,IAAI,QAAQ;AAAA,IAC/C;AACA,UAAM,qBAAqB,QAAQ;AAAA,MACjC,CAAC,MAAM,KAAK,IAAI,IAAI,SAAS,KAAK;AAAA,IACpC;AAEA,QAAI,oBAAoB;AACtB,aAAO,EAAE,SAAS,uBAAoB,QAAQ,WAAW,WAAW,EAAE;AAAA,IACxE;AAEA,WAAO,EAAE,SAAS,yBAAqB,QAAQ,GAAG,WAAW,EAAE;AAAA,EACjE;AAAA;AAAA;AAAA;AAAA,EAKQ,aACN,SACA,SAC2B;AAC3B,UAAM,SAAoC,CAAC;AAE3C,eAAW,aAAa,SAAS;AAC/B,cAAQ,QAAQ,SAAS;AAAA,QACvB,KAAK;AAAA,QACL,KAAK,uBAAoB;AACvB,gBAAM,SAAS,YAAY,QAAQ;AACnC,iBAAO,SAAS,IAAI;AAAA,YAClB,aAAa;AAAA,YACb,WAAW;AAAA,UACb;AACA;AAAA,QACF;AAAA,QAEA,KAAK,mCAA0B;AAC7B,gBAAM,QAAQ,YAAY,IAAI,QAAQ;AACtC,iBAAO,SAAS,IAAI;AAAA,YAClB,aAAa;AAAA,YACb,WAAW,QAAQ;AAAA,UACrB;AACA;AAAA,QACF;AAAA,QAEA;AACE,iBAAO,SAAS,IAAI,EAAE,aAAa,GAAG,WAAW,EAAE;AAAA,MACvD;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKQ,aAAa,SAAoD;AACvE,UAAM,SAAoC,CAAC;AAE3C,eAAW,UAAU,SAAS;AAC5B,UAAI,OAAO,gBAAgB,MAAM;AAC/B,eAAO,OAAO,SAAS,IAAI;AAAA,UACzB,aAAa,OAAO;AAAA,UACpB,WAAW,OAAO,aAAa,OAAO;AAAA,QACxC;AAAA,MACF,OAAO;AACL,eAAO,OAAO,SAAS,IAAI,EAAE,aAAa,GAAG,WAAW,EAAE;AAAA,MAC5D;AAAA,IACF;AAEA
,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKQ,YAAY,cAA+C;AAMjE,SAAK,wBAAwB,YAAY;AACzC,SAAK,qBAAqB,YAAY;AACtC,SAAK,mBAAmB,YAAY;AACpC,SAAK,oBAAoB,YAAY;AAAA,EACvC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYQ,wBACN,cACM;AACN,UAAM,WAAW,OAAO,KAAK,YAAY,EACtC,IAAI,MAAM,EACV,KAAK,CAAC,GAAG,MAAM,IAAI,CAAC;AAEvB,QAAI,SAAS,SAAS,EAAG;AAGzB,UAAM,sBAAsB,KAAK;AAAA,MAC/B;AAAA,MACA;AAAA,IACF;AAEA,QAAI,wBAAwB,QAAQ,uBAAuB,EAAG;AAE9D,UAAM,qBAAqB,SAAS,mBAAmB;AACvD,UAAM,oBAAoB,aAAa,kBAAkB,EAAE;AAI3D,QAAI,cAAc;AAClB,aAAS,IAAI,GAAG,IAAI,qBAAqB,KAAK;AAC5C,YAAM,UAAU,SAAS,CAAC;AAC1B,YAAM,SAAS,aAAa,OAAO,EAAE;AAErC,UAAI,WAAW,EAAG;AAGlB,YAAM,UAAU,qBAAqB;AAGrC,YAAM,gBAAgB,KAAK;AAAA,QACzB,aAAa,kBAAkB;AAAA,MACjC;AACA,YAAM,iBAAiB,gBACnB,oBAAoB,UAAU,IAC9B,oBAAoB;AAIxB,UAAI,SAAS,iBAAiB,IAAI;AAChC,aAAK;AAAA,UACH;AAAA,UACA,yBAAyB,OAAO,IAAI,MAAM,eAAe,cAAc;AAAA,QACzE;AACA,qBAAa,OAAO,IAAI,EAAE,aAAa,GAAG,WAAW,EAAE;AACvD,sBAAc;AAAA,MAChB;AAAA,IACF;AAEA,QAAI,aAAa;AACf,WAAK,IAAI,QAAQ,qDAAqD;AAAA,IACxE;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWQ,wBACN,cACA,UACe;AACf,UAAM,sBAAsB;AAE5B,aACM,WAAW,GACf,YAAY,SAAS,SAAS,qBAC9B,YACA;AACA,UAAI,kBAAkB;AACtB,UAAI,oBAAmC;AAEvC,eAAS,IAAI,GAAG,IAAI,sBAAsB,GAAG,KAAK;AAChD,cAAM,cAAc,SAAS,WAAW,CAAC;AACzC,cAAM,cAAc,SAAS,WAAW,IAAI,CAAC;AAC7C,cAAM,YAAY,aAAa,WAAW;AAC1C,cAAM,YAAY,aAAa,WAAW;AAG1C,YAAI,UAAU,gBAAgB,KAAK,UAAU,gBAAgB,GAAG;AAC9D,4BAAkB;AAClB;AAAA,QACF;AAGA,cAAM,gBAAgB,UAAU,cAAc,UAAU;AACxD,cAAM,eAAe,cAAc;AAGnC,cAAM,gBAAgB,KAAK,mBAAmB,SAAS;AACvD,cAAM,0BAA0B,gBAAgB,IAAI;AACpD,cAAM,WAAW,eAAe;AAEhC,YAAI,sBAAsB,MAAM;AAC9B,8BAAoB;AAAA,QACtB;AAGA,YAAI,kBAAkB,UAAU;AAC9B,4BAAkB;AAClB;AAAA,QACF;AAAA,MACF;AAEA,UAAI,iBAAiB;AACnB,eAAO;AAAA,MACT;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA,EAKQ,mBAAmB,OAA2B;AACpD,WACE,MAAM,cAAc,QACpB,MAAM,cAAc,MAAM,eAC1B,MAAM,cAAc,MAAM,cAAc;AAAA,EAE5C;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQQ,qBAAqB,cAA+C;AAC1E,UAAM,WAAW,OAAO,KAAK,YAAY,EACtC,IAAI,MAAM,EACV,KAAK,CAAC,GAAG,MAAM,IAAI,CAAC;AAEvB,QAAI,SAAS,SAAS,EAAG;AAEzB,aAAS,IAAI,GAAG,IAAI,SAAS,QAAQ,KAAK;AACxC,YAAM,cAAc,SAAS,IAAI,CAAC;AAClC,YAAM,cAAc,SAAS,CAAC;AAC9B,YAAM,aAAa,aAAa,WAAW,EAAE;AAC7C,YAAM,aAAa,aAAa,WAAW,EAAE;AAG7C,UAAI,eAAe,KAAK,eAAe,EAAG;AAG1C,UACE,aAAa,KACb,aAAa,cACb,aAAa,aAAa,GAC1B;AACA,aAAK;AAAA,UACH;AAAA,UACA,2BAA2B,WAAW,IAAI,UAAU,WAAW,WAAW,IAAI,UAAU;AAAA,QAC1F;AAGA,cAAM,gBAAgB,KAAK;AAAA,UACzB,aAAa,WAAW;AAAA,QAC1B;AAGA,iBAAS,IAAI,IAAI,GAAG,KAAK,GAAG,KAAK;AAC/B,gBAAM,UAAU,SAAS,CAAC;AAC1B,gBAAM,WAAW,cAAc;AAE/B,cAAI,eAAe;AAEjB,kBAAM,sBAAsB,aAAa,WAAW;AAEpD,gBAAI,sBAAsB,GAAG;AAC3B,2BAAa,OAAO,IAAI,EAAE,aAAa,GAAG,WAAW,EAAE;AAAA,YACzD,OAAO;AACL,2BAAa,OAAO,IAAI;AAAA,gBACtB,aAAa;AAAA,gBACb,WAAW,sBAAsB;AAAA,cACnC;AAAA,YACF;AAAA,UACF,OAAO;AAEL,kBAAM,iBAAiB,aAAa;AAEpC,gBAAI,iBAAiB,GAAG;AACtB,2BAAa,OAAO,IAAI,EAAE,aAAa,GAAG,WAAW,EAAE;AAAA,YACzD,OAAO;AACL,2BAAa,OAAO,IAAI;AAAA,gBACtB,aAAa;AAAA,gBACb,WAAW;AAAA,cACb;AAAA,YACF;AAAA,UACF;AACA,eAAK;AAAA,YACH;AAAA,YACA,oBAAoB,OAAO,OAAO,aAAa,OAAO,EAAE,WAAW;AAAA,UACrE;AAAA,QACF;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,mBAAmB,cAA+C;AACxE,eAAW,CAAC,YAAY,KAAK,KAAK,OAAO,QAAQ,YAAY,GAAG;AAC9D,UAAI,MAAM,cAAc,KAAK,MAAM,YAAY,GAAG;AAChD,aAAK,IAAI,QAAQ,6BAA6B,UAAU,OAAO;AAC/D,qBAAa,OAAO,UAAU,CAAC,IAAI,EAAE,aAAa,GAAG,WAAW,EAAE;AAAA,MACpE;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKQ,oBAAoB,cAA+C;AACzE,UAAM,WAAW,OAAO,KAAK,YAAY,EACtC,IAAI,MAAM,EACV,KAAK,CAAC,GAAG,MAAM,IAAI,CAAC;AAGvB,UAAM,cAAc,SAAS;AAAA,MAC3B,CAAC,MAAM,aAAa,CAAC,EAAE,gBAAgB;AAAA,IACzC;AACA,QAAI,YAAY,WAAW,EAAG;AAG9B,UAAM,kBAAkB,SACrB,OAAO,CAAC,MAAM,aAAa,CAAC,EAAE,cAAc,CAAC,EAC7C,IAAI,CAAC,OA
AO;AAAA,MACX,SAAS;AAAA,MACT,QAAQ,aAAa,CAAC,EAAE;AAAA,MACxB,eAAe,KAAK,mBAAmB,aAAa,CAAC,CAAC;AAAA,IACxD,EAAE;AAEJ,QAAI,gBAAgB,SAAS,GAAG;AAC9B,WAAK,IAAI,QAAQ,0CAA0C;AAC3D;AAAA,IACF;AAGA,UAAM,mBAAmB,gBAAgB;AAAA,MACvC,CAAC,MAAM,EAAE;AAAA,IACX,EAAE;AACF,UAAM,gBAAgB,mBAAmB,gBAAgB,SAAS;AAElE,QAAI,eAAe;AAEjB,YAAM,UAAU,gBAAgB,IAAI,CAAC,MAAM,EAAE,SAAS,EAAE,UAAU,CAAC;AACnE,YAAM,YAAY,KAAK;AAAA,QACrB,QAAQ,OAAO,CAAC,GAAG,MAAM,IAAI,GAAG,CAAC,IAAI,QAAQ;AAAA,MAC/C;AAEA,WAAK;AAAA,QACH;AAAA,QACA,eAAe,YAAY,MAAM,4CAA4C,SAAS;AAAA,MACxF;AAEA,iBAAW,WAAW,aAAa;AACjC,cAAM,sBAAsB,UAAU,IAAI;AAE1C,YAAI,sBAAsB,GAAG;AAC3B,eAAK;AAAA,YACH;AAAA,YACA,4BAA4B,OAAO,cAAc,mBAAmB;AAAA,UACtE;AAEA;AAAA,QACF;AAEA,aAAK;AAAA,UACH;AAAA,UACA,gBAAgB,OAAO,UAAU,mBAAmB,IAAI,sBAAsB,CAAC;AAAA,QACjF;AACA,qBAAa,OAAO,IAAI;AAAA,UACtB,aAAa;AAAA,UACb,WAAW,sBAAsB;AAAA,QACnC;AAAA,MACF;AAAA,IACF,OAAO;AAEL,YAAM,UAAU,gBAAgB,IAAI,CAAC,MAAM,EAAE,SAAS,EAAE,OAAO;AAC/D,YAAM,YAAY,KAAK;AAAA,QACrB,QAAQ,OAAO,CAAC,GAAG,MAAM,IAAI,GAAG,CAAC,IAAI,QAAQ;AAAA,MAC/C;AAEA,WAAK;AAAA,QACH;AAAA,QACA,eAAe,YAAY,MAAM,sBAAsB,SAAS;AAAA,MAClE;AAEA,iBAAW,WAAW,aAAa;AACjC,cAAM,iBAAiB,UAAU;AAEjC,YAAI,iBAAiB,GAAG;AACtB,eAAK;AAAA,YACH;AAAA,YACA,4BAA4B,OAAO,cAAc,cAAc;AAAA,UACjE;AACA;AAAA,QACF;AAEA,aAAK,IAAI,QAAQ,gBAAgB,OAAO,UAAU,cAAc,EAAE;AAClE,qBAAa,OAAO,IAAI;AAAA,UACtB,aAAa;AAAA,UACb,WAAW;AAAA,QACb;AAAA,MACF;AAAA,IACF;AAAA,EACF;AAAA;AAAA;AAAA;AAAA,EAKU,oBAA4B;AACpC,WAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAyBT;AAAA;AAAA;AAAA;AAAA,EAKU,gBAAgB,SAA2B;AACnD,WAAO,kBAAkB,QAAQ,MAAM;AAAA,uBACpB,QAAQ,KAAK,IAAI,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAMvC;AACF;;;ACh3BO,IAAe,gBAAf,cAGG,iBAAiB;AAAA;AAAA;AAAA;AAAA,EAIN;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYnB,YACE,QACA,OACA,eACA,SACA,eACA,YACA;AACA,UAAM,QAAQ,OAAO,eAAe,SAAS,eAAe,UAAU;AACtE,SAAK,gBAAgB;AAAA,EACvB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAeA,MAAgB,QACd,QACA,cACA,YACA,OACA,YACyD;AACzD,UAAM,SAAS,MAAM,UAAU,KAAK;AAAA,MAClC;AAAA,MACA;AAAA,MACA;AAAA,MACA,cAAc,KAAK;AAAA,MACnB,eAAe,KAAK;AAAA,MACpB,YAAY,KAAK;AAAA,MACjB,aAAa,KAAK;AAAA,MAClB,aAAa,KAAK;AAAA,MAClB,WAAW,KAAK;AAAA,MAChB;AAAA,IACF,CAAC;AAGD,QAAI,YAAY;AACd,iBAAW,MAAM,OAAO,KAAK;AAAA,IAC/B,OAAO;AACL,WAAK,WAAW,OAAO,KAAK;AAAA,IAC9B;AAEA,WAAO;AAAA,MACL,QAAQ,OAAO;AAAA,MACf,OAAO,OAAO;AAAA,IAChB;AAAA,EACF;AACF;;;AC1GA,IAAAC,cAAkB;AAYX,IAAM,6BAA6B,cAAE,OAAO;AAAA,EACjD,SAAS,cAAE,QAAQ,EAAE,SAAS,2CAA2C;AAAA,EACzE,YAAY,cACT,OAAO,EACP,IAAI,CAAC,EACL,IAAI,CAAC,EACL,SAAS,kCAAkC;AAAA,EAC9C,aAAa,cACV,KAAK,CAAC,YAAY,SAAS,iBAAiB,SAAS,CAAC,EACtD,SAAS,0BAA0B;AAAA,EACtC,sBAAsB,cACnB,OAAO,EACP,SAAS,EACT,SAAS,wDAAwD;AAAA,EACpE,QAAQ,cAAE,OAAO,EAAE,SAAS,8BAA8B;AAC5D,CAAC;AAkCM,IAAM,sBAAN,cAAkC,cAGvC;AAAA,EACiB;AAAA,EAEjB,YACE,QACA,OACA,SACA,eACA,YACA;AACA;AAAA,MACE;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AACA,SAAK,sBAAsB,SAAS,uBAAuB;AAAA,EAC7D;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQA,MAAM,SAAS,UAAgD;AAC7D,SAAK,OAAO;AAAA,MACV,6CAA6C,SAAS,MAAM;AAAA,IAC9D;AAEA,QAAI,CAAC,SAAS,KAAK,GAAG;AACpB,WAAK,OAAO;AAAA,QACV;AAAA,MACF;AACA,aAAO;AAAA,QACL,SAAS;AAAA,QACT,YAAY;AAAA,QACZ,aAAa;AAAA,QACb,kBAAkB;AAAA,QAClB,QAAQ;AAAA,MACV;AAAA,IACF;AAEA,UAAM,EAAE,QAAQ,OAAO,IAAI,MAAM,KAAK;AAAA,MACpC;AAAA,MACA,KAAK,kBAAkB;AAAA,MACvB,KAAK,gBAAgB,QAAQ;AAAA,MAC7B;AAAA,MACA,KAAK;AAAA,IACP;AAEA,SAAK,OAAO;AAAA,MACV,yCAAyC,OAAO,OAAO,iBAAiB,OAAO,WAAW,gBAAgB,OAAO,UAAU;AAAA,IAC7H;AAGA,QAAI,mBAAkC;AACtC,QAAI,OAAO,WAAW,OAAO,cAAc,KAAK,qBAAqB;AACnE,UAAI,OAAO,gBAAgB,YAAY;AACrC,2BAAmB;AAAA,MACrB,WACE,OAA
["mappings" payload omitted: a single minified line of VLQ-encoded position data covering the sources listed above; only the trailing "names" array is human-readable]
"names":["path","fs","path","import_zod","import_zod","results","fs","path","import_zod","PagePattern","import_zod","import_zod","totalPages"]}