@semiont/inference 0.2.28-build.40
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +243 -0
- package/dist/index.d.ts +205 -0
- package/dist/index.js +850 -0
- package/dist/index.js.map +1 -0
- package/package.json +53 -0
package/dist/index.js.map
ADDED
@@ -0,0 +1 @@
+
{"version":3,"sources":["../src/factory.ts","../src/entity-extractor.ts","../src/motivation-prompts.ts","../src/motivation-parsers.ts"],"sourcesContent":["import Anthropic from '@anthropic-ai/sdk';\nimport { getLocaleEnglishName } from '@semiont/api-client';\nimport type { GenerationContext } from '@semiont/api-client';\nimport type { EnvironmentConfig } from '@semiont/core';\n\nfunction getLanguageName(locale: string): string {\n return getLocaleEnglishName(locale) || locale;\n}\n\n// Singleton instance\nlet inferenceClient: Anthropic | null = null;\n\n/**\n * Get or create the inference client\n * Following the singleton pattern from graph factory\n */\nexport async function getInferenceClient(config: EnvironmentConfig): Promise<Anthropic> {\n if (inferenceClient) {\n return inferenceClient;\n }\n\n const inferenceConfig = config.services.inference;\n if (!inferenceConfig) {\n throw new Error('services.inference is required in environment config');\n }\n\n // Expand environment variables in apiKey\n let apiKey = inferenceConfig.apiKey;\n if (apiKey?.startsWith('${') && apiKey.endsWith('}')) {\n const envVarName = apiKey.slice(2, -1);\n const envValue = process.env[envVarName];\n if (!envValue) {\n throw new Error(`Environment variable ${envVarName} is not set`);\n }\n apiKey = envValue;\n }\n\n console.log('Inference config loaded:', {\n type: inferenceConfig.type,\n model: inferenceConfig.model,\n endpoint: inferenceConfig.endpoint,\n hasApiKey: !!apiKey\n });\n\n inferenceClient = new Anthropic({\n apiKey: apiKey,\n baseURL: inferenceConfig.endpoint || inferenceConfig.baseURL || 'https://api.anthropic.com',\n });\n\n console.log(`Initialized ${inferenceConfig.type} inference client with model ${inferenceConfig.model}`);\n return inferenceClient;\n}\n\n/**\n * Get the configured model name\n */\nexport function getInferenceModel(config: EnvironmentConfig): string {\n const inferenceConfig = config.services.inference;\n if (!inferenceConfig?.model) {\n throw new Error('Inference model not configured! 
Set it in your environment configuration.');\n }\n return inferenceConfig.model;\n}\n\n/**\n * Helper function to make a simple inference call\n */\nexport async function generateText(\n prompt: string,\n config: EnvironmentConfig,\n maxTokens: number = 500,\n temperature: number = 0.7\n): Promise<string> {\n console.log('generateText called with prompt length:', prompt.length, 'maxTokens:', maxTokens, 'temp:', temperature);\n\n const client = await getInferenceClient(config);\n\n const response = await client.messages.create({\n model: getInferenceModel(config),\n max_tokens: maxTokens,\n temperature,\n messages: [\n {\n role: 'user',\n content: prompt\n }\n ]\n });\n\n console.log('Inference response received, content blocks:', response.content.length);\n\n const textContent = response.content.find(c => c.type === 'text');\n\n if (!textContent || textContent.type !== 'text') {\n console.error('No text content in response:', response.content);\n throw new Error('No text content in inference response');\n }\n\n console.log('Returning text content of length:', textContent.text.length);\n return textContent.text;\n\n}\n\n/**\n * Generate resource content using inference\n */\nexport async function generateResourceFromTopic(\n topic: string,\n entityTypes: string[],\n config: EnvironmentConfig,\n userPrompt?: string,\n locale?: string,\n context?: GenerationContext,\n temperature?: number,\n maxTokens?: number\n): Promise<{ title: string; content: string }> {\n console.log('generateResourceFromTopic called with:', {\n topic: topic.substring(0, 100),\n entityTypes,\n hasUserPrompt: !!userPrompt,\n locale,\n hasContext: !!context,\n temperature,\n maxTokens\n });\n\n const inferenceConfig = config.services.inference;\n const provider = inferenceConfig?.type || 'anthropic';\n console.log('Using provider:', provider, 'with model:', inferenceConfig?.model);\n\n // Use provided values or defaults\n const finalTemperature = temperature ?? 0.7;\n const finalMaxTokens = maxTokens ?? 500;\n\n // Determine language instruction\n const languageInstruction = locale && locale !== 'en'\n ? `\\n\\nIMPORTANT: Write the entire resource in ${getLanguageName(locale)}.`\n : '';\n\n // Build context section if available\n let contextSection = '';\n if (context?.sourceContext) {\n const { before, selected, after } = context.sourceContext;\n contextSection = `\\n\\nSource document context:\n---\n${before ? `...${before}` : ''}\n**[${selected}]**\n${after ? `${after}...` : ''}\n---\n`;\n }\n\n // Simple, direct prompt - just ask for markdown content\n const prompt = `Generate a concise, informative resource about \"${topic}\".\n${entityTypes.length > 0 ? `Focus on these entity types: ${entityTypes.join(', ')}.` : ''}\n${userPrompt ? 
`Additional context: ${userPrompt}` : ''}${contextSection}${languageInstruction}\n\nRequirements:\n- Start with a clear heading (# Title)\n- Write 2-3 paragraphs of substantive content\n- Be factual and informative\n- Use markdown formatting\n- Return ONLY the markdown content, no JSON, no code fences, no additional wrapper`;\n\n // Simple parser - just use the response directly as markdown\n const parseResponse = (response: string): { title: string; content: string } => {\n // Clean up any markdown code fences if present\n let content = response.trim();\n if (content.startsWith('```markdown') || content.startsWith('```md')) {\n content = content.slice(content.indexOf('\\n') + 1);\n const endIndex = content.lastIndexOf('```');\n if (endIndex !== -1) {\n content = content.slice(0, endIndex);\n }\n } else if (content.startsWith('```')) {\n content = content.slice(3);\n const endIndex = content.lastIndexOf('```');\n if (endIndex !== -1) {\n content = content.slice(0, endIndex);\n }\n }\n\n content = content.trim();\n\n // Title is provided by the caller (topic), not extracted from generated content\n // This matches how it's actually used in generation-worker.ts line 87\n return {\n title: topic,\n content: content\n };\n };\n\n console.log('Sending prompt to inference (length:', prompt.length, 'chars)', 'temp:', finalTemperature, 'maxTokens:', finalMaxTokens);\n const response = await generateText(prompt, config, finalMaxTokens, finalTemperature);\n console.log('Got raw response (length:', response.length, 'chars)');\n\n const result = parseResponse(response);\n console.log('Parsed result:', {\n hasTitle: !!result.title,\n titleLength: result.title?.length,\n hasContent: !!result.content,\n contentLength: result.content?.length\n });\n\n return result;\n}\n\n/**\n * Generate an intelligent summary for a resource\n */\nexport async function generateResourceSummary(\n resourceName: string,\n content: string,\n entityTypes: string[],\n config: EnvironmentConfig\n): Promise<string> {\n // Truncate content if too long\n const truncatedContent = content.length > 2000\n ? content.substring(0, 2000) + '...'\n : content;\n\n const prompt = `Create a brief, intelligent summary of this resource titled \"${resourceName}\".\n${entityTypes.length > 0 ? `Key entity types: ${entityTypes.join(', ')}` : ''}\n\nResource content:\n${truncatedContent}\n\nWrite a 2-3 sentence summary that captures the key points and would help someone understand what this resource contains.`;\n\n return await generateText(prompt, config, 150, 0.5);\n}\n\n/**\n * Generate smart suggestions for a reference\n */\nexport async function generateReferenceSuggestions(\n referenceTitle: string,\n config: EnvironmentConfig,\n entityType?: string,\n currentContent?: string\n): Promise<string[] | null> {\n const prompt = `For a reference titled \"${referenceTitle}\"${entityType ? ` (type: ${entityType})` : ''}${currentContent ? 
` with current stub: \"${currentContent}\"` : ''}, suggest 3 specific, actionable next steps or related topics to explore.\n\nFormat as a simple list, one suggestion per line.`;\n\n const response = await generateText(prompt, config, 200, 0.8);\n if (!response) {\n return null;\n }\n\n // Parse into array of suggestions\n return response\n .split('\\n')\n .map(line => line.replace(/^[-*•]\\s*/, '').trim())\n .filter(line => line.length > 0)\n .slice(0, 3);\n}","import { getInferenceClient, getInferenceModel } from './factory';\nimport type { EnvironmentConfig } from '@semiont/core';\n\n/**\n * Entity reference extracted from text\n */\nexport interface ExtractedEntity {\n exact: string; // The actual text span\n entityType: string; // The detected entity type\n startOffset: number; // Character offset where entity starts\n endOffset: number; // Character offset where entity ends\n prefix?: string; // Text immediately before entity (for disambiguation)\n suffix?: string; // Text immediately after entity (for disambiguation)\n}\n\n/**\n * Extract entity references from text using AI\n *\n * @param text - The text to analyze\n * @param entityTypes - Array of entity types to detect (optionally with examples)\n * @param config - Application configuration\n * @param includeDescriptiveReferences - Include anaphoric/cataphoric references (default: false)\n * @returns Array of extracted entities with their character offsets\n */\nexport async function extractEntities(\n exact: string,\n entityTypes: string[] | { type: string; examples?: string[] }[],\n config: EnvironmentConfig,\n includeDescriptiveReferences: boolean = false\n): Promise<ExtractedEntity[]> {\n console.log('extractEntities called with:', {\n textLength: exact.length,\n entityTypes: Array.isArray(entityTypes) ? entityTypes.map(et => typeof et === 'string' ? et : et.type) : []\n });\n\n const client = await getInferenceClient(config);\n\n // Format entity types for the prompt\n const entityTypesDescription = entityTypes.map(et => {\n if (typeof et === 'string') {\n return et;\n }\n return et.examples && et.examples.length > 0\n ? `${et.type} (examples: ${et.examples.slice(0, 3).join(', ')})`\n : et.type;\n }).join(', ');\n\n // Build prompt with optional support for anaphoric/cataphoric references\n // Anaphora: references that point backward (e.g., \"John arrived. He was tired.\")\n // Cataphora: references that point forward (e.g., \"When she arrived, Mary was surprised.\")\n // When enabled, include substantive descriptive references beyond simple pronouns\n const descriptiveReferenceGuidance = includeDescriptiveReferences\n ? 
`\nInclude both:\n- Direct mentions (names, proper nouns)\n- Descriptive references (substantive phrases that refer to entities)\n\nFor descriptive references, include:\n- Definite descriptions: \"the Nobel laureate\", \"the tech giant\", \"the former president\"\n- Role-based references: \"the CEO\", \"the physicist\", \"the author\", \"the owner\", \"the contractor\"\n- Epithets with context: \"the Cupertino-based company\", \"the iPhone maker\"\n- References to entities even when identity is unknown or unspecified\n\nDo NOT include:\n- Simple pronouns alone: he, she, it, they, him, her, them\n- Generic determiners alone: this, that, these, those\n- Possessives without substance: his, her, their, its\n\nExamples:\n- For \"Marie Curie\", include \"the Nobel laureate\" and \"the physicist\" but NOT \"she\"\n- For an unknown person, include \"the owner\" or \"the contractor\" (role-based references count even when identity is unspecified)\n`\n : `\nFind direct mentions only (names, proper nouns). Do not include pronouns or descriptive references.\n`;\n\n const prompt = `Identify entity references in the following text. Look for mentions of: ${entityTypesDescription}.\n${descriptiveReferenceGuidance}\nText to analyze:\n\"\"\"\n${exact}\n\"\"\"\n\nReturn ONLY a JSON array of entities found. Each entity should have:\n- exact: the exact text span from the input\n- entityType: one of the provided entity types\n- startOffset: character position where the entity starts (0-indexed)\n- endOffset: character position where the entity ends\n- prefix: up to 32 characters of text immediately before the entity (helps identify correct occurrence)\n- suffix: up to 32 characters of text immediately after the entity (helps identify correct occurrence)\n\nReturn empty array [] if no entities found.\nDo not include markdown formatting or code fences, just the raw JSON array.\n\nExample output:\n[{\"exact\":\"Alice\",\"entityType\":\"Person\",\"startOffset\":0,\"endOffset\":5,\"prefix\":\"\",\"suffix\":\" went to\"},{\"exact\":\"Paris\",\"entityType\":\"Location\",\"startOffset\":20,\"endOffset\":25,\"prefix\":\"went to \",\"suffix\":\" yesterday\"}]`;\n\n console.log('Sending entity extraction request to model:', getInferenceModel(config));\n const response = await client.messages.create({\n model: getInferenceModel(config),\n max_tokens: 4000, // Increased to handle many entities without truncation\n temperature: 0.3, // Lower temperature for more consistent extraction\n messages: [\n {\n role: 'user',\n content: prompt\n }\n ]\n });\n console.log('Got entity extraction response');\n\n const textContent = response.content.find(c => c.type === 'text');\n if (!textContent || textContent.type !== 'text') {\n console.warn('No text content in entity extraction response');\n return [];\n }\n\n console.log('Entity extraction raw response length:', textContent.text.length);\n\n try {\n // Clean up response if wrapped in markdown\n let jsonStr = textContent.text.trim();\n if (jsonStr.startsWith('```')) {\n jsonStr = jsonStr.replace(/^```(?:json)?\\n?/, '').replace(/\\n?```$/, '');\n }\n\n const entities = JSON.parse(jsonStr);\n console.log('Parsed', entities.length, 'entities from response');\n\n // Check if response was truncated - this is an ERROR condition\n if (response.stop_reason === 'max_tokens') {\n const errorMsg = `AI response truncated: Found ${entities.length} entities but response hit max_tokens limit. 
Increase max_tokens or reduce resource size.`;\n console.error(`❌ ${errorMsg}`);\n throw new Error(errorMsg);\n }\n\n // Validate and fix offsets\n return entities.map((entity: any, idx: number) => {\n let startOffset = entity.startOffset;\n let endOffset = entity.endOffset;\n\n console.log(`\\n[Entity ${idx + 1}/${entities.length}]`);\n console.log(` Type: ${entity.entityType}`);\n console.log(` Text: \"${entity.exact}\"`);\n console.log(` Offsets from AI: [${startOffset}, ${endOffset}]`);\n\n // Verify the offsets are correct by checking if the text matches\n const extractedText = exact.substring(startOffset, endOffset);\n\n // If the extracted text doesn't match, find the correct position using context\n if (extractedText !== entity.exact) {\n console.log(` ⚠️ Offset mismatch!`);\n console.log(` Expected: \"${entity.exact}\"`);\n console.log(` Found at AI offsets [${startOffset}:${endOffset}]: \"${extractedText}\"`);\n\n // Show context around the AI-provided offset\n const contextStart = Math.max(0, startOffset - 50);\n const contextEnd = Math.min(exact.length, endOffset + 50);\n const contextBefore = exact.substring(contextStart, startOffset);\n const contextAfter = exact.substring(endOffset, contextEnd);\n console.log(` Context: \"...${contextBefore}[${extractedText}]${contextAfter}...\"`);\n\n console.log(` Searching for exact match in resource...`);\n\n // Try to find using prefix/suffix context if provided\n let found = false;\n if (entity.prefix || entity.suffix) {\n console.log(` Using LLM-provided context for disambiguation:`);\n if (entity.prefix) console.log(` Prefix: \"${entity.prefix}\"`);\n if (entity.suffix) console.log(` Suffix: \"${entity.suffix}\"`);\n\n // Search for all occurrences and find the one with matching context\n let searchPos = 0;\n while ((searchPos = exact.indexOf(entity.exact, searchPos)) !== -1) {\n const candidatePrefix = exact.substring(Math.max(0, searchPos - 32), searchPos);\n const candidateSuffix = exact.substring(\n searchPos + entity.exact.length,\n Math.min(exact.length, searchPos + entity.exact.length + 32)\n );\n\n // Check if context matches (allowing for partial matches at boundaries)\n const prefixMatch = !entity.prefix || candidatePrefix.endsWith(entity.prefix);\n const suffixMatch = !entity.suffix || candidateSuffix.startsWith(entity.suffix);\n\n if (prefixMatch && suffixMatch) {\n console.log(` ✅ Found match using context at offset ${searchPos} (diff: ${searchPos - startOffset})`);\n console.log(` Candidate prefix: \"${candidatePrefix}\"`);\n console.log(` Candidate suffix: \"${candidateSuffix}\"`);\n startOffset = searchPos;\n endOffset = searchPos + entity.exact.length;\n found = true;\n break;\n }\n\n searchPos++;\n }\n\n if (!found) {\n console.log(` ⚠️ No occurrence found with matching context`);\n }\n }\n\n // Fallback to first occurrence if context didn't help\n if (!found) {\n const index = exact.indexOf(entity.exact);\n if (index !== -1) {\n console.log(` ⚠️ Using first occurrence at offset ${index} (diff: ${index - startOffset})`);\n startOffset = index;\n endOffset = index + entity.exact.length;\n } else {\n console.log(` ❌ Cannot find \"${entity.exact}\" anywhere in resource`);\n console.log(` Resource starts with: \"${exact.substring(0, 200)}...\"`);\n // If we still can't find it, skip this entity\n return null;\n }\n }\n } else {\n console.log(` ✅ Offsets correct`);\n }\n\n return {\n exact: entity.exact,\n entityType: entity.entityType,\n startOffset: startOffset,\n endOffset: endOffset,\n prefix: entity.prefix,\n 
suffix: entity.suffix\n };\n }).filter((entity: ExtractedEntity | null): entity is ExtractedEntity => {\n // Filter out nulls and ensure we have valid offsets\n if (entity === null) {\n console.log('❌ Filtered entity: null');\n return false;\n }\n if (entity.startOffset === undefined || entity.endOffset === undefined) {\n console.log(`❌ Filtered entity \"${entity.exact}\": missing offsets`);\n return false;\n }\n if (entity.startOffset < 0) {\n console.log(`❌ Filtered entity \"${entity.exact}\": negative startOffset (${entity.startOffset})`);\n return false;\n }\n if (entity.endOffset > exact.length) {\n console.log(`❌ Filtered entity \"${entity.exact}\": endOffset (${entity.endOffset}) > text length (${exact.length})`);\n return false;\n }\n\n // Verify the text at the offsets matches\n const extractedText = exact.substring(entity.startOffset, entity.endOffset);\n if (extractedText !== entity.exact) {\n console.log(`❌ Filtered entity \"${entity.exact}\": offset mismatch`);\n console.log(` Expected: \"${entity.exact}\"`);\n console.log(` Got at [${entity.startOffset}:${entity.endOffset}]: \"${extractedText}\"`);\n return false;\n }\n\n console.log(`✅ Accepted entity \"${entity.exact}\" at [${entity.startOffset}:${entity.endOffset}]`);\n return true;\n });\n } catch (error) {\n console.error('Failed to parse entity extraction response:', error);\n return [];\n }\n}","/**\n * Prompt builders for annotation detection motivations\n *\n * Provides static methods to build AI prompts for each Web Annotation motivation type.\n * Extracted from worker implementations to centralize prompt logic.\n */\n\nexport class MotivationPrompts {\n /**\n * Build a prompt for detecting comment-worthy passages\n *\n * @param content - The text content to analyze (will be truncated to 8000 chars)\n * @param instructions - Optional user-provided instructions\n * @param tone - Optional tone guidance (e.g., \"academic\", \"conversational\")\n * @param density - Optional target number of comments per 2000 words\n * @returns Formatted prompt string\n */\n static buildCommentPrompt(\n content: string,\n instructions?: string,\n tone?: string,\n density?: number\n ): string {\n let prompt: string;\n\n if (instructions) {\n // User provided specific instructions - minimal prompt, let instructions drive behavior\n const toneGuidance = tone ? ` Use a ${tone} tone.` : '';\n const densityGuidance = density\n ? `\\n\\nAim for approximately ${density} comments per 2000 words of text.`\n : ''; // Let user instructions determine density\n\n prompt = `Add comments to passages in this text following these instructions:\n\n${instructions}${toneGuidance}${densityGuidance}\n\nText to analyze:\n---\n${content.substring(0, 8000)}\n---\n\nReturn a JSON array of comments. Each comment must have:\n- \"exact\": the exact text passage being commented on (quoted verbatim from source)\n- \"start\": character offset where the passage starts\n- \"end\": character offset where the passage ends\n- \"prefix\": up to 32 characters of text immediately before the passage\n- \"suffix\": up to 32 characters of text immediately after the passage\n- \"comment\": your comment following the instructions above\n\nReturn ONLY a valid JSON array, no additional text or explanation.\n\nExample:\n[\n {\"exact\": \"the quarterly review meeting\", \"start\": 142, \"end\": 169, \"prefix\": \"We need to schedule \", \"suffix\": \" for next month.\", \"comment\": \"Who will lead this? 
Should we invite the external auditors?\"}\n]`;\n } else {\n // No specific instructions - fall back to explanatory/educational mode\n const toneGuidance = tone\n ? `\\n\\nTone: Use a ${tone} style in your comments.`\n : '';\n const densityGuidance = density\n ? `\\n- Aim for approximately ${density} comments per 2000 words`\n : `\\n- Aim for 3-8 comments per 2000 words (not too sparse or dense)`;\n\n prompt = `Identify passages in this text that would benefit from explanatory comments.\nFor each passage, provide contextual information, clarification, or background.${toneGuidance}\n\nGuidelines:\n- Select passages that reference technical terms, historical figures, complex concepts, or unclear references\n- Provide comments that ADD VALUE beyond restating the text\n- Focus on explanation, background, or connections to other ideas\n- Avoid obvious or trivial comments\n- Keep comments concise (1-3 sentences typically)${densityGuidance}\n\nText to analyze:\n---\n${content.substring(0, 8000)}\n---\n\nReturn a JSON array of comments. Each comment should have:\n- \"exact\": the exact text passage being commented on (quoted verbatim from source)\n- \"start\": character offset where the passage starts\n- \"end\": character offset where the passage ends\n- \"prefix\": up to 32 characters of text immediately before the passage\n- \"suffix\": up to 32 characters of text immediately after the passage\n- \"comment\": your explanatory comment (1-3 sentences, provide context/background/clarification)\n\nReturn ONLY a valid JSON array, no additional text or explanation.\n\nExample format:\n[\n {\"exact\": \"Ouranos\", \"start\": 52, \"end\": 59, \"prefix\": \"In the beginning, \", \"suffix\": \" ruled the universe\", \"comment\": \"Ouranos (also spelled Uranus) is the primordial Greek deity personifying the sky. In Hesiod's Theogony, he is the son and husband of Gaia (Earth) and father of the Titans.\"}\n]`;\n }\n\n return prompt;\n }\n\n /**\n * Build a prompt for detecting highlight-worthy passages\n *\n * @param content - The text content to analyze (will be truncated to 8000 chars)\n * @param instructions - Optional user-provided instructions\n * @param density - Optional target number of highlights per 2000 words\n * @returns Formatted prompt string\n */\n static buildHighlightPrompt(\n content: string,\n instructions?: string,\n density?: number\n ): string {\n let prompt: string;\n\n if (instructions) {\n // User provided specific instructions - minimal prompt, let instructions drive behavior\n const densityGuidance = density\n ? `\\n\\nAim for approximately ${density} highlights per 2000 words of text.`\n : ''; // Let user instructions determine density\n\n prompt = `Identify passages in this text to highlight following these instructions:\n\n${instructions}${densityGuidance}\n\nText to analyze:\n---\n${content.substring(0, 8000)}\n---\n\nReturn a JSON array of highlights. 
Each highlight must have:\n- \"exact\": the exact text passage to highlight (quoted verbatim from source)\n- \"start\": character offset where the passage starts\n- \"end\": character offset where the passage ends\n- \"prefix\": up to 32 characters of text immediately before the passage\n- \"suffix\": up to 32 characters of text immediately after the passage\n\nReturn ONLY a valid JSON array, no additional text or explanation.\n\nExample:\n[\n {\"exact\": \"revenue grew 45% year-over-year\", \"start\": 142, \"end\": 174, \"prefix\": \"In Q3 2024, \", \"suffix\": \", exceeding all forecasts.\"}\n]`;\n } else {\n // No specific instructions - fall back to importance/salience mode\n const densityGuidance = density\n ? `\\n- Aim for approximately ${density} highlights per 2000 words`\n : `\\n- Aim for 3-8 highlights per 2000 words (be selective)`;\n\n prompt = `Identify passages in this text that merit highlighting for their importance or salience.\nFocus on content that readers should notice and remember.\n\nGuidelines:\n- Highlight key claims, findings, or conclusions\n- Highlight important definitions, terminology, or concepts\n- Highlight notable quotes or particularly striking statements\n- Highlight critical decisions, action items, or turning points\n- Select passages that are SIGNIFICANT, not just interesting\n- Avoid trivial or obvious content${densityGuidance}\n\nText to analyze:\n---\n${content.substring(0, 8000)}\n---\n\nReturn a JSON array of highlights. Each highlight should have:\n- \"exact\": the exact text passage to highlight (quoted verbatim from source)\n- \"start\": character offset where the passage starts\n- \"end\": character offset where the passage ends\n- \"prefix\": up to 32 characters of text immediately before the passage\n- \"suffix\": up to 32 characters of text immediately after the passage\n\nReturn ONLY a valid JSON array, no additional text or explanation.\n\nExample format:\n[\n {\"exact\": \"we will discontinue support for legacy systems by March 2025\", \"start\": 52, \"end\": 113, \"prefix\": \"After careful consideration, \", \"suffix\": \". This decision affects\"}\n]`;\n }\n\n return prompt;\n }\n\n /**\n * Build a prompt for detecting assessment-worthy passages\n *\n * @param content - The text content to analyze (will be truncated to 8000 chars)\n * @param instructions - Optional user-provided instructions\n * @param tone - Optional tone guidance (e.g., \"critical\", \"supportive\")\n * @param density - Optional target number of assessments per 2000 words\n * @returns Formatted prompt string\n */\n static buildAssessmentPrompt(\n content: string,\n instructions?: string,\n tone?: string,\n density?: number\n ): string {\n let prompt: string;\n\n if (instructions) {\n // User provided specific instructions - minimal prompt, let instructions drive behavior\n const toneGuidance = tone ? ` Use a ${tone} tone.` : '';\n const densityGuidance = density\n ? `\\n\\nAim for approximately ${density} assessments per 2000 words of text.`\n : ''; // Let user instructions determine density\n\n prompt = `Assess passages in this text following these instructions:\n\n${instructions}${toneGuidance}${densityGuidance}\n\nText to analyze:\n---\n${content.substring(0, 8000)}\n---\n\nReturn a JSON array of assessments. 
Each assessment must have:\n- \"exact\": the exact text passage being assessed (quoted verbatim from source)\n- \"start\": character offset where the passage starts\n- \"end\": character offset where the passage ends\n- \"prefix\": up to 32 characters of text immediately before the passage\n- \"suffix\": up to 32 characters of text immediately after the passage\n- \"assessment\": your assessment following the instructions above\n\nReturn ONLY a valid JSON array, no additional text or explanation.\n\nExample:\n[\n {\"exact\": \"the quarterly revenue target\", \"start\": 142, \"end\": 169, \"prefix\": \"We established \", \"suffix\": \" for Q4 2024.\", \"assessment\": \"This target seems ambitious given market conditions. Consider revising based on recent trends.\"}\n]`;\n } else {\n // No specific instructions - fall back to analytical/evaluation mode\n const toneGuidance = tone\n ? `\\n\\nTone: Use a ${tone} style in your assessments.`\n : '';\n const densityGuidance = density\n ? `\\n- Aim for approximately ${density} assessments per 2000 words`\n : `\\n- Aim for 2-6 assessments per 2000 words (focus on key passages)`;\n\n prompt = `Identify passages in this text that merit critical assessment or evaluation.\nFor each passage, provide analysis of its validity, strength, or implications.${toneGuidance}\n\nGuidelines:\n- Select passages containing claims, arguments, conclusions, or assertions\n- Assess evidence quality, logical soundness, or practical implications\n- Provide assessments that ADD INSIGHT beyond restating the text\n- Focus on passages where evaluation would help readers form judgments\n- Keep assessments concise yet substantive (1-3 sentences typically)${densityGuidance}\n\nText to analyze:\n---\n${content.substring(0, 8000)}\n---\n\nReturn a JSON array of assessments. Each assessment should have:\n- \"exact\": the exact text passage being assessed (quoted verbatim from source)\n- \"start\": character offset where the passage starts\n- \"end\": character offset where the passage ends\n- \"prefix\": up to 32 characters of text immediately before the passage\n- \"suffix\": up to 32 characters of text immediately after the passage\n- \"assessment\": your analytical assessment (1-3 sentences, evaluate validity/strength/implications)\n\nReturn ONLY a valid JSON array, no additional text or explanation.\n\nExample format:\n[\n {\"exact\": \"AI will replace most jobs by 2030\", \"start\": 52, \"end\": 89, \"prefix\": \"Many experts predict that \", \"suffix\": \", fundamentally reshaping\", \"assessment\": \"This claim lacks nuance and supporting evidence. Employment patterns historically show job transformation rather than wholesale replacement. 
The timeline appears speculative without specific sector analysis.\"}\n]`;\n }\n\n return prompt;\n }\n\n /**\n * Build a prompt for detecting structural tags\n *\n * @param content - The full text content to analyze (NOT truncated for structural analysis)\n * @param category - The specific category to detect\n * @param schemaName - Human-readable schema name\n * @param schemaDescription - Schema description\n * @param schemaDomain - Schema domain\n * @param categoryDescription - Category description\n * @param categoryExamples - Example questions/guidance for this category\n * @returns Formatted prompt string\n */\n static buildTagPrompt(\n content: string,\n category: string,\n schemaName: string,\n schemaDescription: string,\n schemaDomain: string,\n categoryDescription: string,\n categoryExamples: string[]\n ): string {\n // Build prompt with schema context and category-specific guidance\n const prompt = `You are analyzing a text using the ${schemaName} framework.\n\nSchema: ${schemaDescription}\nDomain: ${schemaDomain}\n\nYour task: Identify passages that serve the structural role of \"${category}\".\n\nCategory: ${category}\nDescription: ${categoryDescription}\nKey questions:\n${categoryExamples.map(ex => `- ${ex}`).join('\\n')}\n\nGuidelines:\n- Focus on STRUCTURAL FUNCTION, not semantic content\n- A passage serves the \"${category}\" role if it performs this function in the document's structure\n- Look for passages that explicitly fulfill this role\n- Passages can be sentences, paragraphs, or sections\n- Aim for precision - only tag passages that clearly serve this structural role\n- Typical documents have 1-5 instances of each category (some may have 0)\n\nText to analyze:\n---\n${content}\n---\n\nReturn a JSON array of tags. Each tag should have:\n- \"exact\": the exact text passage (quoted verbatim from source)\n- \"start\": character offset where the passage starts\n- \"end\": character offset where the passage ends\n- \"prefix\": up to 32 characters of text immediately before the passage\n- \"suffix\": up to 32 characters of text immediately after the passage\n\nReturn ONLY a valid JSON array, no additional text or explanation.\n\nExample format:\n[\n {\"exact\": \"What duty did the defendant owe?\", \"start\": 142, \"end\": 175, \"prefix\": \"The central question is: \", \"suffix\": \" This question must be\"},\n {\"exact\": \"In tort law, a duty of care is established when...\", \"start\": 412, \"end\": 520, \"prefix\": \"Legal framework:\\\\n\", \"suffix\": \"\\\\n\\\\nApplying this standard\"}\n]`;\n\n return prompt;\n }\n}\n","/**\n * Response parsers for annotation detection motivations\n *\n * Provides static methods to parse and validate AI responses for each motivation type.\n * Includes offset validation and correction logic.\n * Extracted from worker implementations to centralize parsing logic.\n */\n\nimport { validateAndCorrectOffsets } from '@semiont/api-client';\n\n/**\n * Represents a detected comment with validated position\n */\nexport interface CommentMatch {\n exact: string;\n start: number;\n end: number;\n prefix?: string;\n suffix?: string;\n comment: string;\n}\n\n/**\n * Represents a detected highlight with validated position\n */\nexport interface HighlightMatch {\n exact: string;\n start: number;\n end: number;\n prefix?: string;\n suffix?: string;\n}\n\n/**\n * Represents a detected assessment with validated position\n */\nexport interface AssessmentMatch {\n exact: string;\n start: number;\n end: number;\n prefix?: string;\n suffix?: string;\n 
assessment: string;\n}\n\n/**\n * Represents a detected tag with validated position\n */\nexport interface TagMatch {\n exact: string;\n start: number;\n end: number;\n prefix?: string;\n suffix?: string;\n category: string;\n}\n\nexport class MotivationParsers {\n /**\n * Parse and validate AI response for comment detection\n *\n * @param response - Raw AI response string (may include markdown code fences)\n * @param content - Original content to validate offsets against\n * @returns Array of validated comment matches\n */\n static parseComments(response: string, content: string): CommentMatch[] {\n try {\n // Clean up markdown code fences if present\n let cleaned = response.trim();\n if (cleaned.startsWith('```')) {\n cleaned = cleaned.replace(/^```(?:json)?\\n?/, '').replace(/\\n?```$/, '');\n }\n\n const parsed = JSON.parse(cleaned);\n\n if (!Array.isArray(parsed)) {\n console.warn('[MotivationParsers] Comment response is not an array');\n return [];\n }\n\n // Validate and filter\n const valid = parsed.filter((c: any) =>\n c &&\n typeof c.exact === 'string' &&\n typeof c.start === 'number' &&\n typeof c.end === 'number' &&\n typeof c.comment === 'string' &&\n c.comment.trim().length > 0\n );\n\n console.log(`[MotivationParsers] Parsed ${valid.length} valid comments from ${parsed.length} total`);\n\n // Validate and correct AI's offsets, then extract proper context\n // AI sometimes returns offsets that don't match the actual text position\n const validatedComments: CommentMatch[] = [];\n\n for (const comment of valid) {\n try {\n const validated = validateAndCorrectOffsets(content, comment.start, comment.end, comment.exact);\n validatedComments.push({\n ...comment,\n start: validated.start,\n end: validated.end,\n prefix: validated.prefix,\n suffix: validated.suffix\n });\n } catch (error) {\n console.warn(`[MotivationParsers] Skipping invalid comment \"${comment.exact}\":`, error);\n // Skip this comment - AI hallucinated text that doesn't exist\n }\n }\n\n return validatedComments;\n } catch (error) {\n console.error('[MotivationParsers] Failed to parse AI comment response:', error);\n return [];\n }\n }\n\n /**\n * Parse and validate AI response for highlight detection\n *\n * @param response - Raw AI response string (may include markdown code fences)\n * @param content - Original content to validate offsets against\n * @returns Array of validated highlight matches\n */\n static parseHighlights(response: string, content: string): HighlightMatch[] {\n try {\n // Clean up response - remove markdown code fences if present\n let cleaned = response.trim();\n if (cleaned.startsWith('```json') || cleaned.startsWith('```')) {\n cleaned = cleaned.slice(cleaned.indexOf('\\n') + 1);\n const endIndex = cleaned.lastIndexOf('```');\n if (endIndex !== -1) {\n cleaned = cleaned.slice(0, endIndex);\n }\n }\n\n const parsed = JSON.parse(cleaned);\n if (!Array.isArray(parsed)) {\n console.warn('[MotivationParsers] Highlight response was not an array');\n return [];\n }\n\n // Validate and filter results\n const highlights = parsed.filter((h: any) =>\n h && typeof h.exact === 'string' &&\n typeof h.start === 'number' &&\n typeof h.end === 'number'\n );\n\n // Validate and correct AI's offsets, then extract proper context\n // AI sometimes returns offsets that don't match the actual text position\n const validatedHighlights: HighlightMatch[] = [];\n\n for (const highlight of highlights) {\n try {\n const validated = validateAndCorrectOffsets(content, highlight.start, highlight.end, highlight.exact);\n 
validatedHighlights.push({\n ...highlight,\n start: validated.start,\n end: validated.end,\n prefix: validated.prefix,\n suffix: validated.suffix\n });\n } catch (error) {\n console.warn(`[MotivationParsers] Skipping invalid highlight \"${highlight.exact}\":`, error);\n // Skip this highlight - AI hallucinated text that doesn't exist\n }\n }\n\n return validatedHighlights;\n } catch (error) {\n console.error('[MotivationParsers] Failed to parse AI highlight response:', error);\n console.error('Raw response:', response);\n return [];\n }\n }\n\n /**\n * Parse and validate AI response for assessment detection\n *\n * @param response - Raw AI response string (may include markdown code fences)\n * @param content - Original content to validate offsets against\n * @returns Array of validated assessment matches\n */\n static parseAssessments(response: string, content: string): AssessmentMatch[] {\n try {\n // Clean up response - remove markdown code fences if present\n let cleaned = response.trim();\n if (cleaned.startsWith('```json') || cleaned.startsWith('```')) {\n cleaned = cleaned.slice(cleaned.indexOf('\\n') + 1);\n const endIndex = cleaned.lastIndexOf('```');\n if (endIndex !== -1) {\n cleaned = cleaned.slice(0, endIndex);\n }\n }\n\n const parsed = JSON.parse(cleaned);\n if (!Array.isArray(parsed)) {\n console.warn('[MotivationParsers] Assessment response was not an array');\n return [];\n }\n\n // Validate and filter results\n const assessments = parsed.filter((a: any) =>\n a && typeof a.exact === 'string' &&\n typeof a.start === 'number' &&\n typeof a.end === 'number' &&\n typeof a.assessment === 'string'\n );\n\n // Validate and correct AI's offsets, then extract proper context\n // AI sometimes returns offsets that don't match the actual text position\n const validatedAssessments: AssessmentMatch[] = [];\n\n for (const assessment of assessments) {\n try {\n const validated = validateAndCorrectOffsets(content, assessment.start, assessment.end, assessment.exact);\n validatedAssessments.push({\n ...assessment,\n start: validated.start,\n end: validated.end,\n prefix: validated.prefix,\n suffix: validated.suffix\n });\n } catch (error) {\n console.warn(`[MotivationParsers] Skipping invalid assessment \"${assessment.exact}\":`, error);\n // Skip this assessment - AI hallucinated text that doesn't exist\n }\n }\n\n return validatedAssessments;\n } catch (error) {\n console.error('[MotivationParsers] Failed to parse AI assessment response:', error);\n console.error('Raw response:', response);\n return [];\n }\n }\n\n /**\n * Parse and validate AI response for tag detection\n * Note: Does NOT validate offsets - caller must do that with content\n *\n * @param response - Raw AI response string (may include markdown code fences)\n * @returns Array of tag matches (offsets not yet validated)\n */\n static parseTags(response: string): Omit<TagMatch, 'category'>[] {\n try {\n // Clean up markdown code fences if present\n let cleaned = response.trim();\n if (cleaned.startsWith('```')) {\n cleaned = cleaned.replace(/^```(?:json)?\\n?/, '').replace(/\\n?```$/, '');\n }\n\n const parsed = JSON.parse(cleaned);\n\n if (!Array.isArray(parsed)) {\n console.warn('[MotivationParsers] Tag response is not an array');\n return [];\n }\n\n // Validate and filter\n const valid = parsed.filter((t: any) =>\n t &&\n typeof t.exact === 'string' &&\n typeof t.start === 'number' &&\n typeof t.end === 'number' &&\n t.exact.trim().length > 0\n );\n\n console.log(`[MotivationParsers] Parsed ${valid.length} valid tags from 
${parsed.length} total`);\n\n return valid;\n } catch (error) {\n console.error('[MotivationParsers] Failed to parse AI tag response:', error);\n return [];\n }\n }\n\n /**\n * Validate tag offsets against content and add category\n * Helper for tag detection after initial parsing\n *\n * @param tags - Parsed tags without validated offsets\n * @param content - Original content to validate against\n * @param category - Category to assign to validated tags\n * @returns Array of validated tag matches\n */\n static validateTagOffsets(\n tags: Omit<TagMatch, 'category'>[],\n content: string,\n category: string\n ): TagMatch[] {\n const validatedTags: TagMatch[] = [];\n\n for (const tag of tags) {\n try {\n const validated = validateAndCorrectOffsets(content, tag.start, tag.end, tag.exact);\n validatedTags.push({\n ...tag,\n category,\n start: validated.start,\n end: validated.end,\n prefix: validated.prefix,\n suffix: validated.suffix\n });\n } catch (error) {\n console.warn(`[MotivationParsers] Skipping invalid tag for category \"${category}\":`, error);\n // Skip this tag - AI hallucinated text that doesn't exist\n }\n }\n\n return validatedTags;\n }\n}\n"],"mappings":";AAAA,OAAO,eAAe;AACtB,SAAS,4BAA4B;AAIrC,SAAS,gBAAgB,QAAwB;AAC/C,SAAO,qBAAqB,MAAM,KAAK;AACzC;AAGA,IAAI,kBAAoC;AAMxC,eAAsB,mBAAmB,QAA+C;AACtF,MAAI,iBAAiB;AACnB,WAAO;AAAA,EACT;AAEA,QAAM,kBAAkB,OAAO,SAAS;AACxC,MAAI,CAAC,iBAAiB;AACpB,UAAM,IAAI,MAAM,sDAAsD;AAAA,EACxE;AAGA,MAAI,SAAS,gBAAgB;AAC7B,MAAI,QAAQ,WAAW,IAAI,KAAK,OAAO,SAAS,GAAG,GAAG;AACpD,UAAM,aAAa,OAAO,MAAM,GAAG,EAAE;AACrC,UAAM,WAAW,QAAQ,IAAI,UAAU;AACvC,QAAI,CAAC,UAAU;AACb,YAAM,IAAI,MAAM,wBAAwB,UAAU,aAAa;AAAA,IACjE;AACA,aAAS;AAAA,EACX;AAEA,UAAQ,IAAI,4BAA4B;AAAA,IACtC,MAAM,gBAAgB;AAAA,IACtB,OAAO,gBAAgB;AAAA,IACvB,UAAU,gBAAgB;AAAA,IAC1B,WAAW,CAAC,CAAC;AAAA,EACf,CAAC;AAED,oBAAkB,IAAI,UAAU;AAAA,IAC9B;AAAA,IACA,SAAS,gBAAgB,YAAY,gBAAgB,WAAW;AAAA,EAClE,CAAC;AAED,UAAQ,IAAI,eAAe,gBAAgB,IAAI,gCAAgC,gBAAgB,KAAK,EAAE;AACtG,SAAO;AACT;AAKO,SAAS,kBAAkB,QAAmC;AACnE,QAAM,kBAAkB,OAAO,SAAS;AACxC,MAAI,CAAC,iBAAiB,OAAO;AAC3B,UAAM,IAAI,MAAM,2EAA2E;AAAA,EAC7F;AACA,SAAO,gBAAgB;AACzB;AAKA,eAAsB,aACpB,QACA,QACA,YAAoB,KACpB,cAAsB,KACL;AACjB,UAAQ,IAAI,2CAA2C,OAAO,QAAQ,cAAc,WAAW,SAAS,WAAW;AAEnH,QAAM,SAAS,MAAM,mBAAmB,MAAM;AAE9C,QAAM,WAAW,MAAM,OAAO,SAAS,OAAO;AAAA,IAC5C,OAAO,kBAAkB,MAAM;AAAA,IAC/B,YAAY;AAAA,IACZ;AAAA,IACA,UAAU;AAAA,MACR;AAAA,QACE,MAAM;AAAA,QACN,SAAS;AAAA,MACX;AAAA,IACF;AAAA,EACF,CAAC;AAED,UAAQ,IAAI,gDAAgD,SAAS,QAAQ,MAAM;AAEnF,QAAM,cAAc,SAAS,QAAQ,KAAK,OAAK,EAAE,SAAS,MAAM;AAEhE,MAAI,CAAC,eAAe,YAAY,SAAS,QAAQ;AAC/C,YAAQ,MAAM,gCAAgC,SAAS,OAAO;AAC9D,UAAM,IAAI,MAAM,uCAAuC;AAAA,EACzD;AAEA,UAAQ,IAAI,qCAAqC,YAAY,KAAK,MAAM;AACxE,SAAO,YAAY;AAErB;AAKA,eAAsB,0BACpB,OACA,aACA,QACA,YACA,QACA,SACA,aACA,WAC6C;AAC7C,UAAQ,IAAI,0CAA0C;AAAA,IACpD,OAAO,MAAM,UAAU,GAAG,GAAG;AAAA,IAC7B;AAAA,IACA,eAAe,CAAC,CAAC;AAAA,IACjB;AAAA,IACA,YAAY,CAAC,CAAC;AAAA,IACd;AAAA,IACA;AAAA,EACF,CAAC;AAED,QAAM,kBAAkB,OAAO,SAAS;AACxC,QAAM,WAAW,iBAAiB,QAAQ;AAC1C,UAAQ,IAAI,mBAAmB,UAAU,eAAe,iBAAiB,KAAK;AAG9E,QAAM,mBAAmB,eAAe;AACxC,QAAM,iBAAiB,aAAa;AAGpC,QAAM,sBAAsB,UAAU,WAAW,OAC7C;AAAA;AAAA,0CAA+C,gBAAgB,MAAM,CAAC,MACtE;AAGJ,MAAI,iBAAiB;AACrB,MAAI,SAAS,eAAe;AAC1B,UAAM,EAAE,QAAQ,UAAU,MAAM,IAAI,QAAQ;AAC5C,qBAAiB;AAAA;AAAA;AAAA;AAAA,EAEnB,SAAS,MAAM,MAAM,KAAK,EAAE;AAAA,KACzB,QAAQ;AAAA,EACX,QAAQ,GAAG,KAAK,QAAQ,EAAE;AAAA;AAAA;AAAA,EAG1B;AAGA,QAAM,SAAS,mDAAmD,KAAK;AAAA,EACvE,YAAY,SAAS,IAAI,gCAAgC,YAAY,KAAK,IAAI,CAAC,MAAM,EAAE;AAAA,EACvF,aAAa,uBAAuB,UAAU,KAAK,EAAE,GAAG,cAAc,GAAG,mBAAmB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAU5F,QAAM,gBAAgB,CAACA,cAAyD;
AAE9E,QAAI,UAAUA,UAAS,KAAK;AAC5B,QAAI,QAAQ,WAAW,aAAa,KAAK,QAAQ,WAAW,OAAO,GAAG;AACpE,gBAAU,QAAQ,MAAM,QAAQ,QAAQ,IAAI,IAAI,CAAC;AACjD,YAAM,WAAW,QAAQ,YAAY,KAAK;AAC1C,UAAI,aAAa,IAAI;AACnB,kBAAU,QAAQ,MAAM,GAAG,QAAQ;AAAA,MACrC;AAAA,IACF,WAAW,QAAQ,WAAW,KAAK,GAAG;AACpC,gBAAU,QAAQ,MAAM,CAAC;AACzB,YAAM,WAAW,QAAQ,YAAY,KAAK;AAC1C,UAAI,aAAa,IAAI;AACnB,kBAAU,QAAQ,MAAM,GAAG,QAAQ;AAAA,MACrC;AAAA,IACF;AAEA,cAAU,QAAQ,KAAK;AAIvB,WAAO;AAAA,MACL,OAAO;AAAA,MACP;AAAA,IACF;AAAA,EACF;AAEA,UAAQ,IAAI,wCAAwC,OAAO,QAAQ,UAAU,SAAS,kBAAkB,cAAc,cAAc;AACpI,QAAM,WAAW,MAAM,aAAa,QAAQ,QAAQ,gBAAgB,gBAAgB;AACpF,UAAQ,IAAI,6BAA6B,SAAS,QAAQ,QAAQ;AAElE,QAAM,SAAS,cAAc,QAAQ;AACrC,UAAQ,IAAI,kBAAkB;AAAA,IAC5B,UAAU,CAAC,CAAC,OAAO;AAAA,IACnB,aAAa,OAAO,OAAO;AAAA,IAC3B,YAAY,CAAC,CAAC,OAAO;AAAA,IACrB,eAAe,OAAO,SAAS;AAAA,EACjC,CAAC;AAED,SAAO;AACT;AAKA,eAAsB,wBACpB,cACA,SACA,aACA,QACiB;AAEjB,QAAM,mBAAmB,QAAQ,SAAS,MACtC,QAAQ,UAAU,GAAG,GAAI,IAAI,QAC7B;AAEJ,QAAM,SAAS,gEAAgE,YAAY;AAAA,EAC3F,YAAY,SAAS,IAAI,qBAAqB,YAAY,KAAK,IAAI,CAAC,KAAK,EAAE;AAAA;AAAA;AAAA,EAG3E,gBAAgB;AAAA;AAAA;AAIhB,SAAO,MAAM,aAAa,QAAQ,QAAQ,KAAK,GAAG;AACpD;AAKA,eAAsB,6BACpB,gBACA,QACA,YACA,gBAC0B;AAC1B,QAAM,SAAS,2BAA2B,cAAc,IAAI,aAAa,WAAW,UAAU,MAAM,EAAE,GAAG,iBAAiB,wBAAwB,cAAc,MAAM,EAAE;AAAA;AAAA;AAIxK,QAAM,WAAW,MAAM,aAAa,QAAQ,QAAQ,KAAK,GAAG;AAC5D,MAAI,CAAC,UAAU;AACb,WAAO;AAAA,EACT;AAGA,SAAO,SACJ,MAAM,IAAI,EACV,IAAI,UAAQ,KAAK,QAAQ,aAAa,EAAE,EAAE,KAAK,CAAC,EAChD,OAAO,UAAQ,KAAK,SAAS,CAAC,EAC9B,MAAM,GAAG,CAAC;AACf;;;ACxOA,eAAsB,gBACpB,OACA,aACA,QACA,+BAAwC,OACZ;AAC5B,UAAQ,IAAI,gCAAgC;AAAA,IAC1C,YAAY,MAAM;AAAA,IAClB,aAAa,MAAM,QAAQ,WAAW,IAAI,YAAY,IAAI,QAAM,OAAO,OAAO,WAAW,KAAK,GAAG,IAAI,IAAI,CAAC;AAAA,EAC5G,CAAC;AAED,QAAM,SAAS,MAAM,mBAAmB,MAAM;AAG9C,QAAM,yBAAyB,YAAY,IAAI,QAAM;AACnD,QAAI,OAAO,OAAO,UAAU;AAC1B,aAAO;AAAA,IACT;AACA,WAAO,GAAG,YAAY,GAAG,SAAS,SAAS,IACvC,GAAG,GAAG,IAAI,eAAe,GAAG,SAAS,MAAM,GAAG,CAAC,EAAE,KAAK,IAAI,CAAC,MAC3D,GAAG;AAAA,EACT,CAAC,EAAE,KAAK,IAAI;AAMZ,QAAM,+BAA+B,+BACjC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAoBA;AAAA;AAAA;AAIJ,QAAM,SAAS,2EAA2E,sBAAsB;AAAA,EAChH,4BAA4B;AAAA;AAAA;AAAA,EAG5B,KAAK;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAiBL,UAAQ,IAAI,+CAA+C,kBAAkB,MAAM,CAAC;AACpF,QAAM,WAAW,MAAM,OAAO,SAAS,OAAO;AAAA,IAC5C,OAAO,kBAAkB,MAAM;AAAA,IAC/B,YAAY;AAAA;AAAA,IACZ,aAAa;AAAA;AAAA,IACb,UAAU;AAAA,MACR;AAAA,QACE,MAAM;AAAA,QACN,SAAS;AAAA,MACX;AAAA,IACF;AAAA,EACF,CAAC;AACD,UAAQ,IAAI,gCAAgC;AAE5C,QAAM,cAAc,SAAS,QAAQ,KAAK,OAAK,EAAE,SAAS,MAAM;AAChE,MAAI,CAAC,eAAe,YAAY,SAAS,QAAQ;AAC/C,YAAQ,KAAK,+CAA+C;AAC5D,WAAO,CAAC;AAAA,EACV;AAEA,UAAQ,IAAI,0CAA0C,YAAY,KAAK,MAAM;AAE7E,MAAI;AAEF,QAAI,UAAU,YAAY,KAAK,KAAK;AACpC,QAAI,QAAQ,WAAW,KAAK,GAAG;AAC7B,gBAAU,QAAQ,QAAQ,oBAAoB,EAAE,EAAE,QAAQ,WAAW,EAAE;AAAA,IACzE;AAEA,UAAM,WAAW,KAAK,MAAM,OAAO;AACnC,YAAQ,IAAI,UAAU,SAAS,QAAQ,wBAAwB;AAG/D,QAAI,SAAS,gBAAgB,cAAc;AACzC,YAAM,WAAW,gCAAgC,SAAS,MAAM;AAChE,cAAQ,MAAM,UAAK,QAAQ,EAAE;AAC7B,YAAM,IAAI,MAAM,QAAQ;AAAA,IAC1B;AAGA,WAAO,SAAS,IAAI,CAAC,QAAa,QAAgB;AAChD,UAAI,cAAc,OAAO;AACzB,UAAI,YAAY,OAAO;AAEvB,cAAQ,IAAI;AAAA,UAAa,MAAM,CAAC,IAAI,SAAS,MAAM,GAAG;AACtD,cAAQ,IAAI,WAAW,OAAO,UAAU,EAAE;AAC1C,cAAQ,IAAI,YAAY,OAAO,KAAK,GAAG;AACvC,cAAQ,IAAI,uBAAuB,WAAW,KAAK,SAAS,GAAG;AAG/D,YAAM,gBAAgB,MAAM,UAAU,aAAa,SAAS;AAG5D,UAAI,kBAAkB,OAAO,OAAO;AAClC,gBAAQ,IAAI,kCAAwB;AACpC,gBAAQ,IAAI,gBAAgB,OAAO,KAAK,GAAG;AAC3C,gBAAQ,IAAI,0BAA0B,WAAW,IAAI,SAAS,OAAO,aAAa,GAAG;AAGrF,cAAM,eAAe,KAAK,IAAI,GAAG,cAAc,EAAE;AACjD,cAAM,aAAa,KAAK,IAAI,MAAM,QAAQ,YAAY,EAAE;AACxD,cAAM,gBAAgB,MAAM,UAAU,cAAc,WAAW;AAC/D,cAAM,eAAe,MAAM,UAAU,WAAW,UAAU;AAC1D,gBAAQ,
IAAI,kBAAkB,aAAa,IAAI,aAAa,IAAI,YAAY,MAAM;AAElF,gBAAQ,IAAI,4CAA4C;AAGxD,YAAI,QAAQ;AACZ,YAAI,OAAO,UAAU,OAAO,QAAQ;AAClC,kBAAQ,IAAI,kDAAkD;AAC9D,cAAI,OAAO,OAAQ,SAAQ,IAAI,gBAAgB,OAAO,MAAM,GAAG;AAC/D,cAAI,OAAO,OAAQ,SAAQ,IAAI,gBAAgB,OAAO,MAAM,GAAG;AAG/D,cAAI,YAAY;AAChB,kBAAQ,YAAY,MAAM,QAAQ,OAAO,OAAO,SAAS,OAAO,IAAI;AAClE,kBAAM,kBAAkB,MAAM,UAAU,KAAK,IAAI,GAAG,YAAY,EAAE,GAAG,SAAS;AAC9E,kBAAM,kBAAkB,MAAM;AAAA,cAC5B,YAAY,OAAO,MAAM;AAAA,cACzB,KAAK,IAAI,MAAM,QAAQ,YAAY,OAAO,MAAM,SAAS,EAAE;AAAA,YAC7D;AAGA,kBAAM,cAAc,CAAC,OAAO,UAAU,gBAAgB,SAAS,OAAO,MAAM;AAC5E,kBAAM,cAAc,CAAC,OAAO,UAAU,gBAAgB,WAAW,OAAO,MAAM;AAE9E,gBAAI,eAAe,aAAa;AAC9B,sBAAQ,IAAI,gDAA2C,SAAS,WAAW,YAAY,WAAW,GAAG;AACrG,sBAAQ,IAAI,0BAA0B,eAAe,GAAG;AACxD,sBAAQ,IAAI,0BAA0B,eAAe,GAAG;AACxD,4BAAc;AACd,0BAAY,YAAY,OAAO,MAAM;AACrC,sBAAQ;AACR;AAAA,YACF;AAEA;AAAA,UACF;AAEA,cAAI,CAAC,OAAO;AACV,oBAAQ,IAAI,2DAAiD;AAAA,UAC/D;AAAA,QACF;AAGA,YAAI,CAAC,OAAO;AACV,gBAAM,QAAQ,MAAM,QAAQ,OAAO,KAAK;AACxC,cAAI,UAAU,IAAI;AAChB,oBAAQ,IAAI,oDAA0C,KAAK,WAAW,QAAQ,WAAW,GAAG;AAC5F,0BAAc;AACd,wBAAY,QAAQ,OAAO,MAAM;AAAA,UACnC,OAAO;AACL,oBAAQ,IAAI,yBAAoB,OAAO,KAAK,wBAAwB;AACpE,oBAAQ,IAAI,4BAA4B,MAAM,UAAU,GAAG,GAAG,CAAC,MAAM;AAErE,mBAAO;AAAA,UACT;AAAA,QACF;AAAA,MACF,OAAO;AACL,gBAAQ,IAAI,0BAAqB;AAAA,MACnC;AAEA,aAAO;AAAA,QACL,OAAO,OAAO;AAAA,QACd,YAAY,OAAO;AAAA,QACnB;AAAA,QACA;AAAA,QACA,QAAQ,OAAO;AAAA,QACf,QAAQ,OAAO;AAAA,MACjB;AAAA,IACF,CAAC,EAAE,OAAO,CAAC,WAA8D;AAEvE,UAAI,WAAW,MAAM;AACnB,gBAAQ,IAAI,8BAAyB;AACrC,eAAO;AAAA,MACT;AACA,UAAI,OAAO,gBAAgB,UAAa,OAAO,cAAc,QAAW;AACtE,gBAAQ,IAAI,2BAAsB,OAAO,KAAK,oBAAoB;AAClE,eAAO;AAAA,MACT;AACA,UAAI,OAAO,cAAc,GAAG;AAC1B,gBAAQ,IAAI,2BAAsB,OAAO,KAAK,4BAA4B,OAAO,WAAW,GAAG;AAC/F,eAAO;AAAA,MACT;AACA,UAAI,OAAO,YAAY,MAAM,QAAQ;AACnC,gBAAQ,IAAI,2BAAsB,OAAO,KAAK,iBAAiB,OAAO,SAAS,oBAAoB,MAAM,MAAM,GAAG;AAClH,eAAO;AAAA,MACT;AAGA,YAAM,gBAAgB,MAAM,UAAU,OAAO,aAAa,OAAO,SAAS;AAC1E,UAAI,kBAAkB,OAAO,OAAO;AAClC,gBAAQ,IAAI,2BAAsB,OAAO,KAAK,oBAAoB;AAClE,gBAAQ,IAAI,iBAAiB,OAAO,KAAK,GAAG;AAC5C,gBAAQ,IAAI,cAAc,OAAO,WAAW,IAAI,OAAO,SAAS,OAAO,aAAa,GAAG;AACvF,eAAO;AAAA,MACT;AAEA,cAAQ,IAAI,2BAAsB,OAAO,KAAK,SAAS,OAAO,WAAW,IAAI,OAAO,SAAS,GAAG;AAChG,aAAO;AAAA,IACT,CAAC;AAAA,EACH,SAAS,OAAO;AACd,YAAQ,MAAM,+CAA+C,KAAK;AAClE,WAAO,CAAC;AAAA,EACV;AACF;;;AChQO,IAAM,oBAAN,MAAwB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAU7B,OAAO,mBACL,SACA,cACA,MACA,SACQ;AACR,QAAI;AAEJ,QAAI,cAAc;AAEhB,YAAM,eAAe,OAAO,UAAU,IAAI,WAAW;AACrD,YAAM,kBAAkB,UACpB;AAAA;AAAA,wBAA6B,OAAO,sCACpC;AAEJ,eAAS;AAAA;AAAA,EAEb,YAAY,GAAG,YAAY,GAAG,eAAe;AAAA;AAAA;AAAA;AAAA,EAI7C,QAAQ,UAAU,GAAG,GAAI,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAiBxB,OAAO;AAEL,YAAM,eAAe,OACjB;AAAA;AAAA,cAAmB,IAAI,6BACvB;AACJ,YAAM,kBAAkB,UACpB;AAAA,0BAA6B,OAAO,6BACpC;AAAA;AAEJ,eAAS;AAAA,iFACkE,YAAY;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,mDAO1C,eAAe;AAAA;AAAA;AAAA;AAAA,EAIhE,QAAQ,UAAU,GAAG,GAAI,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAiBxB;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,OAAO,qBACL,SACA,cACA,SACQ;AACR,QAAI;AAEJ,QAAI,cAAc;AAEhB,YAAM,kBAAkB,UACpB;AAAA;AAAA,wBAA6B,OAAO,wCACpC;AAEJ,eAAS;AAAA;AAAA,EAEb,YAAY,GAAG,eAAe;AAAA;AAAA;AAAA;AAAA,EAI9B,QAAQ,UAAU,GAAG,GAAI,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAgBxB,OAAO;AAEL,YAAM,kBAAkB,UACpB;AAAA,0BAA6B,OAAO,+BACpC;AAAA;AAEJ,eAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,oCASqB,eAAe;AAAA;AAAA;AAAA;AAAA,EAIjD,QAAQ,UAAU,GAAG,GAAI,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAgBxB;
AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,OAAO,sBACL,SACA,cACA,MACA,SACQ;AACR,QAAI;AAEJ,QAAI,cAAc;AAEhB,YAAM,eAAe,OAAO,UAAU,IAAI,WAAW;AACrD,YAAM,kBAAkB,UACpB;AAAA;AAAA,wBAA6B,OAAO,yCACpC;AAEJ,eAAS;AAAA;AAAA,EAEb,YAAY,GAAG,YAAY,GAAG,eAAe;AAAA;AAAA;AAAA;AAAA,EAI7C,QAAQ,UAAU,GAAG,GAAI,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAiBxB,OAAO;AAEL,YAAM,eAAe,OACjB;AAAA;AAAA,cAAmB,IAAI,gCACvB;AACJ,YAAM,kBAAkB,UACpB;AAAA,0BAA6B,OAAO,gCACpC;AAAA;AAEJ,eAAS;AAAA,gFACiE,YAAY;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,sEAOtB,eAAe;AAAA;AAAA;AAAA;AAAA,EAInF,QAAQ,UAAU,GAAG,GAAI,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAiBxB;AAEA,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,OAAO,eACL,SACA,UACA,YACA,mBACA,cACA,qBACA,kBACQ;AAER,UAAM,SAAS,sCAAsC,UAAU;AAAA;AAAA,UAEzD,iBAAiB;AAAA,UACjB,YAAY;AAAA;AAAA,kEAE4C,QAAQ;AAAA;AAAA,YAE9D,QAAQ;AAAA,eACL,mBAAmB;AAAA;AAAA,EAEhC,iBAAiB,IAAI,QAAM,KAAK,EAAE,EAAE,EAAE,KAAK,IAAI,CAAC;AAAA;AAAA;AAAA;AAAA,0BAIxB,QAAQ;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQhC,OAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAkBL,WAAO;AAAA,EACT;AACF;;;ACvUA,SAAS,iCAAiC;AAiDnC,IAAM,oBAAN,MAAwB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQ7B,OAAO,cAAc,UAAkB,SAAiC;AACtE,QAAI;AAEF,UAAI,UAAU,SAAS,KAAK;AAC5B,UAAI,QAAQ,WAAW,KAAK,GAAG;AAC7B,kBAAU,QAAQ,QAAQ,oBAAoB,EAAE,EAAE,QAAQ,WAAW,EAAE;AAAA,MACzE;AAEA,YAAM,SAAS,KAAK,MAAM,OAAO;AAEjC,UAAI,CAAC,MAAM,QAAQ,MAAM,GAAG;AAC1B,gBAAQ,KAAK,sDAAsD;AACnE,eAAO,CAAC;AAAA,MACV;AAGA,YAAM,QAAQ,OAAO;AAAA,QAAO,CAAC,MAC3B,KACA,OAAO,EAAE,UAAU,YACnB,OAAO,EAAE,UAAU,YACnB,OAAO,EAAE,QAAQ,YACjB,OAAO,EAAE,YAAY,YACrB,EAAE,QAAQ,KAAK,EAAE,SAAS;AAAA,MAC5B;AAEA,cAAQ,IAAI,8BAA8B,MAAM,MAAM,wBAAwB,OAAO,MAAM,QAAQ;AAInG,YAAM,oBAAoC,CAAC;AAE3C,iBAAW,WAAW,OAAO;AAC3B,YAAI;AACF,gBAAM,YAAY,0BAA0B,SAAS,QAAQ,OAAO,QAAQ,KAAK,QAAQ,KAAK;AAC9F,4BAAkB,KAAK;AAAA,YACrB,GAAG;AAAA,YACH,OAAO,UAAU;AAAA,YACjB,KAAK,UAAU;AAAA,YACf,QAAQ,UAAU;AAAA,YAClB,QAAQ,UAAU;AAAA,UACpB,CAAC;AAAA,QACH,SAAS,OAAO;AACd,kBAAQ,KAAK,iDAAiD,QAAQ,KAAK,MAAM,KAAK;AAAA,QAExF;AAAA,MACF;AAEA,aAAO;AAAA,IACT,SAAS,OAAO;AACd,cAAQ,MAAM,4DAA4D,KAAK;AAC/E,aAAO,CAAC;AAAA,IACV;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,gBAAgB,UAAkB,SAAmC;AAC1E,QAAI;AAEF,UAAI,UAAU,SAAS,KAAK;AAC5B,UAAI,QAAQ,WAAW,SAAS,KAAK,QAAQ,WAAW,KAAK,GAAG;AAC9D,kBAAU,QAAQ,MAAM,QAAQ,QAAQ,IAAI,IAAI,CAAC;AACjD,cAAM,WAAW,QAAQ,YAAY,KAAK;AAC1C,YAAI,aAAa,IAAI;AACnB,oBAAU,QAAQ,MAAM,GAAG,QAAQ;AAAA,QACrC;AAAA,MACF;AAEA,YAAM,SAAS,KAAK,MAAM,OAAO;AACjC,UAAI,CAAC,MAAM,QAAQ,MAAM,GAAG;AAC1B,gBAAQ,KAAK,yDAAyD;AACtE,eAAO,CAAC;AAAA,MACV;AAGA,YAAM,aAAa,OAAO;AAAA,QAAO,CAAC,MAChC,KAAK,OAAO,EAAE,UAAU,YACxB,OAAO,EAAE,UAAU,YACnB,OAAO,EAAE,QAAQ;AAAA,MACnB;AAIA,YAAM,sBAAwC,CAAC;AAE/C,iBAAW,aAAa,YAAY;AAClC,YAAI;AACF,gBAAM,YAAY,0BAA0B,SAAS,UAAU,OAAO,UAAU,KAAK,UAAU,KAAK;AACpG,8BAAoB,KAAK;AAAA,YACvB,GAAG;AAAA,YACH,OAAO,UAAU;AAAA,YACjB,KAAK,UAAU;AAAA,YACf,QAAQ,UAAU;AAAA,YAClB,QAAQ,UAAU;AAAA,UACpB,CAAC;AAAA,QACH,SAAS,OAAO;AACd,kBAAQ,KAAK,mDAAmD,UAAU,KAAK,MAAM,KAAK;AAAA,QAE5F;AAAA,MACF;AAEA,aAAO;AAAA,IACT,SAAS,OAAO;AACd,cAAQ,MAAM,8DAA8D,KAAK;AACjF,cAAQ,MAAM,iBAAiB,QAAQ;AACvC,aAAO,CAAC;AAAA,IACV;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,iBAAiB,UAAkB,SAAoC;AAC5E,QAAI;AAEF,UAAI,UAAU,SAAS,KAAK;AAC5B,UAAI,QAAQ,WAAW,SAAS,KAAK,QAAQ,WAAW,KAAK,GAAG;AAC9D,kBAAU,QAAQ,MAAM,QAAQ,QAAQ,IAAI,IAAI,CAAC;AACjD,cAAM,WAAW,QAAQ,YAAY,KAAK;AAC1C,YAAI,aAAa,IAAI;AACnB,oBAAU,QAAQ,MAAM,GAAG,QAAQ;AAAA,QACrC;AAAA,MACF
;AAEA,YAAM,SAAS,KAAK,MAAM,OAAO;AACjC,UAAI,CAAC,MAAM,QAAQ,MAAM,GAAG;AAC1B,gBAAQ,KAAK,0DAA0D;AACvE,eAAO,CAAC;AAAA,MACV;AAGA,YAAM,cAAc,OAAO;AAAA,QAAO,CAAC,MACjC,KAAK,OAAO,EAAE,UAAU,YACxB,OAAO,EAAE,UAAU,YACnB,OAAO,EAAE,QAAQ,YACjB,OAAO,EAAE,eAAe;AAAA,MAC1B;AAIA,YAAM,uBAA0C,CAAC;AAEjD,iBAAW,cAAc,aAAa;AACpC,YAAI;AACF,gBAAM,YAAY,0BAA0B,SAAS,WAAW,OAAO,WAAW,KAAK,WAAW,KAAK;AACvG,+BAAqB,KAAK;AAAA,YACxB,GAAG;AAAA,YACH,OAAO,UAAU;AAAA,YACjB,KAAK,UAAU;AAAA,YACf,QAAQ,UAAU;AAAA,YAClB,QAAQ,UAAU;AAAA,UACpB,CAAC;AAAA,QACH,SAAS,OAAO;AACd,kBAAQ,KAAK,oDAAoD,WAAW,KAAK,MAAM,KAAK;AAAA,QAE9F;AAAA,MACF;AAEA,aAAO;AAAA,IACT,SAAS,OAAO;AACd,cAAQ,MAAM,+DAA+D,KAAK;AAClF,cAAQ,MAAM,iBAAiB,QAAQ;AACvC,aAAO,CAAC;AAAA,IACV;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,OAAO,UAAU,UAAgD;AAC/D,QAAI;AAEF,UAAI,UAAU,SAAS,KAAK;AAC5B,UAAI,QAAQ,WAAW,KAAK,GAAG;AAC7B,kBAAU,QAAQ,QAAQ,oBAAoB,EAAE,EAAE,QAAQ,WAAW,EAAE;AAAA,MACzE;AAEA,YAAM,SAAS,KAAK,MAAM,OAAO;AAEjC,UAAI,CAAC,MAAM,QAAQ,MAAM,GAAG;AAC1B,gBAAQ,KAAK,kDAAkD;AAC/D,eAAO,CAAC;AAAA,MACV;AAGA,YAAM,QAAQ,OAAO;AAAA,QAAO,CAAC,MAC3B,KACA,OAAO,EAAE,UAAU,YACnB,OAAO,EAAE,UAAU,YACnB,OAAO,EAAE,QAAQ,YACjB,EAAE,MAAM,KAAK,EAAE,SAAS;AAAA,MAC1B;AAEA,cAAQ,IAAI,8BAA8B,MAAM,MAAM,oBAAoB,OAAO,MAAM,QAAQ;AAE/F,aAAO;AAAA,IACT,SAAS,OAAO;AACd,cAAQ,MAAM,wDAAwD,KAAK;AAC3E,aAAO,CAAC;AAAA,IACV;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,OAAO,mBACL,MACA,SACA,UACY;AACZ,UAAM,gBAA4B,CAAC;AAEnC,eAAW,OAAO,MAAM;AACtB,UAAI;AACF,cAAM,YAAY,0BAA0B,SAAS,IAAI,OAAO,IAAI,KAAK,IAAI,KAAK;AAClF,sBAAc,KAAK;AAAA,UACjB,GAAG;AAAA,UACH;AAAA,UACA,OAAO,UAAU;AAAA,UACjB,KAAK,UAAU;AAAA,UACf,QAAQ,UAAU;AAAA,UAClB,QAAQ,UAAU;AAAA,QACpB,CAAC;AAAA,MACH,SAAS,OAAO;AACd,gBAAQ,KAAK,0DAA0D,QAAQ,MAAM,KAAK;AAAA,MAE5F;AAAA,IACF;AAEA,WAAO;AAAA,EACT;AACF;","names":["response"]}
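The dist/index.js.map above embeds the package sources (sourcesContent). One technique worth noting there: extractEntities in entity-extractor.ts does not trust the model-reported character offsets. It re-reads the span at the reported position, and on a mismatch it rescans the document for an occurrence of the exact text whose 32-character neighbourhood matches the model-supplied prefix/suffix, falling back to the first occurrence and otherwise dropping the entity. A minimal standalone sketch of that correction step (the function name and types are illustrative, not exports of this package):

interface Located {
  start: number;
  end: number;
}

// Trust the reported offset only if the text there equals `exact`;
// otherwise disambiguate among occurrences using the supplied context.
function correctOffsets(
  text: string,
  exact: string,
  reportedStart: number,
  prefix?: string,
  suffix?: string
): Located | null {
  if (text.substring(reportedStart, reportedStart + exact.length) === exact) {
    return { start: reportedStart, end: reportedStart + exact.length };
  }
  let pos = 0;
  while ((pos = text.indexOf(exact, pos)) !== -1) {
    const before = text.substring(Math.max(0, pos - 32), pos);
    const after = text.substring(pos + exact.length, pos + exact.length + 32);
    // An occurrence counts when its neighbourhood ends with `prefix`
    // and starts with `suffix` (missing context always matches).
    if ((!prefix || before.endsWith(prefix)) && (!suffix || after.startsWith(suffix))) {
      return { start: pos, end: pos + exact.length };
    }
    pos++;
  }
  // Context did not disambiguate: fall back to the first occurrence,
  // or signal the caller to filter the entity out entirely.
  const first = text.indexOf(exact);
  return first === -1 ? null : { start: first, end: first + exact.length };
}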
package/package.json
ADDED
@@ -0,0 +1,53 @@
+{
+  "name": "@semiont/inference",
+  "version": "0.2.28-build.40",
+  "type": "module",
+  "description": "AI inference capabilities for entity extraction, text generation, and resource creation",
+  "main": "./dist/index.js",
+  "types": "./dist/index.d.ts",
+  "exports": {
+    ".": {
+      "types": "./dist/index.d.ts",
+      "import": "./dist/index.js"
+    }
+  },
+  "files": [
+    "dist",
+    "README.md"
+  ],
+  "scripts": {
+    "build": "npm run typecheck && tsup",
+    "typecheck": "tsc --noEmit",
+    "clean": "rm -rf dist",
+    "test": "vitest run",
+    "test:watch": "vitest"
+  },
+  "dependencies": {
+    "@anthropic-ai/sdk": "^0.63.0",
+    "@semiont/api-client": "*",
+    "@semiont/core": "*"
+  },
+  "devDependencies": {
+    "tsup": "^8.0.1",
+    "typescript": "^5.6.3",
+    "vitest": "^3.2.4"
+  },
+  "publishConfig": {
+    "access": "public"
+  },
+  "keywords": [
+    "ai",
+    "inference",
+    "entity-extraction",
+    "llm",
+    "anthropic",
+    "text-generation"
+  ],
+  "author": "The AI Alliance",
+  "license": "Apache-2.0",
+  "repository": {
+    "type": "git",
+    "url": "https://github.com/The-AI-Alliance/semiont.git",
+    "directory": "packages/inference"
+  }
+}
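Given the exports map above, the package is ESM-only and consumed through the bare specifier. A minimal consumer sketch, assuming the index re-exports the functions visible in the bundled sources (the config literal is illustrative; per factory.ts, an apiKey of the form ${VAR} is expanded from process.env before the Anthropic client is created):

import { generateText, extractEntities } from '@semiont/inference';
import type { EnvironmentConfig } from '@semiont/core';

// Hypothetical environment config; only the fields read by factory.ts are shown.
const config = {
  services: {
    inference: {
      type: 'anthropic',
      model: 'claude-sonnet-example', // illustrative model id
      apiKey: '${ANTHROPIC_API_KEY}', // expanded by getInferenceClient()
    },
  },
} as unknown as EnvironmentConfig;

const text = await generateText('One-line summary of the Semiont project.', config, 200, 0.5);
const entities = await extractEntities(text, ['Person', 'Organization'], config);
console.log(text, entities);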