@warmdrift/kgauto 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1 @@
+ {"version":3,"sources":["../src/tokenizer.ts","../profiles.json","../src/profiles.ts","../src/policies.ts","../src/adapter.ts","../src/logger.ts"],"sourcesContent":["/**\n * Token counting — lightweight estimation for token budgeting.\n *\n * Uses char/4 heuristic by default (within 15% of tiktoken for English).\n * The adapter's 10% safety margin on context budget absorbs the error.\n *\n * For precise counting, callers can inject a custom tokenizer via setTokenizer().\n * Example: setTokenizer(text => tiktoken.encode(text).length)\n */\n\nimport type { ToolDefinition, Message } from './types';\n\n/** Average tokens per tool definition (name + description + parameters schema). */\nexport const AVG_TOKENS_PER_TOOL = 350;\n\n/** Default tokenizer: chars/3.5 for English (slightly conservative). */\nlet _tokenizer: (text: string) => number = (text: string) =>\n Math.max(0, Math.ceil(text.length / 3.5));\n\n/**\n * Override the default tokenizer with a precise one.\n *\n * Example with js-tiktoken:\n * import { encodingForModel } from 'js-tiktoken';\n * const enc = encodingForModel('gpt-4o');\n * setTokenizer(text => enc.encode(text).length);\n */\nexport function setTokenizer(fn: (text: string) => number): void {\n _tokenizer = fn;\n}\n\n/** Count tokens in a string. */\nexport function countTokens(text: string): number {\n return _tokenizer(text);\n}\n\n/** Estimate total tokens consumed by tool definitions. */\nexport function estimateToolTokens(tools: ToolDefinition[]): number {\n let total = 0;\n for (const tool of tools) {\n try {\n const text = JSON.stringify(tool);\n total += countTokens(text);\n } catch {\n total += AVG_TOKENS_PER_TOOL;\n }\n }\n return total;\n}\n\n/** Estimate total tokens in a message array. */\nexport function estimateMessagesTokens(messages: Message[]): number {\n let total = 0;\n for (const msg of messages) {\n total += countTokens(msg.content) + 4; // +4 for role/formatting overhead\n }\n return total;\n}\n","{\n \"claude-opus-4-6\": {\n \"provider\": \"anthropic\",\n \"status\": \"current\",\n \"max_tools\": 20,\n \"max_context_tokens\": 1000000,\n \"max_output_tokens\": 128000,\n \"parallel_tool_calls\": true,\n \"output_modes\": [\n \"generateText\",\n \"generateObject\"\n ],\n \"prompt_rules\": [],\n \"known_failures\": [\n \"refusal returns stop_reason 'refusal' with schema-violating output\",\n \"rate limits pooled across all Opus 4.x versions\"\n ],\n \"strengths\": [\n \"complex_reasoning\",\n \"judgment\",\n \"nuance\",\n \"reliability\",\n \"1m_context\",\n \"adaptive_thinking\"\n ],\n \"weaknesses\": [\n \"cost\",\n \"latency\"\n ],\n \"cost_input_per_1m\": 5.0,\n \"cost_output_per_1m\": 25.0,\n \"step_limit_default\": 10,\n \"notes\": \"Current frontier. 1M context. Adaptive thinking. Fast mode available at 6x rates. 
Auditor model for KG.\"\n },\n \"claude-sonnet-4-6\": {\n \"provider\": \"anthropic\",\n \"status\": \"current\",\n \"max_tools\": 20,\n \"max_context_tokens\": 1000000,\n \"max_output_tokens\": 64000,\n \"parallel_tool_calls\": true,\n \"output_modes\": [\n \"generateText\",\n \"generateObject\"\n ],\n \"prompt_rules\": [],\n \"known_failures\": [\n \"refusal returns stop_reason 'refusal' with schema-violating output\",\n \"rate limits pooled across all Sonnet 4.x versions\",\n \"pricing jumps to $6/$22.50 per 1M tokens above 200k context\"\n ],\n \"strengths\": [\n \"structured_output\",\n \"tool_use\",\n \"instruction_following\",\n \"reliability\",\n \"1m_context\",\n \"adaptive_thinking\"\n ],\n \"weaknesses\": [\n \"cost_for_simple_tasks\"\n ],\n \"cost_input_per_1m\": 3.0,\n \"cost_output_per_1m\": 15.0,\n \"step_limit_default\": 10,\n \"notes\": \"Current recommended workhorse. 1M context. generateObject works with complex schemas. Adaptive thinking.\"\n },\n \"claude-haiku-4.5\": {\n \"provider\": \"anthropic\",\n \"status\": \"current\",\n \"max_tools\": 20,\n \"max_context_tokens\": 200000,\n \"max_output_tokens\": 64000,\n \"parallel_tool_calls\": true,\n \"output_modes\": [\n \"generateText\",\n \"generateObject\"\n ],\n \"prompt_rules\": [],\n \"known_failures\": [\n \"refusal returns stop_reason 'refusal' with schema-violating output\",\n \"no adaptive thinking \\u2014 only extended thinking\"\n ],\n \"strengths\": [\n \"speed\",\n \"classification\",\n \"simple_routing\",\n \"cost\",\n \"structured_output\"\n ],\n \"weaknesses\": [\n \"complex_reasoning\",\n \"nuance\",\n \"200k_context_limit\"\n ],\n \"cost_input_per_1m\": 1.0,\n \"cost_output_per_1m\": 5.0,\n \"step_limit_default\": 6,\n \"notes\": \"Fast and cheap. Good for classification, routing, simple tasks. 200k context (not 1M).\"\n },\n \"claude-sonnet-4\": {\n \"provider\": \"anthropic\",\n \"status\": \"legacy\",\n \"max_tools\": 20,\n \"max_context_tokens\": 200000,\n \"max_output_tokens\": 64000,\n \"parallel_tool_calls\": true,\n \"output_modes\": [\n \"generateText\",\n \"generateObject\"\n ],\n \"prompt_rules\": [],\n \"known_failures\": [\n \"1M context beta retiring April 30 2026 \\u2014 requests over 200k will error\",\n \"refusal returns stop_reason 'refusal' with schema-violating output\"\n ],\n \"strengths\": [\n \"structured_output\",\n \"tool_use\",\n \"instruction_following\",\n \"reliability\"\n ],\n \"weaknesses\": [\n \"legacy\",\n \"cost_for_simple_tasks\"\n ],\n \"cost_input_per_1m\": 3.0,\n \"cost_output_per_1m\": 15.0,\n \"step_limit_default\": 10,\n \"notes\": \"Legacy \\u2014 use claude-sonnet-4-6 instead. 1M context beta expiring.\"\n },\n \"claude-opus-4\": {\n \"provider\": \"anthropic\",\n \"status\": \"legacy\",\n \"max_tools\": 20,\n \"max_context_tokens\": 200000,\n \"max_output_tokens\": 32000,\n \"parallel_tool_calls\": true,\n \"output_modes\": [\n \"generateText\",\n \"generateObject\"\n ],\n \"prompt_rules\": [],\n \"known_failures\": [\n \"refusal returns stop_reason 'refusal' with schema-violating output\"\n ],\n \"strengths\": [\n \"complex_reasoning\",\n \"judgment\"\n ],\n \"weaknesses\": [\n \"legacy\",\n \"expensive_vs_4.6\",\n \"lower_output_limit\"\n ],\n \"cost_input_per_1m\": 15.0,\n \"cost_output_per_1m\": 75.0,\n \"step_limit_default\": 10,\n \"notes\": \"Legacy \\u2014 use claude-opus-4-6 instead. 
3x more expensive for same tier.\"\n },\n \"gpt-4.1\": {\n \"provider\": \"openai\",\n \"status\": \"current\",\n \"max_tools\": 128,\n \"max_context_tokens\": 1047576,\n \"max_output_tokens\": 32768,\n \"parallel_tool_calls\": true,\n \"output_modes\": [\n \"generateText\",\n \"generateObject\"\n ],\n \"prompt_rules\": [],\n \"known_failures\": [\n \"structured output incompatible with parallel_tool_calls \\u2014 must set parallel_tool_calls=false\",\n \"first request with new JSON schema has preprocessing delay up to 60s\",\n \"structured output truncates silently at max_tokens \\u2014 unparseable JSON\",\n \"safety refusal returns 'refusal' field instead of schema-conforming object\",\n \"model alias points to latest snapshot \\u2014 behavior can change without warning\"\n ],\n \"strengths\": [\n \"all_rounder\",\n \"structured_output\",\n \"tool_use\",\n \"reliability\",\n \"1m_context\",\n \"coding\"\n ],\n \"weaknesses\": [],\n \"cost_input_per_1m\": 2.0,\n \"cost_output_per_1m\": 8.0,\n \"step_limit_default\": 10,\n \"notes\": \"OpenAI stable flagship. 1M context. Strong coding benchmarks. Natural shadow-test candidate for complex tasks vs Opus.\"\n },\n \"gpt-4.1-mini\": {\n \"provider\": \"openai\",\n \"status\": \"current\",\n \"max_tools\": 128,\n \"max_context_tokens\": 1047576,\n \"max_output_tokens\": 32768,\n \"parallel_tool_calls\": true,\n \"output_modes\": [\n \"generateText\",\n \"generateObject\"\n ],\n \"prompt_rules\": [],\n \"known_failures\": [\n \"structured output incompatible with parallel_tool_calls \\u2014 must set parallel_tool_calls=false\",\n \"structured output truncates silently at max_tokens\",\n \"safety refusal returns 'refusal' field instead of schema-conforming object\"\n ],\n \"strengths\": [\n \"speed\",\n \"cost\",\n \"structured_output\",\n \"1m_context\"\n ],\n \"weaknesses\": [\n \"complex_reasoning\"\n ],\n \"cost_input_per_1m\": 0.4,\n \"cost_output_per_1m\": 1.6,\n \"step_limit_default\": 8,\n \"notes\": \"OpenAI mid-tier. 1M context. Good for standard tasks.\"\n },\n \"gpt-4.1-nano\": {\n \"provider\": \"openai\",\n \"status\": \"current\",\n \"max_tools\": 128,\n \"max_context_tokens\": 1047576,\n \"max_output_tokens\": 32768,\n \"parallel_tool_calls\": true,\n \"output_modes\": [\n \"generateText\",\n \"generateObject\"\n ],\n \"prompt_rules\": [],\n \"known_failures\": [\n \"structured output incompatible with parallel_tool_calls \\u2014 must set parallel_tool_calls=false\",\n \"instruction drift in long conversations\"\n ],\n \"strengths\": [\n \"speed\",\n \"cost\",\n \"1m_context\"\n ],\n \"weaknesses\": [\n \"complex_reasoning\",\n \"nuance\"\n ],\n \"cost_input_per_1m\": 0.1,\n \"cost_output_per_1m\": 0.4,\n \"step_limit_default\": 6,\n \"notes\": \"OpenAI cheapest. 1M context. 
Simple tasks only.\"\n },\n \"gpt-4o\": {\n \"provider\": \"openai\",\n \"status\": \"legacy\",\n \"max_tools\": 128,\n \"max_context_tokens\": 128000,\n \"max_output_tokens\": 16384,\n \"parallel_tool_calls\": true,\n \"output_modes\": [\n \"generateText\",\n \"generateObject\"\n ],\n \"prompt_rules\": [],\n \"known_failures\": [\n \"structured output incompatible with parallel_tool_calls \\u2014 must set parallel_tool_calls=false\",\n \"structured output truncates silently at max_tokens\",\n \"safety refusal returns 'refusal' field instead of schema-conforming object\",\n \"hallucination rate higher than expected at temperature=0\"\n ],\n \"strengths\": [\n \"all_rounder\",\n \"structured_output\",\n \"tool_use\",\n \"reliability\"\n ],\n \"weaknesses\": [\n \"legacy\",\n \"128k_context_limit\"\n ],\n \"cost_input_per_1m\": 2.5,\n \"cost_output_per_1m\": 10.0,\n \"step_limit_default\": 10,\n \"notes\": \"Legacy \\u2014 use gpt-4.1 instead. Better quality, cheaper, 1M context.\"\n },\n \"gpt-4o-mini\": {\n \"provider\": \"openai\",\n \"status\": \"legacy\",\n \"max_tools\": 128,\n \"max_context_tokens\": 128000,\n \"max_output_tokens\": 16384,\n \"parallel_tool_calls\": true,\n \"output_modes\": [\n \"generateText\",\n \"generateObject\"\n ],\n \"prompt_rules\": [],\n \"known_failures\": [\n \"structured output incompatible with parallel_tool_calls \\u2014 must set parallel_tool_calls=false\",\n \"instruction drift in long conversations \\u2014 worse than gpt-4o\"\n ],\n \"strengths\": [\n \"speed\",\n \"cost\",\n \"simple_tasks\"\n ],\n \"weaknesses\": [\n \"legacy\",\n \"complex_reasoning\",\n \"128k_context_limit\"\n ],\n \"cost_input_per_1m\": 0.15,\n \"cost_output_per_1m\": 0.6,\n \"step_limit_default\": 6,\n \"notes\": \"Legacy \\u2014 use gpt-4.1-mini or gpt-4.1-nano instead.\"\n },\n \"o3\": {\n \"provider\": \"openai\",\n \"status\": \"current\",\n \"max_tools\": 128,\n \"max_context_tokens\": 200000,\n \"max_output_tokens\": 100000,\n \"parallel_tool_calls\": false,\n \"output_modes\": [\n \"generateText\",\n \"generateObject\"\n ],\n \"prompt_rules\": [\n \"no_system_with_developer\"\n ],\n \"known_failures\": [\n \"reasoning tokens unpredictable and unbounded \\u2014 single call cost variance 10-50x\",\n \"parallel_tool_calls=true errors or silently ignored \\u2014 always set false\",\n \"reasoning summaries unreliable \\u2014 omitted >90% of cases\",\n \"abandons hard tasks mid-work \\u2014 non-deterministic behavioral pattern\",\n \"reasoning tokens discarded between turns \\u2014 cannot reference prior reasoning chain\",\n \"hallucinates tool invocations for tools not in schema\"\n ],\n \"strengths\": [\n \"reasoning\",\n \"complex_analysis\",\n \"code\",\n \"math\"\n ],\n \"weaknesses\": [\n \"cost_variance\",\n \"sequential_tools\",\n \"unpredictable_latency\"\n ],\n \"cost_input_per_1m\": 2.0,\n \"cost_output_per_1m\": 8.0,\n \"step_limit_default\": 8,\n \"notes\": \"Frontier reasoning. Cost is unpredictable due to reasoning tokens. Budget with max_completion_tokens. 
reasoning_effort param controls cost/quality tradeoff.\"\n },\n \"gemini-2.5-flash\": {\n \"provider\": \"google\",\n \"status\": \"current\",\n \"max_tools\": 128,\n \"max_context_tokens\": 1048576,\n \"max_output_tokens\": 65535,\n \"parallel_tool_calls\": true,\n \"output_modes\": [\n \"generateText\",\n \"generateObject\"\n ],\n \"prompt_rules\": [\n \"ban_cot_phrases\",\n \"hard_word_limit\",\n \"explicit_format\",\n \"disable_thinking_for_short_output\"\n ],\n \"known_failures\": [\n \"thinking tokens consume maxOutputTokens \\u2014 empty response if budget exhausted by reasoning\",\n \"MALFORMED_FUNCTION_CALL maps to 'stop' in LiteLLM \\u2014 silent failure in agentic loops\",\n \"empty response after tool call result submission \\u2014 reproducible for specific inputs\",\n \"parallel tool call parsing broken in streaming \\u2014 SDKs only parse first functionCall in parts array\",\n \"deeply nested or large schemas rejected at API level\",\n \"unsupported JSON Schema properties silently ignored \\u2014 partial silent failure\",\n \"quality degrades significantly with large documents or high-context prompts\",\n \"10-20 tools recommended despite 128 hard limit \\u2014 reliability drops above 20\"\n ],\n \"strengths\": [\n \"speed\",\n \"volume\",\n \"classification\",\n \"parallel_tool_calls\",\n \"1m_context\",\n \"cost\"\n ],\n \"weaknesses\": [\n \"complex_schemas\",\n \"large_tool_sets_unreliable\",\n \"thinking_token_drain\",\n \"streaming_tool_parsing\"\n ],\n \"cost_input_per_1m\": 0.3,\n \"cost_output_per_1m\": 2.5,\n \"step_limit_default\": 6,\n \"notes\": \"Fast and cheap with 1M context. Thinking ON by default \\u2014 set thinkingBudget=0 for short outputs or max_tokens>=1024. Tool reliability drops above 10-20 tools despite 128 limit. The primary silent failure model in the pool.\"\n },\n \"gemini-2.5-pro\": {\n \"provider\": \"google\",\n \"status\": \"current\",\n \"max_tools\": 128,\n \"max_context_tokens\": 1048576,\n \"max_output_tokens\": 65535,\n \"parallel_tool_calls\": true,\n \"output_modes\": [\n \"generateText\",\n \"generateObject\"\n ],\n \"prompt_rules\": [\n \"disable_thinking_for_short_output\"\n ],\n \"known_failures\": [\n \"thinking tokens consume maxOutputTokens\",\n \"MALFORMED_FUNCTION_CALL maps to 'stop' in LiteLLM\",\n \"pricing doubles above 200k context tokens\"\n ],\n \"strengths\": [\n \"reasoning\",\n \"1m_context\",\n \"structured_output\",\n \"tool_use\"\n ],\n \"weaknesses\": [\n \"pricing_above_200k\"\n ],\n \"cost_input_per_1m\": 1.25,\n \"cost_output_per_1m\": 10.0,\n \"step_limit_default\": 10,\n \"notes\": \"Google stable frontier. 1M context. 
Pricing doubles above 200k tokens \\u2014 budget carefully for long-context use.\"\n },\n \"deepseek-v4\": {\n \"provider\": \"deepseek\",\n \"status\": \"current\",\n \"max_tools\": 128,\n \"max_context_tokens\": 1000000,\n \"max_output_tokens\": 64000,\n \"parallel_tool_calls\": false,\n \"output_modes\": [\n \"generateText\",\n \"generateObject\"\n ],\n \"prompt_rules\": [\n \"explicit_format\"\n ],\n \"known_failures\": [\n \"uptime is primary production risk \\u2014 documented multi-day outages\",\n \"503 server overloaded without backpressure \\u2014 no clean 429s, just silent slow responses\",\n \"tool call JSON occasionally malformed \\u2014 parse errors must be handled explicitly\"\n ],\n \"strengths\": [\n \"cost\",\n \"reasoning\",\n \"1m_context\",\n \"coding\"\n ],\n \"weaknesses\": [\n \"uptime\",\n \"sequential_tool_calls\",\n \"no_backpressure\"\n ],\n \"cost_input_per_1m\": 0.3,\n \"cost_output_per_1m\": 0.5,\n \"step_limit_default\": 8,\n \"notes\": \"Extraordinary value \\u2014 frontier quality at 10-50x cheaper than Western providers. 1M context. Uptime is the risk.\"\n },\n \"deepseek-chat\": {\n \"provider\": \"deepseek\",\n \"status\": \"legacy\",\n \"max_tools\": 128,\n \"max_context_tokens\": 128000,\n \"max_output_tokens\": 8000,\n \"parallel_tool_calls\": false,\n \"output_modes\": [\n \"generateText\",\n \"generateObject\"\n ],\n \"prompt_rules\": [\n \"explicit_format\"\n ],\n \"known_failures\": [\n \"uptime is primary production risk \\u2014 documented multi-day outages\",\n \"503 server overloaded errors \\u2014 streaming stalls mid-response without error\",\n \"tool call JSON occasionally malformed\",\n \"7-8 sequential tool calls \\u2014 5x latency vs parallel models for same task\"\n ],\n \"strengths\": [\n \"cost\",\n \"general_reasoning\"\n ],\n \"weaknesses\": [\n \"sequential_tool_calls\",\n \"latency_with_tools\",\n \"uptime\",\n \"legacy\"\n ],\n \"cost_input_per_1m\": 0.28,\n \"cost_output_per_1m\": 0.42,\n \"step_limit_default\": 6,\n \"notes\": \"Legacy V3.2. Use deepseek-v4 instead. 10x cheaper cache hits ($0.028/1M). Sequential tool calls only.\"\n },\n \"deepseek-reasoner\": {\n \"provider\": \"deepseek\",\n \"status\": \"legacy\",\n \"max_tools\": 128,\n \"max_context_tokens\": 128000,\n \"max_output_tokens\": 64000,\n \"parallel_tool_calls\": false,\n \"output_modes\": [\n \"generateText\",\n \"generateObject\"\n ],\n \"prompt_rules\": [\n \"explicit_format\"\n ],\n \"known_failures\": [\n \"uptime is primary production risk\",\n \"tool calling added in V3.2 \\u2014 older integrations may not support it\",\n \"the V3.2-Speciale variant does NOT support tool calling \\u2014 do not confuse\"\n ],\n \"strengths\": [\n \"reasoning\",\n \"cost_effective_reasoning\"\n ],\n \"weaknesses\": [\n \"sequential_tool_calls\",\n \"latency\",\n \"uptime\",\n \"legacy\"\n ],\n \"cost_input_per_1m\": 0.28,\n \"cost_output_per_1m\": 0.42,\n \"step_limit_default\": 8,\n \"notes\": \"Legacy V3.2 reasoning mode. Same pricing as deepseek-chat in V3.2. 
Use deepseek-v4 instead.\"\n },\n \"mistral-small\": {\n \"provider\": \"mistral\",\n \"status\": \"current\",\n \"max_tools\": 128,\n \"max_context_tokens\": 256000,\n \"max_output_tokens\": 16000,\n \"parallel_tool_calls\": true,\n \"output_modes\": [\n \"generateText\",\n \"generateObject\"\n ],\n \"prompt_rules\": [\n \"reinforce_json_in_prompt\"\n ],\n \"known_failures\": [\n \"json_object mode only 64% reliable on complex schemas \\u2014 must use json_schema strict mode\",\n \"must explicitly instruct JSON output in prompt even when using JSON mode\",\n \"max output governed by context remainder \\u2014 250k input leaves only 6k for output\"\n ],\n \"strengths\": [\n \"speed\",\n \"cost\",\n \"european_hosting\",\n \"256k_context\",\n \"structured_output_strict\"\n ],\n \"weaknesses\": [\n \"complex_reasoning\",\n \"json_object_unreliable\"\n ],\n \"cost_input_per_1m\": 0.2,\n \"cost_output_per_1m\": 0.6,\n \"step_limit_default\": 6,\n \"notes\": \"Mistral Small 4 (2603). 256k context. European hosting. Always use json_schema strict mode, never json_object.\"\n },\n \"mistral-large\": {\n \"provider\": \"mistral\",\n \"status\": \"current\",\n \"max_tools\": 128,\n \"max_context_tokens\": 256000,\n \"max_output_tokens\": 16000,\n \"parallel_tool_calls\": true,\n \"output_modes\": [\n \"generateText\",\n \"generateObject\"\n ],\n \"prompt_rules\": [\n \"reinforce_json_in_prompt\"\n ],\n \"known_failures\": [\n \"json_object mode only 64% reliable \\u2014 must use json_schema strict mode\",\n \"must explicitly instruct JSON output in prompt even when using JSON mode\"\n ],\n \"strengths\": [\n \"reasoning\",\n \"european_hosting\",\n \"structured_output\",\n \"256k_context\"\n ],\n \"weaknesses\": [\n \"cost_vs_gpt41\"\n ],\n \"cost_input_per_1m\": 2.0,\n \"cost_output_per_1m\": 6.0,\n \"step_limit_default\": 10,\n \"notes\": \"Mistral Large 3 (2512). 256k context. European hosting. Strong reasoning.\"\n },\n \"grok-3\": {\n \"provider\": \"xai\",\n \"status\": \"current\",\n \"max_tools\": 128,\n \"max_context_tokens\": 131072,\n \"max_output_tokens\": 16000,\n \"parallel_tool_calls\": true,\n \"output_modes\": [\n \"generateText\",\n \"generateObject\"\n ],\n \"prompt_rules\": [],\n \"known_failures\": [\n \"function call tags leak into thinking blocks \\u2014 breaks tool-call parsers\",\n \"enters repetitive tool-call generation loops \\u2014 add hard ceiling on iterations\",\n \"reasoning_effort param NOT supported \\u2014 only on grok-3-mini\",\n \"100k TPM cap per customer \\u2014 lower than comparable models\"\n ],\n \"strengths\": [\n \"reasoning\",\n \"speed\"\n ],\n \"weaknesses\": [\n \"tool_call_tag_leaks\",\n \"tool_loop_risk\",\n \"tpm_cap\"\n ],\n \"cost_input_per_1m\": 3.0,\n \"cost_output_per_1m\": 15.0,\n \"step_limit_default\": 10,\n \"notes\": \"Strong reasoning. Young API with thinner operational track record. 
Watch for tool-call parsing issues.\"\n },\n \"grok-3-mini\": {\n \"provider\": \"xai\",\n \"status\": \"current\",\n \"max_tools\": 128,\n \"max_context_tokens\": 131072,\n \"max_output_tokens\": 16000,\n \"parallel_tool_calls\": true,\n \"output_modes\": [\n \"generateText\",\n \"generateObject\"\n ],\n \"prompt_rules\": [],\n \"known_failures\": [\n \"function call tags leak into thinking blocks\",\n \"enters repetitive tool-call generation loops\",\n \"100k TPM cap per customer\"\n ],\n \"strengths\": [\n \"speed\",\n \"cost\",\n \"medium_reasoning\",\n \"reasoning_effort_control\"\n ],\n \"weaknesses\": [\n \"tool_call_tag_leaks\",\n \"tool_loop_risk\",\n \"tpm_cap\"\n ],\n \"cost_input_per_1m\": 0.3,\n \"cost_output_per_1m\": 0.5,\n \"step_limit_default\": 8,\n \"notes\": \"Fast, cheap. reasoning_effort param (low/high) controls reasoning token budget. Full CoT exposed in every response.\"\n }\n}","/**\n * Model profiles — bundled at build time from profiles/models.yaml.\n *\n * Adding a model = adding an entry to profiles.json + npm version bump.\n */\n\nimport type { ModelProfile } from './types';\nimport rawProfiles from '../profiles.json';\n\nconst _profiles: Record<string, ModelProfile> = {};\n\n// Parse raw JSON into typed profiles\nfor (const [id, raw] of Object.entries(rawProfiles as Record<string, Record<string, unknown>>)) {\n _profiles[id] = {\n id,\n provider: raw.provider as string,\n status: raw.status as ModelProfile['status'],\n max_tools: raw.max_tools as number,\n max_context_tokens: raw.max_context_tokens as number,\n max_output_tokens: raw.max_output_tokens as number,\n parallel_tool_calls: raw.parallel_tool_calls as boolean,\n output_modes: (raw.output_modes as string[]) as ModelProfile['output_modes'],\n prompt_rules: (raw.prompt_rules as string[]) ?? [],\n known_failures: (raw.known_failures as string[]) ?? [],\n strengths: (raw.strengths as string[]) ?? [],\n weaknesses: (raw.weaknesses as string[]) ?? [],\n cost_input_per_1m: raw.cost_input_per_1m as number,\n cost_output_per_1m: raw.cost_output_per_1m as number,\n step_limit_default: (raw.step_limit_default as number) ?? 10,\n notes: (raw.notes as string) ?? '',\n };\n}\n\n/** All loaded model profiles, keyed by model ID. */\nexport const PROFILES: Readonly<Record<string, ModelProfile>> = _profiles;\n\n/** Get a single profile by ID. Throws if not found. */\nexport function getProfile(modelId: string): ModelProfile {\n const p = _profiles[modelId];\n if (!p) {\n const available = Object.keys(_profiles).sort().join(', ');\n throw new Error(`Unknown model '${modelId}'. Available: ${available}`);\n }\n return p;\n}\n\n/** Get all profiles with status \"current\". */\nexport function getCurrentProfiles(): Record<string, ModelProfile> {\n return Object.fromEntries(\n Object.entries(_profiles).filter(([, p]) => p.status === 'current')\n );\n}\n\n/** Get profiles filtered by provider. */\nexport function getProfilesByProvider(provider: string): Record<string, ModelProfile> {\n return Object.fromEntries(\n Object.entries(_profiles).filter(([, p]) => p.provider === provider)\n );\n}\n","/**\n * Per-tool execution policies — advises consumers on parallel safety.\n *\n * These tools cause side effects when called concurrently.\n * The adapter advises; the consumer enforces.\n *\n * Source: tt-intelligence production data (61 fixtures, 50+ sessions).\n */\n\nimport type { ToolDefinition, ToolPolicy } from './types';\n\n/** Tools that must NOT run in parallel. 
*/\nexport const SERIAL_TOOLS: Record<string, string> = {\n update_dashboard: 'UI state mutation — parallel calls cause stuttering and duplicate filter applications',\n suggest_next_moves: 'Epilogue tool — max 1 per response, generates suggestion pills',\n start_hunt: 'Session-level action — max 1 per response, creates a hunt mission',\n propose_strategy: 'Advisory tool — max 1 per response, generates strategy cards',\n start_wave_run: 'Orchestration action — max 1 per response, launches autonomous wave',\n};\n\n/** Compute tool policies for a set of tools. Only returns policies for serial-only tools. */\nexport function computeToolPolicies(tools: ToolDefinition[]): ToolPolicy[] {\n const policies: ToolPolicy[] = [];\n\n for (const tool of tools) {\n const reason = SERIAL_TOOLS[tool.name];\n if (reason) {\n policies.push({\n name: tool.name,\n parallelSafe: false,\n maxPerResponse: 1,\n reason,\n });\n }\n }\n\n return policies;\n}\n\n/** Detect parallelism violations in a list of called tools. */\nexport function detectParallelismWarnings(toolsCalled: string[]): string[] {\n if (!toolsCalled.length) return [];\n\n const counts: Record<string, number> = {};\n for (const name of toolsCalled) {\n counts[name] = (counts[name] || 0) + 1;\n }\n\n const warnings: string[] = [];\n for (const [name, count] of Object.entries(counts)) {\n if (name in SERIAL_TOOLS && count > 1) {\n warnings.push(`${name} called ${count}x (should be max 1): ${SERIAL_TOOLS[name]}`);\n }\n }\n\n return warnings;\n}\n","/**\n * Request Adapter — transforms raw requests so the target model handles them correctly.\n *\n * Port of gateway/adapter.py. Pure transform, no IO, no state.\n *\n * Three stages applied in order:\n * 1. Unified token budget (tools + system prompt + messages share one context window)\n * 2. Prompt rewriting (composable rules from model profile)\n * 3. Output strategy selection (generateObject vs generateText)\n */\n\nimport { countTokens, estimateToolTokens, estimateMessagesTokens } from './tokenizer';\nimport { getProfile } from './profiles';\nimport { computeToolPolicies } from './policies';\nimport type {\n ModelProfile,\n Message,\n ToolDefinition,\n AdapterConstraints,\n PrepareInput,\n PrepareResult,\n} from './types';\n\n/** Safety margin: use 90% of max_context_tokens. */\nconst CONTEXT_SAFETY_FACTOR = 0.90;\n\n/** Truncation marker appended when messages are cut. */\nconst TRUNCATION_MARKER = '\\n[...earlier messages truncated to fit context window]';\n\n// ============================================================================\n// STAGE 1: Unified Token Budget\n// ============================================================================\n\nfunction selectTools(\n tools: ToolDefinition[],\n tokenBudget: number,\n profile: ModelProfile,\n constraints: AdapterConstraints,\n): ToolDefinition[] {\n if (!tools.length) return tools;\n\n const maxTools = profile.max_tools;\n let candidates: ToolDefinition[];\n\n if (constraints.relevanceHints) {\n // Sort by relevance score descending\n const scored = tools.map(t => ({\n score: constraints.relevanceHints![t.name] ?? 
0,\n tool: t,\n }));\n scored.sort((a, b) => b.score - a.score);\n candidates = scored.map(s => s.tool);\n } else {\n // Deterministic shuffle seeded by tool names (avoid alphabetical bias)\n const seed = tools.map(t => t.name).join(',');\n candidates = [...tools];\n // Simple deterministic shuffle using string hash\n let hash = 0;\n for (let i = 0; i < seed.length; i++) {\n hash = ((hash << 5) - hash + seed.charCodeAt(i)) | 0;\n }\n for (let i = candidates.length - 1; i > 0; i--) {\n hash = ((hash << 5) - hash + i) | 0;\n const j = Math.abs(hash) % (i + 1);\n [candidates[i], candidates[j]] = [candidates[j], candidates[i]];\n }\n }\n\n // Greedily add tools until budget or max_tools exhausted\n const selected: ToolDefinition[] = [];\n let tokensUsed = 0;\n\n for (const tool of candidates) {\n if (selected.length >= maxTools) break;\n\n let toolTokens: number;\n try {\n toolTokens = countTokens(JSON.stringify(tool));\n } catch {\n toolTokens = 350;\n }\n\n if (tokensUsed + toolTokens > tokenBudget) break;\n selected.push(tool);\n tokensUsed += toolTokens;\n }\n\n return selected;\n}\n\nfunction truncateMessages(messages: Message[], tokenBudget: number): Message[] {\n if (!messages.length) return messages;\n\n // Always keep the last message\n const last = messages[messages.length - 1];\n const lastTokens = countTokens(last.content) + 4;\n\n if (lastTokens >= tokenBudget) {\n // Even the last message is too long — hard truncate\n const markerTokens = countTokens(TRUNCATION_MARKER);\n const maxContentTokens = tokenBudget - 4 - markerTokens;\n if (maxContentTokens > 0) {\n // Rough char-based truncation (token-perfect would need encode/decode)\n const maxChars = maxContentTokens * 4;\n const truncated = last.content.slice(0, maxChars) + TRUNCATION_MARKER;\n return [{ role: last.role, content: truncated }];\n }\n return [last];\n }\n\n // Walk backwards, adding messages until budget exhausted\n const result: Message[] = [];\n let remaining = tokenBudget;\n\n for (let i = messages.length - 1; i >= 0; i--) {\n const msgTokens = countTokens(messages[i].content) + 4;\n if (remaining - msgTokens < 0 && result.length > 0) break;\n result.unshift(messages[i]);\n remaining -= msgTokens;\n }\n\n if (result.length < messages.length) {\n result[0] = {\n role: result[0].role,\n content: TRUNCATION_MARKER + '\\n' + result[0].content,\n };\n }\n\n return result;\n}\n\nfunction budgetTokens(\n systemPrompt: string,\n messages: Message[],\n tools: ToolDefinition[],\n profile: ModelProfile,\n constraints: AdapterConstraints,\n): { selectedTools: ToolDefinition[]; trimmedMessages: Message[]; tokensEstimated: number } {\n const budget = Math.floor(profile.max_context_tokens * CONTEXT_SAFETY_FACTOR);\n const systemTokens = countTokens(systemPrompt);\n\n if (systemTokens > budget) {\n throw new Error(\n `System prompt (${systemTokens} tokens) exceeds ${profile.id}'s context budget ` +\n `(${budget} tokens at ${Math.round(CONTEXT_SAFETY_FACTOR * 100)}% of ${profile.max_context_tokens}). 
` +\n `Reduce the system prompt or use a model with a larger context window.`\n );\n }\n\n const toolTokens = estimateToolTokens(tools);\n const messageTokens = estimateMessagesTokens(messages);\n const total = systemTokens + toolTokens + messageTokens;\n\n // Everything fits\n if (total <= budget) {\n return { selectedTools: tools, trimmedMessages: messages, tokensEstimated: total };\n }\n\n // Over budget — reduce tools first\n let selected = tools;\n let selectedToolTokens = toolTokens;\n const remaining = budget - systemTokens;\n\n if (toolTokens > 0 && (toolTokens + systemTokens) > remaining * 0.5) {\n selected = selectTools(tools, Math.floor(remaining / 2), profile, constraints);\n selectedToolTokens = estimateToolTokens(selected);\n }\n\n const remainingForMessages = budget - systemTokens - selectedToolTokens;\n\n // Truncate messages if still over\n let trimmed = messages;\n let trimmedMessageTokens = messageTokens;\n if (messageTokens > remainingForMessages) {\n trimmed = truncateMessages(messages, remainingForMessages);\n trimmedMessageTokens = estimateMessagesTokens(trimmed);\n }\n\n return {\n selectedTools: selected,\n trimmedMessages: trimmed,\n tokensEstimated: systemTokens + selectedToolTokens + trimmedMessageTokens,\n };\n}\n\n// ============================================================================\n// STAGE 2: Prompt Rewriting\n// ============================================================================\n\nconst COT_BAN =\n '\\n\\nCRITICAL: NEVER use phrases like \\'I should\\', \\'Let me\\', \\'However\\', ' +\n '\\'I need to\\', \\'First, I will\\', \\'Let me think\\'. ' +\n 'Respond directly without narrating your thought process.';\n\nconst EXPLICIT_FORMAT =\n '\\n\\nIMPORTANT: If the expected output is JSON, respond with ONLY valid JSON. ' +\n 'No markdown code fences. No explanation before or after. Just the JSON object.';\n\nconst NO_MARKDOWN_HEADERS = '\\n\\nDo not use markdown headers (# ## ###) in your response.';\n\nfunction applyPromptRules(\n systemPrompt: string,\n profile: ModelProfile,\n constraints: AdapterConstraints,\n): { adaptedPrompt: string; rulesApplied: string[] } {\n const rulesApplied: string[] = [];\n let prompt = systemPrompt;\n\n for (const rule of profile.prompt_rules) {\n switch (rule) {\n case 'ban_cot_phrases':\n prompt += COT_BAN;\n rulesApplied.push('ban_cot_phrases');\n break;\n\n case 'hard_word_limit':\n if (constraints.maxResponseWords) {\n prompt += `\\n\\nResponse MUST be under ${constraints.maxResponseWords} words. Be concise.`;\n rulesApplied.push(`hard_word_limit(${constraints.maxResponseWords})`);\n }\n break;\n\n case 'explicit_format':\n prompt += EXPLICIT_FORMAT;\n rulesApplied.push('explicit_format');\n break;\n\n case 'no_markdown_headers':\n prompt += NO_MARKDOWN_HEADERS;\n rulesApplied.push('no_markdown_headers');\n break;\n\n case 'force_json_mode':\n rulesApplied.push('force_json_mode');\n break;\n\n case 'disable_thinking_for_short_output':\n rulesApplied.push('disable_thinking_for_short_output');\n break;\n\n case 'reinforce_json_in_prompt':\n prompt += '\\n\\nYou MUST respond with valid JSON. Follow the exact schema specified. 
' +\n 'No additional text, no explanations, just the JSON object.';\n rulesApplied.push('reinforce_json_in_prompt');\n break;\n\n case 'no_system_with_developer':\n rulesApplied.push('no_system_with_developer');\n break;\n\n default:\n // Unknown rule — skip silently\n break;\n }\n }\n\n return { adaptedPrompt: prompt, rulesApplied };\n}\n\n// ============================================================================\n// STAGE 3: Output Strategy\n// ============================================================================\n\nfunction selectOutputStrategy(profile: ModelProfile, constraints: AdapterConstraints): 'generateText' | 'generateObject' {\n if (!constraints.structuredOutput) return 'generateText';\n if (profile.output_modes.includes('generateObject')) return 'generateObject';\n return 'generateText';\n}\n\n// ============================================================================\n// ORCHESTRATOR\n// ============================================================================\n\n/** Generate a UUID v4. */\nfunction uuid(): string {\n return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, c => {\n const r = (Math.random() * 16) | 0;\n const v = c === 'x' ? r : (r & 0x3) | 0x8;\n return v.toString(16);\n });\n}\n\n/**\n * Prepare a request for a specific model.\n *\n * Pure function. Takes the raw request and returns an adapted request\n * with budgeted tools, rewritten prompt, and output strategy.\n *\n * Throws Error if the system prompt exceeds the model's context budget.\n */\nexport function prepare(input: PrepareInput): PrepareResult {\n const profile = getProfile(input.model);\n const constraints: AdapterConstraints = input.constraints ?? {};\n const tools = input.tools ?? [];\n const originalToolCount = tools.length;\n\n // Stage 1: Unified token budget\n const { selectedTools, trimmedMessages, tokensEstimated: baseTokens } = budgetTokens(\n input.systemPrompt, input.messages, tools, profile, constraints,\n );\n\n // Stage 2: Prompt rewriting\n const { adaptedPrompt, rulesApplied } = applyPromptRules(\n input.systemPrompt, profile, constraints,\n );\n\n // Recalculate tokens after prompt rewriting\n let tokensEstimated = baseTokens;\n if (rulesApplied.length > 0) {\n const promptDelta = countTokens(adaptedPrompt) - countTokens(input.systemPrompt);\n tokensEstimated += promptDelta;\n }\n\n // Stage 3: Output strategy\n const outputStrategy = selectOutputStrategy(profile, constraints);\n\n // Tool policies\n const toolPolicies = computeToolPolicies(selectedTools);\n\n return {\n requestId: uuid(),\n model: profile.id,\n provider: profile.provider,\n systemPrompt: adaptedPrompt,\n messages: trimmedMessages,\n tools: selectedTools,\n outputStrategy,\n promptRulesApplied: rulesApplied,\n tokensEstimated,\n contextBudget: profile.max_context_tokens,\n toolsOriginalCount: originalToolCount,\n toolsSelectedCount: selectedTools.length,\n toolPolicies: toolPolicies.length > 0 ? 
toolPolicies : null,\n };\n}\n","/**\n * Outcome logger — diagnostics for every model call.\n *\n * Three destinations:\n * - console (default): human-readable log line\n * - http: POST to a centralized endpoint (fire-and-forget)\n * - silent: no output (for tests)\n *\n * Every log() call returns instant diagnostics:\n * - efficiency flag (input/output ratio assessment)\n * - text-only detection (tools offered but none used)\n * - parallelism warnings (serial-only tools called multiple times)\n */\n\nimport { detectParallelismWarnings } from './policies';\nimport type { LogInput, LogResult } from './types';\n\n/** Compute efficiency flag from input/output token ratio. */\nfunction computeEfficiencyFlag(tokensIn: number, tokensOut: number): 'healthy' | 'warning' | 'critical' {\n const total = tokensIn + tokensOut;\n if (total === 0) return 'healthy';\n\n const ratio = tokensIn / total;\n if (ratio > 0.95) return 'critical';\n if (ratio > 0.85) return 'warning';\n return 'healthy';\n}\n\n/** Detect text-only responses where the model should have used tools. */\nfunction detectTextOnly(input: LogInput): boolean {\n const toolsOffered = input.toolsOffered ?? 0;\n const toolsUsed = input.toolsUsed ?? 0;\n const emptyResponse = input.emptyResponse ?? false;\n\n // text-only = tools offered, none used, and there IS a response (not empty)\n if (toolsOffered > 0 && toolsUsed === 0 && !emptyResponse) return true;\n\n // Also via tools_called list\n if (toolsOffered > 0 && input.toolsCalled && input.toolsCalled.length === 0 && !emptyResponse) return true;\n\n return false;\n}\n\n/** Compute all diagnostics for a log entry. */\nexport function computeDiagnostics(input: LogInput): LogResult {\n const total = input.tokensIn + input.tokensOut;\n const inputRatio = total > 0 ? Math.round((input.tokensIn / total) * 10000) / 10000 : 0;\n const efficiencyFlag = computeEfficiencyFlag(input.tokensIn, input.tokensOut);\n const textOnly = detectTextOnly(input);\n const parallelismWarnings = input.toolsCalled\n ? detectParallelismWarnings(input.toolsCalled)\n : [];\n\n return {\n logged: true,\n requestId: input.requestId,\n inputRatio,\n efficiencyFlag,\n textOnly,\n parallelismWarnings: parallelismWarnings.length > 0 ? parallelismWarnings : null,\n };\n}\n\n// ── Log destinations ──\n\nexport interface LogDestination {\n write(input: LogInput, result: LogResult): void;\n}\n\nexport class ConsoleDestination implements LogDestination {\n write(input: LogInput, result: LogResult): void {\n const flags: string[] = [result.efficiencyFlag ?? 'unknown'];\n if (result.textOnly) flags.push('text-only');\n if (result.parallelismWarnings) flags.push(`parallel(${result.parallelismWarnings.length})`);\n\n console.log(\n `[kg-auto] ${input.model} | ` +\n `${input.tokensIn}→${input.tokensOut} tokens | ` +\n `${input.latencyMs}ms | ` +\n `${input.success ? 'ok' : 'FAIL'} | ` +\n `${flags.join(', ')}`\n );\n }\n}\n\nexport class HttpDestination implements LogDestination {\n constructor(private url: string) {}\n\n write(input: LogInput, _result: LogResult): void {\n // Fire-and-forget — never block the caller\n fetch(this.url, {\n method: 'POST',\n headers: { 'Content-Type': 'application/json' },\n body: JSON.stringify({\n request_id: input.requestId,\n model: input.model,\n provider: input.provider,\n project: input.project ?? 'default',\n intent: input.intent,\n tools_offered: input.toolsOffered ?? 0,\n tools_selected: input.toolsSelected ?? 0,\n tools_used: input.toolsUsed ?? 
0,\n tokens_in: input.tokensIn,\n tokens_out: input.tokensOut,\n latency_ms: input.latencyMs,\n success: input.success,\n empty_response: input.emptyResponse ?? false,\n error_type: input.errorType,\n adapter_rules_applied: input.adapterRulesApplied,\n mode: input.mode,\n tools_called: input.toolsCalled,\n }),\n }).catch(() => {\n // Logging failure is non-critical — the model call already succeeded\n });\n }\n}\n\nexport class SilentDestination implements LogDestination {\n write(): void {\n // No-op\n }\n}\n\n/**\n * Log an outcome and return instant diagnostics.\n *\n * The diagnostics are computed synchronously. The write to the destination\n * is fire-and-forget (may be async for HTTP, but never blocks the caller).\n */\nexport function log(input: LogInput, destination: LogDestination): LogResult {\n const result = computeDiagnostics(input);\n destination.write(input, result);\n return result;\n}\n"],"mappings":";AAaO,IAAM,sBAAsB;AAGnC,IAAI,aAAuC,CAAC,SAC1C,KAAK,IAAI,GAAG,KAAK,KAAK,KAAK,SAAS,GAAG,CAAC;AAUnC,SAAS,aAAa,IAAoC;AAC/D,eAAa;AACf;AAGO,SAAS,YAAY,MAAsB;AAChD,SAAO,WAAW,IAAI;AACxB;AAGO,SAAS,mBAAmB,OAAiC;AAClE,MAAI,QAAQ;AACZ,aAAW,QAAQ,OAAO;AACxB,QAAI;AACF,YAAM,OAAO,KAAK,UAAU,IAAI;AAChC,eAAS,YAAY,IAAI;AAAA,IAC3B,QAAQ;AACN,eAAS;AAAA,IACX;AAAA,EACF;AACA,SAAO;AACT;AAGO,SAAS,uBAAuB,UAA6B;AAClE,MAAI,QAAQ;AACZ,aAAW,OAAO,UAAU;AAC1B,aAAS,YAAY,IAAI,OAAO,IAAI;AAAA,EACtC;AACA,SAAO;AACT;;;ACzDA;AAAA,EACE,mBAAmB;AAAA,IACjB,UAAY;AAAA,IACZ,QAAU;AAAA,IACV,WAAa;AAAA,IACb,oBAAsB;AAAA,IACtB,mBAAqB;AAAA,IACrB,qBAAuB;AAAA,IACvB,cAAgB;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,IACA,cAAgB,CAAC;AAAA,IACjB,gBAAkB;AAAA,MAChB;AAAA,MACA;AAAA,IACF;AAAA,IACA,WAAa;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,YAAc;AAAA,MACZ;AAAA,MACA;AAAA,IACF;AAAA,IACA,mBAAqB;AAAA,IACrB,oBAAsB;AAAA,IACtB,oBAAsB;AAAA,IACtB,OAAS;AAAA,EACX;AAAA,EACA,qBAAqB;AAAA,IACnB,UAAY;AAAA,IACZ,QAAU;AAAA,IACV,WAAa;AAAA,IACb,oBAAsB;AAAA,IACtB,mBAAqB;AAAA,IACrB,qBAAuB;AAAA,IACvB,cAAgB;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,IACA,cAAgB,CAAC;AAAA,IACjB,gBAAkB;AAAA,MAChB;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,WAAa;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,YAAc;AAAA,MACZ;AAAA,IACF;AAAA,IACA,mBAAqB;AAAA,IACrB,oBAAsB;AAAA,IACtB,oBAAsB;AAAA,IACtB,OAAS;AAAA,EACX;AAAA,EACA,oBAAoB;AAAA,IAClB,UAAY;AAAA,IACZ,QAAU;AAAA,IACV,WAAa;AAAA,IACb,oBAAsB;AAAA,IACtB,mBAAqB;AAAA,IACrB,qBAAuB;AAAA,IACvB,cAAgB;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,IACA,cAAgB,CAAC;AAAA,IACjB,gBAAkB;AAAA,MAChB;AAAA,MACA;AAAA,IACF;AAAA,IACA,WAAa;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,YAAc;AAAA,MACZ;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,mBAAqB;AAAA,IACrB,oBAAsB;AAAA,IACtB,oBAAsB;AAAA,IACtB,OAAS;AAAA,EACX;AAAA,EACA,mBAAmB;AAAA,IACjB,UAAY;AAAA,IACZ,QAAU;AAAA,IACV,WAAa;AAAA,IACb,oBAAsB;AAAA,IACtB,mBAAqB;AAAA,IACrB,qBAAuB;AAAA,IACvB,cAAgB;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,IACA,cAAgB,CAAC;AAAA,IACjB,gBAAkB;AAAA,MAChB;AAAA,MACA;AAAA,IACF;AAAA,IACA,WAAa;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,YAAc;AAAA,MACZ;AAAA,MACA;AAAA,IACF;AAAA,IACA,mBAAqB;AAAA,IACrB,oBAAsB;AAAA,IACtB,oBAAsB;AAAA,IACtB,OAAS;AAAA,EACX;AAAA,EACA,iBAAiB;AAAA,IACf,UAAY;AAAA,IACZ,QAAU;AAAA,IACV,WAAa;AAAA,IACb,oBAAsB;AAAA,IACtB,mBAAqB;AAAA,IACrB,qBAAuB;AAAA,IACvB,cAAgB;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,IACA,cAAgB,CAAC;AAAA,IACjB,gBAAkB;AAAA,MAChB;AAAA,IACF;AAAA,IACA,WAAa;AAAA,MACX;AAAA,MACA;AAAA,IACF;AAAA,IACA,YAAc;AAAA,MACZ;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,mBAAqB;AAAA,IACrB,oBAAsB;AAAA,IACtB,oBAAsB;AAAA,IACtB,OAAS;AA
AA,EACX;AAAA,EACA,WAAW;AAAA,IACT,UAAY;AAAA,IACZ,QAAU;AAAA,IACV,WAAa;AAAA,IACb,oBAAsB;AAAA,IACtB,mBAAqB;AAAA,IACrB,qBAAuB;AAAA,IACvB,cAAgB;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,IACA,cAAgB,CAAC;AAAA,IACjB,gBAAkB;AAAA,MAChB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,WAAa;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,YAAc,CAAC;AAAA,IACf,mBAAqB;AAAA,IACrB,oBAAsB;AAAA,IACtB,oBAAsB;AAAA,IACtB,OAAS;AAAA,EACX;AAAA,EACA,gBAAgB;AAAA,IACd,UAAY;AAAA,IACZ,QAAU;AAAA,IACV,WAAa;AAAA,IACb,oBAAsB;AAAA,IACtB,mBAAqB;AAAA,IACrB,qBAAuB;AAAA,IACvB,cAAgB;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,IACA,cAAgB,CAAC;AAAA,IACjB,gBAAkB;AAAA,MAChB;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,WAAa;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,YAAc;AAAA,MACZ;AAAA,IACF;AAAA,IACA,mBAAqB;AAAA,IACrB,oBAAsB;AAAA,IACtB,oBAAsB;AAAA,IACtB,OAAS;AAAA,EACX;AAAA,EACA,gBAAgB;AAAA,IACd,UAAY;AAAA,IACZ,QAAU;AAAA,IACV,WAAa;AAAA,IACb,oBAAsB;AAAA,IACtB,mBAAqB;AAAA,IACrB,qBAAuB;AAAA,IACvB,cAAgB;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,IACA,cAAgB,CAAC;AAAA,IACjB,gBAAkB;AAAA,MAChB;AAAA,MACA;AAAA,IACF;AAAA,IACA,WAAa;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,YAAc;AAAA,MACZ;AAAA,MACA;AAAA,IACF;AAAA,IACA,mBAAqB;AAAA,IACrB,oBAAsB;AAAA,IACtB,oBAAsB;AAAA,IACtB,OAAS;AAAA,EACX;AAAA,EACA,UAAU;AAAA,IACR,UAAY;AAAA,IACZ,QAAU;AAAA,IACV,WAAa;AAAA,IACb,oBAAsB;AAAA,IACtB,mBAAqB;AAAA,IACrB,qBAAuB;AAAA,IACvB,cAAgB;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,IACA,cAAgB,CAAC;AAAA,IACjB,gBAAkB;AAAA,MAChB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,WAAa;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,YAAc;AAAA,MACZ;AAAA,MACA;AAAA,IACF;AAAA,IACA,mBAAqB;AAAA,IACrB,oBAAsB;AAAA,IACtB,oBAAsB;AAAA,IACtB,OAAS;AAAA,EACX;AAAA,EACA,eAAe;AAAA,IACb,UAAY;AAAA,IACZ,QAAU;AAAA,IACV,WAAa;AAAA,IACb,oBAAsB;AAAA,IACtB,mBAAqB;AAAA,IACrB,qBAAuB;AAAA,IACvB,cAAgB;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,IACA,cAAgB,CAAC;AAAA,IACjB,gBAAkB;AAAA,MAChB;AAAA,MACA;AAAA,IACF;AAAA,IACA,WAAa;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,YAAc;AAAA,MACZ;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,mBAAqB;AAAA,IACrB,oBAAsB;AAAA,IACtB,oBAAsB;AAAA,IACtB,OAAS;AAAA,EACX;AAAA,EACA,IAAM;AAAA,IACJ,UAAY;AAAA,IACZ,QAAU;AAAA,IACV,WAAa;AAAA,IACb,oBAAsB;AAAA,IACtB,mBAAqB;AAAA,IACrB,qBAAuB;AAAA,IACvB,cAAgB;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,IACA,cAAgB;AAAA,MACd;AAAA,IACF;AAAA,IACA,gBAAkB;AAAA,MAChB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,WAAa;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,YAAc;AAAA,MACZ;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,mBAAqB;AAAA,IACrB,oBAAsB;AAAA,IACtB,oBAAsB;AAAA,IACtB,OAAS;AAAA,EACX;AAAA,EACA,oBAAoB;AAAA,IAClB,UAAY;AAAA,IACZ,QAAU;AAAA,IACV,WAAa;AAAA,IACb,oBAAsB;AAAA,IACtB,mBAAqB;AAAA,IACrB,qBAAuB;AAAA,IACvB,cAAgB;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,IACA,cAAgB;AAAA,MACd;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,gBAAkB;AAAA,MAChB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,WAAa;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,YAAc;AAAA,MACZ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,mBAAqB;AAAA,IACrB,oBAAsB;AAAA,IACtB,oBAAsB;AAAA,IACtB,OAAS;AAAA,EACX;AAAA,EACA,kBAAkB;AAAA,IAChB,UAAY;AAAA,IACZ,QAAU;AAAA,IACV,WAAa;AAAA,IACb,oBAAsB;AAAA,IACtB,mBAAqB;AAAA,IACrB,qBAAuB;AAAA,IACvB,cAAgB;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,IACA,cAAgB;AAAA,MACd;AAAA,IACF;AAAA,IACA,gBAAkB;AAAA,MAChB;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,WAAa;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,YAAc;AAAA,MACZ;AAAA,IACF;AAAA,IA
CA,mBAAqB;AAAA,IACrB,oBAAsB;AAAA,IACtB,oBAAsB;AAAA,IACtB,OAAS;AAAA,EACX;AAAA,EACA,eAAe;AAAA,IACb,UAAY;AAAA,IACZ,QAAU;AAAA,IACV,WAAa;AAAA,IACb,oBAAsB;AAAA,IACtB,mBAAqB;AAAA,IACrB,qBAAuB;AAAA,IACvB,cAAgB;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,IACA,cAAgB;AAAA,MACd;AAAA,IACF;AAAA,IACA,gBAAkB;AAAA,MAChB;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,WAAa;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,YAAc;AAAA,MACZ;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,mBAAqB;AAAA,IACrB,oBAAsB;AAAA,IACtB,oBAAsB;AAAA,IACtB,OAAS;AAAA,EACX;AAAA,EACA,iBAAiB;AAAA,IACf,UAAY;AAAA,IACZ,QAAU;AAAA,IACV,WAAa;AAAA,IACb,oBAAsB;AAAA,IACtB,mBAAqB;AAAA,IACrB,qBAAuB;AAAA,IACvB,cAAgB;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,IACA,cAAgB;AAAA,MACd;AAAA,IACF;AAAA,IACA,gBAAkB;AAAA,MAChB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,WAAa;AAAA,MACX;AAAA,MACA;AAAA,IACF;AAAA,IACA,YAAc;AAAA,MACZ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,mBAAqB;AAAA,IACrB,oBAAsB;AAAA,IACtB,oBAAsB;AAAA,IACtB,OAAS;AAAA,EACX;AAAA,EACA,qBAAqB;AAAA,IACnB,UAAY;AAAA,IACZ,QAAU;AAAA,IACV,WAAa;AAAA,IACb,oBAAsB;AAAA,IACtB,mBAAqB;AAAA,IACrB,qBAAuB;AAAA,IACvB,cAAgB;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,IACA,cAAgB;AAAA,MACd;AAAA,IACF;AAAA,IACA,gBAAkB;AAAA,MAChB;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,WAAa;AAAA,MACX;AAAA,MACA;AAAA,IACF;AAAA,IACA,YAAc;AAAA,MACZ;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,mBAAqB;AAAA,IACrB,oBAAsB;AAAA,IACtB,oBAAsB;AAAA,IACtB,OAAS;AAAA,EACX;AAAA,EACA,iBAAiB;AAAA,IACf,UAAY;AAAA,IACZ,QAAU;AAAA,IACV,WAAa;AAAA,IACb,oBAAsB;AAAA,IACtB,mBAAqB;AAAA,IACrB,qBAAuB;AAAA,IACvB,cAAgB;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,IACA,cAAgB;AAAA,MACd;AAAA,IACF;AAAA,IACA,gBAAkB;AAAA,MAChB;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,WAAa;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,YAAc;AAAA,MACZ;AAAA,MACA;AAAA,IACF;AAAA,IACA,mBAAqB;AAAA,IACrB,oBAAsB;AAAA,IACtB,oBAAsB;AAAA,IACtB,OAAS;AAAA,EACX;AAAA,EACA,iBAAiB;AAAA,IACf,UAAY;AAAA,IACZ,QAAU;AAAA,IACV,WAAa;AAAA,IACb,oBAAsB;AAAA,IACtB,mBAAqB;AAAA,IACrB,qBAAuB;AAAA,IACvB,cAAgB;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,IACA,cAAgB;AAAA,MACd;AAAA,IACF;AAAA,IACA,gBAAkB;AAAA,MAChB;AAAA,MACA;AAAA,IACF;AAAA,IACA,WAAa;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,YAAc;AAAA,MACZ;AAAA,IACF;AAAA,IACA,mBAAqB;AAAA,IACrB,oBAAsB;AAAA,IACtB,oBAAsB;AAAA,IACtB,OAAS;AAAA,EACX;AAAA,EACA,UAAU;AAAA,IACR,UAAY;AAAA,IACZ,QAAU;AAAA,IACV,WAAa;AAAA,IACb,oBAAsB;AAAA,IACtB,mBAAqB;AAAA,IACrB,qBAAuB;AAAA,IACvB,cAAgB;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,IACA,cAAgB,CAAC;AAAA,IACjB,gBAAkB;AAAA,MAChB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,WAAa;AAAA,MACX;AAAA,MACA;AAAA,IACF;AAAA,IACA,YAAc;AAAA,MACZ;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,mBAAqB;AAAA,IACrB,oBAAsB;AAAA,IACtB,oBAAsB;AAAA,IACtB,OAAS;AAAA,EACX;AAAA,EACA,eAAe;AAAA,IACb,UAAY;AAAA,IACZ,QAAU;AAAA,IACV,WAAa;AAAA,IACb,oBAAsB;AAAA,IACtB,mBAAqB;AAAA,IACrB,qBAAuB;AAAA,IACvB,cAAgB;AAAA,MACd;AAAA,MACA;AAAA,IACF;AAAA,IACA,cAAgB,CAAC;AAAA,IACjB,gBAAkB;AAAA,MAChB;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,WAAa;AAAA,MACX;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,YAAc;AAAA,MACZ;AAAA,MACA;AAAA,MACA;AAAA,IACF;AAAA,IACA,mBAAqB;AAAA,IACrB,oBAAsB;AAAA,IACtB,oBAAsB;AAAA,IACtB,OAAS;AAAA,EACX;AACF;;;ACtpBA,IAAM,YAA0C,CAAC;AAGjD,WAAW,CAAC,IAAI,GAAG,KAAK,OAAO,QAAQ,gBAAsD,GAAG;AAC9F,YAAU,EAAE,IAAI;AAAA,IACd;AAAA,IACA,UAAU,IAAI;AAAA,IACd,QAAQ,IAAI;AAAA,IACZ,WAAW,IAAI;AAAA,IACf,oBAAoB,IAAI;AAAA,IACxB,mBAAmB,IAAI;AAAA,IACvB,qBAAqB,IAAI;AAAA,IACzB,cAAe,IAAI;AAAA,IACnB,cAAe,IAAI,gBAA6B,CAAC;AAAA,IACjD,gBAAiB,IAAI,kBAA+B,CAAC;AAAA,IACrD,WAAY,IAAI,aAA0B,CAAC;AAAA,IAC3C,YAAa,IA
AI,cAA2B,CAAC;AAAA,IAC7C,mBAAmB,IAAI;AAAA,IACvB,oBAAoB,IAAI;AAAA,IACxB,oBAAqB,IAAI,sBAAiC;AAAA,IAC1D,OAAQ,IAAI,SAAoB;AAAA,EAClC;AACF;AAGO,IAAM,WAAmD;AAGzD,SAAS,WAAW,SAA+B;AACxD,QAAM,IAAI,UAAU,OAAO;AAC3B,MAAI,CAAC,GAAG;AACN,UAAM,YAAY,OAAO,KAAK,SAAS,EAAE,KAAK,EAAE,KAAK,IAAI;AACzD,UAAM,IAAI,MAAM,kBAAkB,OAAO,iBAAiB,SAAS,EAAE;AAAA,EACvE;AACA,SAAO;AACT;AAGO,SAAS,qBAAmD;AACjE,SAAO,OAAO;AAAA,IACZ,OAAO,QAAQ,SAAS,EAAE,OAAO,CAAC,CAAC,EAAE,CAAC,MAAM,EAAE,WAAW,SAAS;AAAA,EACpE;AACF;AAGO,SAAS,sBAAsB,UAAgD;AACpF,SAAO,OAAO;AAAA,IACZ,OAAO,QAAQ,SAAS,EAAE,OAAO,CAAC,CAAC,EAAE,CAAC,MAAM,EAAE,aAAa,QAAQ;AAAA,EACrE;AACF;;;AC9CO,IAAM,eAAuC;AAAA,EAClD,kBAAkB;AAAA,EAClB,oBAAoB;AAAA,EACpB,YAAY;AAAA,EACZ,kBAAkB;AAAA,EAClB,gBAAgB;AAClB;AAGO,SAAS,oBAAoB,OAAuC;AACzE,QAAM,WAAyB,CAAC;AAEhC,aAAW,QAAQ,OAAO;AACxB,UAAM,SAAS,aAAa,KAAK,IAAI;AACrC,QAAI,QAAQ;AACV,eAAS,KAAK;AAAA,QACZ,MAAM,KAAK;AAAA,QACX,cAAc;AAAA,QACd,gBAAgB;AAAA,QAChB;AAAA,MACF,CAAC;AAAA,IACH;AAAA,EACF;AAEA,SAAO;AACT;AAGO,SAAS,0BAA0B,aAAiC;AACzE,MAAI,CAAC,YAAY,OAAQ,QAAO,CAAC;AAEjC,QAAM,SAAiC,CAAC;AACxC,aAAW,QAAQ,aAAa;AAC9B,WAAO,IAAI,KAAK,OAAO,IAAI,KAAK,KAAK;AAAA,EACvC;AAEA,QAAM,WAAqB,CAAC;AAC5B,aAAW,CAAC,MAAM,KAAK,KAAK,OAAO,QAAQ,MAAM,GAAG;AAClD,QAAI,QAAQ,gBAAgB,QAAQ,GAAG;AACrC,eAAS,KAAK,GAAG,IAAI,WAAW,KAAK,wBAAwB,aAAa,IAAI,CAAC,EAAE;AAAA,IACnF;AAAA,EACF;AAEA,SAAO;AACT;;;AChCA,IAAM,wBAAwB;AAG9B,IAAM,oBAAoB;AAM1B,SAAS,YACP,OACA,aACA,SACA,aACkB;AAClB,MAAI,CAAC,MAAM,OAAQ,QAAO;AAE1B,QAAM,WAAW,QAAQ;AACzB,MAAI;AAEJ,MAAI,YAAY,gBAAgB;AAE9B,UAAM,SAAS,MAAM,IAAI,QAAM;AAAA,MAC7B,OAAO,YAAY,eAAgB,EAAE,IAAI,KAAK;AAAA,MAC9C,MAAM;AAAA,IACR,EAAE;AACF,WAAO,KAAK,CAAC,GAAG,MAAM,EAAE,QAAQ,EAAE,KAAK;AACvC,iBAAa,OAAO,IAAI,OAAK,EAAE,IAAI;AAAA,EACrC,OAAO;AAEL,UAAM,OAAO,MAAM,IAAI,OAAK,EAAE,IAAI,EAAE,KAAK,GAAG;AAC5C,iBAAa,CAAC,GAAG,KAAK;AAEtB,QAAI,OAAO;AACX,aAAS,IAAI,GAAG,IAAI,KAAK,QAAQ,KAAK;AACpC,cAAS,QAAQ,KAAK,OAAO,KAAK,WAAW,CAAC,IAAK;AAAA,IACrD;AACA,aAAS,IAAI,WAAW,SAAS,GAAG,IAAI,GAAG,KAAK;AAC9C,cAAS,QAAQ,KAAK,OAAO,IAAK;AAClC,YAAM,IAAI,KAAK,IAAI,IAAI,KAAK,IAAI;AAChC,OAAC,WAAW,CAAC,GAAG,WAAW,CAAC,CAAC,IAAI,CAAC,WAAW,CAAC,GAAG,WAAW,CAAC,CAAC;AAAA,IAChE;AAAA,EACF;AAGA,QAAM,WAA6B,CAAC;AACpC,MAAI,aAAa;AAEjB,aAAW,QAAQ,YAAY;AAC7B,QAAI,SAAS,UAAU,SAAU;AAEjC,QAAI;AACJ,QAAI;AACF,mBAAa,YAAY,KAAK,UAAU,IAAI,CAAC;AAAA,IAC/C,QAAQ;AACN,mBAAa;AAAA,IACf;AAEA,QAAI,aAAa,aAAa,YAAa;AAC3C,aAAS,KAAK,IAAI;AAClB,kBAAc;AAAA,EAChB;AAEA,SAAO;AACT;AAEA,SAAS,iBAAiB,UAAqB,aAAgC;AAC7E,MAAI,CAAC,SAAS,OAAQ,QAAO;AAG7B,QAAM,OAAO,SAAS,SAAS,SAAS,CAAC;AACzC,QAAM,aAAa,YAAY,KAAK,OAAO,IAAI;AAE/C,MAAI,cAAc,aAAa;AAE7B,UAAM,eAAe,YAAY,iBAAiB;AAClD,UAAM,mBAAmB,cAAc,IAAI;AAC3C,QAAI,mBAAmB,GAAG;AAExB,YAAM,WAAW,mBAAmB;AACpC,YAAM,YAAY,KAAK,QAAQ,MAAM,GAAG,QAAQ,IAAI;AACpD,aAAO,CAAC,EAAE,MAAM,KAAK,MAAM,SAAS,UAAU,CAAC;AAAA,IACjD;AACA,WAAO,CAAC,IAAI;AAAA,EACd;AAGA,QAAM,SAAoB,CAAC;AAC3B,MAAI,YAAY;AAEhB,WAAS,IAAI,SAAS,SAAS,GAAG,KAAK,GAAG,KAAK;AAC7C,UAAM,YAAY,YAAY,SAAS,CAAC,EAAE,OAAO,IAAI;AACrD,QAAI,YAAY,YAAY,KAAK,OAAO,SAAS,EAAG;AACpD,WAAO,QAAQ,SAAS,CAAC,CAAC;AAC1B,iBAAa;AAAA,EACf;AAEA,MAAI,OAAO,SAAS,SAAS,QAAQ;AACnC,WAAO,CAAC,IAAI;AAAA,MACV,MAAM,OAAO,CAAC,EAAE;AAAA,MAChB,SAAS,oBAAoB,OAAO,OAAO,CAAC,EAAE;AAAA,IAChD;AAAA,EACF;AAEA,SAAO;AACT;AAEA,SAAS,aACP,cACA,UACA,OACA,SACA,aAC0F;AAC1F,QAAM,SAAS,KAAK,MAAM,QAAQ,qBAAqB,qBAAqB;AAC5E,QAAM,eAAe,YAAY,YAAY;AAE7C,MAAI,eAAe,QAAQ;AACzB,UAAM,IAAI;AAAA,MACR,kBAAkB,YAAY,oBAAoB,QAAQ,EAAE,sBACxD,MAAM,cAAc,KAAK,MAAM,wBAAwB,GAAG,CAAC,QAAQ,QAAQ,kBAAkB;AAAA,IAEnG;AAAA,EACF;AAEA,QAAM,aAAa,mBAAmB,KAAK;AAC3C,QAAM,gBAAgB,uBAAuB,QAAQ;AACrD,QAAM,QAAQ,eAAe,aAAa;AAG1C,MAAI,SAAS,QAAQ;AACnB,WAAO,EAAE,eAAe,OAAO,iBAAiB,UAAU,iBAAiB,MAA
M;AAAA,EACnF;AAGA,MAAI,WAAW;AACf,MAAI,qBAAqB;AACzB,QAAM,YAAY,SAAS;AAE3B,MAAI,aAAa,KAAM,aAAa,eAAgB,YAAY,KAAK;AACnE,eAAW,YAAY,OAAO,KAAK,MAAM,YAAY,CAAC,GAAG,SAAS,WAAW;AAC7E,yBAAqB,mBAAmB,QAAQ;AAAA,EAClD;AAEA,QAAM,uBAAuB,SAAS,eAAe;AAGrD,MAAI,UAAU;AACd,MAAI,uBAAuB;AAC3B,MAAI,gBAAgB,sBAAsB;AACxC,cAAU,iBAAiB,UAAU,oBAAoB;AACzD,2BAAuB,uBAAuB,OAAO;AAAA,EACvD;AAEA,SAAO;AAAA,IACL,eAAe;AAAA,IACf,iBAAiB;AAAA,IACjB,iBAAiB,eAAe,qBAAqB;AAAA,EACvD;AACF;AAMA,IAAM,UACJ;AAIF,IAAM,kBACJ;AAGF,IAAM,sBAAsB;AAE5B,SAAS,iBACP,cACA,SACA,aACmD;AACnD,QAAM,eAAyB,CAAC;AAChC,MAAI,SAAS;AAEb,aAAW,QAAQ,QAAQ,cAAc;AACvC,YAAQ,MAAM;AAAA,MACZ,KAAK;AACH,kBAAU;AACV,qBAAa,KAAK,iBAAiB;AACnC;AAAA,MAEF,KAAK;AACH,YAAI,YAAY,kBAAkB;AAChC,oBAAU;AAAA;AAAA,yBAA8B,YAAY,gBAAgB;AACpE,uBAAa,KAAK,mBAAmB,YAAY,gBAAgB,GAAG;AAAA,QACtE;AACA;AAAA,MAEF,KAAK;AACH,kBAAU;AACV,qBAAa,KAAK,iBAAiB;AACnC;AAAA,MAEF,KAAK;AACH,kBAAU;AACV,qBAAa,KAAK,qBAAqB;AACvC;AAAA,MAEF,KAAK;AACH,qBAAa,KAAK,iBAAiB;AACnC;AAAA,MAEF,KAAK;AACH,qBAAa,KAAK,mCAAmC;AACrD;AAAA,MAEF,KAAK;AACH,kBAAU;AAEV,qBAAa,KAAK,0BAA0B;AAC5C;AAAA,MAEF,KAAK;AACH,qBAAa,KAAK,0BAA0B;AAC5C;AAAA,MAEF;AAEE;AAAA,IACJ;AAAA,EACF;AAEA,SAAO,EAAE,eAAe,QAAQ,aAAa;AAC/C;AAMA,SAAS,qBAAqB,SAAuB,aAAoE;AACvH,MAAI,CAAC,YAAY,iBAAkB,QAAO;AAC1C,MAAI,QAAQ,aAAa,SAAS,gBAAgB,EAAG,QAAO;AAC5D,SAAO;AACT;AAOA,SAAS,OAAe;AACtB,SAAO,uCAAuC,QAAQ,SAAS,OAAK;AAClE,UAAM,IAAK,KAAK,OAAO,IAAI,KAAM;AACjC,UAAM,IAAI,MAAM,MAAM,IAAK,IAAI,IAAO;AACtC,WAAO,EAAE,SAAS,EAAE;AAAA,EACtB,CAAC;AACH;AAUO,SAAS,QAAQ,OAAoC;AAC1D,QAAM,UAAU,WAAW,MAAM,KAAK;AACtC,QAAM,cAAkC,MAAM,eAAe,CAAC;AAC9D,QAAM,QAAQ,MAAM,SAAS,CAAC;AAC9B,QAAM,oBAAoB,MAAM;AAGhC,QAAM,EAAE,eAAe,iBAAiB,iBAAiB,WAAW,IAAI;AAAA,IACtE,MAAM;AAAA,IAAc,MAAM;AAAA,IAAU;AAAA,IAAO;AAAA,IAAS;AAAA,EACtD;AAGA,QAAM,EAAE,eAAe,aAAa,IAAI;AAAA,IACtC,MAAM;AAAA,IAAc;AAAA,IAAS;AAAA,EAC/B;AAGA,MAAI,kBAAkB;AACtB,MAAI,aAAa,SAAS,GAAG;AAC3B,UAAM,cAAc,YAAY,aAAa,IAAI,YAAY,MAAM,YAAY;AAC/E,uBAAmB;AAAA,EACrB;AAGA,QAAM,iBAAiB,qBAAqB,SAAS,WAAW;AAGhE,QAAM,eAAe,oBAAoB,aAAa;AAEtD,SAAO;AAAA,IACL,WAAW,KAAK;AAAA,IAChB,OAAO,QAAQ;AAAA,IACf,UAAU,QAAQ;AAAA,IAClB,cAAc;AAAA,IACd,UAAU;AAAA,IACV,OAAO;AAAA,IACP;AAAA,IACA,oBAAoB;AAAA,IACpB;AAAA,IACA,eAAe,QAAQ;AAAA,IACvB,oBAAoB;AAAA,IACpB,oBAAoB,cAAc;AAAA,IAClC,cAAc,aAAa,SAAS,IAAI,eAAe;AAAA,EACzD;AACF;;;AC5TA,SAAS,sBAAsB,UAAkB,WAAuD;AACtG,QAAM,QAAQ,WAAW;AACzB,MAAI,UAAU,EAAG,QAAO;AAExB,QAAM,QAAQ,WAAW;AACzB,MAAI,QAAQ,KAAM,QAAO;AACzB,MAAI,QAAQ,KAAM,QAAO;AACzB,SAAO;AACT;AAGA,SAAS,eAAe,OAA0B;AAChD,QAAM,eAAe,MAAM,gBAAgB;AAC3C,QAAM,YAAY,MAAM,aAAa;AACrC,QAAM,gBAAgB,MAAM,iBAAiB;AAG7C,MAAI,eAAe,KAAK,cAAc,KAAK,CAAC,cAAe,QAAO;AAGlE,MAAI,eAAe,KAAK,MAAM,eAAe,MAAM,YAAY,WAAW,KAAK,CAAC,cAAe,QAAO;AAEtG,SAAO;AACT;AAGO,SAAS,mBAAmB,OAA4B;AAC7D,QAAM,QAAQ,MAAM,WAAW,MAAM;AACrC,QAAM,aAAa,QAAQ,IAAI,KAAK,MAAO,MAAM,WAAW,QAAS,GAAK,IAAI,MAAQ;AACtF,QAAM,iBAAiB,sBAAsB,MAAM,UAAU,MAAM,SAAS;AAC5E,QAAM,WAAW,eAAe,KAAK;AACrC,QAAM,sBAAsB,MAAM,cAC9B,0BAA0B,MAAM,WAAW,IAC3C,CAAC;AAEL,SAAO;AAAA,IACL,QAAQ;AAAA,IACR,WAAW,MAAM;AAAA,IACjB;AAAA,IACA;AAAA,IACA;AAAA,IACA,qBAAqB,oBAAoB,SAAS,IAAI,sBAAsB;AAAA,EAC9E;AACF;AAQO,IAAM,qBAAN,MAAmD;AAAA,EACxD,MAAM,OAAiB,QAAyB;AAC9C,UAAM,QAAkB,CAAC,OAAO,kBAAkB,SAAS;AAC3D,QAAI,OAAO,SAAU,OAAM,KAAK,WAAW;AAC3C,QAAI,OAAO,oBAAqB,OAAM,KAAK,YAAY,OAAO,oBAAoB,MAAM,GAAG;AAE3F,YAAQ;AAAA,MACN,aAAa,MAAM,KAAK,MACrB,MAAM,QAAQ,SAAI,MAAM,SAAS,aACjC,MAAM,SAAS,QACf,MAAM,UAAU,OAAO,MAAM,MAC7B,MAAM,KAAK,IAAI,CAAC;AAAA,IACrB;AAAA,EACF;AACF;AAEO,IAAM,kBAAN,MAAgD;AAAA,EACrD,YAAoB,KAAa;AAAb;AAAA,EAAc;AAAA,EAElC,MAAM,OAAiB,SAA0B;AAE/C,UAAM,KAAK,KAAK;AAAA,MACd,QAAQ;AAAA,MACR,SAAS,EAAE,gBAAgB,mBAAmB;AAAA,MAC9C,MAAM,KAAK,UAAU;AAAA,QACnB,YAAY,MAA
M;AAAA,QAClB,OAAO,MAAM;AAAA,QACb,UAAU,MAAM;AAAA,QAChB,SAAS,MAAM,WAAW;AAAA,QAC1B,QAAQ,MAAM;AAAA,QACd,eAAe,MAAM,gBAAgB;AAAA,QACrC,gBAAgB,MAAM,iBAAiB;AAAA,QACvC,YAAY,MAAM,aAAa;AAAA,QAC/B,WAAW,MAAM;AAAA,QACjB,YAAY,MAAM;AAAA,QAClB,YAAY,MAAM;AAAA,QAClB,SAAS,MAAM;AAAA,QACf,gBAAgB,MAAM,iBAAiB;AAAA,QACvC,YAAY,MAAM;AAAA,QAClB,uBAAuB,MAAM;AAAA,QAC7B,MAAM,MAAM;AAAA,QACZ,cAAc,MAAM;AAAA,MACtB,CAAC;AAAA,IACH,CAAC,EAAE,MAAM,MAAM;AAAA,IAEf,CAAC;AAAA,EACH;AACF;AAEO,IAAM,oBAAN,MAAkD;AAAA,EACvD,QAAc;AAAA,EAEd;AACF;AAQO,SAAS,IAAI,OAAiB,aAAwC;AAC3E,QAAM,SAAS,mBAAmB,KAAK;AACvC,cAAY,MAAM,OAAO,MAAM;AAC/B,SAAO;AACT;","names":[]}
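The bundled sources above expose a three-stage request adapter (prepare) and an outcome logger (log). The sketch below shows how a consumer would wire them together; it assumes the package entry point re-exports prepare, log, and ConsoleDestination (the index module itself is not included in the sourcemap), and the tool definition and token counts are purely illustrative.

// Minimal usage sketch. Assumes the package entry point re-exports these
// symbols from the adapter and logger modules shown in the sourcemap above.
import { prepare, log, ConsoleDestination } from '@warmdrift/kgauto';

// Stages 1-3: budget tokens, rewrite the prompt, pick an output strategy.
const prepared = prepare({
  model: 'claude-sonnet-4-6',
  systemPrompt: 'You are a concise analytics assistant.',
  messages: [{ role: 'user', content: 'Summarise the latest hunt results.' }],
  tools: [
    {
      name: 'update_dashboard',
      description: 'Apply a filter to the dashboard',
      parameters: { type: 'object', properties: { filter: { type: 'string' } } },
    },
  ],
  constraints: { structuredOutput: true, maxResponseWords: 150 },
});
// prepared.systemPrompt / prepared.tools / prepared.messages are what gets sent
// to the provider; prepared.toolPolicies flags serial-only tools such as
// update_dashboard (parallelSafe: false, maxPerResponse: 1).

// After the provider call, log the outcome to get instant diagnostics.
const diagnostics = log(
  {
    requestId: prepared.requestId,
    model: prepared.model,
    provider: prepared.provider,
    tokensIn: 1200,   // illustrative values; substitute the real usage numbers
    tokensOut: 80,
    latencyMs: 950,
    success: true,
    toolsOffered: prepared.toolsSelectedCount,
    toolsUsed: 1,
    toolsCalled: ['update_dashboard'],
  },
  new ConsoleDestination(),
);
// diagnostics.efficiencyFlag ('healthy' | 'warning' | 'critical'),
// diagnostics.textOnly, diagnostics.parallelismWarnings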
package/package.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "name": "@warmdrift/kgauto",
+   "version": "1.0.0",
+   "description": "AI model intelligence router — adapt every request for the target model",
+   "main": "./dist/index.js",
+   "module": "./dist/index.mjs",
+   "types": "./dist/index.d.ts",
+   "exports": {
+     ".": {
+       "types": "./dist/index.d.ts",
+       "import": "./dist/index.mjs",
+       "require": "./dist/index.js"
+     }
+   },
+   "files": [
+     "dist",
+     "profiles.json"
+   ],
+   "scripts": {
+     "build": "tsup",
+     "test": "vitest run",
+     "test:watch": "vitest",
+     "prepublishOnly": "npm run build && npm run test"
+   },
+   "keywords": ["ai", "llm", "model-router", "token-budget", "prompt-adaptation"],
+   "license": "MIT",
+   "dependencies": {},
+   "peerDependencies": {
+     "js-tiktoken": ">=1.0.0"
+   },
+   "peerDependenciesMeta": {
+     "js-tiktoken": {
+       "optional": true
+     }
+   },
+   "devDependencies": {
+     "tsup": "^8.4.0",
+     "typescript": "^5.7.0",
+     "vitest": "^3.1.0"
+   }
+ }
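js-tiktoken is listed as an optional peer dependency: by default the tokenizer module estimates tokens with a chars/3.5 heuristic and lets callers inject a precise counter. A sketch of that override follows, mirroring the setTokenizer docstring in the bundled source and again assuming the symbol is re-exported from the package root.

// Optional: precise token counting via the js-tiktoken peer dependency.
// Mirrors the example in the tokenizer module's docstring; assumes setTokenizer
// is re-exported by the package entry point.
import { encodingForModel } from 'js-tiktoken';
import { setTokenizer } from '@warmdrift/kgauto';

const enc = encodingForModel('gpt-4o');
setTokenizer((text) => enc.encode(text).length);

Without the override, the adapter's 10% context safety margin is meant to absorb the heuristic's estimation error, per the comments in tokenizer.ts.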